diff --git a/.github/workflows/c-cpp.yml b/.github/workflows/c-cpp.yml index ec671246bd..9faf23538d 100644 --- a/.github/workflows/c-cpp.yml +++ b/.github/workflows/c-cpp.yml @@ -9,29 +9,35 @@ on: jobs: debug_builds: runs-on: ubuntu-latest + env: + FCFLAGS: -O0 -g + CXXFLAGS: -O0 -g + NVCCFLAGS: -O0 -g -G strategy: matrix: - folder: [ epochX/cudacpp/ee_mumu.sa/SubProcesses/P1_Sigma_sm_epem_mupmum , epoch1/cuda/ee_mumu/SubProcesses/P1_Sigma_sm_epem_mupmum , epoch2/cuda/ee_mumu/SubProcesses/P1_Sigma_sm_epem_mupmum ] + folder: [ epochX/cudacpp/ee_mumu.sa/SubProcesses/P1_Sigma_sm_epem_mupmum , epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx , epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg ] fail-fast: false steps: - uses: actions/checkout@v2 - name: make debug - run: make -C ${{ matrix.folder }} debug + run: make -C ${{ matrix.folder }} + - name: make check + run: make -C ${{ matrix.folder }} check CPU: runs-on: ubuntu-latest strategy: matrix: - folder: [ epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum , epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg ] + folder: [ epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum , epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg , epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx ] precision: [ d , f , m ] fail-fast: false steps: - uses: actions/checkout@v2 - name: make info - run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} -f cudacpp.mk info + run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} info - name: make run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} - name: make check - run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} -f cudacpp.mk check + run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} check CPU_MAC: runs-on: macos-latest env: @@ -44,11 +50,11 @@ jobs: steps: - uses: actions/checkout@v2 - name: make info - run: make AVX=none OMPFLAGS= FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} -f cudacpp.mk info + 
run: make OMPFLAGS= FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} info - name: make - run: make AVX=none OMPFLAGS= FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} + run: make OMPFLAGS= FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} cppnative - name: make check - run: make AVX=none OMPFLAGS= FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} -f cudacpp.mk check + run: make OMPFLAGS= FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} check GPU: runs-on: self-hosted env: @@ -57,7 +63,7 @@ jobs: REQUIRE_CUDA: 1 strategy: matrix: - folder: [ epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum , epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg ] + folder: [ epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum , epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg , epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx , epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg ] precision: [ d , f , m ] fail-fast: false steps: @@ -65,8 +71,8 @@ jobs: - name: path run: echo "PATH=$PATH" - name: make info - run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} -f cudacpp.mk info - - name: make - run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} - - name: make check - run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} -f cudacpp.mk check + run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} info + - name: make cuda + run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} cuda + - name: make gcheck + run: make FPTYPE=${{ matrix.precision }} -C ${{ matrix.folder }} gcheck diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/counters.cc b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/counters.cc +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/counters.cc @@ -32,6 +32,8 @@ extern "C" case 
+0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 index fe883a6b25..758271b077 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 @@ -1,5 +1,5 @@ diff --git b/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/auto_dsig1.f a/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/auto_dsig1.f -index 880769442..5a3da931f 100644 +index 0a3dfa449..9346ee4c6 100644 --- b/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/auto_dsig1.f +++ a/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/auto_dsig1.f @@ -484,23 +484,140 @@ C @@ -284,7 +284,7 @@ index 71fbf2b25..0f1d199fc 100644 open(unit=lun,file=tempname,status='old',ERR=20) fopened=.true. diff --git b/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/matrix1.f a/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/matrix1.f -index 3ac962688..daea73a6d 100644 +index 817af778b..0c2ce6ec4 100644 --- b/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/matrix1.f +++ a/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/matrix1.f @@ -72,7 +72,10 @@ C diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common index e19a7d2054..815e113d98 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common @@ -12,93 +12,531 @@ index a59181c70..af7e0efbc 100644 INTEGER MAXTRIES PARAMETER(MAXTRIES=25) C To pass the helicity configuration chosen by the DiscreteSampler to +diff --git b/epochX/cudacpp/gg_tt.mad/Source/make_opts a/epochX/cudacpp/gg_tt.mad/Source/make_opts +index 
d7dafe3d6..435bed0dc 100644 +--- b/epochX/cudacpp/gg_tt.mad/Source/make_opts ++++ a/epochX/cudacpp/gg_tt.mad/Source/make_opts +@@ -1,6 +1,7 @@ + DEFAULT_CPP_COMPILER=g++ + DEFAULT_F2PY_COMPILER=f2py3 + DEFAULT_F_COMPILER=gfortran ++GLOBAL_FLAG=-O3 -ffast-math + MACFLAG= + MG5AMC_VERSION=SpecifiedByMG5aMCAtRunTime + PYTHIA8_PATH=NotInstalled +@@ -12,31 +13,53 @@ BIASLIBDIR=../../../lib/ + BIASLIBRARY=libbias.$(libext) + + # Rest of the makefile +-ifeq ($(origin FFLAGS),undefined) +-FFLAGS= -w -fPIC +-#FFLAGS+= -g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none +-endif + +-FFLAGS += $(GLOBAL_FLAG) ++#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) ++ ++# Detect O/S kernel (Linux, Darwin...) ++UNAME_S := $(shell uname -s) ++ ++# Detect architecture (x86_64, ppc64le...) ++UNAME_P := $(shell uname -p) ++ ++#------------------------------------------------------------------------------- + + # REMOVE MACFLAG IF NOT ON MAC OR FOR F2PY +-UNAME := $(shell uname -s) + ifdef f2pymode + MACFLAG= + else +-ifneq ($(UNAME), Darwin) ++ifneq ($(UNAME_S), Darwin) + MACFLAG= + endif + endif + ++############################################################ ++# Default compiler flags ++# To change optimisation level, override these as follows: ++# make CXXFLAGS="-O0 -g" ++# or export them as environment variables ++# For debugging Fortran, one could e.g. 
use: ++# FCFLAGS="-g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none" ++############################################################ ++FCFLAGS ?= $(GLOBAL_FLAG) -fbounds-check ++CXXFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG ++NVCCFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG -use_fast_math -lineinfo ++LDFLAGS ?= $(STDLIB) + +-ifeq ($(origin CXXFLAGS),undefined) +-CXXFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) ++ifneq ($(FFLAGS),) ++# Madgraph used to use FFLAGS, so the user probably tries to change the flags specifically for madgraph: ++FCFLAGS = $(FFLAGS) + endif + +-ifeq ($(origin CFLAGS),undefined) +-CFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) ++# Madgraph-specific flags: ++WARNFLAGS = -Wall -Wshadow -Wextra ++ifeq (,$(findstring -std=,$(CXXFLAGS))) ++CXXSTANDARD= -std=c++17 + endif ++MG_FCFLAGS += -fPIC -w ++MG_CXXFLAGS += -fPIC $(CXXSTANDARD) $(WARNFLAGS) $(MACFLAG) ++MG_NVCCFLAGS += -fPIC $(CXXSTANDARD) --forward-unknown-to-host-compiler $(WARNFLAGS) ++MG_LDFLAGS += $(MACFLAG) + + # Set FC unless it's defined by an environment variable + ifeq ($(origin FC),default) +@@ -48,45 +71,40 @@ endif + + # Increase the number of allowed charcters in a Fortran line + ifeq ($(FC), ftn) +-FFLAGS+= -extend-source # for ifort type of compiler ++MG_FCFLAGS += -extend-source # for ifort type of compiler + else + VERS="$(shell $(FC) --version | grep ifort -i)" + ifeq ($(VERS), "") +-FFLAGS+= -ffixed-line-length-132 ++MG_FCFLAGS += -ffixed-line-length-132 + else +-FFLAGS+= -extend-source # for ifort type of compiler ++MG_FCFLAGS += -extend-source # for ifort type of compiler + endif + endif + + +-UNAME := $(shell uname -s) +-ifeq ($(origin LDFLAGS), undefined) +-LDFLAGS=$(STDLIB) $(MACFLAG) +-endif +- + # Options: dynamic, lhapdf + # Option dynamic + +-ifeq ($(UNAME), Darwin) ++ifeq ($(UNAME_S), Darwin) + dylibext=dylib + else + dylibext=so + endif + + ifdef dynamic +-ifeq ($(UNAME), Darwin) ++ifeq ($(UNAME_S), Darwin) + libext=dylib +-FFLAGS+= -fno-common +-LDFLAGS += 
-bundle ++MG_FCFLAGS += -fno-common ++MG_LDFLAGS += -bundle + define CREATELIB + $(FC) -dynamiclib -undefined dynamic_lookup -o $(1) $(2) + endef + else + libext=so +-FFLAGS+= -fPIC +-LDFLAGS += -shared ++MG_FCFLAGS += -fPIC ++MG_LDFLAGS += -shared + define CREATELIB +-$(FC) $(FFLAGS) $(LDFLAGS) -o $(1) $(2) ++$(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MG_LDFLAGS) $(LDFLAGS) -o $(1) $(2) + endef + endif + else +@@ -100,17 +118,9 @@ endif + # Option lhapdf + + ifneq ($(lhapdf),) +-CXXFLAGS += $(shell $(lhapdf) --cppflags) ++MG_CXXFLAGS += $(shell $(lhapdf) --cppflags) + alfas_functions=alfas_functions_lhapdf + llhapdf+= $(shell $(lhapdf) --cflags --libs) -lLHAPDF +-# check if we need to activate c++11 (for lhapdf6.2) +-ifeq ($(origin CXX),default) +-ifeq ($lhapdfversion$lhapdfsubversion,62) +-CXX=$(DEFAULT_CPP_COMPILER) -std=c++11 +-else +-CXX=$(DEFAULT_CPP_COMPILER) +-endif +-endif + else + alfas_functions=alfas_functions + llhapdf= +@@ -119,4 +129,207 @@ endif + # Helper function to check MG5 version + define CHECK_MG5AMC_VERSION + python -c 'import re; from distutils.version import StrictVersion; print StrictVersion("$(MG5AMC_VERSION)") >= StrictVersion("$(1)") if re.match("^[\d\.]+$$","$(MG5AMC_VERSION)") else True;' +-endef +\ No newline at end of file ++endef ++ ++#------------------------------------------------------------------------------- ++ ++# Set special cases for non-gcc/clang builds ++# AVX below gets overridden from outside in architecture-specific builds ++AVX ?= none ++# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] ++# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] ++$(info AVX=$(AVX)) ++ifeq ($(UNAME_P),arm) ++ ifeq ($(AVX),sse4) ++ override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) ++ endif ++else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 ++ ifeq ($(AVX),none) ++ override AVXFLAGS = -mno-sse3 # no SIMD ++ else ifeq ($(AVX),sse4) ++ override 
AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) ++ else ifeq ($(AVX),avx2) ++ override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] ++ else ifeq ($(AVX),512y) ++ override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] ++ else ifeq ($(AVX),512z) ++ override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) ++ else ++ $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) ++ endif ++endif ++ ++# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? ++MG_CXXFLAGS+= $(AVXFLAGS) ++ ++#------------------------------------------------------------------------------- ++ ++#=== Configure the CUDA compiler if available ++ ++# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) ++# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below ++ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside ++ $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") ++ override CUDA_HOME=disabled ++endif ++ ++# If CUDA_HOME is not set, try to set it from the location of nvcc ++ifndef CUDA_HOME ++ CUDA_HOME = $(patsubst %bin/nvcc,%,$(shell which nvcc 2>/dev/null)) ++ $(info CUDA_HOME="$(CUDA_HOME)") ++endif ++ ++# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists ++ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) ++ NVCC = $(CUDA_HOME)/bin/nvcc ++ USE_NVTX ?=-DUSE_NVTX ++ # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html ++ # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ ++ # Default: use compute capability 70 (Volta architecture), and embed PTX to support later architectures, too. ++ # Set MADGRAPH_CUDA_ARCHITECTURE to the desired value to change the default. 
++ # Build for multiple architectures using a space-separated list, e.g. MADGRAPH_CUDA_ARCHITECTURE="70 80" ++ MADGRAPH_CUDA_ARCHITECTURE ?= 70 ++ # Generate PTX for the first architecture: ++ CUARCHFLAGS := --generate-code arch=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)),code=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)) ++ # Generate device code for all architectures: ++ CUARCHFLAGS += $(foreach arch,$(MADGRAPH_CUDA_ARCHITECTURE), --generate-code arch=compute_$(arch),code=sm_$(arch)) ++ ++ CUINC = -I$(CUDA_HOME)/include/ ++ CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! ++ MG_LDFLAGS += $(CURANDLIBFLAGS) ++ MG_NVCCFLAGS += $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) ++ ++else ifeq ($(AVX),cuda) ++ $(error nvcc is not visible in PATH. Either add it to PATH or export CUDA_HOME to compile with cuda) ++ ifeq ($(AVX),cuda) ++ $(error Cannot compile for cuda without NVCC) ++ endif ++endif ++ ++# Set the host C++ compiler for nvcc via "-ccbin " ++# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." 
is not supported) ++MG_NVCCFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) ++ ++# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) ++ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) ++MG_NVCCFLAGS += -allow-unsupported-compiler ++endif ++ ++#------------------------------------------------------------------------------- ++ ++#=== Configure ccache for C++ and CUDA builds ++ ++# Enable ccache if USECCACHE=1 ++ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) ++ override CXX:=ccache $(CXX) ++endif ++ ++ifneq ($(NVCC),) ++ ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) ++ override NVCC:=ccache $(NVCC) ++ endif ++endif ++ ++#------------------------------------------------------------------------------- ++ ++#=== Configure PowerPC-specific compiler flags for C++ and CUDA ++ ++# PowerPC-specific CXX / CUDA compiler flags (being reviewed) ++ifeq ($(UNAME_P),ppc64le) ++ MG_CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 ++ MG_NVCCFLAGS+= -Xcompiler -mno-float128 ++ ++ ifeq ($(AVX),sse4) ++ override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) ++ endif ++endif ++ ++#------------------------------------------------------------------------------- ++#=== Apple-specific compiler/linker options ++ ++# Add -std=c++17 explicitly to avoid build errors on macOS ++# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" ++ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) ++MG_CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 ++endif ++ ++ifeq ($(UNAME_S),Darwin) ++STDLIB = -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) ++MG_LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" ++else ++MG_LDFLAGS += -Xlinker --no-relax # 
avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) ++endif ++ ++#------------------------------------------------------------------------------- ++ ++#=== C++/CUDA-specific flags for floating-point types and random generators to use ++ ++# Set the default FPTYPE (floating point type) choice ++FPTYPE ?= m ++ ++# Set the default HELINL (inline helicities?) choice ++HELINL ?= 0 ++ ++# Set the default HRDCOD (hardcode cIPD physics parameters?) choice ++HRDCOD ?= 0 ++ ++# Set the default RNDGEN (random number generator) choice ++ifeq ($(NVCC),) ++ RNDGEN ?= hasNoCurand ++else ++ RNDGEN ?= hasCurand ++endif ++ ++# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so sub-makes don't go back to the defaults ++export AVX ++export AVXFLAGS ++export FPTYPE ++export HELINL ++export HRDCOD ++export RNDGEN ++ ++#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN ++ ++# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") ++# $(info FPTYPE=$(FPTYPE)) ++ifeq ($(FPTYPE),d) ++ COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE ++else ifeq ($(FPTYPE),f) ++ COMMONFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT ++else ifeq ($(FPTYPE),m) ++ COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT ++else ++ $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) ++endif ++ ++# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") ++# $(info HELINL=$(HELINL)) ++ifeq ($(HELINL),1) ++ COMMONFLAGS += -DMGONGPU_INLINE_HELAMPS ++else ifneq ($(HELINL),0) ++ $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) ++endif ++ ++# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") ++# $(info HRDCOD=$(HRDCOD)) ++ifeq ($(HRDCOD),1) ++ COMMONFLAGS += -DMGONGPU_HARDCODE_PARAM ++else ifneq ($(HRDCOD),0) ++ $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) 
++endif ++ ++# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") ++$(info RNDGEN=$(RNDGEN)) ++ifeq ($(RNDGEN),hasNoCurand) ++ override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND ++ override CURANDLIBFLAGS = ++else ifeq ($(RNDGEN),hasCurand) ++ CXXFLAGSCURAND = $(CUINC) ++else ++ $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) ++endif ++ ++MG_CXXFLAGS += $(COMMONFLAGS) ++MG_NVCCFLAGS += $(COMMONFLAGS) ++ ++#------------------------------------------------------------------------------- diff --git b/epochX/cudacpp/gg_tt.mad/Source/makefile a/epochX/cudacpp/gg_tt.mad/Source/makefile -index 617f10b93..00c73099a 100644 +index 617f10b93..407b1b753 100644 --- b/epochX/cudacpp/gg_tt.mad/Source/makefile +++ a/epochX/cudacpp/gg_tt.mad/Source/makefile -@@ -120,7 +120,7 @@ $(LIBDIR)libiregi.a: $(IREGIDIR) - cd $(IREGIDIR); make - ln -sf ../Source/$(IREGIDIR)libiregi.a $(LIBDIR)libiregi.a - --clean: -+cleanSource: - $(RM) *.o $(LIBRARIES) $(BINARIES) - cd PDF; make clean; cd .. 
- cd PDF/gammaUPC; make clean; cd ../../ -@@ -132,4 +132,11 @@ clean: +@@ -10,8 +10,8 @@ include make_opts + + # Source files + +-PROCESS= hfill.o matrix.o myamp.o +-DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o ++PROCESS = hfill.o matrix.o myamp.o ++DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o + HBOOK = hfill.o hcurve.o hbook1.o hbook2.o + GENERIC = $(alfas_functions).o transpole.o invarients.o hfill.o pawgraphs.o ran1.o \ + rw_events.o rw_routines.o kin_functions.o open_file.o basecode.o setrun.o \ +@@ -22,7 +22,7 @@ GENSUDGRID = gensudgrid.o is-sud.o setrun_gen.o rw_routines.o open_file.o + + # Locally compiled libraries + +-LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) ++LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) + + # Binaries + +@@ -32,6 +32,9 @@ BINARIES = $(BINDIR)gen_ximprove $(BINDIR)gensudgrid $(BINDIR)combine_runs + + all: $(LIBRARIES) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(LIBDIR)libbias.$(libext) + ++%.o: %.f *.inc ++ $(FC) -I. $(MG_FCFLAGS) $(FCFLAGS) -c $< -o $@ ++ + # Libraries + + $(LIBDIR)libdsample.$(libext): $(DSAMPLE) +@@ -39,36 +42,35 @@ $(LIBDIR)libdsample.$(libext): $(DSAMPLE) + $(LIBDIR)libgeneric.$(libext): $(GENERIC) + $(call CREATELIB, $@, $^) + $(LIBDIR)libdhelas.$(libext): DHELAS +- cd DHELAS; make; cd .. ++ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" + $(LIBDIR)libpdf.$(libext): PDF make_opts +- cd PDF; make; cd .. 
++ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" + ifneq (,$(filter edff chff, $(pdlabel1) $(pdlabel2))) + $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC +- cd PDF/gammaUPC; make ; cd ../../ ++ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" + else + $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC +- cd PDF/gammaUPC; make -f makefile_dummy; cd ../../ +-endif ++ $(MAKE) -C $< -f makefile_dummy FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" ++endif + $(LIBDIR)libcernlib.$(libext): CERNLIB +- cd CERNLIB; make; cd .. ++ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" + # The bias library is here the dummy by default; compilation of other ones specified in the run_card will be done by MG5aMC directly. + $(LIBDIR)libbias.$(libext): BIAS/dummy +- cd BIAS/dummy; make; cd ../../ ++ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" + + $(LIBDIR)libmodel.$(libext): MODEL param_card.inc +- cd MODEL; make ++ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" + param_card.inc: ../Cards/param_card.dat + ../bin/madevent treatcards param ++ touch $@ # madevent doesn't update the time stamp if there's nothing to do + + + + +-$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o +- $(FC) $(LDFLAGS) -o $@ $^ +-#$(BINDIR)combine_events: $(COMBINE) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) run_card.inc $(LIBDIR)libbias.$(libext) +-# $(FC) -o $@ $(COMBINE) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC $(llhapdf) $(LDFLAGS) -lbias ++$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o ++ $(FC) $(MG_LDFLAGS) $(LDFLAGS) -o $@ $^ + $(BINDIR)gensudgrid: $(GENSUDGRID) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libcernlib.$(libext) +- $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC -lcernlib 
$(llhapdf) $(LDFLAGS) ++ $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(MG_LDFLAGS) $(LDFLAGS) + + # Dependencies + +@@ -85,6 +87,7 @@ rw_events.o: rw_events.f run_config.inc + + run_card.inc: ../Cards/run_card.dat + ../bin/madevent treatcards run ++ touch $@ # madevent doesn't update the time stamp if there's nothing to do + + clean4pdf: + rm -f ../lib/libpdf.$(libext) +@@ -132,4 +135,3 @@ clean: cd BIAS/ptj_bias; make clean; cd ../.. if [ -d $(CUTTOOLSDIR) ]; then cd $(CUTTOOLSDIR); make clean; cd ..; fi if [ -d $(IREGIDIR) ]; then cd $(IREGIDIR); make clean; cd ..; fi -+ -+clean: cleanSource - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make clean; cd -; done; -+ -+cleanavx: -+ for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; -+cleanall: cleanSource # THIS IS THE ONE -+ for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; +- for i in `ls -d ../SubProcesses/P*`; do cd $$i; make clean; cd -; done; diff --git b/epochX/cudacpp/gg_tt.mad/SubProcesses/makefile a/epochX/cudacpp/gg_tt.mad/SubProcesses/makefile -index 348c283be..65369d610 100644 +index 348c283be..b69917ee1 100644 --- b/epochX/cudacpp/gg_tt.mad/SubProcesses/makefile +++ a/epochX/cudacpp/gg_tt.mad/SubProcesses/makefile -@@ -1,6 +1,28 @@ +@@ -1,5 +1,30 @@ +-include ../../Source/make_opts +-FFLAGS+= -w +SHELL := /bin/bash + - include ../../Source/make_opts - FFLAGS+= -w - ++# Include general setup ++OPTIONS_MAKEFILE := ../../Source/make_opts ++include $(OPTIONS_MAKEFILE) ++ +# Enable the C preprocessor https://gcc.gnu.org/onlinedocs/gfortran/Preprocessing-Options.html -+FFLAGS+= -cpp ++MG_FCFLAGS += -cpp ++MG_CXXFLAGS += -I. 
+ -+# Compile counters with -O3 as in the cudacpp makefile (avoid being "unfair" to Fortran #740) -+CXXFLAGS = -O3 -Wall -Wshadow -Wextra ++all: help cppnative + -+# Add -std=c++17 explicitly to avoid build errors on macOS -+# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -+ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -+CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 -+endif ++# Target if user does not specify target ++help: ++ $(info No target specified.) ++ $(info Viable targets are 'cppnative' (default), 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z' and 'cuda') ++ $(info Or 'cppall' for all C++ targets) ++ $(info Or 'ALL' for all C++ and cuda targets) + -+# Enable ccache if USECCACHE=1 ++ ++# Enable ccache for C++ if USECCACHE=1 (do not enable it for Fortran since it is not supported for Fortran) +ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) + override CXX:=ccache $(CXX) +endif -+ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) -+ override FC:=ccache $(FC) -+endif -+ ++###ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) ++### override FC:=ccache $(FC) ++###endif + # Load additional dependencies of the bias module, if present ifeq (,$(wildcard ../bias_dependencies)) - BIASDEPENDENCIES = -@@ -24,7 +46,26 @@ else +@@ -24,15 +49,25 @@ else MADLOOP_LIB = endif -LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L../../lib/ -ldhelas -ldsample -lmodel -lgeneric -lpdf -lgammaUPC -lcernlib $(llhapdf) -lbias -+LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias -+ -+processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') -+CUDACPP_MAKEFILE=cudacpp.mk -+# NB1 Using ":=" below instead of "=" is much faster (it only runs the subprocess once instead of many times) -+# NB2 Use '|&' in CUDACPP_BUILDDIR to avoid confusing errors about googletest #507 -+# NB3 Do 
not add a comment inlined "CUDACPP_BUILDDIR=$(shell ...) # comment" as otherwise a trailing space is included... -+# NB4 The variables relevant to the cudacpp Makefile must be explicitly passed to $(shell...) -+CUDACPP_MAKEENV:=$(shell echo '$(.VARIABLES)' | tr " " "\n" | egrep "(USEBUILDDIR|AVX|FPTYPE|HELINL|HRDCOD)") -+###$(info CUDACPP_MAKEENV=$(CUDACPP_MAKEENV)) -+###$(info $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))")) -+CUDACPP_BUILDDIR:=$(shell $(MAKE) $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))") -f $(CUDACPP_MAKEFILE) -pn 2>&1 | awk '/Building/{print $$3}' | sed s/BUILDDIR=//) -+ifeq ($(CUDACPP_BUILDDIR),) -+$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) -+else -+$(info CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)') +- ++LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias + LIBS = $(LIBDIR)libbias.$(libext) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(MADLOOP_LIB) $(LOOP_LIBS) + + ifneq ("$(wildcard ../../Source/RUNNING)","") + LINKLIBS += -lrunning +- LIBS += $(LIBDIR)librunning.$(libext) ++ LIBS += $(LIBDIR)librunning.$(libext) +endif -+CUDACPP_COMMONLIB=mg5amc_common -+CUDACPP_CXXLIB=mg5amc_$(processid_short)_cpp -+CUDACPP_CULIB=mg5amc_$(processid_short)_cuda ++ ++SOURCEDIR_GUARD:=../../Source/.timestamp_guard ++# We use $(SOURCEDIR_GUARD) to figure out if Source is out of date. The Source makefile doesn't correctly ++# update all files, so we need a proxy that is updated every time we run "$(MAKE) -C ../../Source". 
++$(SOURCEDIR_GUARD) ../../Source/discretesampler.mod &: ../../Source/*.f ../../Cards/param_card.dat ../../Cards/run_card.dat ++ifneq ($(shell which flock 2>/dev/null),) ++ flock ../../Source/.lock -c "$(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD)" ++else ++ $(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD) + endif - LIBS = $(LIBDIR)libbias.$(libext) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(MADLOOP_LIB) $(LOOP_LIBS) ++$(LIBS): $(SOURCEDIR_GUARD) + + # Source files -@@ -43,41 +84,117 @@ ifeq ($(strip $(MATRIX_HEL)),) +@@ -43,41 +78,105 @@ ifeq ($(strip $(MATRIX_HEL)),) endif @@ -111,242 +549,12390 @@ index 348c283be..65369d610 100644 + +DSIG=driver.o $(patsubst %.f, %.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) +DSIG_cudacpp=driver_cudacpp.o $(patsubst %.f, %_cudacpp.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) - - SYMMETRY = symmetry.o idenparts.o - - # Binaries - --$(PROG): $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX) -- $(FC) -o $(PROG) $(PROCESS) $(MATRIX) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) -fopenmp -+ifeq ($(UNAME),Darwin) -+LDFLAGS += -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) -+LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" -+else -+LDFLAGS += -Wl,--no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) ++ ++SYMMETRY = symmetry.o idenparts.o ++ ++# cudacpp targets: ++CUDACPP_MAKEFILE := cudacpp.mk ++ifneq (,$(wildcard $(CUDACPP_MAKEFILE))) ++include $(CUDACPP_MAKEFILE) +endif - --$(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) -- $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) -fopenmp -+all: $(PROG)_fortran 
$(CUDACPP_BUILDDIR)/$(PROG)_cpp # also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (#503) - --gensym: $(SYMMETRY) configs.inc $(LIBS) -- $(FC) -o gensym $(SYMMETRY) -L../../lib/ $(LINKLIBS) $(LDFLAGS) ++ ++ifeq ($(CUDACPP_BUILDDIR),) ++$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) ++endif ++CUDACPP_COMMONLIB=mg5amc_common ++CUDACPP_CXXLIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so ++CUDACPP_CULIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so ++ ++ ++# Set up OpenMP if supported ++OMPFLAGS ?= -fopenmp +ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -+override OMPFLAGS = -fopenmp +LINKLIBS += -liomp5 # see #578 +LINKLIBS += -lintlc # undefined reference to `_intel_fast_memcpy' +else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -+override OMPFLAGS = -fopenmp +$(CUDACPP_BUILDDIR)/$(PROG)_cpp: LINKLIBS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 +else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -+override OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang -+else -+override OMPFLAGS = -fopenmp ++OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang +endif -+ -+$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o -+ $(FC) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) --$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat -- cd ../../Source/MODEL; make -+$(LIBS): .libs +-SYMMETRY = symmetry.o idenparts.o --$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat -+.libs: ../../Cards/param_card.dat ../../Cards/run_card.dat - cd ../../Source; make -+ touch $@ -+ -+$(CUDACPP_BUILDDIR)/.cudacpplibs: -+ $(MAKE) -f $(CUDACPP_MAKEFILE) -+ touch $@ -+ + # Binaries + +-$(PROG): $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX) +- $(FC) -o $(PROG) $(PROCESS) $(MATRIX) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) -fopenmp ++$(PROG)_fortran: 
$(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o ++ $(FC) $(MG_FCFLAGS) $(FCFLAGS) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) + +-$(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) +- $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) -fopenmp +# On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH +# Use relative paths with respect to the executables ($ORIGIN on Linux) +# On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary -+ifeq ($(UNAME_S),Darwin) -+ override LIBFLAGSRPATH = -+else ifeq ($(USEBUILDDIR),1) -+ override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' -+else -+ override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/$(LIBDIR)' ++ifneq ($(UNAME_S),Darwin) ++ LIBFLAGSRPATH := -Wl,-rpath,'$$ORIGIN:$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' +endif -+ -+.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link + +-gensym: $(SYMMETRY) configs.inc $(LIBS) +- $(FC) -o gensym $(SYMMETRY) -L../../lib/ $(LINKLIBS) $(LDFLAGS) ++.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link madevent_cppnone_link madevent_cppsse4_link madevent_cppavx2_link madevent_cpp512y_link madevent_cpp512z_link clean cleanall cleansrc + +madevent_fortran_link: $(PROG)_fortran + rm -f $(PROG) + ln -s $(PROG)_fortran $(PROG) ++ ++madevent_cppnone_link: AVX=none ++madevent_cppnone_link: cppnone ++ ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) ++ ++madevent_cppavx2_link: AVX=avx2 ++madevent_cppavx2_link: cppavx2 ++ ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) ++ ++madevent_cpp512y_link: AVX=512y ++madevent_cpp512y_link: cppavx512y ++ ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +-$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat +- cd ../../Source/MODEL; make ++madevent_cpp512z_link: AVX=512z 
++madevent_cpp512z_link: cppavx512z ++ ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +-$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat +- cd ../../Source; make ++madevent_cuda_link: AVX=cuda ++madevent_cuda_link: cuda ++ ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) -$(LIBDIR)libpdf.$(libext): - cd ../../Source/PDF; make -+madevent_cpp_link: $(CUDACPP_BUILDDIR)/$(PROG)_cpp -+ rm -f $(PROG) -+ ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) - --$(LIBDIR)libgammaUPC.$(libext): -- cd ../../Source/PDF/gammaUPC; make -+madevent_cuda_link: $(CUDACPP_BUILDDIR)/$(PROG)_cuda -+ rm -f $(PROG) -+ ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) -+ -+# Building $(PROG)_cpp also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (improved patch for cpp-only builds #503) -+$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o $(CUDACPP_BUILDDIR)/.cudacpplibs -+ $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CXXLIB) $(LIBFLAGSRPATH) $(LDFLAGS) -+ if [ -f $(LIBDIR)/$(CUDACPP_BUILDDIR)/lib$(CUDACPP_CULIB).* ]; then $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CULIB) $(LIBFLAGSRPATH) $(LDFLAGS); fi ++$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(LIBS) $(CUDACPP_CXXLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o ++ $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) + 
-+$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(CUDACPP_BUILDDIR)/$(PROG)_cpp ++$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(LIBS) $(CUDACPP_CULIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o ++ $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) + +counters.o: counters.cc timer.h -+ $(CXX) $(CXXFLAGS) -c $< -o $@ ++ $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ + +ompnumthreads.o: ompnumthreads.cc ompnumthreads.h -+ $(CXX) -I. $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ ++ $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ + +$(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) + $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) $(OMPFLAGS) + +gensym: $(SYMMETRY) configs.inc $(LIBS) + $(FC) -o gensym $(SYMMETRY) -L$(LIBDIR) $(LINKLIBS) $(LDFLAGS) -+ -+###ifeq (,$(wildcard fbridge.inc)) # Pointless: fbridge.inc always exists as this is the cudacpp-modified makefile! -+###$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat -+### cd ../../Source/MODEL; make -+### -+###$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat -+### cd ../../Source; make -+### -+###$(LIBDIR)libpdf.$(libext): -+### cd ../../Source/PDF; make -+### -+###$(LIBDIR)libgammaUPC.$(libext): -+### cd ../../Source/PDF/gammaUPC; make -+###endif + +-$(LIBDIR)libgammaUPC.$(libext): +- cd ../../Source/PDF/gammaUPC; make # Add source so that the compiler finds the DiscreteSampler module. 
$(MATRIX): %.o: %.f - $(FC) $(FFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC - %.o: %.f - $(FC) $(FFLAGS) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC +- $(FC) $(FFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC +-%.o: %.f +- $(FC) $(FFLAGS) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC ++ $(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC ++%.o $(CUDACPP_BUILDDIR)/%.o: %.f ++ $(FC) $(MG_FCFLAGS) $(FCFLAGS) -I../../Source/ -I../../Source/PDF/gammaUPC -c $< -o $@ +%_cudacpp.o: %.f -+ $(FC) $(FFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ ++ $(FC) $(MG_FCFLAGS) $(FCFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ # Dependencies -@@ -97,5 +214,61 @@ unwgt.o: genps.inc nexternal.inc symswap.inc cluster.inc run.inc message.inc \ +@@ -97,5 +196,43 @@ unwgt.o: genps.inc nexternal.inc symswap.inc cluster.inc run.inc message.inc \ run_config.inc initcluster.o: message.inc --clean: -- $(RM) *.o gensym madevent madevent_forhel +# Extra dependencies on discretesampler.mod ++../../Source/discretesampler.mod: ../../Source/DiscreteSampler.f + -+auto_dsig.o: .libs -+driver.o: .libs -+driver_cudacpp.o: .libs -+$(MATRIX): .libs -+genps.o: .libs ++auto_dsig.o: ../../Source/discretesampler.mod ++driver.o: ../../Source/discretesampler.mod ++driver_cudacpp.o: ../../Source/discretesampler.mod ++$(MATRIX): ../../Source/discretesampler.mod ++genps.o: ../../Source/discretesampler.mod + +# Cudacpp avxall targets + -+UNAME_P := $(shell uname -p) +ifeq ($(UNAME_P),ppc64le) -+avxall: avxnone avxsse4 ++cppall: cppnative cppnone cppsse4 +else ifeq ($(UNAME_P),arm) -+avxall: avxnone avxsse4 ++cppall: cppnative cppnone cppsse4 +else -+avxall: avxnone avxsse4 avxavx2 avx512y avx512z ++cppall: cppnative cppnone cppsse4 cppavx2 cppavx512y cppavx512z +endif + -+avxnone: $(PROG)_fortran $(DSIG_cudacpp) -+ @echo -+ $(MAKE) USEBUILDDIR=1 AVX=none 
++ALL: cppall cuda + -+avxsse4: $(PROG)_fortran $(DSIG_cudacpp) -+ @echo -+ $(MAKE) USEBUILDDIR=1 AVX=sse4 ++# Clean all architecture-specific builds: + clean: +- $(RM) *.o gensym madevent madevent_forhel ++ $(RM) *.o gensym $(PROG) $(PROG)_* ++ $(RM) -rf build.*/*{.o,.so,.exe,.dylib,madevent_*} ++ @for dir in build.*; do if [ -z "$$(ls -A $${dir})" ]; then rm -r $${dir}; else echo "Not cleaning $${dir}; not empty"; fi; done + -+avxavx2: $(PROG)_fortran $(DSIG_cudacpp) -+ @echo -+ $(MAKE) USEBUILDDIR=1 AVX=avx2 ++cleanall: cleansrc ++ for PROCESS in ../P[0-9]*; do $(MAKE) -C $${PROCESS} clean; done + -+avx512y: $(PROG)_fortran $(DSIG_cudacpp) -+ @echo -+ $(MAKE) USEBUILDDIR=1 AVX=512y ++# Clean one architecture-specific build ++clean%: ++ $(RM) -r build.$*_* + -+avx512z: $(PROG)_fortran $(DSIG_cudacpp) -+ @echo -+ $(MAKE) USEBUILDDIR=1 AVX=512z ++# Clean common source directories (interferes with other P*) ++cleansrc: ++ make -C ../../Source clean ++ $(RM) -f $(SOURCEDIR_GUARD) ../../Source/{*.mod,.lock} ../../Source/*/*.mod ++ $(RM) -r $(LIBDIR)libbias.$(libext) ++ if [ -d ../../src ]; then $(MAKE) -C ../../src -f cudacpp_src.mk clean; fi +diff --git b/epochX/cudacpp/gg_tt.mad/bin/internal/banner.py a/epochX/cudacpp/gg_tt.mad/bin/internal/banner.py +index bd1517985..b408679c2 100755 +--- b/epochX/cudacpp/gg_tt.mad/bin/internal/banner.py ++++ a/epochX/cudacpp/gg_tt.mad/bin/internal/banner.py +@@ -2,11 +2,11 @@ + # + # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors + # +-# This file is a part of the MadGraph5_aMC@NLO project, an application which ++# This file is a part of the MadGraph5_aMC@NLO project, an application which + # automatically generates Feynman diagrams and matrix elements for arbitrary + # high-energy processes in the Standard Model and beyond. + # +-# It is subject to the MadGraph5_aMC@NLO license which should accompany this ++# It is subject to the MadGraph5_aMC@NLO license which should accompany this + # distribution. 
+ # + # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch +@@ -53,7 +53,7 @@ else: + MADEVENT = False + import madgraph.various.misc as misc + import madgraph.iolibs.file_writers as file_writers +- import madgraph.iolibs.files as files ++ import madgraph.iolibs.files as files + import models.check_param_card as param_card_reader + from madgraph import MG5DIR, MadGraph5Error, InvalidCmd + +@@ -80,36 +80,36 @@ class Banner(dict): + 'mgproccard': 'MGProcCard', + 'mgruncard': 'MGRunCard', + 'ma5card_parton' : 'MA5Card_parton', +- 'ma5card_hadron' : 'MA5Card_hadron', ++ 'ma5card_hadron' : 'MA5Card_hadron', + 'mggenerationinfo': 'MGGenerationInfo', + 'mgpythiacard': 'MGPythiaCard', + 'mgpgscard': 'MGPGSCard', + 'mgdelphescard': 'MGDelphesCard', + 'mgdelphestrigger': 'MGDelphesTrigger', + 'mgshowercard': 'MGShowerCard' } +- + -+###endif + forbid_cdata = ['initrwgt'] +- + -+# Clean (NB: 'make clean' in Source calls 'make clean' in all P*) + def __init__(self, banner_path=None): + """ """ + + if isinstance(banner_path, Banner): + dict.__init__(self, banner_path) + self.lhe_version = banner_path.lhe_version +- return ++ return + else: + dict.__init__(self) +- + -+clean: # Clean builds: fortran in this Pn; cudacpp executables for one AVX in this Pn -+ $(RM) *.o gensym $(PROG) $(PROG)_fortran $(PROG)_forhel $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(CUDACPP_BUILDDIR)/$(PROG)_cuda + #Look at the version + if MADEVENT: + self['mgversion'] = '#%s\n' % open(pjoin(MEDIR, 'MGMEVersion.txt')).read() + else: + info = misc.get_pkg_info() + self['mgversion'] = info['version']+'\n' +- + -+cleanavxs: clean # Clean builds: fortran in this Pn; cudacpp for all AVX in this Pn and in src -+ $(MAKE) -f $(CUDACPP_MAKEFILE) cleanall -+ rm -f $(CUDACPP_BUILDDIR)/.cudacpplibs -+ rm -f .libs + self.lhe_version = None + +- + -+cleanall: # Clean builds: fortran in all P* and in Source; cudacpp for all AVX in all P* and in src -+ make -C ../../Source cleanall -+ rm -rf 
$(LIBDIR)libbias.$(libext) -+ rm -f ../../Source/*.mod ../../Source/*/*.mod + if banner_path: + self.read_banner(banner_path) + +@@ -123,7 +123,7 @@ class Banner(dict): + 'mgruncard':'run_card.dat', + 'mgpythiacard':'pythia_card.dat', + 'mgpgscard' : 'pgs_card.dat', +- 'mgdelphescard':'delphes_card.dat', ++ 'mgdelphescard':'delphes_card.dat', + 'mgdelphestrigger':'delphes_trigger.dat', + 'mg5proccard':'proc_card_mg5.dat', + 'mgproccard': 'proc_card.dat', +@@ -137,10 +137,10 @@ class Banner(dict): + 'mgshowercard':'shower_card.dat', + 'pythia8':'pythia8_card.dat', + 'ma5card_parton':'madanalysis5_parton_card.dat', +- 'ma5card_hadron':'madanalysis5_hadron_card.dat', ++ 'ma5card_hadron':'madanalysis5_hadron_card.dat', + 'run_settings':'' + } +- + -+distclean: cleanall # Clean all fortran and cudacpp builds as well as the googletest installation -+ $(MAKE) -f $(CUDACPP_MAKEFILE) distclean -diff --git b/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py a/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py -index ebbc1ac1d..a88d60b28 100755 ---- b/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py -+++ a/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py -@@ -385,8 +385,20 @@ class gensym(object): - done = True - if not done: - raise Exception('Parsing error in gensym: %s' % stdout) -- -- self.cmd.compile(['madevent'], cwd=Pdir) + def read_banner(self, input_path): + """read a banner""" + +@@ -151,7 +151,7 @@ class Banner(dict): + def split_iter(string): + return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL)) + input_path = split_iter(input_path) +- + -+ cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py -+ logger.info("Building madevent in madevent_interface.py with '%s' matrix elements"%cudacpp_backend) -+ if cudacpp_backend == 'FORTRAN': -+ self.cmd.compile(['madevent_fortran_link'], cwd=Pdir) -+ elif cudacpp_backend == 'CPP': -+ self.cmd.compile(['madevent_cpp_link'], cwd=Pdir) -+ elif 
cudacpp_backend == 'CUDA': -+ self.cmd.compile(['madevent_cuda_link'], cwd=Pdir) -+ else: -+ raise Exception("Invalid cudacpp_backend='%s': only 'FORTRAN', 'CPP', 'CUDA' are supported") -+ ###logger.info("Building madevent with ALL(FORTRAN/CPP/CUDA) matrix elements (cudacpp_backend=%s)"%cudacpp_backend) -+ ###self.cmd.compile(['all'], cwd=Pdir) + text = '' + store = False + for line in input_path: +@@ -170,13 +170,13 @@ class Banner(dict): + text += line + else: + text += '%s%s' % (line, '\n') +- +- #reaching end of the banner in a event file avoid to read full file + - if to_submit: - self.submit_to_cluster(job_list) - job_list = {} -diff --git b/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py a/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py -index 389b93ab8..d72270289 100755 ---- b/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py -+++ a/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py -@@ -3614,8 +3614,20 @@ Beware that this can be dangerous for local multicore runs.""") - logger.info(' %s ' % subdir) - - if os.path.exists(pjoin(Pdir, 'ajob1')): -- self.compile(['madevent'], cwd=Pdir) -- ++ #reaching end of the banner in a event file avoid to read full file + if "" in line: + break + elif "" in line: + break +- + -+ cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py -+ logger.info("Building madevent in madevent_interface.py with '%s' matrix elements"%cudacpp_backend) -+ if cudacpp_backend == 'FORTRAN': -+ self.compile(['madevent_fortran_link'], cwd=Pdir) -+ elif cudacpp_backend == 'CPP': -+ self.compile(['madevent_cpp_link'], cwd=Pdir) -+ elif cudacpp_backend == 'CUDA': -+ self.compile(['madevent_cuda_link'], cwd=Pdir) -+ else: -+ raise Exception("Invalid cudacpp_backend='%s': only 'FORTRAN', 'CPP', 'CUDA' are supported") -+ ###logger.info("Building madevent with ALL (FORTRAN/CPP/CUDA) matrix elements (cudacpp_backend=%s)"%cudacpp_backend) -+ ###self.compile(['all'], 
cwd=Pdir) + def __getattribute__(self, attr): + """allow auto-build for the run_card/param_card/... """ + try: +@@ -187,23 +187,23 @@ class Banner(dict): + return self.charge_card(attr) + + +- + - alljobs = misc.glob('ajob*', Pdir) - - #remove associated results.dat (ensure to not mix with all data) + def change_lhe_version(self, version): + """change the lhe version associate to the banner""" +- ++ + version = float(version) + if version < 3: + version = 1 + elif version > 3: + raise Exception("Not Supported version") + self.lhe_version = version +- ++ + def get_cross(self, witherror=False): + """return the cross-section of the file""" + + if "init" not in self: + raise Exception +- ++ + text = self["init"].split('\n') + cross = 0 + error = 0 +@@ -217,13 +217,13 @@ class Banner(dict): + return cross + else: + return cross, math.sqrt(error) +- ++ + + def scale_init_cross(self, ratio): + """modify the init information with the associate scale""" + + assert "init" in self +- ++ + all_lines = self["init"].split('\n') + new_data = [] + new_data.append(all_lines[0]) +@@ -231,29 +231,29 @@ class Banner(dict): + line = all_lines[i] + split = line.split() + if len(split) == 4: +- xsec, xerr, xmax, pid = split ++ xsec, xerr, xmax, pid = split + else: + new_data += all_lines[i:] + break + pid = int(pid) +- ++ + line = " %+13.7e %+13.7e %+13.7e %i" % \ + (ratio*float(xsec), ratio* float(xerr), ratio*float(xmax), pid) + new_data.append(line) + self['init'] = '\n'.join(new_data) +- ++ + def get_pdg_beam(self): + """return the pdg of each beam""" +- ++ + assert "init" in self +- ++ + all_lines = self["init"].split('\n') + pdg1,pdg2,_ = all_lines[0].split(None, 2) + return int(pdg1), int(pdg2) +- ++ + def load_basic(self, medir): + """ Load the proc_card /param_card and run_card """ +- ++ + self.add(pjoin(medir,'Cards', 'param_card.dat')) + self.add(pjoin(medir,'Cards', 'run_card.dat')) + if os.path.exists(pjoin(medir, 'SubProcesses', 'procdef_mg5.dat')): +@@ -261,29 +261,29 @@ 
class Banner(dict): + self.add(pjoin(medir,'Cards', 'proc_card_mg5.dat')) + else: + self.add(pjoin(medir,'Cards', 'proc_card.dat')) +- ++ + def change_seed(self, seed): + """Change the seed value in the banner""" + # 0 = iseed + p = re.compile(r'''^\s*\d+\s*=\s*iseed''', re.M) + new_seed_str = " %s = iseed" % seed + self['mgruncard'] = p.sub(new_seed_str, self['mgruncard']) +- ++ + def add_generation_info(self, cross, nb_event): + """add info on MGGeneration""" +- ++ + text = """ + # Number of Events : %s + # Integrated weight (pb) : %s + """ % (nb_event, cross) + self['MGGenerationInfo'] = text +- ++ + ############################################################################ + # SPLIT BANNER + ############################################################################ + def split(self, me_dir, proc_card=True): + """write the banner in the Cards directory. +- proc_card argument is present to avoid the overwrite of proc_card ++ proc_card argument is present to avoid the overwrite of proc_card + information""" + + for tag, text in self.items(): +@@ -305,37 +305,37 @@ class Banner(dict): + """special routine removing width/mass of particles not present in the model + This is usefull in case of loop model card, when we want to use the non + loop model.""" +- ++ + if not hasattr(self, 'param_card'): + self.charge_card('slha') +- ++ + for tag in ['mass', 'decay']: + block = self.param_card.get(tag) + for data in block: + pid = data.lhacode[0] +- if pid not in list(pid2label.keys()): ++ if pid not in list(pid2label.keys()): + block.remove((pid,)) + + def get_lha_strategy(self): + """get the lha_strategy: how the weight have to be handle by the shower""" +- ++ + if not self["init"]: + raise Exception("No init block define") +- ++ + data = self["init"].split('\n')[0].split() + if len(data) != 10: + misc.sprint(len(data), self['init']) + raise Exception("init block has a wrong format") + return int(float(data[-2])) +- ++ + def set_lha_strategy(self, value): + """set the 
lha_strategy: how the weight have to be handle by the shower""" +- ++ + if not (-4 <= int(value) <= 4): + six.reraise(Exception, "wrong value for lha_strategy", value) + if not self["init"]: + raise Exception("No init block define") +- ++ + all_lines = self["init"].split('\n') + data = all_lines[0].split() + if len(data) != 10: +@@ -351,13 +351,13 @@ class Banner(dict): + assert isinstance(cross, dict) + # assert "all" in cross + assert "init" in self +- ++ + cross = dict(cross) + for key in cross.keys(): + if isinstance(key, str) and key.isdigit() and int(key) not in cross: + cross[int(key)] = cross[key] +- +- ++ ++ + all_lines = self["init"].split('\n') + new_data = [] + new_data.append(all_lines[0]) +@@ -365,7 +365,7 @@ class Banner(dict): + line = all_lines[i] + split = line.split() + if len(split) == 4: +- xsec, xerr, xmax, pid = split ++ xsec, xerr, xmax, pid = split + else: + new_data += all_lines[i:] + break +@@ -383,23 +383,23 @@ class Banner(dict): + (float(cross[pid]), ratio* float(xerr), ratio*float(xmax), pid) + new_data.append(line) + self['init'] = '\n'.join(new_data) +- ++ + ############################################################################ + # WRITE BANNER + ############################################################################ + def write(self, output_path, close_tag=True, exclude=[]): + """write the banner""" +- ++ + if isinstance(output_path, str): + ff = open(output_path, 'w') + else: + ff = output_path +- ++ + if MADEVENT: + header = open(pjoin(MEDIR, 'Source', 'banner_header.txt')).read() + else: + header = open(pjoin(MG5DIR,'Template', 'LO', 'Source', 'banner_header.txt')).read() +- ++ + if not self.lhe_version: + self.lhe_version = self.get('run_card', 'lhe_version', default=1.0) + if float(self.lhe_version) < 3: +@@ -412,7 +412,7 @@ class Banner(dict): + + for tag in [t for t in self.ordered_items if t in list(self.keys())]+ \ + [t for t in self.keys() if t not in self.ordered_items]: +- if tag in ['init'] or tag in 
exclude: ++ if tag in ['init'] or tag in exclude: + continue + capitalized_tag = self.capitalized_items[tag] if tag in self.capitalized_items else tag + start_data, stop_data = '', '' +@@ -422,19 +422,19 @@ class Banner(dict): + stop_data = ']]>\n' + out = '<%(tag)s>%(start_data)s\n%(text)s\n%(stop_data)s\n' % \ + {'tag':capitalized_tag, 'text':self[tag].strip(), +- 'start_data': start_data, 'stop_data':stop_data} ++ 'start_data': start_data, 'stop_data':stop_data} + try: + ff.write(out) + except: + ff.write(out.encode('utf-8')) +- +- ++ ++ + if not '/header' in exclude: + out = '\n' + try: + ff.write(out) + except: +- ff.write(out.encode('utf-8')) ++ ff.write(out.encode('utf-8')) + + if 'init' in self and not 'init' in exclude: + text = self['init'] +@@ -444,22 +444,22 @@ class Banner(dict): + ff.write(out) + except: + ff.write(out.encode('utf-8')) +- ++ + if close_tag: +- out = '\n' ++ out = '\n' + try: + ff.write(out) + except: +- ff.write(out.encode('utf-8')) ++ ff.write(out.encode('utf-8')) + return ff +- +- ++ ++ + ############################################################################ + # BANNER + ############################################################################ + def add(self, path, tag=None): + """Add the content of the file to the banner""" +- ++ + if not tag: + card_name = os.path.basename(path) + if 'param_card' in card_name: +@@ -505,33 +505,33 @@ class Banner(dict): + if tag == 'param_card': + tag = 'slha' + elif tag == 'run_card': +- tag = 'mgruncard' ++ tag = 'mgruncard' + elif tag == 'proc_card': +- tag = 'mg5proccard' ++ tag = 'mg5proccard' + elif tag == 'shower_card': + tag = 'mgshowercard' + elif tag == 'FO_analyse_card': + tag = 'foanalyse' +- ++ + self[tag.lower()] = text +- +- ++ ++ + def charge_card(self, tag): + """Build the python object associated to the card""" +- ++ + if tag in ['param_card', 'param']: + tag = 'slha' + elif tag in ['run_card', 'run']: +- tag = 'mgruncard' ++ tag = 'mgruncard' + elif tag == 'proc_card': 
+- tag = 'mg5proccard' ++ tag = 'mg5proccard' + elif tag == 'shower_card': + tag = 'mgshowercard' + elif tag == 'FO_analyse_card': + tag = 'foanalyse' + + assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag +- ++ + if tag == 'slha': + param_card = self[tag].split('\n') + self.param_card = param_card_reader.ParamCard(param_card) +@@ -544,56 +544,56 @@ class Banner(dict): + self.proc_card = ProcCard(proc_card) + return self.proc_card + elif tag =='mgshowercard': +- shower_content = self[tag] ++ shower_content = self[tag] + if MADEVENT: + import internal.shower_card as shower_card + else: + import madgraph.various.shower_card as shower_card + self.shower_card = shower_card.ShowerCard(shower_content, True) +- # set testing to false (testing = true allow to init using ++ # set testing to false (testing = true allow to init using + # the card content instead of the card path" + self.shower_card.testing = False + return self.shower_card + elif tag =='foanalyse': +- analyse_content = self[tag] ++ analyse_content = self[tag] + if MADEVENT: + import internal.FO_analyse_card as FO_analyse_card + else: + import madgraph.various.FO_analyse_card as FO_analyse_card +- # set testing to false (testing = true allow to init using ++ # set testing to false (testing = true allow to init using + # the card content instead of the card path" + self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True) + self.FOanalyse_card.testing = False + return self.FOanalyse_card +- ++ + + def get_detail(self, tag, *arg, **opt): + """return a specific """ +- ++ + if tag in ['param_card', 'param']: + tag = 'slha' + attr_tag = 'param_card' + elif tag in ['run_card', 'run']: +- tag = 'mgruncard' ++ tag = 'mgruncard' + attr_tag = 'run_card' + elif tag == 'proc_card': +- tag = 'mg5proccard' ++ tag = 'mg5proccard' + attr_tag = 'proc_card' + elif tag == 'model': +- tag = 'mg5proccard' ++ tag = 'mg5proccard' + attr_tag = 'proc_card' + arg = 
('model',) + elif tag == 'generate': +- tag = 'mg5proccard' ++ tag = 'mg5proccard' + attr_tag = 'proc_card' + arg = ('generate',) + elif tag == 'shower_card': + tag = 'mgshowercard' + attr_tag = 'shower_card' + assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], '%s not recognized' % tag +- ++ + if not hasattr(self, attr_tag): +- self.charge_card(attr_tag) ++ self.charge_card(attr_tag) + + card = getattr(self, attr_tag) + if len(arg) == 0: +@@ -613,7 +613,7 @@ class Banner(dict): + if 'default' in opt: + return opt['default'] + else: +- raise ++ raise + elif len(arg) == 2 and tag == 'slha': + try: + return card[arg[0]].get(arg[1:]) +@@ -621,15 +621,15 @@ class Banner(dict): + if 'default' in opt: + return opt['default'] + else: +- raise ++ raise + elif len(arg) == 0: + return card + else: + raise Exception("Unknow command") +- ++ + #convenient alias + get = get_detail +- ++ + def set(self, tag, *args): + """modify one of the cards""" + +@@ -637,27 +637,27 @@ class Banner(dict): + tag = 'slha' + attr_tag = 'param_card' + elif tag == 'run_card': +- tag = 'mgruncard' ++ tag = 'mgruncard' + attr_tag = 'run_card' + elif tag == 'proc_card': +- tag = 'mg5proccard' ++ tag = 'mg5proccard' + attr_tag = 'proc_card' + elif tag == 'model': +- tag = 'mg5proccard' ++ tag = 'mg5proccard' + attr_tag = 'proc_card' + arg = ('model',) + elif tag == 'generate': +- tag = 'mg5proccard' ++ tag = 'mg5proccard' + attr_tag = 'proc_card' + arg = ('generate',) + elif tag == 'shower_card': + tag = 'mgshowercard' + attr_tag = 'shower_card' + assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized' +- ++ + if not hasattr(self, attr_tag): +- self.charge_card(attr_tag) +- ++ self.charge_card(attr_tag) ++ + card = getattr(self, attr_tag) + if len(args) ==2: + if tag == 'mg5proccard': +@@ -666,20 +666,20 @@ class Banner(dict): + card[args[0]] = args[1] + else: + card[args[:-1]] = args[-1] +- +- ++ ++ + @misc.multiple_try() + def add_to_file(self, path, 
seed=None, out=None): + """Add the banner to a file and change the associate seed in the banner""" + + if seed is not None: + self.set("run_card", "iseed", seed) +- ++ + if not out: + path_out = "%s.tmp" % path + else: + path_out = out +- ++ + ff = self.write(path_out, close_tag=False, + exclude=['MGGenerationInfo', '/header', 'init']) + ff.write("## END BANNER##\n") +@@ -698,44 +698,44 @@ class Banner(dict): + files.mv(path_out, path) + + +- ++ + def split_banner(banner_path, me_dir, proc_card=True): + """a simple way to split a banner""" +- ++ + banner = Banner(banner_path) + banner.split(me_dir, proc_card) +- ++ + def recover_banner(results_object, level, run=None, tag=None): + """as input we receive a gen_crossxhtml.AllResults object. + This define the current banner and load it + """ +- ++ + if not run: +- try: +- _run = results_object.current['run_name'] +- _tag = results_object.current['tag'] ++ try: ++ _run = results_object.current['run_name'] ++ _tag = results_object.current['tag'] + except Exception: + return Banner() + else: + _run = run + if not tag: +- try: +- _tag = results_object[run].tags[-1] ++ try: ++ _tag = results_object[run].tags[-1] + except Exception as error: + if os.path.exists( pjoin(results_object.path,'Events','%s_banner.txt' % (run))): + tag = None + else: +- return Banner() ++ return Banner() + else: + _tag = tag +- + +- path = results_object.path +- if tag: ++ ++ path = results_object.path ++ if tag: + banner_path = pjoin(path,'Events',run,'%s_%s_banner.txt' % (run, tag)) + else: + banner_path = pjoin(results_object.path,'Events','%s_banner.txt' % (run)) +- ++ + if not os.path.exists(banner_path): + if level != "parton" and tag != _tag: + return recover_banner(results_object, level, _run, results_object[_run].tags[0]) +@@ -754,12 +754,12 @@ def recover_banner(results_object, level, run=None, tag=None): + return Banner(lhe.banner) + + # security if the banner was remove (or program canceled before created it) +- return Banner() +- ++ 
return Banner() ++ + banner = Banner(banner_path) +- +- +- ++ ++ ++ + if level == 'pythia': + if 'mgpythiacard' in banner: + del banner['mgpythiacard'] +@@ -768,13 +768,13 @@ def recover_banner(results_object, level, run=None, tag=None): + if tag in banner: + del banner[tag] + return banner +- ++ + class InvalidRunCard(InvalidCmd): + pass + + class ProcCard(list): + """Basic Proccard object""" +- ++ + history_header = \ + '#************************************************************\n' + \ + '#* MadGraph5_aMC@NLO *\n' + \ +@@ -798,10 +798,10 @@ class ProcCard(list): + '#* run as ./bin/mg5_aMC filename *\n' + \ + '#* *\n' + \ + '#************************************************************\n' +- +- +- +- ++ ++ ++ ++ + def __init__(self, init=None): + """ initialize a basic proc_card""" + self.info = {'model': 'sm', 'generate':None, +@@ -810,13 +810,13 @@ class ProcCard(list): + if init: + self.read(init) + +- ++ + def read(self, init): + """read the proc_card and save the information""" +- ++ + if isinstance(init, str): #path to file + init = open(init, 'r') +- ++ + store_line = '' + for line in init: + line = line.rstrip() +@@ -828,28 +828,28 @@ class ProcCard(list): + store_line = "" + if store_line: + raise Exception("WRONG CARD FORMAT") +- +- ++ ++ + def move_to_last(self, cmd): + """move an element to the last history.""" + for line in self[:]: + if line.startswith(cmd): + self.remove(line) + list.append(self, line) +- ++ + def append(self, line): + """"add a line in the proc_card perform automatically cleaning""" +- ++ + line = line.strip() + cmds = line.split() + if len(cmds) == 0: + return +- ++ + list.append(self, line) +- ++ + # command type: + cmd = cmds[0] +- ++ + if cmd == 'output': + # Remove previous outputs from history + self.clean(allow_for_removal = ['output'], keep_switch=True, +@@ -875,7 +875,7 @@ class ProcCard(list): + elif cmds[1] == 'proc_v4': + #full cleaning + self[:] = [] +- ++ + + def clean(self, to_keep=['set','add','load'], + 
remove_bef_last=None, +@@ -884,13 +884,13 @@ class ProcCard(list): + keep_switch=False): + """Remove command in arguments from history. + All command before the last occurrence of 'remove_bef_last' +- (including it) will be removed (but if another options tells the opposite). ++ (including it) will be removed (but if another options tells the opposite). + 'to_keep' is a set of line to always keep. +- 'to_remove' is a set of line to always remove (don't care about remove_bef_ ++ 'to_remove' is a set of line to always remove (don't care about remove_bef_ + status but keep_switch acts.). +- if 'allow_for_removal' is define only the command in that list can be ++ if 'allow_for_removal' is define only the command in that list can be + remove of the history for older command that remove_bef_lb1. all parameter +- present in to_remove are always remove even if they are not part of this ++ present in to_remove are always remove even if they are not part of this + list. + keep_switch force to keep the statement remove_bef_??? which changes starts + the removal mode. +@@ -900,8 +900,8 @@ class ProcCard(list): + if __debug__ and allow_for_removal: + for arg in to_keep: + assert arg not in allow_for_removal +- +- ++ ++ + nline = -1 + removal = False + #looping backward +@@ -912,7 +912,7 @@ class ProcCard(list): + if not removal and remove_bef_last: + if self[nline].startswith(remove_bef_last): + removal = True +- switch = True ++ switch = True + + # if this is the switch and is protected pass to the next element + if switch and keep_switch: +@@ -923,12 +923,12 @@ class ProcCard(list): + if any([self[nline].startswith(arg) for arg in to_remove]): + self.pop(nline) + continue +- ++ + # Only if removal mode is active! 
+ if removal: + if allow_for_removal: + # Only a subset of command can be removed +- if any([self[nline].startswith(arg) ++ if any([self[nline].startswith(arg) + for arg in allow_for_removal]): + self.pop(nline) + continue +@@ -936,10 +936,10 @@ class ProcCard(list): + # All command have to be remove but protected + self.pop(nline) + continue +- ++ + # update the counter to pass to the next element + nline -= 1 +- ++ + def get(self, tag, default=None): + if isinstance(tag, int): + list.__getattr__(self, tag) +@@ -954,32 +954,32 @@ class ProcCard(list): + except ValueError: + name, content = line[7:].split(None,1) + out.append((name, content)) +- return out ++ return out + else: + return self.info[tag] +- ++ + def write(self, path): + """write the proc_card to a given path""" +- ++ + fsock = open(path, 'w') + fsock.write(self.history_header) + for line in self: + while len(line) > 70: +- sub, line = line[:70]+"\\" , line[70:] ++ sub, line = line[:70]+"\\" , line[70:] + fsock.write(sub+"\n") + else: + fsock.write(line+"\n") +- +-class InvalidCardEdition(InvalidCmd): pass +- ++ ++class InvalidCardEdition(InvalidCmd): pass ++ + class ConfigFile(dict): + """ a class for storing/dealing with input file. +- """ ++ """ + + def __init__(self, finput=None, **opt): + """initialize a new instance. input can be an instance of MadLoopParam, +- a file, a path to a file, or simply Nothing""" +- ++ a file, a path to a file, or simply Nothing""" ++ + if isinstance(finput, self.__class__): + dict.__init__(self) + for key in finput.__dict__: +@@ -989,7 +989,7 @@ class ConfigFile(dict): + return + else: + dict.__init__(self) +- ++ + # Initialize it with all the default value + self.user_set = set() + self.auto_set = set() +@@ -1000,15 +1000,15 @@ class ConfigFile(dict): + self.comments = {} # comment associated to parameters. can be display via help message + # store the valid options for a given parameter. 
+ self.allowed_value = {} +- ++ + self.default_setup() + self.plugin_input(finput) +- ++ + + # if input is define read that input + if isinstance(finput, (file, str, StringIO.StringIO)): + self.read(finput, **opt) +- ++ + + + +@@ -1028,7 +1028,7 @@ class ConfigFile(dict): + base = self.__class__(self) + #base = copy.copy(self) + base.update((key.lower(),value) for key, value in other.items()) +- ++ + return base + + def __radd__(self, other): +@@ -1036,26 +1036,26 @@ class ConfigFile(dict): + new = copy.copy(other) + new.update((key, value) for key, value in self.items()) + return new +- ++ + def __contains__(self, key): + return dict.__contains__(self, key.lower()) + + def __iter__(self): +- ++ + for name in super(ConfigFile, self).__iter__(): + yield self.lower_to_case[name.lower()] +- +- ++ ++ + #iter = super(ConfigFile, self).__iter__() + #misc.sprint(iter) + #return (self.lower_to_case[name] for name in iter) +- ++ + def keys(self): + return [name for name in self] +- ++ + def items(self): + return [(name,self[name]) for name in self] +- ++ + @staticmethod + def warn(text, level, raiseerror=False): + """convenient proxy to raiseerror/print warning""" +@@ -1071,11 +1071,11 @@ class ConfigFile(dict): + log = lambda t: logger.log(level, t) + elif level: + log = level +- ++ + return log(text) + + def post_set(self, name, value, change_userdefine, raiseerror): +- ++ + if value is None: + value = self[name] + +@@ -1087,25 +1087,25 @@ class ConfigFile(dict): + return getattr(self, 'post_set_%s' % name)(value, change_userdefine, raiseerror) + else: + raise +- ++ + def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): + """set the attribute and set correctly the type if the value is a string. 
+ change_userdefine on True if we have to add the parameter in user_set + """ +- ++ + if not len(self): + #Should never happen but when deepcopy/pickle + self.__init__() +- ++ + name = name.strip() +- lower_name = name.lower() +- ++ lower_name = name.lower() ++ + # 0. check if this parameter is a system only one + if change_userdefine and lower_name in self.system_only: + text='%s is a private entry which can not be modify by the user. Keep value at %s' % (name,self[name]) + self.warn(text, 'critical', raiseerror) + return +- ++ + #1. check if the parameter is set to auto -> pass it to special + if lower_name in self: + targettype = type(dict.__getitem__(self, lower_name)) +@@ -1115,22 +1115,22 @@ class ConfigFile(dict): + self.user_set.remove(lower_name) + #keep old value. + self.post_set(lower_name, 'auto', change_userdefine, raiseerror) +- return ++ return + elif lower_name in self.auto_set: + self.auto_set.remove(lower_name) +- ++ + # 2. Find the type of the attribute that we want + if lower_name in self.list_parameter: + targettype = self.list_parameter[lower_name] +- +- +- ++ ++ ++ + if isinstance(value, str): + # split for each comma/space + value = value.strip() + if value.startswith('[') and value.endswith(']'): + value = value[1:-1] +- #do not perform split within a " or ' block ++ #do not perform split within a " or ' block + data = re.split(r"((? bad input + dropped.append(val) +- ++ + if not new_values: + + text= "value '%s' for entry '%s' is not valid. Preserving previous value: '%s'.\n" \ + % (value, name, self[lower_name]) + text += "allowed values are any list composed of the following entries: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) +- return self.warn(text, 'warning', raiseerror) +- elif dropped: ++ return self.warn(text, 'warning', raiseerror) ++ elif dropped: + text = "some value for entry '%s' are not valid. 
Invalid items are: '%s'.\n" \ + % (name, dropped) + text += "value will be set to %s" % new_values +- text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) ++ text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) + self.warn(text, 'warning') + + values = new_values + + # make the assignment +- dict.__setitem__(self, lower_name, values) ++ dict.__setitem__(self, lower_name, values) + if change_userdefine: + self.user_set.add(lower_name) + #check for specific action +- return self.post_set(lower_name, None, change_userdefine, raiseerror) ++ return self.post_set(lower_name, None, change_userdefine, raiseerror) + elif lower_name in self.dict_parameter: +- targettype = self.dict_parameter[lower_name] ++ targettype = self.dict_parameter[lower_name] + full_reset = True #check if we just update the current dict or not +- ++ + if isinstance(value, str): + value = value.strip() + # allowed entry: +@@ -1209,7 +1209,7 @@ class ConfigFile(dict): + # name , value => just add the entry + # name value => just add the entry + # {name1:value1, name2:value2} => full reset +- ++ + # split for each comma/space + if value.startswith('{') and value.endswith('}'): + new_value = {} +@@ -1219,23 +1219,23 @@ class ConfigFile(dict): + x, y = pair.split(':') + x, y = x.strip(), y.strip() + if x.startswith(('"',"'")) and x.endswith(x[0]): +- x = x[1:-1] ++ x = x[1:-1] + new_value[x] = y + value = new_value + elif ',' in value: + x,y = value.split(',') + value = {x.strip():y.strip()} + full_reset = False +- ++ + elif ':' in value: + x,y = value.split(':') + value = {x.strip():y.strip()} +- full_reset = False ++ full_reset = False + else: + x,y = value.split() + value = {x:y} +- full_reset = False +- ++ full_reset = False ++ + if isinstance(value, dict): + for key in value: + value[key] = self.format_variable(value[key], targettype, name=name) +@@ -1248,7 +1248,7 @@ class ConfigFile(dict): + if 
change_userdefine: + self.user_set.add(lower_name) + return self.post_set(lower_name, None, change_userdefine, raiseerror) +- elif name in self: ++ elif name in self: + targettype = type(self[name]) + else: + logger.debug('Trying to add argument %s in %s. ' % (name, self.__class__.__name__) +\ +@@ -1256,22 +1256,22 @@ class ConfigFile(dict): + suggestions = [k for k in self.keys() if k.startswith(name[0].lower())] + if len(suggestions)>0: + logger.debug("Did you mean one of the following: %s"%suggestions) +- self.add_param(lower_name, self.format_variable(UnknownType(value), ++ self.add_param(lower_name, self.format_variable(UnknownType(value), + UnknownType, name)) + self.lower_to_case[lower_name] = name + if change_userdefine: + self.user_set.add(lower_name) + return self.post_set(lower_name, None, change_userdefine, raiseerror) +- ++ + value = self.format_variable(value, targettype, name=name) + #check that the value is allowed: + if lower_name in self.allowed_value and '*' not in self.allowed_value[lower_name]: + valid = False + allowed = self.allowed_value[lower_name] +- ++ + # check if the current value is allowed or not (set valid to True) + if value in allowed: +- valid=True ++ valid=True + elif isinstance(value, str): + value = value.lower().strip() + allowed = [str(v).lower() for v in allowed] +@@ -1279,7 +1279,7 @@ class ConfigFile(dict): + i = allowed.index(value) + value = self.allowed_value[lower_name][i] + valid=True +- ++ + if not valid: + # act if not valid: + text = "value '%s' for entry '%s' is not valid. 
Preserving previous value: '%s'.\n" \ +@@ -1303,7 +1303,7 @@ class ConfigFile(dict): + if __debug__: + if lower_name in self: + raise Exception("Duplicate case for %s in %s" % (name,self.__class__)) +- ++ + dict.__setitem__(self, lower_name, value) + self.lower_to_case[lower_name] = name + if isinstance(value, list): +@@ -1318,12 +1318,12 @@ class ConfigFile(dict): + elif isinstance(value, dict): + allvalues = list(value.values()) + if any([type(allvalues[0]) != type(v) for v in allvalues]): +- raise Exception("All entry should have the same type") +- self.dict_parameter[lower_name] = type(allvalues[0]) ++ raise Exception("All entry should have the same type") ++ self.dict_parameter[lower_name] = type(allvalues[0]) + if '__type__' in value: + del value['__type__'] + dict.__setitem__(self, lower_name, value) +- ++ + if allowed and allowed != ['*']: + self.allowed_value[lower_name] = allowed + if lower_name in self.list_parameter: +@@ -1333,8 +1333,8 @@ class ConfigFile(dict): + assert value in allowed or '*' in allowed + #elif isinstance(value, bool) and allowed != ['*']: + # self.allowed_value[name] = [True, False] +- +- ++ ++ + if system: + self.system_only.add(lower_name) + if comment: +@@ -1342,7 +1342,7 @@ class ConfigFile(dict): + + def do_help(self, name): + """return a minimal help for the parameter""" +- ++ + out = "## Information on parameter %s from class %s\n" % (name, self.__class__.__name__) + if name.lower() in self: + out += "## current value: %s (parameter should be of type %s)\n" % (self[name], type(self[name])) +@@ -1351,7 +1351,7 @@ class ConfigFile(dict): + else: + out += "## Unknown for this class\n" + if name.lower() in self.user_set: +- out += "## This value is considered as being set by the user\n" ++ out += "## This value is considered as being set by the user\n" + else: + out += "## This value is considered as being set by the system\n" + if name.lower() in self.allowed_value: +@@ -1359,17 +1359,17 @@ class ConfigFile(dict): + out += 
"Allowed value are: %s\n" % ','.join([str(p) for p in self.allowed_value[name.lower()]]) + else: + out += "Suggested value are : %s\n " % ','.join([str(p) for p in self.allowed_value[name.lower()] if p!='*']) +- ++ + logger.info(out) + return out + + @staticmethod + def guess_type_from_value(value): + "try to guess the type of the string --do not use eval as it might not be safe" +- ++ + if not isinstance(value, str): + return str(value.__class__.__name__) +- ++ + #use ast.literal_eval to be safe since value is untrusted + # add a timeout to mitigate infinite loop, memory stack attack + with misc.stdchannel_redirected(sys.stdout, os.devnull): +@@ -1388,7 +1388,7 @@ class ConfigFile(dict): + @staticmethod + def format_variable(value, targettype, name="unknown"): + """assign the value to the attribute for the given format""" +- ++ + if isinstance(targettype, str): + if targettype in ['str', 'int', 'float', 'bool']: + targettype = eval(targettype) +@@ -1412,7 +1412,7 @@ class ConfigFile(dict): + (name, type(value), targettype, value)) + else: + raise InvalidCmd("Wrong input type for %s found %s and expecting %s for value %s" %\ +- (name, type(value), targettype, value)) ++ (name, type(value), targettype, value)) + else: + if targettype != UnknownType: + value = value.strip() +@@ -1441,8 +1441,8 @@ class ConfigFile(dict): + value = int(value) + elif value.endswith(('k', 'M')) and value[:-1].isdigit(): + convert = {'k':1000, 'M':1000000} +- value =int(value[:-1]) * convert[value[-1]] +- elif '/' in value or '*' in value: ++ value =int(value[:-1]) * convert[value[-1]] ++ elif '/' in value or '*' in value: + try: + split = re.split('(\*|/)',value) + v = float(split[0]) +@@ -1461,7 +1461,7 @@ class ConfigFile(dict): + try: + value = float(value.replace('d','e')) + except ValueError: +- raise InvalidCmd("%s can not be mapped to an integer" % value) ++ raise InvalidCmd("%s can not be mapped to an integer" % value) + try: + new_value = int(value) + except ValueError: +@@ 
-1471,7 +1471,7 @@ class ConfigFile(dict): + value = new_value + else: + raise InvalidCmd("incorect input: %s need an integer for %s" % (value,name)) +- ++ + elif targettype == float: + if value.endswith(('k', 'M')) and value[:-1].isdigit(): + convert = {'k':1000, 'M':1000000} +@@ -1496,33 +1496,33 @@ class ConfigFile(dict): + value = v + else: + raise InvalidCmd("type %s is not handle by the card" % targettype) +- ++ + return value +- +- ++ ++ + + def __getitem__(self, name): +- ++ + lower_name = name.lower() + if __debug__: + if lower_name not in self: + if lower_name in [key.lower() for key in self] : + raise Exception("Some key are not lower case %s. Invalid use of the class!"\ + % [key for key in self if key.lower() != key]) +- ++ + if lower_name in self.auto_set: + return 'auto' +- ++ + return dict.__getitem__(self, name.lower()) + +- ++ + get = __getitem__ + + def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): + """convenient way to change attribute. + changeifuserset=False means that the value is NOT change is the value is not on default. +- user=True, means that the value will be marked as modified by the user +- (potentially preventing future change to the value) ++ user=True, means that the value will be marked as modified by the user ++ (potentially preventing future change to the value) + """ + + # changeifuserset=False -> we need to check if the user force a value. 
+@@ -1530,8 +1530,8 @@ class ConfigFile(dict): + if name.lower() in self.user_set: + #value modified by the user -> do nothing + return +- self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) +- ++ self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) ++ + + class RivetCard(ConfigFile): + +@@ -1706,7 +1706,7 @@ class RivetCard(ConfigFile): + yexec_dict = {} + yexec_line = exec_line + "yaxis_relvar = " + self['yaxis_relvar'] + exec(yexec_line, locals(), yexec_dict) +- if self['yaxis_label'] == "": ++ if self['yaxis_label'] == "": + self['yaxis_label'] = "yaxis_relvar" + f_relparams.write("{0} = {1}\n".format(self['yaxis_label'], yexec_dict['yaxis_relvar'])) + else: +@@ -1715,11 +1715,11 @@ class RivetCard(ConfigFile): + + class ProcCharacteristic(ConfigFile): + """A class to handle information which are passed from MadGraph to the madevent +- interface.""" +- ++ interface.""" ++ + def default_setup(self): + """initialize the directory to the default value""" +- ++ + self.add_param('loop_induced', False) + self.add_param('has_isr', False) + self.add_param('has_fsr', False) +@@ -1735,16 +1735,16 @@ class ProcCharacteristic(ConfigFile): + self.add_param('pdg_initial1', [0]) + self.add_param('pdg_initial2', [0]) + self.add_param('splitting_types',[], typelist=str) +- self.add_param('perturbation_order', [], typelist=str) +- self.add_param('limitations', [], typelist=str) +- self.add_param('hel_recycling', False) ++ self.add_param('perturbation_order', [], typelist=str) ++ self.add_param('limitations', [], typelist=str) ++ self.add_param('hel_recycling', False) + self.add_param('single_color', True) +- self.add_param('nlo_mixed_expansion', True) ++ self.add_param('nlo_mixed_expansion', True) + + def read(self, finput): +- """Read the input file, this can be a path to a file, ++ """Read the input file, this can be a path to a file, + a file object, a str with the content of the file.""" +- ++ + if isinstance(finput, str): + 
if "\n" in finput: + finput = finput.split('\n') +@@ -1752,49 +1752,49 @@ class ProcCharacteristic(ConfigFile): + finput = open(finput) + else: + raise Exception("No such file %s" % finput) +- ++ + for line in finput: + if '#' in line: + line = line.split('#',1)[0] + if not line: + continue +- ++ + if '=' in line: + key, value = line.split('=',1) + self[key.strip()] = value +- ++ + def write(self, outputpath): + """write the file""" + + template ="# Information about the process #\n" + template +="#########################################\n" +- ++ + fsock = open(outputpath, 'w') + fsock.write(template) +- ++ + for key, value in self.items(): + fsock.write(" %s = %s \n" % (key, value)) +- +- fsock.close() +- ++ ++ fsock.close() ++ + + + + class GridpackCard(ConfigFile): + """an object for the GridpackCard""" +- ++ + def default_setup(self): + """default value for the GridpackCard""" +- ++ + self.add_param("GridRun", True) + self.add_param("gevents", 2500) + self.add_param("gseed", 1) +- self.add_param("ngran", -1) +- ++ self.add_param("ngran", -1) ++ + def read(self, finput): +- """Read the input file, this can be a path to a file, ++ """Read the input file, this can be a path to a file, + a file object, a str with the content of the file.""" +- ++ + if isinstance(finput, str): + if "\n" in finput: + finput = finput.split('\n') +@@ -1802,7 +1802,7 @@ class GridpackCard(ConfigFile): + finput = open(finput) + else: + raise Exception("No such file %s" % finput) +- ++ + for line in finput: + line = line.split('#')[0] + line = line.split('!')[0] +@@ -1812,19 +1812,19 @@ class GridpackCard(ConfigFile): + self[line[1].strip()] = line[0].replace('\'','').strip() + + def write(self, output_file, template=None): +- """Write the run_card in output_file according to template ++ """Write the run_card in output_file according to template + (a path to a valid run_card)""" + + if not template: + if not MADEVENT: +- template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', ++ template = 
pjoin(MG5DIR, 'Template', 'LO', 'Cards', + 'grid_card_default.dat') + else: + template = pjoin(MEDIR, 'Cards', 'grid_card_default.dat') + +- ++ + text = "" +- for line in open(template,'r'): ++ for line in open(template,'r'): + nline = line.split('#')[0] + nline = nline.split('!')[0] + comment = line[len(nline):] +@@ -1832,19 +1832,19 @@ class GridpackCard(ConfigFile): + if len(nline) != 2: + text += line + elif nline[1].strip() in self: +- text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) ++ text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) + else: + logger.info('Adding missing parameter %s to current run_card (with default value)' % nline[1].strip()) +- text += line +- ++ text += line ++ + if isinstance(output_file, str): + fsock = open(output_file,'w') + else: + fsock = output_file +- ++ + fsock.write(text) + fsock.close() +- ++ + class PY8Card(ConfigFile): + """ Implements the Pythia8 card.""" + +@@ -1868,7 +1868,7 @@ class PY8Card(ConfigFile): + + def default_setup(self): + """ Sets up the list of available PY8 parameters.""" +- ++ + # Visible parameters + # ================== + self.add_param("Main:numberOfEvents", -1) +@@ -1877,11 +1877,11 @@ class PY8Card(ConfigFile): + self.add_param("JetMatching:qCut", -1.0, always_write_to_card=False) + self.add_param("JetMatching:doShowerKt",False,always_write_to_card=False) + # -1 means that it is automatically set. +- self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) ++ self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) + # for CKKWL merging + self.add_param("Merging:TMS", -1.0, always_write_to_card=False) + self.add_param("Merging:Process", '', always_write_to_card=False) +- # -1 means that it is automatically set. ++ # -1 means that it is automatically set. 
+ self.add_param("Merging:nJetMax", -1, always_write_to_card=False) + # for both merging, chose whether to also consider different merging + # scale values for the extra weights related to scale and PDF variations. +@@ -1918,10 +1918,10 @@ class PY8Card(ConfigFile): + comment='This allows to turn on/off hadronization alltogether.') + self.add_param("partonlevel:mpi", True, hidden=True, always_write_to_card=False, + comment='This allows to turn on/off MPI alltogether.') +- self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, ++ self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, + always_write_to_card=False, + comment='This parameter is automatically set to True by MG5aMC when doing MLM merging with PY8.') +- ++ + # for MLM merging + self.add_param("JetMatching:merge", False, hidden=True, always_write_to_card=False, + comment='Specifiy if we are merging sample of different multiplicity.') +@@ -1931,9 +1931,9 @@ class PY8Card(ConfigFile): + comment='Value of the merging scale below which one does not even write the HepMC event.') + self.add_param("JetMatching:doVeto", False, hidden=True, always_write_to_card=False, + comment='Do veto externally (e.g. 
in SysCalc).') +- self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) ++ self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) + self.add_param("JetMatching:setMad", False, hidden=True, always_write_to_card=False, +- comment='Specify one must read inputs from the MadGraph banner.') ++ comment='Specify one must read inputs from the MadGraph banner.') + self.add_param("JetMatching:coneRadius", 1.0, hidden=True, always_write_to_card=False) + self.add_param("JetMatching:nQmatch",4,hidden=True, always_write_to_card=False) + # for CKKWL merging (common with UMEPS, UNLOPS) +@@ -1946,7 +1946,7 @@ class PY8Card(ConfigFile): + self.add_param("Merging:applyVeto", False, hidden=True, always_write_to_card=False, + comment='Do veto externally (e.g. in SysCalc).') + self.add_param("Merging:includeWeightInXsection", True, hidden=True, always_write_to_card=False, +- comment='If turned off, then the option belows forces PY8 to keep the original weight.') ++ comment='If turned off, then the option belows forces PY8 to keep the original weight.') + self.add_param("Merging:muRen", 91.188, hidden=True, always_write_to_card=False, + comment='Set renormalization scales of the 2->2 process.') + self.add_param("Merging:muFacInME", 91.188, hidden=True, always_write_to_card=False, +@@ -1958,7 +1958,7 @@ class PY8Card(ConfigFile): + # To be added in subruns for CKKWL + self.add_param("Merging:mayRemoveDecayProducts", False, hidden=True, always_write_to_card=False) + self.add_param("Merging:doKTMerging", False, hidden=True, always_write_to_card=False) +- self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) ++ self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) + self.add_param("Merging:doPTLundMerging", False, hidden=True, always_write_to_card=False) + + # Special Pythia8 paremeters useful to simplify the shower. 
+@@ -1975,33 +1975,33 @@ class PY8Card(ConfigFile): + # Add parameters controlling the subruns execution flow. + # These parameters should not be part of PY8SubRun daughter. + self.add_default_subruns('parameters') +- ++ + def __init__(self, *args, **opts): +- # Parameters which are not printed in the card unless they are +- # 'user_set' or 'system_set' or part of the ++ # Parameters which are not printed in the card unless they are ++ # 'user_set' or 'system_set' or part of the + # self.hidden_params_to_always_print set. + self.hidden_param = [] + self.hidden_params_to_always_write = set() + self.visible_params_to_always_write = set() + # List of parameters that should never be written out given the current context. + self.params_to_never_write = set() +- ++ + # Parameters which have been set by the system (i.e. MG5 itself during + # the regular course of the shower interface) + self.system_set = set() +- ++ + # Add attributes controlling the subruns execution flow. + # These attributes should not be part of PY8SubRun daughter. + self.add_default_subruns('attributes') +- +- # Parameters which have been set by the ++ ++ # Parameters which have been set by the + super(PY8Card, self).__init__(*args, **opts) + + + +- def add_param(self, name, value, hidden=False, always_write_to_card=True, ++ def add_param(self, name, value, hidden=False, always_write_to_card=True, + comment=None): +- """ add a parameter to the card. value is the default value and ++ """ add a parameter to the card. value is the default value and + defines the type (int/float/bool/str) of the input. + The option 'hidden' decides whether the parameter should be visible to the user. 
+ The option 'always_write_to_card' decides whether it should +@@ -2017,7 +2017,7 @@ class PY8Card(ConfigFile): + self.hidden_params_to_always_write.add(name) + else: + if always_write_to_card: +- self.visible_params_to_always_write.add(name) ++ self.visible_params_to_always_write.add(name) + if not comment is None: + if not isinstance(comment, str): + raise MadGraph5Error("Option 'comment' must be a string, not"+\ +@@ -2036,7 +2036,7 @@ class PY8Card(ConfigFile): + self.subruns[py8_subrun['Main:subrun']] = py8_subrun + if not 'LHEFInputs:nSubruns' in self.user_set: + self['LHEFInputs:nSubruns'] = max(self.subruns.keys()) +- ++ + def userSet(self, name, value, **opts): + """Set an attribute of this card, following a user_request""" + self.__setitem__(name, value, change_userdefine=True, **opts) +@@ -2044,10 +2044,10 @@ class PY8Card(ConfigFile): + self.system_set.remove(name.lower()) + + def vetoParamWriteOut(self, name): +- """ Forbid the writeout of a specific parameter of this card when the ++ """ Forbid the writeout of a specific parameter of this card when the + "write" function will be invoked.""" + self.params_to_never_write.add(name.lower()) +- ++ + def systemSet(self, name, value, **opts): + """Set an attribute of this card, independently of a specific user + request and only if not already user_set.""" +@@ -2058,7 +2058,7 @@ class PY8Card(ConfigFile): + if force or name.lower() not in self.user_set: + self.__setitem__(name, value, change_userdefine=False, **opts) + self.system_set.add(name.lower()) +- ++ + def MadGraphSet(self, name, value, **opts): + """ Sets a card attribute, but only if it is absent or not already + user_set.""" +@@ -2068,18 +2068,18 @@ class PY8Card(ConfigFile): + force = False + if name.lower() not in self or (force or name.lower() not in self.user_set): + self.__setitem__(name, value, change_userdefine=False, **opts) +- self.system_set.add(name.lower()) +- ++ self.system_set.add(name.lower()) ++ + def defaultSet(self, name, value, 
**opts): + self.__setitem__(name, value, change_userdefine=False, **opts) +- ++ + @staticmethod + def pythia8_formatting(value, formatv=None): + """format the variable into pythia8 card convention. + The type is detected by default""" + if not formatv: + if isinstance(value,UnknownType): +- formatv = 'unknown' ++ formatv = 'unknown' + elif isinstance(value, bool): + formatv = 'bool' + elif isinstance(value, int): +@@ -2095,7 +2095,7 @@ class PY8Card(ConfigFile): + formatv = 'str' + else: + assert formatv +- ++ + if formatv == 'unknown': + # No formatting then + return str(value) +@@ -2116,7 +2116,7 @@ class PY8Card(ConfigFile): + elif formatv == 'float': + return '%.10e' % float(value) + elif formatv == 'shortfloat': +- return '%.3f' % float(value) ++ return '%.3f' % float(value) + elif formatv == 'str': + return "%s" % value + elif formatv == 'list': +@@ -2124,9 +2124,9 @@ class PY8Card(ConfigFile): + return ','.join([PY8Card.pythia8_formatting(arg, 'shortfloat') for arg in value]) + else: + return ','.join([PY8Card.pythia8_formatting(arg) for arg in value]) +- + +- def write(self, output_file, template, read_subrun=False, ++ ++ def write(self, output_file, template, read_subrun=False, + print_only_visible=False, direct_pythia_input=False, add_missing=True): + """ Write the card to output_file using a specific template. 
+ > 'print_only_visible' specifies whether or not the hidden parameters +@@ -2143,28 +2143,28 @@ class PY8Card(ConfigFile): + or p.lower() in self.user_set] + # Filter against list of parameters vetoed for write-out + visible_param = [p for p in visible_param if p.lower() not in self.params_to_never_write] +- ++ + # Now the hidden param which must be written out + if print_only_visible: + hidden_output_param = [] + else: + hidden_output_param = [p for p in self if p.lower() in self.hidden_param and + not p.lower() in self.user_set and +- (p.lower() in self.hidden_params_to_always_write or ++ (p.lower() in self.hidden_params_to_always_write or + p.lower() in self.system_set)] + # Filter against list of parameters vetoed for write-out + hidden_output_param = [p for p in hidden_output_param if p not in self.params_to_never_write] +- ++ + if print_only_visible: + subruns = [] + else: + if not read_subrun: + subruns = sorted(self.subruns.keys()) +- ++ + # Store the subruns to write in a dictionary, with its ID in key + # and the corresponding stringstream in value + subruns_to_write = {} +- ++ + # Sort these parameters nicely so as to put together parameters + # belonging to the same group (i.e. prefix before the ':' in their name). + def group_params(params): +@@ -2191,7 +2191,7 @@ class PY8Card(ConfigFile): + # First dump in a temporary_output (might need to have a second pass + # at the very end to update 'LHEFInputs:nSubruns') + output = StringIO.StringIO() +- ++ + # Setup template from which to read + if isinstance(template, str): + if os.path.isfile(template): +@@ -2199,7 +2199,7 @@ class PY8Card(ConfigFile): + elif '\n' in template: + tmpl = StringIO.StringIO(template) + else: +- raise Exception("File input '%s' not found." % file_input) ++ raise Exception("File input '%s' not found." 
% file_input) + elif template is None: + # Then use a dummy empty StringIO, hence skipping the reading + tmpl = StringIO.StringIO() +@@ -2257,8 +2257,8 @@ class PY8Card(ConfigFile): + # Remove all of its variables (so that nothing is overwritten) + DummySubrun.clear() + DummySubrun.write(subruns_to_write[int(value)], +- tmpl, read_subrun=True, +- print_only_visible=print_only_visible, ++ tmpl, read_subrun=True, ++ print_only_visible=print_only_visible, + direct_pythia_input=direct_pythia_input) + + logger.info('Adding new unknown subrun with ID %d.'% +@@ -2267,7 +2267,7 @@ class PY8Card(ConfigFile): + last_pos = tmpl.tell() + line = tmpl.readline() + continue +- ++ + # Change parameters which must be output + if param in visible_param: + new_value = PY8Card.pythia8_formatting(self[param]) +@@ -2286,10 +2286,10 @@ class PY8Card(ConfigFile): + last_pos = tmpl.tell() + line = tmpl.readline() + continue +- +- # Substitute the value. ++ ++ # Substitute the value. + # If it is directly the pytia input, then don't write the param if it +- # is not in the list of visible_params_to_always_write and was ++ # is not in the list of visible_params_to_always_write and was + # not user_set or system_set + if ((not direct_pythia_input) or + (param.lower() in self.visible_params_to_always_write) or +@@ -2304,16 +2304,16 @@ class PY8Card(ConfigFile): + + output.write(template%(param_entry, + value_entry.replace(value,new_value))) +- ++ + # Proceed to next line + last_pos = tmpl.tell() + line = tmpl.readline() +- ++ + # If add_missing is False, make sure to empty the list of remaining parameters + if not add_missing: + visible_param = [] + hidden_output_param = [] +- ++ + # Now output the missing parameters. Warn about visible ones. 
+ if len(visible_param)>0 and not template is None: + output.write( +@@ -2343,12 +2343,12 @@ class PY8Card(ConfigFile): + """%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else '')) + for param in hidden_output_param: + if param.lower() in self.comments: +- comment = '\n'.join('! %s'%c for c in ++ comment = '\n'.join('! %s'%c for c in + self.comments[param.lower()].split('\n')) + output.write(comment+'\n') + output.write('%s=%s\n'%(param,PY8Card.pythia8_formatting(self[param]))) +- +- # Don't close the file if we were reading a subrun, but simply write ++ ++ # Don't close the file if we were reading a subrun, but simply write + # output and return now + if read_subrun: + output_file.write(output.getvalue()) +@@ -2382,12 +2382,12 @@ class PY8Card(ConfigFile): + out.close() + else: + output_file.write(output.getvalue()) +- ++ + def read(self, file_input, read_subrun=False, setter='default'): +- """Read the input file, this can be a path to a file, ++ """Read the input file, this can be a path to a file, + a file object, a str with the content of the file. +- The setter option choses the authority that sets potential +- modified/new parameters. It can be either: ++ The setter option choses the authority that sets potential ++ modified/new parameters. It can be either: + 'default' or 'user' or 'system'""" + if isinstance(file_input, str): + if "\n" in file_input: +@@ -2423,8 +2423,8 @@ class PY8Card(ConfigFile): + raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%\ + line) + if '!' in value: +- value,_ = value.split('!',1) +- ++ value,_ = value.split('!',1) ++ + # Read a subrun if detected: + if param=='Main:subrun': + if read_subrun: +@@ -2451,7 +2451,7 @@ class PY8Card(ConfigFile): + last_pos = finput.tell() + line = finput.readline() + continue +- ++ + # Read parameter. The case of a parameter not defined in the card is + # handled directly in ConfigFile. 
+ +@@ -2478,7 +2478,7 @@ class PY8SubRun(PY8Card): + + def __init__(self, *args, **opts): + """ Initialize a subrun """ +- ++ + # Force user to set it manually. + subrunID = -1 + if 'subrun_id' in opts: +@@ -2489,7 +2489,7 @@ class PY8SubRun(PY8Card): + + def default_setup(self): + """Sets up the list of available PY8SubRun parameters.""" +- ++ + # Add all default PY8Card parameters + super(PY8SubRun, self).default_setup() + # Make sure they are all hidden +@@ -2501,33 +2501,33 @@ class PY8SubRun(PY8Card): + self.add_param("Main:subrun", -1) + self.add_param("Beams:LHEF", "events.lhe.gz") + +- ++ + class RunBlock(object): + """ Class for a series of parameter in the run_card that can be either + visible or hidden. +- name: allow to set in the default run_card $name to set where that ++ name: allow to set in the default run_card $name to set where that + block need to be inserted + template_on: information to include is block is active + template_off: information to include is block is not active + on_fields/off_fields: paramater associated to the block +- can be specify but are otherwise automatically but ++ can be specify but are otherwise automatically but + otherwise determined from the template. 
+- ++ + function: + status(self,run_card) -> return which template need to be used + check_validity(self, runcard) -> sanity check +- create_default_for_process(self, run_card, proc_characteristic, +- history, proc_def) ++ create_default_for_process(self, run_card, proc_characteristic, ++ history, proc_def) + post_set_XXXX(card, value, change_userdefine, raiseerror) + -> fct called when XXXXX is set + post_set(card, value, change_userdefine, raiseerror, **opt) + -> fct called when a parameter is changed +- -> no access to parameter name ++ -> no access to parameter name + -> not called if post_set_XXXX is defined + """ +- + +- ++ ++ + def __init__(self, name, template_on, template_off, on_fields=False, off_fields=False): + + self.name = name +@@ -2550,7 +2550,7 @@ class RunBlock(object): + def find_fields_from_template(template): + """ return the list of fields from a template. checking line like + %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ +- ++ + return re.findall(r"^\s*%\((.*)\)s\s*=\s*\1", template, re.M) + + def get_template(self, card): +@@ -2565,7 +2565,7 @@ class RunBlock(object): + if self.status(card): + return self.template_off + else: +- return self.template_on ++ return self.template_on + + def status(self, card): + """return False if template_off to be used, True if template_on to be used""" +@@ -2594,20 +2594,20 @@ class RunBlock(object): + written.add(name) + if name in to_write: + to_write.remove(name) +- ++ + def check_validity(self, runcard): + """run self consistency check here --avoid to use runcard[''] = xxx here since it can trigger post_set function""" + return + + def create_default_for_process(self, run_card, proc_characteristic, history, proc_def): +- return ++ return + + # @staticmethod + # def post_set(card, value, change_userdefine, raiseerror, **opt): + # """default action to run when a parameter of the block is defined. + # Here we do not know which parameter is modified. if this is needed. 
+ # then one need to define post_set_XXXXX(card, value, change_userdefine, raiseerror) +-# and then only that function is used ++# and then only that function is used + # """ + # + # if 'pdlabel' in card.user_set: +@@ -2621,7 +2621,7 @@ class RunCard(ConfigFile): + + blocks = [] + parameter_in_block = {} +- allowed_lep_densities = {} ++ allowed_lep_densities = {} + default_include_file = 'run_card.inc' + default_autodef_file = 'run.inc' + donewarning = [] +@@ -2637,7 +2637,7 @@ class RunCard(ConfigFile): + curr_dir = os.path.dirname(os.path.dirname(finput.name)) + elif isinstance(finput, str): + curr_dir = os.path.dirname(os.path.dirname(finput)) +- ++ + if curr_dir: + if os.path.exists(pjoin(curr_dir, 'bin', 'internal', 'plugin_run_card')): + # expected format {} passing everything as optional argument +@@ -2646,7 +2646,7 @@ class RunCard(ConfigFile): + continue + opts = dict(eval(line)) + self.add_param(**opts) +- ++ + @classmethod + def fill_post_set_from_blocks(cls): + """set the post_set function for any parameter defined in a run_block""" +@@ -2659,8 +2659,8 @@ class RunCard(ConfigFile): + elif hasattr(block, 'post_set'): + setattr(cls, 'post_set_%s' % parameter, block.post_set) + cls.parameter_in_block[parameter] = block +- +- ++ ++ + def __new__(cls, finput=None, **opt): + + cls.fill_post_set_from_blocks() +@@ -2718,9 +2718,9 @@ class RunCard(ConfigFile): + return super(RunCard, cls).__new__(cls, finput, **opt) + + def __init__(self, *args, **opts): +- ++ + # The following parameter are updated in the defaultsetup stage. +- ++ + #parameter for which no warning should be raised if not define + self.hidden_param = [] + # in which include file the parameer should be written +@@ -2739,11 +2739,11 @@ class RunCard(ConfigFile): + self.cuts_parameter = {} + # parameter added where legacy requires an older value. 
+ self.system_default = {} +- ++ + self.display_block = [] # set some block to be displayed + self.fct_mod = {} # {param: (fct_pointer, *argument, **opts)} + +- self.cut_class = {} ++ self.cut_class = {} + self.warned=False + + +@@ -2776,11 +2776,11 @@ class RunCard(ConfigFile): + else: + cls.allowed_lep_densities[identity].append(name) + +- def add_param(self, name, value, fortran_name=None, include=True, ++ def add_param(self, name, value, fortran_name=None, include=True, + hidden=False, legacy=False, cut=False, system=False, sys_default=None, + autodef=False, fct_mod=None, + **opts): +- """ add a parameter to the card. value is the default value and ++ """ add a parameter to the card. value is the default value and + defines the type (int/float/bool/str) of the input. + fortran_name: defines what is the associate name in the f77 code + include: defines if we have to put the value in the include file +@@ -2795,7 +2795,7 @@ class RunCard(ConfigFile): + fct_mod: defines a function to run if the parameter is modify in the include file + options of **opts: + - allowed: list of valid options. '*' means anything else should be allowed. +- empty list means anything possible as well. ++ empty list means anything possible as well. 
+ - comment: add comment for writing/help + - typelist: type of the list if default is empty + """ +@@ -2823,9 +2823,9 @@ class RunCard(ConfigFile): + self.fct_mod[name] = fct_mod + + def read(self, finput, consistency=True, unknown_warning=True, **opt): +- """Read the input file, this can be a path to a file, ++ """Read the input file, this can be a path to a file, + a file object, a str with the content of the file.""" +- ++ + if isinstance(finput, str): + if "\n" in finput: + finput = finput.split('\n') +@@ -2836,7 +2836,7 @@ class RunCard(ConfigFile): + finput = open(finput) + else: + raise Exception("No such file %s" % finput) +- ++ + for line in finput: + line = line.split('#')[0] + line = line.split('!')[0] +@@ -2864,8 +2864,8 @@ class RunCard(ConfigFile): + This is based on the guess_entry_fromname for the various syntax providing input. + This then call add_param accordingly. + +- This function does not returns anything. +- """ ++ This function does not returns anything. ++ """ + + if name == "dsqrt_q2fact1" and not self.LO: + raise InvalidRunCard("Looks like you passed a LO run_card for a NLO run. Please correct") +@@ -2903,7 +2903,7 @@ class RunCard(ConfigFile): + " The type was assigned to %s. 
\n"+\ + " The definition of that variable will %sbe automatically added to fortran file %s\n"+\ + " The value of that variable will %sbe passed to the fortran code via fortran file %s",\ +- name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, ++ name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, + "" if opts.get('autodef', False) else "not", "" if opts.get('autodef', False) in [True,False] else opts.get('autodef'), + "" if opts.get('include', True) else "not", "" if opts.get('include', True) in [True,False] else opts.get('include')) + RunCard.donewarning.append(name) +@@ -2923,19 +2923,19 @@ class RunCard(ConfigFile): + return False + elif line.strip().startswith('%'): + parameter = line[line.find('(')+1:line.find(')')] +- ++ + try: + cond = self.cuts_parameter[parameter] + except KeyError: + return True +- +- ++ ++ + if template_options.get(cond, default) or cond is True: + return True + else: +- return False ++ return False + else: +- return True ++ return True + + + def reset_simd(self, old_value, new_value, name, *args, **opts): +@@ -2946,28 +2946,28 @@ class RunCard(ConfigFile): + raise Exception('pass make clean for ', dir) + + def make_Ptouch(self,old_value, new_value, name, reset): +- raise Exception('pass Ptouch for ', reset) +- ++ raise Exception('pass Ptouch for ', reset) ++ + def write(self, output_file, template=None, python_template=False, + write_hidden=False, template_options=None, **opt): +- """Write the run_card in output_file according to template ++ """Write the run_card in output_file according to template + (a path to a valid run_card)""" + +- to_write = set(self.user_set) ++ to_write = set(self.user_set) + written = set() + if not template: + raise Exception + if not template_options: + template_options = collections.defaultdict(str) +- ++ + if python_template: + text = open(template,'r').read() +- text = text.split('\n') ++ text = text.split('\n') + # remove if 
templating +- text = [l if not l.startswith('#IF') else l[l.find(')# ')+2:] ++ text = [l if not l.startswith('#IF') else l[l.find(')# ')+2:] + for l in text if self.valid_line(l, template_options)] + text ='\n'.join(text) +- ++ + if python_template and not to_write: + import string + if self.blocks: +@@ -2981,14 +2981,14 @@ class RunCard(ConfigFile): + if not self.list_parameter: + text = text % self + else: +- data = dict((key.lower(),value) for key, value in self.items()) ++ data = dict((key.lower(),value) for key, value in self.items()) + for name in self.list_parameter: + if self.list_parameter[name] != str: + data[name] = ', '.join(str(v) for v in data[name]) + else: + data[name] = "['%s']" % "', '".join(str(v) for v in data[name]) + text = text % data +- else: ++ else: + text = "" + for line in open(template,'r'): + nline = line.split('#')[0] +@@ -3005,11 +3005,11 @@ class RunCard(ConfigFile): + this_group = this_group[0] + text += this_group.get_template(self) % self + this_group.manage_parameters(self, written, to_write) +- ++ + elif len(nline) != 2: + text += line + elif nline[1].strip() in self: +- ++ + name = nline[1].strip().lower() + value = self[name] + if name in self.list_parameter: +@@ -3026,15 +3026,15 @@ class RunCard(ConfigFile): + else: + endline = '' + text += ' %s\t= %s %s%s' % (value, name, comment, endline) +- written.add(name) ++ written.add(name) + + if name in to_write: + to_write.remove(name) + else: + logger.info('Adding missing parameter %s to current %s (with default value)', + (name, self.filename)) +- written.add(name) +- text += line ++ written.add(name) ++ text += line + + for b in self.blocks: + if b.status(self): +@@ -3057,7 +3057,7 @@ class RunCard(ConfigFile): + else: + #partial writting -> add only what is needed + to_add = [] +- for line in b.get_template(self).split('\n'): ++ for line in b.get_template(self).split('\n'): + nline = line.split('#')[0] + nline = nline.split('!')[0] + nline = nline.split('=') +@@ -3072,8 
+3072,8 @@ class RunCard(ConfigFile): + continue #already include before + else: + to_add.append(line % {nline[1].strip():value, name:value}) +- written.add(name) +- ++ written.add(name) ++ + if name in to_write: + to_write.remove(name) + else: +@@ -3095,13 +3095,13 @@ class RunCard(ConfigFile): + text += '\n'.join(to_add) + + if to_write or write_hidden: +- text+="""#********************************************************************* ++ text+="""#********************************************************************* + # Additional hidden parameters + #********************************************************************* +-""" ++""" + if write_hidden: + # +- # do not write hidden parameter not hidden for this template ++ # do not write hidden parameter not hidden for this template + # + if python_template: + written = written.union(set(re.findall('\%\((\w*)\)s', open(template,'r').read(), re.M))) +@@ -3129,7 +3129,7 @@ class RunCard(ConfigFile): + if inc file does not exist we will return the current value (i.e. set has no change) + """ + +- #remember that ++ #remember that + # default_include_file is a class variable + # self.includepath is on the form include_path : [list of param ] + out = {} +@@ -3165,7 +3165,7 @@ class RunCard(ConfigFile): + + with open(pjoin(output_dir,path), 'r') as fsock: + text = fsock.read() +- ++ + for name in list_of_params: + misc.sprint(name, name in self.fortran_name) + misc.sprint(self.fortran_name[name] if name in self.fortran_name[name] else name) +@@ -3191,11 +3191,11 @@ class RunCard(ConfigFile): + misc.sprint(self.fortran_name) + misc.sprint(text) + raise Exception +- return out ++ return out + + + def get_default(self, name, default=None, log_level=None): +- """return self[name] if exist otherwise default. log control if we ++ """return self[name] if exist otherwise default. 
log control if we + put a warning or not if we use the default value""" + + lower_name = name.lower() +@@ -3216,13 +3216,13 @@ class RunCard(ConfigFile): + log_level = 20 + if not default: + default = dict.__getitem__(self, name.lower()) +- ++ + logger.log(log_level, '%s missed argument %s. Takes default: %s' + % (self.filename, name, default)) + self[name] = default + return default + else: +- return self[name] ++ return self[name] + + def mod_inc_pdlabel(self, value): + """flag pdlabel has 'dressed' if one of the special lepton PDF with beamstralung. +@@ -3237,16 +3237,16 @@ class RunCard(ConfigFile): + filelist is a list of input files (given by the user) + containing a series of function to be placed in replacement of standard + (typically dummy) functions of the code. +- This use LO/NLO class attribute that defines which function name need to +- be placed in which file. ++ This use LO/NLO class attribute that defines which function name need to ++ be placed in which file. + + First time this is used, a backup of the original file is done in order to +- recover if the user remove some of those files. ++ recover if the user remove some of those files. + + The function present in the file are determined automatically via regular expression. + and only that function is replaced in the associated file. 
+ +- function in the filelist starting with user_ will also be include within the ++ function in the filelist starting with user_ will also be include within the + dummy_fct.f file + """ + +@@ -3269,7 +3269,7 @@ class RunCard(ConfigFile): + fsock = file_writers.FortranWriter(tmp,'w') + function_text = fsock.remove_routine(text, fct) + fsock.close() +- test = open(tmp,'r').read() ++ test = open(tmp,'r').read() + if fct not in self.dummy_fct_file: + if fct.startswith('user_'): + self.dummy_fct_file[fct] = self.dummy_fct_file['user_'] +@@ -3315,22 +3315,22 @@ class RunCard(ConfigFile): + - vartype: type of the variable + - name: name of the variable (stripped from metadata) + - options: additional options for the add_param +- rules: +- - if name starts with str_, int_, float_, bool_, list_, dict_ then ++ rules: ++ - if name starts with str_, int_, float_, bool_, list_, dict_ then + - vartype is set accordingly + - name is strip accordingly + - otherwise guessed from value (which is string) + - if name contains min/max + - vartype is set to float + - options has an added {'cut':True} +- - suffixes like ++ - suffixes like + - will be removed from named + - will be added in options (for add_param) as {'cut':True} + see add_param documentation for the list of supported options + - if include is on False set autodef to False (i.e. 
enforce it False for future change) + + """ +- # local function ++ # local function + def update_typelist(value, name, opts): + """convert a string to a list and update opts to keep track of the type """ + value = value.strip() +@@ -3358,7 +3358,7 @@ class RunCard(ConfigFile): + opts[key] = val + name = name.replace("<%s=%s>" %(key,val), '') + +- # get vartype ++ # get vartype + # first check that name does not force it + supported_type = ["str", "float", "int", "bool", "list", "dict"] + if "_" in name and name.split("_")[0].lower() in supported_type: +@@ -3406,13 +3406,13 @@ class RunCard(ConfigFile): + value = str(value).lower() + else: + assert formatv +- ++ + if formatv == 'bool': + if str(value) in ['1','T','.true.','True']: + return '.true.' + else: + return '.false.' +- ++ + elif formatv == 'int': + try: + return str(int(value)) +@@ -3422,12 +3422,12 @@ class RunCard(ConfigFile): + return str(int(fl)) + else: + raise +- ++ + elif formatv == 'float': + if isinstance(value, str): + value = value.replace('d','e') + return ('%.10e' % float(value)).replace('e','d') +- ++ + elif formatv == 'str': + # Check if it is a list + if value.strip().startswith('[') and value.strip().endswith(']'): +@@ -3437,20 +3437,20 @@ class RunCard(ConfigFile): + enumerate(elements)] + else: + return "'%s'" % value +- + +- ++ ++ + def check_validity(self, log_level=30): + """check that parameter missing in the card are set to the expected value""" + + for name, value in self.system_default.items(): + self.set(name, value, changeifuserset=False) +- ++ + + for name in self.includepath[False]: + to_bypass = self.hidden_param + list(self.legacy_parameter.keys()) + if name not in to_bypass: +- self.get_default(name, log_level=log_level) ++ self.get_default(name, log_level=log_level) + + for name in self.legacy_parameter: + if self[name] != self.legacy_parameter[name]: +@@ -3458,28 +3458,28 @@ class RunCard(ConfigFile): + + for block in self.blocks: + block.check_validity(self) +- ++ + + + 
def update_system_parameter_for_include(self): +- """update hidden system only parameter for the correct writtin in the ++ """update hidden system only parameter for the correct writtin in the + include""" + return + +- ++ + + def write_include_file(self, output_dir, output_file=None): + """Write the various include file in output_dir. + The entry True of self.includepath will be written in run_card.inc + The entry False will not be written anywhere + output_file allows testing by providing stream. +- This also call the function to add variable definition for the +- variable with autodef=True (handle by write_autodef function) ++ This also call the function to add variable definition for the ++ variable with autodef=True (handle by write_autodef function) + """ +- ++ + # ensure that all parameter are coherent and fix those if needed + self.check_validity() +- ++ + #ensusre that system only parameter are correctly set + self.update_system_parameter_for_include() + +@@ -3490,10 +3490,10 @@ class RunCard(ConfigFile): + self.write_autodef(output_dir, output_file=None) + # check/fix status of customised functions + self.edit_dummy_fct_from_file(self["custom_fcts"], os.path.dirname(output_dir)) +- ++ + for incname in self.includepath: + self.write_one_include_file(output_dir, incname, output_file) +- ++ + for name,value in value_in_old_include.items(): + if value != self[name]: + self.fct_mod[name][0](value, self[name], name, *self.fct_mod[name][1],**self.fct_mod[name][2]) +@@ -3515,13 +3515,13 @@ class RunCard(ConfigFile): + fsock = file_writers.FortranWriter(pjoin(output_dir,pathinc+'.tmp')) + + +- for key in self.includepath[incname]: ++ for key in self.includepath[incname]: + #define the fortran name + if key in self.fortran_name: + fortran_name = self.fortran_name[key] + else: + fortran_name = key +- ++ + if incname in self.include_as_parameter: + fsock.writelines('INTEGER %s\n' % fortran_name) + #get the value with warning if the user didn't set it +@@ -3534,7 
+3534,7 @@ class RunCard(ConfigFile): + # in case of a list, add the length of the list as 0th + # element in fortran. Only in case of integer or float + # list (not for bool nor string) +- targettype = self.list_parameter[key] ++ targettype = self.list_parameter[key] + if targettype is bool: + pass + elif targettype is int: +@@ -3550,7 +3550,7 @@ class RunCard(ConfigFile): + elif isinstance(value, dict): + for fortran_name, onevalue in value.items(): + line = '%s = %s \n' % (fortran_name, self.f77_formatting(onevalue)) +- fsock.writelines(line) ++ fsock.writelines(line) + elif isinstance(incname,str) and 'compile' in incname: + if incname in self.include_as_parameter: + line = 'PARAMETER (%s=%s)' %( fortran_name, value) +@@ -3585,7 +3585,7 @@ class RunCard(ConfigFile): + filetocheck = dict(self.definition_path) + if True not in self.definition_path: + filetocheck[True] = [] +- ++ + + for incname in filetocheck: + if incname is True: +@@ -3598,7 +3598,7 @@ class RunCard(ConfigFile): + if output_file: + fsock = output_file + input = fsock.getvalue() +- ++ + else: + input = open(pjoin(output_dir,pathinc),'r').read() + # do not define fsock here since we might not need to overwrite it +@@ -3608,7 +3608,7 @@ class RunCard(ConfigFile): + previous = re.findall(re_pat, input, re.M) + # now check which one needed to be added (and remove those identicaly defined) + to_add = [] +- for key in filetocheck[incname]: ++ for key in filetocheck[incname]: + curr_type = self[key].__class__.__name__ + length = "" + if curr_type in [list, "list"]: +@@ -3640,10 +3640,10 @@ class RunCard(ConfigFile): + fsock.truncate(0) + fsock.seek(0) + +- # remove outdated lines ++ # remove outdated lines + lines = input.split('\n') + if previous: +- out = [line for line in lines if not re.search(re_pat, line, re.M) or ++ out = [line for line in lines if not re.search(re_pat, line, re.M) or + re.search(re_pat, line, re.M).groups() not in previous] + else: + out = lines +@@ -3662,7 +3662,7 @@ class 
RunCard(ConfigFile): + stop = out.index('C STOP USER COMMON BLOCK') + out = out[:start]+ out[stop+1:] + #add new common-block +- if self.definition_path[incname]: ++ if self.definition_path[incname]: + out.append("C START USER COMMON BLOCK") + if isinstance(pathinc , str): + filename = os.path.basename(pathinc).split('.',1)[0] +@@ -3675,10 +3675,10 @@ class RunCard(ConfigFile): + filename = filename.upper() + out.append(" COMMON/USER_CUSTOM_%s/%s" %(filename,','.join( self.definition_path[incname]))) + out.append('C STOP USER COMMON BLOCK') +- ++ + if not output_file: + fsock.writelines(out) +- fsock.close() ++ fsock.close() + else: + # for iotest + out = ["%s\n" %l for l in out] +@@ -3702,7 +3702,7 @@ class RunCard(ConfigFile): + def get_banner_init_information(self): + """return a dictionary with the information needed to write + the first line of the block of the lhe file.""" +- ++ + output = {} + output["idbmup1"] = self.get_idbmup(self['lpp1']) + output["idbmup2"] = self.get_idbmup(self['lpp2']) +@@ -3713,7 +3713,7 @@ class RunCard(ConfigFile): + output["pdfsup1"] = self.get_pdf_id(self["pdlabel"]) + output["pdfsup2"] = self.get_pdf_id(self["pdlabel"]) + return output +- ++ + def get_pdf_id(self, pdf): + if pdf == "lhapdf": + lhaid = self["lhaid"] +@@ -3721,19 +3721,19 @@ class RunCard(ConfigFile): + return lhaid[0] + else: + return lhaid +- else: ++ else: + try: + return {'none': 0, 'iww': 0, 'eva':0, 'edff':0, 'chff':0, + 'cteq6_m':10000,'cteq6_l':10041,'cteq6l1':10042, + 'nn23lo':246800,'nn23lo1':247000,'nn23nlo':244800 +- }[pdf] ++ }[pdf] + except: +- return 0 +- ++ return 0 ++ + def get_lhapdf_id(self): + return self.get_pdf_id(self['pdlabel']) + +- def remove_all_cut(self): ++ def remove_all_cut(self): + """remove all the cut""" + + for name in self.cuts_parameter: +@@ -3749,7 +3749,7 @@ class RunCard(ConfigFile): + elif 'eta' in name: + self[name] = -1 + else: +- self[name] = 0 ++ self[name] = 0 + + 
################################################################################################ + ### Define various template subpart for the LO Run_card +@@ -3767,11 +3767,11 @@ template_on = \ + %(nb_proton1)s = nb_proton1 # number of proton for the first beam + %(nb_neutron1)s = nb_neutron1 # number of neutron for the first beam + %(mass_ion1)s = mass_ion1 # mass of the heavy ion (first beam) +-# Note that seting differently the two beams only work if you use ++# Note that seting differently the two beams only work if you use + # group_subprocess=False when generating your matrix-element + %(nb_proton2)s = nb_proton2 # number of proton for the second beam + %(nb_neutron2)s = nb_neutron2 # number of neutron for the second beam +- %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) ++ %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) + """ + template_off = "# To see heavy ion options: type \"update ion_pdf\"" + +@@ -3834,11 +3834,11 @@ ecut_block = RunBlock('ecut', template_on=template_on, template_off=template_off + # Frame for polarization ------------------------------------------------------------------------------------ + template_on = \ + """#********************************************************************* +-# Frame where to evaluate the matrix-element (not the cut!) for polarization ++# Frame where to evaluate the matrix-element (not the cut!) for polarization + #********************************************************************* + %(me_frame)s = me_frame ! list of particles to sum-up to define the rest-frame + ! in which to evaluate the matrix-element +- ! [1,2] means the partonic center of mass ++ ! 
[1,2] means the partonic center of mass + """ + template_off = "" + frame_block = RunBlock('frame', template_on=template_on, template_off=template_off) +@@ -3891,7 +3891,7 @@ template_on = \ + # CONTROL The extra running scale (not QCD) * + # Such running is NOT include in systematics computation * + #*********************************************************************** +- %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale ++ %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale + %(mue_ref_fixed)s = mue_ref_fixed ! scale to use if fixed scale mode + %(mue_over_ref)s = mue_over_ref ! ratio to mur if dynamical scale + """ +@@ -3908,10 +3908,10 @@ template_on = \ + %(tmin_for_channel)s = tmin_for_channel ! limit the non-singular reach of --some-- channel of integration related to T-channel diagram (value between -1 and 0), -1 is no impact + %(survey_splitting)s = survey_splitting ! for loop-induced control how many core are used at survey for the computation of a single iteration. + %(survey_nchannel_per_job)s = survey_nchannel_per_job ! control how many Channel are integrated inside a single job on cluster/multicore +- %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) ++ %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) ++#********************************************************************* ++# Compilation flag. + #********************************************************************* +-# Compilation flag. +-#********************************************************************* + %(global_flag)s = global_flag ! fortran optimization flag use for the all code. + %(aloha_flag)s = aloha_flag ! fortran optimization flag for aloha function. Suggestions: '-ffast-math' + %(matrix_flag)s = matrix_flag ! 
fortran optimization flag for matrix.f function. Suggestions: '-O3' +@@ -3948,7 +3948,7 @@ class PDLabelBlock(RunBlock): + if card['pdlabel'] != card['pdlabel1']: + dict.__setitem__(card, 'pdlabel', card['pdlabel1']) + elif card['pdlabel1'] in sum(card.allowed_lep_densities.values(),[]): +- raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") ++ raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") + elif card['pdlabel2'] in sum(card.allowed_lep_densities.values(),[]): + raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") + elif card['pdlabel1'] == 'none': +@@ -3962,7 +3962,7 @@ class PDLabelBlock(RunBlock): + dict.__setitem__(card, 'pdlabel2', card['pdlabel']) + + if abs(card['lpp1']) == 1 == abs(card['lpp2']) and card['pdlabel1'] != card['pdlabel2']: +- raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") ++ raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") + + def status(self, card): + """return False if template_off to be used, True if template_on to be used""" +@@ -4028,7 +4028,7 @@ class FixedfacscaleBlock(RunBlock): + if name == 'fixed_fac_scale2' and 'fixed_fac_scale1' not in card.user_set: + dict.__setitem__(card, 'fixed_fac_scale1', card['fixed_fac_scale']) + if name == 'fixed_fac_scale1' and 'fixed_fac_scale2' not in card.user_set: +- dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) ++ dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) + + + def status(self, card): +@@ -4061,32 +4061,32 @@ fixedfacscale = FixedfacscaleBlock('fixed_fact_scale', template_on=template_on, + + class RunCardLO(RunCard): + """an object to handle in a nice way the run_card information""" +- ++ + blocks = [heavy_ion_block, beam_pol_block, syscalc_block, ecut_block, + frame_block, eva_scale_block, mlm_block, ckkw_block, 
psoptim_block, + pdlabel_block, fixedfacscale, running_block] + + dummy_fct_file = {"dummy_cuts": pjoin("SubProcesses","dummy_fct.f"), + "get_dummy_x1": pjoin("SubProcesses","dummy_fct.f"), +- "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), ++ "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), + "dummy_boostframe": pjoin("SubProcesses","dummy_fct.f"), + "user_dynamical_scale": pjoin("SubProcesses","dummy_fct.f"), + "bias_wgt_custom": pjoin("SubProcesses","dummy_fct.f"), + "user_": pjoin("SubProcesses","dummy_fct.f") # all function starting by user will be added to that file + } +- ++ + include_as_parameter = ['vector.inc'] + + if MG5DIR: + default_run_card = pjoin(MG5DIR, "internal", "default_run_card_lo.dat") +- ++ + def default_setup(self): + """default value for the run_card.dat""" +- ++ + self.add_param("run_tag", "tag_1", include=False) + self.add_param("gridpack", False) + self.add_param("time_of_flight", -1.0, include=False) +- self.add_param("nevents", 10000) ++ self.add_param("nevents", 10000) + self.add_param("iseed", 0) + self.add_param("python_seed", -2, include=False, hidden=True, comment="controlling python seed [handling in particular the final unweighting].\n -1 means use default from random module.\n -2 means set to same value as iseed") + self.add_param("lpp1", 1, fortran_name="lpp(1)", allowed=[-1,1,0,2,3,9,-2,-3,4,-4], +@@ -4106,7 +4106,7 @@ class RunCardLO(RunCard): + self.add_param('nb_neutron1', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(1)", + comment='For heavy ion physics nb of neutron in the ion (for both beam but if group_subprocess was False)') + self.add_param('nb_neutron2', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(2)", +- comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') ++ comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') + self.add_param('mass_ion1', -1.0, hidden=True, 
fortran_name="mass_ion(1)", + allowed=[-1,0, 0.938, 207.9766521*0.938, 0.000511, 0.105, '*'], + comment='For heavy ion physics mass in GeV of the ion (of beam 1)') +@@ -4133,11 +4133,11 @@ class RunCardLO(RunCard): + self.add_param("mue_over_ref", 1.0, hidden=True, comment='ratio mu_other/mu for dynamical scale') + self.add_param("ievo_eva",0,hidden=True, allowed=[0,1],fortran_name="ievo_eva", + comment='eva: 0 for EW pdf muf evolution by q^2; 1 for evo by pT^2') +- ++ + # Bias module options + self.add_param("bias_module", 'None', include=False, hidden=True) + self.add_param('bias_parameters', {'__type__':1.0}, include='BIAS/bias.inc', hidden=True) +- ++ + #matching + self.add_param("scalefact", 1.0) + self.add_param("ickkw", 0, allowed=[0,1], hidden=True, comment="\'0\' for standard fixed order computation.\n\'1\' for MLM merging activates alphas and pdf re-weighting according to a kt clustering of the QCD radiation.") +@@ -4221,7 +4221,7 @@ class RunCardLO(RunCard): + self.add_param("mmaa", 0.0, cut='aa') + self.add_param("mmll", 0.0, cut='ll') + self.add_param("mmjjmax", -1.0, cut='jj') +- self.add_param("mmbbmax", -1.0, cut='bb') ++ self.add_param("mmbbmax", -1.0, cut='bb') + self.add_param("mmaamax", -1.0, cut='aa') + self.add_param("mmllmax", -1.0, cut='ll') + self.add_param("mmnl", 0.0, cut='LL') +@@ -4231,9 +4231,9 @@ class RunCardLO(RunCard): + self.add_param("ptllmax", -1.0, cut='ll') + self.add_param("xptj", 0.0, cut='jj') + self.add_param("xptb", 0.0, cut='bb') +- self.add_param("xpta", 0.0, cut='aa') ++ self.add_param("xpta", 0.0, cut='aa') + self.add_param("xptl", 0.0, cut='ll') +- # ordered pt jet ++ # ordered pt jet + self.add_param("ptj1min", 0.0, cut='jj') + self.add_param("ptj1max", -1.0, cut='jj') + self.add_param("ptj2min", 0.0, cut='jj') +@@ -4241,7 +4241,7 @@ class RunCardLO(RunCard): + self.add_param("ptj3min", 0.0, cut='jjj') + self.add_param("ptj3max", -1.0, cut='jjj') + self.add_param("ptj4min", 0.0, cut='j'*4) +- 
self.add_param("ptj4max", -1.0, cut='j'*4) ++ self.add_param("ptj4max", -1.0, cut='j'*4) + self.add_param("cutuse", 0, cut='jj') + # ordered pt lepton + self.add_param("ptl1min", 0.0, cut='l'*2) +@@ -4249,7 +4249,7 @@ class RunCardLO(RunCard): + self.add_param("ptl2min", 0.0, cut='l'*2) + self.add_param("ptl2max", -1.0, cut='l'*2) + self.add_param("ptl3min", 0.0, cut='l'*3) +- self.add_param("ptl3max", -1.0, cut='l'*3) ++ self.add_param("ptl3max", -1.0, cut='l'*3) + self.add_param("ptl4min", 0.0, cut='l'*4) + self.add_param("ptl4max", -1.0, cut='l'*4) + # Ht sum of jets +@@ -4257,7 +4257,7 @@ class RunCardLO(RunCard): + self.add_param("htjmax", -1.0, cut='j'*2) + self.add_param("ihtmin", 0.0, cut='J'*2) + self.add_param("ihtmax", -1.0, cut='J'*2) +- self.add_param("ht2min", 0.0, cut='J'*3) ++ self.add_param("ht2min", 0.0, cut='J'*3) + self.add_param("ht3min", 0.0, cut='J'*3) + self.add_param("ht4min", 0.0, cut='J'*4) + self.add_param("ht2max", -1.0, cut='J'*3) +@@ -4267,7 +4267,7 @@ class RunCardLO(RunCard): + self.add_param("ptgmin", 0.0, cut='aj') + self.add_param("r0gamma", 0.4, hidden=True) + self.add_param("xn", 1.0, hidden=True) +- self.add_param("epsgamma", 1.0, hidden=True) ++ self.add_param("epsgamma", 1.0, hidden=True) + self.add_param("isoem", True, hidden=True) + self.add_param("xetamin", 0.0, cut='jj') + self.add_param("deltaeta", 0.0, cut='j'*2) +@@ -4280,7 +4280,7 @@ class RunCardLO(RunCard): + self.add_param("use_syst", True) + self.add_param('systematics_program', 'systematics', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics, syscalc') + self.add_param('systematics_arguments', ['--mur=0.5,1,2', '--muf=0.5,1,2', '--pdf=errorset'], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. 
Look at the help of the systematics function for more details.') +- ++ + self.add_param("sys_scalefact", "0.5 1 2", include=False, hidden=True) + self.add_param("sys_alpsfact", "None", include=False, hidden=True) + self.add_param("sys_matchscale", "auto", include=False, hidden=True) +@@ -4315,8 +4315,8 @@ class RunCardLO(RunCard): + self.add_param('aloha_flag', '', include=False, hidden=True, comment='global fortran compilation flag, suggestion: -ffast-math', + fct_mod=(self.make_clean, ('Source/DHELAS'),{})) + self.add_param('matrix_flag', '', include=False, hidden=True, comment='fortran compilation flag for the matrix-element files, suggestion -O3', +- fct_mod=(self.make_Ptouch, ('matrix'),{})) +- self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', ++ fct_mod=(self.make_Ptouch, ('matrix'),{})) ++ self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', + fortran_name='VECSIZE_MEMMAX', fct_mod=(self.reset_simd,(),{})) + + # parameter allowing to define simple cut via the pdg +@@ -4329,24 +4329,24 @@ class RunCardLO(RunCard): + self.add_param('eta_max_pdg',{'__type__':0.}, include=False,cut=True) + self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) + self.add_param('mxx_only_part_antipart', {'default':False}, include=False) +- ++ + self.add_param('pdg_cut',[0], system=True) # store which PDG are tracked + self.add_param('ptmin4pdg',[0.], system=True) # store pt min + self.add_param('ptmax4pdg',[-1.], system=True) + self.add_param('Emin4pdg',[0.], system=True) # store pt min +- self.add_param('Emax4pdg',[-1.], system=True) ++ self.add_param('Emax4pdg',[-1.], system=True) + self.add_param('etamin4pdg',[0.], system=True) # store pt min +- self.add_param('etamax4pdg',[-1.], system=True) ++ self.add_param('etamax4pdg',[-1.], system=True) + self.add_param('mxxmin4pdg',[-1.], system=True) + self.add_param('mxxpart_antipart', [False], 
system=True) +- +- +- ++ ++ ++ + def check_validity(self): + """ """ +- ++ + super(RunCardLO, self).check_validity() +- ++ + #Make sure that nhel is only either 0 (i.e. no MC over hel) or + #1 (MC over hel with importance sampling). In particular, it can + #no longer be > 1. +@@ -4357,12 +4357,12 @@ class RunCardLO(RunCard): + "not %s." % self['nhel']) + if int(self['maxjetflavor']) > 6: + raise InvalidRunCard('maxjetflavor should be lower than 5! (6 is partly supported)') +- ++ + if len(self['pdgs_for_merging_cut']) > 1000: + raise InvalidRunCard("The number of elements in "+\ + "'pdgs_for_merging_cut' should not exceed 1000.") + +- ++ + # some cut need to be deactivated in presence of isolation + if self['ptgmin'] > 0: + if self['pta'] > 0: +@@ -4370,18 +4370,18 @@ class RunCardLO(RunCard): + self['pta'] = 0.0 + if self['draj'] > 0: + logger.warning('draj cut discarded since photon isolation is used') +- self['draj'] = 0.0 +- +- # special treatment for gridpack use the gseed instead of the iseed ++ self['draj'] = 0.0 ++ ++ # special treatment for gridpack use the gseed instead of the iseed + if self['gridrun']: + self['iseed'] = self['gseed'] +- ++ + #Some parameter need to be fixed when using syscalc + #if self['use_syst']: + # if self['scalefact'] != 1.0: + # logger.warning('Since use_syst=T, changing the value of \'scalefact\' to 1') + # self['scalefact'] = 1.0 +- ++ + # CKKW Treatment + if self['ickkw'] > 0: + if self['ickkw'] != 1: +@@ -4399,7 +4399,7 @@ class RunCardLO(RunCard): + raise InvalidRunCard('maxjetflavor at 6 is NOT supported for matching!') + if self['ickkw'] == 2: + # add warning if ckkw selected but the associate parameter are empty +- self.get_default('highestmult', log_level=20) ++ self.get_default('highestmult', log_level=20) + self.get_default('issgridfile', 'issudgrid.dat', log_level=20) + if self['xqcut'] > 0: + if self['ickkw'] == 0: +@@ -4412,13 +4412,13 @@ class RunCardLO(RunCard): + if self['drjl'] != 0: + if 'drjl' in self.user_set: 
+ logger.warning('Since icckw>0, changing the value of \'drjl\' to 0') +- self['drjl'] = 0 +- if not self['auto_ptj_mjj']: ++ self['drjl'] = 0 ++ if not self['auto_ptj_mjj']: + if self['mmjj'] > self['xqcut']: + logger.warning('mmjj > xqcut (and auto_ptj_mjj = F). MMJJ set to 0') +- self['mmjj'] = 0.0 +- +- # check validity of the pdf set ++ self['mmjj'] = 0.0 ++ ++ # check validity of the pdf set + # note that pdlabel is automatically set to lhapdf if pdlabel1 or pdlabel2 is set to lhapdf + if self['pdlabel'] == 'lhapdf': + #add warning if lhaid not define +@@ -4426,7 +4426,7 @@ class RunCardLO(RunCard): + + mod = False + for i in [1,2]: +- lpp = 'lpp%i' %i ++ lpp = 'lpp%i' %i + pdlabelX = 'pdlabel%i' % i + if self[lpp] == 0: # nopdf + if self[pdlabelX] != 'none': +@@ -4459,12 +4459,12 @@ class RunCardLO(RunCard): + raise InvalidRunCard( "Heavy ion mode is only supported for lpp1=1/2") + if self['lpp2'] not in [1,2]: + if self['nb_proton2'] !=1 or self['nb_neutron2'] !=0: +- raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") ++ raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") + + + # check that fixed_fac_scale(1/2) is setting as expected + # if lpp=2/3/4 -> default is that beam in fixed scale +- # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are ++ # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are + # check that both fixed_fac_scale1/2 are defined together + # ensure that fixed_fac_scale1 and fixed_fac_scale2 are setup as needed + if 'fixed_fac_scale1' in self.user_set: +@@ -4475,13 +4475,13 @@ class RunCardLO(RunCard): + elif 'fixed_fac_scale' in self.user_set: + logger.warning('fixed_fac_scale and fixed_fac_scale1 are defined but not fixed_fac_scale2. 
The value of fixed_fac_scale2 will be set to the one of fixed_fac_scale.') + self['fixed_fac_scale2'] = self['fixed_fac_scale'] +- elif self['lpp2'] !=0: ++ elif self['lpp2'] !=0: + raise Exception('fixed_fac_scale2 not defined while fixed_fac_scale1 is. Please fix your run_card.') + elif 'fixed_fac_scale2' in self.user_set: + if 'fixed_fac_scale' in self.user_set: + logger.warning('fixed_fac_scale and fixed_fac_scale2 are defined but not fixed_fac_scale1. The value of fixed_fac_scale1 will be set to the one of fixed_fac_scale.') + self['fixed_fac_scale1'] = self['fixed_fac_scale'] +- elif self['lpp1'] !=0: ++ elif self['lpp1'] !=0: + raise Exception('fixed_fac_scale1 not defined while fixed_fac_scale2 is. Please fix your run_card.') + else: + if 'fixed_fac_scale' in self.user_set: +@@ -4500,12 +4500,12 @@ class RunCardLO(RunCard): + logger.warning('fixed_fac_scale1 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale1']) + logger.warning('fixed_fac_scale2 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale2']) + +- # check if lpp = ++ # check if lpp = + if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]): + for i in [1,2]: + if abs(self['lpp%s' % i ]) in [3,4] and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: + logger.warning("Vector boson from lepton PDF is using fixed scale value of muf [dsqrt_q2fact%s]. Looks like you kept the default value (Mz). Is this really the cut-off that you want to use?" 
% i) +- ++ + if abs(self['lpp%s' % i ]) == 2 and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: + if self['pdlabel'] in ['edff','chff']: + logger.warning("Since 3.5.0 exclusive photon-photon processes in ultraperipheral proton and nuclear collisions from gamma-UPC (arXiv:2207.03012) will ignore the factorisation scale.") +@@ -4515,10 +4515,10 @@ class RunCardLO(RunCard): + + if six.PY2 and self['hel_recycling']: + self['hel_recycling'] = False +- logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. ++ logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. + In general this optimization speeds up the computation by a factor of two.""") + +- ++ + # check that ebeam is bigger than the associated mass. + for i in [1,2]: + if self['lpp%s' % i ] not in [1,2]: +@@ -4529,13 +4529,13 @@ class RunCardLO(RunCard): + logger.warning("At-rest proton mode set: energy beam set to 0.938") + self.set('ebeam%i' %i, 0.938) + else: +- raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") +- elif self['ebeam%i' % i] < self['mass_ion%i' % i]: ++ raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") ++ elif self['ebeam%i' % i] < self['mass_ion%i' % i]: + if self['ebeam%i' %i] == 0: + logger.warning("At rest ion mode set: Energy beam set to %s" % self['mass_ion%i' % i]) + self.set('ebeam%i' %i, self['mass_ion%i' % i]) +- +- ++ ++ + # check the tmin_for_channel is negative + if self['tmin_for_channel'] == 0: + raise InvalidRunCard('tmin_for_channel can not be set to 0.') +@@ -4543,15 +4543,15 @@ class RunCardLO(RunCard): + logger.warning('tmin_for_channel should be negative. 
Will be using -%f instead' % self['tmin_for_channel']) + self.set('tmin_for_channel', -self['tmin_for_channel']) + +- ++ + def update_system_parameter_for_include(self): + """system parameter need to be setupe""" +- ++ + # polarization + self['frame_id'] = sum(2**(n) for n in self['me_frame']) +- ++ + # set the pdg_for_cut fortran parameter +- pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + ++ pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + + list(self['e_min_pdg'].keys()) +list(self['e_max_pdg'].keys()) + + list(self['eta_min_pdg'].keys()) +list(self['eta_max_pdg'].keys())+ + list(self['mxx_min_pdg'].keys()) + list(self['mxx_only_part_antipart'].keys())) +@@ -4559,15 +4559,15 @@ class RunCardLO(RunCard): + pdg_to_cut.discard('default') + if len(pdg_to_cut)>25: + raise Exception("Maximum 25 different pdgs are allowed for pdg specific cut") +- ++ + if any(int(pdg)<0 for pdg in pdg_to_cut): + logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. Always use positve PDG codes') + raise MadGraph5Error('Some PDG specific cuts are defined using negative pdg code') +- +- ++ ++ + if any(pdg in pdg_to_cut for pdg in [1,2,3,4,5,21,22,11,13,15]): + raise Exception("Can not use PDG related cut for light quark/b quark/lepton/gluon/photon") +- ++ + if pdg_to_cut: + self['pdg_cut'] = list(pdg_to_cut) + self['ptmin4pdg'] = [] +@@ -4595,7 +4595,7 @@ class RunCardLO(RunCard): + self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) + else: + if str(pdg) not in self[old_var]: +- raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) ++ raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + self[new_var].append(self[old_var][str(pdg)]) + else: + self['pdg_cut'] = [0] +@@ -4605,11 +4605,11 @@ class RunCardLO(RunCard): + self['ptmax4pdg'] = [-1.] 
+ self['Emax4pdg'] = [-1.] + self['etamax4pdg'] =[-1.] +- self['mxxmin4pdg'] =[0.] ++ self['mxxmin4pdg'] =[0.] + self['mxxpart_antipart'] = [False] +- +- +- ++ ++ ++ + def create_default_for_process(self, proc_characteristic, history, proc_def): + """Rules + process 1->N all cut set on off. +@@ -4626,7 +4626,7 @@ class RunCardLO(RunCard): + if proc_characteristic['loop_induced']: + self['nhel'] = 1 + self['pdgs_for_merging_cut'] = proc_characteristic['colored_pdgs'] +- ++ + if proc_characteristic['ninitial'] == 1: + #remove all cut + self.remove_all_cut() +@@ -4636,7 +4636,7 @@ class RunCardLO(RunCard): + # check for beam_id + beam_id = set() + beam_id_split = [set(), set()] +- for proc in proc_def: ++ for proc in proc_def: + for oneproc in proc: + for i,leg in enumerate(oneproc['legs']): + if not leg['state']: +@@ -4654,20 +4654,20 @@ class RunCardLO(RunCard): + maxjetflavor = max([4]+[abs(i) for i in beam_id if -7< i < 7]) + self['maxjetflavor'] = maxjetflavor + self['asrwgtflavor'] = maxjetflavor +- ++ + if any(i in beam_id for i in [1,-1,2,-2,3,-3,4,-4,5,-5,21,22]): + # check for e p collision + if any(id in beam_id for id in [11,-11,13,-13]): + self.display_block.append('beam_pol') + if any(id in beam_id_split[0] for id in [11,-11,13,-13]): +- self['lpp1'] = 0 +- self['lpp2'] = 1 +- self['ebeam1'] = '1k' +- self['ebeam2'] = '6500' ++ self['lpp1'] = 0 ++ self['lpp2'] = 1 ++ self['ebeam1'] = '1k' ++ self['ebeam2'] = '6500' + else: +- self['lpp1'] = 1 +- self['lpp2'] = 0 +- self['ebeam1'] = '6500' ++ self['lpp1'] = 1 ++ self['lpp2'] = 0 ++ self['ebeam1'] = '6500' + self['ebeam2'] = '1k' + + # UPC for p p collision +@@ -4677,7 +4677,7 @@ class RunCardLO(RunCard): + self['ebeam1'] = '6500' + self['ebeam2'] = '6500' + self['pdlabel'] = 'edff' +- ++ + elif any(id in beam_id for id in [11,-11,13,-13]): + self['lpp1'] = 0 + self['lpp2'] = 0 +@@ -4688,7 +4688,7 @@ class RunCardLO(RunCard): + self.display_block.append('ecut') + self.display_block.append('beam_pol') + +- 
++ + + # check for possibility of eva + eva_in_b1 = any(i in beam_id_split[0] for i in [23,24,-24]) #,12,-12,14,-14]) +@@ -4701,10 +4701,10 @@ class RunCardLO(RunCard): + self['nhel'] = 1 + self['pdlabel'] = 'eva' + self['fixed_fac_scale'] = True +- self.display_block.append('beam_pol') ++ self.display_block.append('beam_pol') + + elif eva_in_b1: +- self.display_block.append('beam_pol') ++ self.display_block.append('beam_pol') + self['pdlabel1'] = 'eva' + self['fixed_fac_scale1'] = True + self['nhel'] = 1 +@@ -4724,7 +4724,7 @@ class RunCardLO(RunCard): + self['pdlabel2'] = 'eva' + self['fixed_fac_scale2'] = True + self['nhel'] = 1 +- self.display_block.append('beam_pol') ++ self.display_block.append('beam_pol') + for i in beam_id_split[0]: + if abs(i) == 11: + self['lpp1'] = math.copysign(3,i) +@@ -4740,34 +4740,34 @@ class RunCardLO(RunCard): + if any(i in beam_id for i in [22,23,24,-24,12,-12,14,-14]): + self.display_block.append('eva_scale') + +- # automatic polarisation of the beam if neutrino beam ++ # automatic polarisation of the beam if neutrino beam + if any(id in beam_id for id in [12,-12,14,-14,16,-16]): + self.display_block.append('beam_pol') + if any(id in beam_id_split[0] for id in [12,14,16]): +- self['lpp1'] = 0 +- self['ebeam1'] = '1k' ++ self['lpp1'] = 0 ++ self['ebeam1'] = '1k' + self['polbeam1'] = -100 + if not all(id in [12,14,16] for id in beam_id_split[0]): + logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1]. %s') + elif any(id in beam_id_split[0] for id in [-12,-14,-16]): +- self['lpp1'] = 0 +- self['ebeam1'] = '1k' ++ self['lpp1'] = 0 ++ self['ebeam1'] = '1k' + self['polbeam1'] = 100 + if not all(id in [-12,-14,-16] for id in beam_id_split[0]): +- logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1].') ++ logger.warning('Issue with default beam setup of neutrino in the run_card. 
Please check it up [polbeam1].') + if any(id in beam_id_split[1] for id in [12,14,16]): +- self['lpp2'] = 0 +- self['ebeam2'] = '1k' ++ self['lpp2'] = 0 ++ self['ebeam2'] = '1k' + self['polbeam2'] = -100 + if not all(id in [12,14,16] for id in beam_id_split[1]): + logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') + elif any(id in beam_id_split[1] for id in [-12,-14,-16]): +- self['lpp2'] = 0 +- self['ebeam2'] = '1k' ++ self['lpp2'] = 0 ++ self['ebeam2'] = '1k' + self['polbeam2'] = 100 + if not all(id in [-12,-14,-16] for id in beam_id_split[1]): + logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') +- ++ + # Check if need matching + min_particle = 99 + max_particle = 0 +@@ -4798,12 +4798,12 @@ class RunCardLO(RunCard): + else: + # all are jet => matching is ON + matching=True +- break +- ++ break ++ + if matching: + self['ickkw'] = 1 + self['xqcut'] = 30 +- #self['use_syst'] = False ++ #self['use_syst'] = False + self['drjj'] = 0 + self['drjl'] = 0 + self['sys_alpsfact'] = "0.5 1 2" +@@ -4811,8 +4811,8 @@ class RunCardLO(RunCard): + self.display_block.append('mlm') + self.display_block.append('ckkw') + self['dynamical_scale_choice'] = -1 +- +- ++ ++ + # For interference module, the systematics are wrong. 
+ # automatically set use_syst=F and set systematics_program=none + no_systematics = False +@@ -4826,14 +4826,14 @@ class RunCardLO(RunCard): + continue + break + +- ++ + if interference or no_systematics: + self['use_syst'] = False + self['systematics_program'] = 'none' + if interference: + self['dynamical_scale_choice'] = 3 + self['sde_strategy'] = 2 +- ++ + # set default integration strategy + # interference case is already handle above + # here pick strategy 2 if only one QCD color flow +@@ -4852,7 +4852,7 @@ class RunCardLO(RunCard): + if pure_lepton and proton_initial: + self['sde_strategy'] = 1 + else: +- # check if multi-jet j ++ # check if multi-jet j + is_multijet = True + for proc in proc_def: + if any(abs(j.get('id')) not in jet_id for j in proc[0]['legs']): +@@ -4860,7 +4860,7 @@ class RunCardLO(RunCard): + break + if is_multijet: + self['sde_strategy'] = 2 +- ++ + # if polarization is used, set the choice of the frame in the run_card + # But only if polarization is used for massive particles + for plist in proc_def: +@@ -4870,7 +4870,7 @@ class RunCardLO(RunCard): + model = proc.get('model') + particle = model.get_particle(l.get('id')) + if particle.get('mass').lower() != 'zero': +- self.display_block.append('frame') ++ self.display_block.append('frame') + break + else: + continue +@@ -4894,15 +4894,15 @@ class RunCardLO(RunCard): + proc = proc_list[0] + if proc['forbidden_onsh_s_channels']: + self['sde_strategy'] = 1 +- ++ + if 'fix_scale' in proc_characteristic['limitations']: + self['fixed_ren_scale'] = 1 + self['fixed_fac_scale'] = 1 + if self['ickkw'] == 1: + logger.critical("MLM matching/merging not compatible with the model! 
You need to use another method to remove the double counting!") + self['ickkw'] = 0 +- +- # define class of particles present to hide all the cuts associated to ++ ++ # define class of particles present to hide all the cuts associated to + # not present class + cut_class = collections.defaultdict(int) + for proc in proc_def: +@@ -4925,41 +4925,41 @@ class RunCardLO(RunCard): + one_proc_cut['L'] += 1 + elif abs(pdg) in [12,14,16]: + one_proc_cut['n'] += 1 +- one_proc_cut['L'] += 1 ++ one_proc_cut['L'] += 1 + elif str(oneproc.get('model').get_particle(pdg)['mass']) != 'ZERO': + one_proc_cut['H'] += 1 +- ++ + for key, nb in one_proc_cut.items(): + cut_class[key] = max(cut_class[key], nb) + self.cut_class = dict(cut_class) + self.cut_class[''] = True #avoid empty +- ++ + # If model has running functionality add the additional parameter + model = proc_def[0][0].get('model') + if model['running_elements']: +- self.display_block.append('RUNNING') ++ self.display_block.append('RUNNING') + + + # Read file input/default_run_card_lo.dat + # This has to be LAST !! 
+ if os.path.exists(self.default_run_card): + self.read(self.default_run_card, consistency=False) +- ++ + def write(self, output_file, template=None, python_template=False, + **opt): +- """Write the run_card in output_file according to template ++ """Write the run_card in output_file according to template + (a path to a valid run_card)""" + + + if not template: + if not MADEVENT: +- template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', ++ template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + 'run_card.dat') + python_template = True + else: + template = pjoin(MEDIR, 'Cards', 'run_card_default.dat') + python_template = False +- ++ + + hid_lines = {'default':True}#collections.defaultdict(itertools.repeat(True).next) + if isinstance(output_file, str): +@@ -4975,9 +4975,9 @@ class RunCardLO(RunCard): + hid_lines[k1+k2] = True + + super(RunCardLO, self).write(output_file, template=template, +- python_template=python_template, ++ python_template=python_template, + template_options=hid_lines, +- **opt) ++ **opt) + + + class InvalidMadAnalysis5Card(InvalidCmd): +@@ -4986,19 +4986,19 @@ class InvalidMadAnalysis5Card(InvalidCmd): + class MadAnalysis5Card(dict): + """ A class to store a MadAnalysis5 card. 
Very basic since it is basically + free format.""" +- ++ + _MG5aMC_escape_tag = '@MG5aMC' +- ++ + _default_hadron_inputs = ['*.hepmc', '*.hep', '*.stdhep', '*.lhco','*.root'] + _default_parton_inputs = ['*.lhe'] + _skip_analysis = False +- ++ + @classmethod + def events_can_be_reconstructed(cls, file_path): + """ Checks from the type of an event file whether it can be reconstructed or not.""" + return not (file_path.endswith('.lhco') or file_path.endswith('.lhco.gz') or \ + file_path.endswith('.root') or file_path.endswith('.root.gz')) +- ++ + @classmethod + def empty_analysis(cls): + """ A method returning the structure of an empty analysis """ +@@ -5012,7 +5012,7 @@ class MadAnalysis5Card(dict): + 'reco_output':'lhe'} + + def default_setup(self): +- """define the default value""" ++ """define the default value""" + self['mode'] = 'parton' + self['inputs'] = [] + # None is the default stdout level, it will be set automatically by MG5aMC +@@ -5025,8 +5025,8 @@ class MadAnalysis5Card(dict): + # of this class and some other property could be added to this dictionary + # in the future. + self['analyses'] = {} +- # The recasting structure contains on set of commands and one set of +- # card lines. ++ # The recasting structure contains on set of commands and one set of ++ # card lines. 
+ self['recasting'] = {'commands':[],'card':[]} + # Add the default trivial reconstruction to use an lhco input + # This is just for the structure +@@ -5035,7 +5035,7 @@ class MadAnalysis5Card(dict): + 'root_input': + MadAnalysis5Card.empty_reconstruction()} + self['reconstruction']['lhco_input']['reco_output']='lhco' +- self['reconstruction']['root_input']['reco_output']='root' ++ self['reconstruction']['root_input']['reco_output']='root' + + # Specify in which order the analysis/recasting were specified + self['order'] = [] +@@ -5049,7 +5049,7 @@ class MadAnalysis5Card(dict): + return + else: + dict.__init__(self) +- ++ + # Initialize it with all the default value + self.default_setup() + if not mode is None: +@@ -5058,15 +5058,15 @@ class MadAnalysis5Card(dict): + # if input is define read that input + if isinstance(finput, (file, str, StringIO.StringIO)): + self.read(finput, mode=mode) +- ++ + def read(self, input, mode=None): + """ Read an MA5 card""" +- ++ + if mode not in [None,'parton','hadron']: + raise MadGraph5Error('A MadAnalysis5Card can be read online the modes'+ + "'parton' or 'hadron'") + card_mode = mode +- ++ + if isinstance(input, (file, StringIO.StringIO)): + input_stream = input + elif isinstance(input, str): +@@ -5099,10 +5099,10 @@ class MadAnalysis5Card(dict): + except ValueError: + option = line[len(self._MG5aMC_escape_tag):] + option = option.strip() +- ++ + if option=='inputs': + self['inputs'].extend([v.strip() for v in value.split(',')]) +- ++ + elif option == 'skip_analysis': + self._skip_analysis = True + +@@ -5118,7 +5118,7 @@ class MadAnalysis5Card(dict): + except: + raise InvalidMadAnalysis5Card( + "MA5 output level specification '%s' is incorrect."%str(value)) +- ++ + elif option=='analysis_name': + current_type = 'analyses' + current_name = value +@@ -5127,7 +5127,7 @@ class MadAnalysis5Card(dict): + "Analysis '%s' already defined in MadAnalysis5 card"%current_name) + else: + self[current_type][current_name] = 
MadAnalysis5Card.empty_analysis() +- ++ + elif option=='set_reconstructions': + try: + reconstructions = eval(value) +@@ -5142,7 +5142,7 @@ class MadAnalysis5Card(dict): + "analysis in a MadAnalysis5 card.") + self[current_type][current_name]['reconstructions']=reconstructions + continue +- ++ + elif option=='reconstruction_name': + current_type = 'reconstruction' + current_name = value +@@ -5161,7 +5161,7 @@ class MadAnalysis5Card(dict): + raise InvalidMadAnalysis5Card( + "Option '%s' can only take the values 'lhe' or 'root'"%option) + self['reconstruction'][current_name]['reco_output'] = value.lower() +- ++ + elif option.startswith('recasting'): + current_type = 'recasting' + try: +@@ -5171,11 +5171,11 @@ class MadAnalysis5Card(dict): + if len(self['recasting'][current_name])>0: + raise InvalidMadAnalysis5Card( + "Only one recasting can be defined in MadAnalysis5 hadron card") +- ++ + else: + raise InvalidMadAnalysis5Card( + "Unreckognized MG5aMC instruction in MadAnalysis5 card: '%s'"%option) +- ++ + if option in ['analysis_name','reconstruction_name'] or \ + option.startswith('recasting'): + self['order'].append((current_type,current_name)) +@@ -5209,7 +5209,7 @@ class MadAnalysis5Card(dict): + self['inputs'] = self._default_hadron_inputs + else: + self['inputs'] = self._default_parton_inputs +- ++ + # Make sure at least one reconstruction is specified for each hadron + # level analysis and that it exists. + if self['mode']=='hadron': +@@ -5221,7 +5221,7 @@ class MadAnalysis5Card(dict): + analysis['reconstructions']): + raise InvalidMadAnalysis5Card('A reconstructions specified in'+\ + " analysis '%s' is not defined."%analysis_name) +- ++ + def write(self, output): + """ Write an MA5 card.""" + +@@ -5232,7 +5232,7 @@ class MadAnalysis5Card(dict): + else: + raise MadGraph5Error('Incorrect input for the write function of'+\ + ' the MadAnalysis5Card card. 
Received argument type is: %s'%str(type(output))) +- ++ + output_lines = [] + if self._skip_analysis: + output_lines.append('%s skip_analysis'%self._MG5aMC_escape_tag) +@@ -5240,11 +5240,11 @@ class MadAnalysis5Card(dict): + if not self['stdout_lvl'] is None: + output_lines.append('%s stdout_lvl=%s'%(self._MG5aMC_escape_tag,self['stdout_lvl'])) + for definition_type, name in self['order']: +- ++ + if definition_type=='analyses': + output_lines.append('%s analysis_name = %s'%(self._MG5aMC_escape_tag,name)) + output_lines.append('%s set_reconstructions = %s'%(self._MG5aMC_escape_tag, +- str(self['analyses'][name]['reconstructions']))) ++ str(self['analyses'][name]['reconstructions']))) + elif definition_type=='reconstruction': + output_lines.append('%s reconstruction_name = %s'%(self._MG5aMC_escape_tag,name)) + elif definition_type=='recasting': +@@ -5254,23 +5254,23 @@ class MadAnalysis5Card(dict): + output_lines.extend(self[definition_type][name]) + elif definition_type in ['reconstruction']: + output_lines.append('%s reco_output = %s'%(self._MG5aMC_escape_tag, +- self[definition_type][name]['reco_output'])) ++ self[definition_type][name]['reco_output'])) + output_lines.extend(self[definition_type][name]['commands']) + elif definition_type in ['analyses']: +- output_lines.extend(self[definition_type][name]['commands']) +- ++ output_lines.extend(self[definition_type][name]['commands']) ++ + output_stream.write('\n'.join(output_lines)) +- ++ + return +- +- def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, ++ ++ def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, + UFO_model_path=None, run_tag=''): +- """ Returns a list of tuples ('AnalysisTag',['commands']) specifying +- the commands of the MadAnalysis runs required from this card. 
+- At parton-level, the number of such commands is the number of analysis ++ """ Returns a list of tuples ('AnalysisTag',['commands']) specifying ++ the commands of the MadAnalysis runs required from this card. ++ At parton-level, the number of such commands is the number of analysis + asked for. In the future, the idea is that the entire card can be + processed in one go from MA5 directly.""" +- ++ + if isinstance(inputs_arg, list): + inputs = inputs_arg + elif isinstance(inputs_arg, str): +@@ -5278,21 +5278,21 @@ class MadAnalysis5Card(dict): + else: + raise MadGraph5Error("The function 'get_MA5_cmds' can only take "+\ + " a string or a list for the argument 'inputs_arg'") +- ++ + if len(inputs)==0: + raise MadGraph5Error("The function 'get_MA5_cmds' must have "+\ + " at least one input specified'") +- ++ + if run_dir_path is None: + run_dir_path = os.path.dirname(inputs_arg) +- ++ + cmds_list = [] +- ++ + UFO_load = [] + # first import the UFO if provided + if UFO_model_path: + UFO_load.append('import %s'%UFO_model_path) +- ++ + def get_import(input, type=None): + """ Generates the MA5 import commands for that event file. """ + dataset_name = os.path.basename(input).split('.')[0] +@@ -5304,7 +5304,7 @@ class MadAnalysis5Card(dict): + if not type is None: + res.append('set %s.type = %s'%(dataset_name, type)) + return res +- ++ + fifo_status = {'warned_fifo':False,'fifo_used_up':False} + def warn_fifo(input): + if not input.endswith('.fifo'): +@@ -5317,7 +5317,7 @@ class MadAnalysis5Card(dict): + logger.warning('Only the first MA5 analysis/reconstructions can be run on a fifo. 
Subsequent runs will skip fifo inputs.') + fifo_status['warned_fifo'] = True + return True +- ++ + # Then the event file(s) input(s) + inputs_load = [] + for input in inputs: +@@ -5325,16 +5325,16 @@ class MadAnalysis5Card(dict): + + if len(inputs) > 1: + inputs_load.append('set main.stacking_method = superimpose') +- ++ + submit_command = 'submit %s'%submit_folder+'_%s' +- ++ + # Keep track of the reconstruction outpus in the MA5 workflow + # Keys are reconstruction names and values are .lhe.gz reco file paths. + # We put by default already the lhco/root ones present + reconstruction_outputs = { +- 'lhco_input':[f for f in inputs if ++ 'lhco_input':[f for f in inputs if + f.endswith('.lhco') or f.endswith('.lhco.gz')], +- 'root_input':[f for f in inputs if ++ 'root_input':[f for f in inputs if + f.endswith('.root') or f.endswith('.root.gz')]} + + # If a recasting card has to be written out, chose here its path +@@ -5343,7 +5343,7 @@ class MadAnalysis5Card(dict): + + # Make sure to only run over one analysis over each fifo. 
+ for definition_type, name in self['order']: +- if definition_type == 'reconstruction': ++ if definition_type == 'reconstruction': + analysis_cmds = list(self['reconstruction'][name]['commands']) + reco_outputs = [] + for i_input, input in enumerate(inputs): +@@ -5365,8 +5365,8 @@ class MadAnalysis5Card(dict): + analysis_cmds.append( + submit_command%('reco_%s_%d'%(name,i_input+1))) + analysis_cmds.append('remove reco_events') +- +- reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) ++ ++ reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) + for rec_out in reco_outputs] + if len(reco_outputs)>0: + cmds_list.append(('_reco_%s'%name,analysis_cmds)) +@@ -5386,7 +5386,7 @@ class MadAnalysis5Card(dict): + analysis_cmds = ['set main.mode = parton'] + else: + analysis_cmds = [] +- analysis_cmds.extend(sum([get_import(rec_out) for ++ analysis_cmds.extend(sum([get_import(rec_out) for + rec_out in reconstruction_outputs[reco]],[])) + analysis_cmds.extend(self['analyses'][name]['commands']) + analysis_cmds.append(submit_command%('%s_%s'%(name,reco))) +@@ -5427,12 +5427,12 @@ template_on = \ + %(mue_ref_fixed)s = mue_ref_fixed ! 
scale to use if fixed scale mode + """ + running_block_nlo = RunBlock('RUNNING', template_on=template_on, template_off="") +- ++ + class RunCardNLO(RunCard): + """A class object for the run_card for a (aMC@)NLO pocess""" +- ++ + LO = False +- ++ + blocks = [running_block_nlo] + + dummy_fct_file = {"dummy_cuts": pjoin("SubProcesses","dummy_fct.f"), +@@ -5443,11 +5443,11 @@ class RunCardNLO(RunCard): + + if MG5DIR: + default_run_card = pjoin(MG5DIR, "internal", "default_run_card_nlo.dat") +- +- ++ ++ + def default_setup(self): + """define the default value""" +- ++ + self.add_param('run_tag', 'tag_1', include=False) + self.add_param('nevents', 10000) + self.add_param('req_acc', -1.0, include=False) +@@ -5455,27 +5455,27 @@ class RunCardNLO(RunCard): + self.add_param("time_of_flight", -1.0, include=False) + self.add_param('event_norm', 'average') + #FO parameter +- self.add_param('req_acc_fo', 0.01, include=False) ++ self.add_param('req_acc_fo', 0.01, include=False) + self.add_param('npoints_fo_grid', 5000, include=False) + self.add_param('niters_fo_grid', 4, include=False) +- self.add_param('npoints_fo', 10000, include=False) ++ self.add_param('npoints_fo', 10000, include=False) + self.add_param('niters_fo', 6, include=False) + #seed and collider + self.add_param('iseed', 0) +- self.add_param('lpp1', 1, fortran_name='lpp(1)') +- self.add_param('lpp2', 1, fortran_name='lpp(2)') ++ self.add_param('lpp1', 1, fortran_name='lpp(1)') ++ self.add_param('lpp2', 1, fortran_name='lpp(2)') + self.add_param('ebeam1', 6500.0, fortran_name='ebeam(1)') +- self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') ++ self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') + self.add_param('pdlabel', 'nn23nlo', allowed=['lhapdf', 'emela', 'cteq6_m','cteq6_d','cteq6_l','cteq6l1', 'nn23lo','nn23lo1','nn23nlo','ct14q00','ct14q07','ct14q14','ct14q21'] +\ +- sum(self.allowed_lep_densities.values(),[]) ) ++ sum(self.allowed_lep_densities.values(),[]) ) + self.add_param('lhaid', 
[244600],fortran_name='lhaPDFid') + self.add_param('pdfscheme', 0) + # whether to include or not photon-initiated processes in lepton collisions + self.add_param('photons_from_lepton', True) + self.add_param('lhapdfsetname', ['internal_use_only'], system=True) +- # stuff for lepton collisions +- # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set +- # whether the current PDF set has or not beamstrahlung ++ # stuff for lepton collisions ++ # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set ++ # whether the current PDF set has or not beamstrahlung + self.add_param('has_bstrahl', False, system=True) + # renormalisation scheme of alpha + self.add_param('alphascheme', 0, system=True) +@@ -5486,31 +5486,31 @@ class RunCardNLO(RunCard): + # w contribution included or not in the running of alpha + self.add_param('w_run', 1, system=True) + #shower and scale +- self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') ++ self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') + self.add_param('shower_scale_factor',1.0) + self.add_param('mcatnlo_delta', False) + self.add_param('fixed_ren_scale', False) + self.add_param('fixed_fac_scale', False) + self.add_param('fixed_extra_scale', True, hidden=True, system=True) # set system since running from Ellis-Sexton scale not implemented +- self.add_param('mur_ref_fixed', 91.118) ++ self.add_param('mur_ref_fixed', 91.118) + self.add_param('muf1_ref_fixed', -1.0, hidden=True) +- self.add_param('muf_ref_fixed', 91.118) ++ self.add_param('muf_ref_fixed', 91.118) + self.add_param('muf2_ref_fixed', -1.0, hidden=True) +- self.add_param('mue_ref_fixed', 91.118, hidden=True) +- self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', ++ self.add_param('mue_ref_fixed', 91.118, hidden=True) ++ self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', + allowed = [-2,-1,0,1,2,3,10], 
comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n '3' is HT/2, '0' allows to use the user_hook definition (need to be defined via custom_fct entry) ") + self.add_param('fixed_qes_scale', False, hidden=True) + self.add_param('qes_ref_fixed', -1.0, hidden=True) + self.add_param('mur_over_ref', 1.0) +- self.add_param('muf_over_ref', 1.0) +- self.add_param('muf1_over_ref', -1.0, hidden=True) ++ self.add_param('muf_over_ref', 1.0) ++ self.add_param('muf1_over_ref', -1.0, hidden=True) + self.add_param('muf2_over_ref', -1.0, hidden=True) + self.add_param('mue_over_ref', 1.0, hidden=True, system=True) # forbid the user to modigy due to incorrect handling of the Ellis-Sexton scale + self.add_param('qes_over_ref', -1.0, hidden=True) + self.add_param('reweight_scale', [True], fortran_name='lscalevar') +- self.add_param('rw_rscale_down', -1.0, hidden=True) ++ self.add_param('rw_rscale_down', -1.0, hidden=True) + self.add_param('rw_rscale_up', -1.0, hidden=True) +- self.add_param('rw_fscale_down', -1.0, hidden=True) ++ self.add_param('rw_fscale_down', -1.0, hidden=True) + self.add_param('rw_fscale_up', -1.0, hidden=True) + self.add_param('rw_rscale', [1.0,2.0,0.5], fortran_name='scalevarR') + self.add_param('rw_fscale', [1.0,2.0,0.5], fortran_name='scalevarF') +@@ -5523,60 +5523,60 @@ class RunCardNLO(RunCard): + + #technical + self.add_param('folding', [1,1,1], include=False) +- ++ + #merging + self.add_param('ickkw', 0, allowed=[-1,0,3,4], comment=" - 0: No merging\n - 3: FxFx Merging : http://amcatnlo.cern.ch/FxFx_merging.htm\n - 4: UNLOPS merging (No interface within MG5aMC)\n - -1: NNLL+NLO jet-veto computation. 
See arxiv:1412.8408 [hep-ph]") + self.add_param('bwcutoff', 15.0) +- #cuts ++ #cuts + self.add_param('jetalgo', 1.0) +- self.add_param('jetradius', 0.7) ++ self.add_param('jetradius', 0.7) + self.add_param('ptj', 10.0 , cut=True) +- self.add_param('etaj', -1.0, cut=True) +- self.add_param('gamma_is_j', True) ++ self.add_param('etaj', -1.0, cut=True) ++ self.add_param('gamma_is_j', True) + self.add_param('ptl', 0.0, cut=True) +- self.add_param('etal', -1.0, cut=True) ++ self.add_param('etal', -1.0, cut=True) + self.add_param('drll', 0.0, cut=True) +- self.add_param('drll_sf', 0.0, cut=True) ++ self.add_param('drll_sf', 0.0, cut=True) + self.add_param('mll', 0.0, cut=True) +- self.add_param('mll_sf', 30.0, cut=True) +- self.add_param('rphreco', 0.1) +- self.add_param('etaphreco', -1.0) +- self.add_param('lepphreco', True) +- self.add_param('quarkphreco', True) ++ self.add_param('mll_sf', 30.0, cut=True) ++ self.add_param('rphreco', 0.1) ++ self.add_param('etaphreco', -1.0) ++ self.add_param('lepphreco', True) ++ self.add_param('quarkphreco', True) + self.add_param('ptgmin', 20.0, cut=True) +- self.add_param('etagamma', -1.0) ++ self.add_param('etagamma', -1.0) + self.add_param('r0gamma', 0.4) +- self.add_param('xn', 1.0) ++ self.add_param('xn', 1.0) + self.add_param('epsgamma', 1.0) +- self.add_param('isoem', True) ++ self.add_param('isoem', True) + self.add_param('maxjetflavor', 4, hidden=True) +- self.add_param('pineappl', False) ++ self.add_param('pineappl', False) + self.add_param('lhe_version', 3, hidden=True, include=False) +- ++ + # customization + self.add_param("custom_fcts",[],typelist="str", include=False, comment="list of files containing function that overwritte dummy function of the code (like adding cuts/...)") + + #internal variable related to FO_analyse_card + self.add_param('FO_LHE_weight_ratio',1e-3, hidden=True, system=True) +- self.add_param('FO_LHE_postprocessing',['grouping','random'], ++ 
self.add_param('FO_LHE_postprocessing',['grouping','random'], + hidden=True, system=True, include=False) +- ++ + # parameter allowing to define simple cut via the pdg + self.add_param('pt_min_pdg',{'__type__':0.}, include=False,cut=True) + self.add_param('pt_max_pdg',{'__type__':0.}, include=False,cut=True) + self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) + self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True) +- ++ + #hidden parameter that are transfer to the fortran code + self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked + self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min + self.add_param('ptmax4pdg',[-1.], hidden=True, system=True) + self.add_param('mxxmin4pdg',[0.], hidden=True, system=True) + self.add_param('mxxpart_antipart', [False], hidden=True, system=True) +- ++ + def check_validity(self): + """check the validity of the various input""" +- ++ + super(RunCardNLO, self).check_validity() + + # for lepton-lepton collisions, ignore 'pdlabel' and 'lhaid' +@@ -5588,12 +5588,12 @@ class RunCardNLO(RunCard): + # for dressed lepton collisions, check that the lhaid is a valid one + if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]) + ['emela']: + raise InvalidRunCard('pdlabel %s not allowed for dressed-lepton collisions' % self['pdlabel']) +- ++ + elif self['pdlabel']!='nn23nlo' or self['reweight_pdf']: + self['pdlabel']='nn23nlo' + self['reweight_pdf']=[False] + logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') +- ++ + if self['lpp1'] == 0 == self['lpp2']: + if self['pdlabel']!='nn23nlo' or self['reweight_pdf']: + self['pdlabel']='nn23nlo' +@@ -5601,8 +5601,8 @@ class RunCardNLO(RunCard): + logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') + + # For FxFx merging, make 
sure that the following parameters are set correctly: +- if self['ickkw'] == 3: +- # 1. Renormalization and factorization (and ellis-sexton scales) are not fixed ++ if self['ickkw'] == 3: ++ # 1. Renormalization and factorization (and ellis-sexton scales) are not fixed + scales=['fixed_ren_scale','fixed_fac_scale','fixed_QES_scale'] + for scale in scales: + if self[scale]: +@@ -5615,7 +5615,7 @@ class RunCardNLO(RunCard): + self["reweight_scale"]=[self["reweight_scale"][0]] + logger.warning('''For consistency in FxFx merging, dynamical_scale_choice has been set to -1 (default)''' + ,'$MG:BOLD') +- ++ + # 2. Use kT algorithm for jets with pseudo-code size R=1.0 + jetparams=['jetradius','jetalgo'] + for jetparam in jetparams: +@@ -5628,8 +5628,8 @@ class RunCardNLO(RunCard): + self["dynamical_scale_choice"] = [-1] + self["reweight_scale"]=[self["reweight_scale"][0]] + logger.warning('''For consistency with the jet veto, the scale which will be used is ptj. dynamical_scale_choice will be set at -1.''' +- ,'$MG:BOLD') +- ++ ,'$MG:BOLD') ++ + # For interface to PINEAPPL, need to use LHAPDF and reweighting to get scale uncertainties + if self['pineappl'] and self['pdlabel'].lower() != 'lhapdf': + raise InvalidRunCard('PineAPPL generation only possible with the use of LHAPDF') +@@ -5661,7 +5661,7 @@ class RunCardNLO(RunCard): + if (self['rw_fscale_down'] != -1.0 and ['rw_fscale_down'] not in self['rw_fscale']) or\ + (self['rw_fscale_up'] != -1.0 and ['rw_fscale_up'] not in self['rw_fscale']): + self['rw_fscale']=[1.0,self['rw_fscale_up'],self['rw_fscale_down']] +- ++ + # PDF reweighting check + if any(self['reweight_pdf']): + # check that we use lhapdf if reweighting is ON +@@ -5672,7 +5672,7 @@ class RunCardNLO(RunCard): + if self['pdlabel'] != "lhapdf": + self['reweight_pdf']=[self['reweight_pdf'][0]] + self['lhaid']=[self['lhaid'][0]] +- ++ + # make sure set have reweight_scale and dyn_scale_choice of length 1 when fixed scales: + if self['fixed_ren_scale'] and 
self['fixed_fac_scale']: + self['reweight_scale']=[self['reweight_scale'][0]] +@@ -5685,7 +5685,7 @@ class RunCardNLO(RunCard): + self['reweight_pdf']=self['reweight_pdf']*len(self['lhaid']) + logger.warning("Setting 'reweight_pdf' for all 'lhaid' to %s" % self['reweight_pdf'][0]) + if len(self['reweight_scale']) == 1 and len(self['dynamical_scale_choice']) != 1: +- self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) ++ self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) + logger.warning("Setting 'reweight_scale' for all 'dynamical_scale_choice' to %s" % self['reweight_pdf'][0]) + + # Check that there are no identical elements in lhaid or dynamical_scale_choice +@@ -5693,7 +5693,7 @@ class RunCardNLO(RunCard): + raise InvalidRunCard("'lhaid' has two or more identical entries. They have to be all different for the code to work correctly.") + if len(self['dynamical_scale_choice']) != len(set(self['dynamical_scale_choice'])): + raise InvalidRunCard("'dynamical_scale_choice' has two or more identical entries. 
They have to be all different for the code to work correctly.") +- ++ + # Check that lenght of lists are consistent + if len(self['reweight_pdf']) != len(self['lhaid']): + raise InvalidRunCard("'reweight_pdf' and 'lhaid' lists should have the same length") +@@ -5730,7 +5730,7 @@ class RunCardNLO(RunCard): + if len(self['folding']) != 3: + raise InvalidRunCard("'folding' should contain exactly three integers") + for ifold in self['folding']: +- if ifold not in [1,2,4,8]: ++ if ifold not in [1,2,4,8]: + raise InvalidRunCard("The three 'folding' parameters should be equal to 1, 2, 4, or 8.") + # Check MC@NLO-Delta + if self['mcatnlo_delta'] and not self['parton_shower'].lower() == 'pythia8': +@@ -5746,11 +5746,11 @@ class RunCardNLO(RunCard): + logger.warning("At-rest proton mode set: energy beam set to 0.938 GeV") + self.set('ebeam%i' %i, 0.938) + else: +- raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") ++ raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + + + def update_system_parameter_for_include(self): +- ++ + # set the pdg_for_cut fortran parameter + pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys())+ + list(self['mxx_min_pdg'].keys())+ list(self['mxx_only_part_antipart'].keys())) +@@ -5758,12 +5758,12 @@ class RunCardNLO(RunCard): + pdg_to_cut.discard('default') + if len(pdg_to_cut)>25: + raise Exception("Maximum 25 different PDGs are allowed for PDG specific cut") +- ++ + if any(int(pdg)<0 for pdg in pdg_to_cut): + logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. 
Always use positve PDG codes') + raise MadGraph5Error('Some PDG specific cuts are defined using negative PDG codes') +- +- ++ ++ + if any(pdg in pdg_to_cut for pdg in [21,22,11,13,15]+ list(range(self['maxjetflavor']+1))): + # Note that this will double check in the fortran code + raise Exception("Can not use PDG related cuts for massless SM particles/leptons") +@@ -5790,7 +5790,7 @@ class RunCardNLO(RunCard): + self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) + else: + if str(pdg) not in self[old_var]: +- raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) ++ raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + self[new_var].append(self[old_var][str(pdg)]) + else: + self['pdg_cut'] = [0] +@@ -5800,12 +5800,12 @@ class RunCardNLO(RunCard): + self['mxxpart_antipart'] = [False] + + def write(self, output_file, template=None, python_template=False, **opt): +- """Write the run_card in output_file according to template ++ """Write the run_card in output_file according to template + (a path to a valid run_card)""" + + if not template: + if not MADEVENT: +- template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', ++ template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', + 'run_card.dat') + python_template = True + else: +@@ -5818,7 +5818,7 @@ class RunCardNLO(RunCard): + + def create_default_for_process(self, proc_characteristic, history, proc_def): + """Rules +- e+ e- beam -> lpp:0 ebeam:500 ++ e+ e- beam -> lpp:0 ebeam:500 + p p beam -> set maxjetflavor automatically + process with tagged photons -> gamma_is_j = false + process without QED splittings -> gamma_is_j = false, recombination = false +@@ -5844,19 +5844,19 @@ class RunCardNLO(RunCard): + self['ebeam2'] = 500 + else: + self['lpp1'] = 0 +- self['lpp2'] = 0 +- ++ self['lpp2'] = 0 ++ + if proc_characteristic['ninitial'] == 1: + #remove all cut + self.remove_all_cut() + + # check for 
tagged photons + tagged_particles = set() +- ++ + # If model has running functionality add the additional parameter + model = proc_def[0].get('model') + if model['running_elements']: +- self.display_block.append('RUNNING') ++ self.display_block.append('RUNNING') + + # Check if need matching + min_particle = 99 +@@ -5885,7 +5885,7 @@ class RunCardNLO(RunCard): + else: + idsmin = [l['id'] for l in procmin['legs']] + break +- ++ + for procmax in proc_def: + if len(procmax['legs']) != max_particle: + continue +@@ -5901,9 +5901,9 @@ class RunCardNLO(RunCard): + else: + # all are jet => matching is ON + matching=True +- break +- +- if matching: ++ break ++ ++ if matching: + self['ickkw'] = 3 + self['fixed_ren_scale'] = False + self["fixed_fac_scale"] = False +@@ -5911,17 +5911,17 @@ class RunCardNLO(RunCard): + self["jetalgo"] = 1 + self["jetradius"] = 1 + self["parton_shower"] = "PYTHIA8" +- ++ + # Read file input/default_run_card_nlo.dat + # This has to be LAST !! + if os.path.exists(self.default_run_card): + self.read(self.default_run_card, consistency=False) +- ++ + class MadLoopParam(ConfigFile): + """ a class for storing/dealing with the file MadLoopParam.dat + contains a parser to read it, facilities to write a new file,... 
+ """ +- ++ + _ID_reduction_tool_map = {1:'CutTools', + 2:'PJFry++', + 3:'IREGI', +@@ -5929,10 +5929,10 @@ class MadLoopParam(ConfigFile): + 5:'Samurai', + 6:'Ninja', + 7:'COLLIER'} +- ++ + def default_setup(self): + """initialize the directory to the default value""" +- ++ + self.add_param("MLReductionLib", "6|7|1") + self.add_param("IREGIMODE", 2) + self.add_param("IREGIRECY", True) +@@ -5954,7 +5954,7 @@ class MadLoopParam(ConfigFile): + self.add_param("HelicityFilterLevel", 2) + self.add_param("LoopInitStartOver", False) + self.add_param("HelInitStartOver", False) +- self.add_param("UseQPIntegrandForNinja", True) ++ self.add_param("UseQPIntegrandForNinja", True) + self.add_param("UseQPIntegrandForCutTools", True) + self.add_param("COLLIERMode", 1) + self.add_param("COLLIERComputeUVpoles", True) +@@ -5966,9 +5966,9 @@ class MadLoopParam(ConfigFile): + self.add_param("COLLIERUseInternalStabilityTest",True) + + def read(self, finput): +- """Read the input file, this can be a path to a file, ++ """Read the input file, this can be a path to a file, + a file object, a str with the content of the file.""" +- ++ + if isinstance(finput, str): + if "\n" in finput: + finput = finput.split('\n') +@@ -5976,7 +5976,7 @@ class MadLoopParam(ConfigFile): + finput = open(finput) + else: + raise Exception("No such file %s" % input) +- ++ + previous_line= '' + for line in finput: + if previous_line.startswith('#'): +@@ -5985,20 +5985,20 @@ class MadLoopParam(ConfigFile): + if len(value) and value[0] not in ['#', '!']: + self.__setitem__(name, value, change_userdefine=True) + previous_line = line +- +- ++ ++ + def write(self, outputpath, template=None,commentdefault=False): +- ++ + if not template: + if not MADEVENT: +- template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', ++ template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', + 'Cards', 'MadLoopParams.dat') + else: + template = pjoin(MEDIR, 'Cards', 'MadLoopParams_default.dat') + fsock = 
open(template, 'r') + template = fsock.readlines() + fsock.close() +- ++ + if isinstance(outputpath, str): + output = open(outputpath, 'w') + else: +@@ -6019,7 +6019,7 @@ class MadLoopParam(ConfigFile): + return value + else: + raise Exception("Can not format input %s" % type(value)) +- ++ + name = '' + done = set() + for line in template: +@@ -6034,12 +6034,12 @@ class MadLoopParam(ConfigFile): + elif line.startswith('#'): + name = line[1:].split()[0] + output.write(line) +- +- +- +- +- +-class eMELA_info(ConfigFile): ++ ++ ++ ++ ++ ++class eMELA_info(ConfigFile): + """ a class for eMELA (LHAPDF-like) info files + """ + path = '' +@@ -6053,7 +6053,7 @@ class eMELA_info(ConfigFile): + + + def read(self, finput): +- if isinstance(finput, file): ++ if isinstance(finput, file): + lines = finput.open().read().split('\n') + self.path = finput.name + else: +@@ -6066,7 +6066,7 @@ class eMELA_info(ConfigFile): + k, v = l.split(':', 1) # ignore further occurrences of : + try: + self[k.strip()] = eval(v) +- except (NameError, SyntaxError): ++ except (NameError, SyntaxError): + self[k.strip()] = v + + def default_setup(self): +@@ -6091,7 +6091,7 @@ class eMELA_info(ConfigFile): + +"powers of alpha should be reweighted a posteriori") + + +- logger.info('Updating variables according to %s' % self.path) ++ logger.info('Updating variables according to %s' % self.path) + # Flavours in the running of alpha + nd, nu, nl = self['eMELA_ActiveFlavoursAlpha'] + self.log_and_update(banner, 'run_card', 'ndnq_run', nd) +@@ -6130,8 +6130,8 @@ class eMELA_info(ConfigFile): + logger.warning('Cannot treat the following renormalisation schemes for ME and PDFs: %d, %d' \ + % (uvscheme, uvscheme_pdf)) + +- # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref +- # also check that the com energy is equal to qref, otherwise print a ++ # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref ++ # also check that the com energy is equal to qref, otherwise print a + # 
warning + if uvscheme_pdf == 1: + qref = self['eMELA_AlphaQref'] +@@ -6144,23 +6144,23 @@ class eMELA_info(ConfigFile): + + # LL / NLL PDF (0/1) + pdforder = self['eMELA_PerturbativeOrder'] +- # pdfscheme = 0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) ++ # pdfscheme = 0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) + # 4->mixed (leptonic); 5-> nobeta (leptonic); 6->delta (leptonic) + # if LL, use nobeta scheme unless LEGACYLLPDF > 0 + if pdforder == 0: + if 'eMELA_LEGACYLLPDF' not in self.keys() or self['eMELA_LEGACYLLPDF'] in [-1, 0]: + self.log_and_update(banner, 'run_card', 'pdfscheme', 5) +- elif self['eMELA_LEGACYLLPDF'] == 1: ++ elif self['eMELA_LEGACYLLPDF'] == 1: + # mixed + self.log_and_update(banner, 'run_card', 'pdfscheme', 4) +- elif self['eMELA_LEGACYLLPDF'] == 2: ++ elif self['eMELA_LEGACYLLPDF'] == 2: + # eta + self.log_and_update(banner, 'run_card', 'pdfscheme', 2) +- elif self['eMELA_LEGACYLLPDF'] == 3: ++ elif self['eMELA_LEGACYLLPDF'] == 3: + # beta + self.log_and_update(banner, 'run_card', 'pdfscheme', 3) + elif pdforder == 1: +- # for NLL, use eMELA_FactorisationSchemeInt = 0/1 ++ # for NLL, use eMELA_FactorisationSchemeInt = 0/1 + # for delta/MSbar + if self['eMELA_FactorisationSchemeInt'] == 0: + # MSbar +@@ -6177,7 +6177,7 @@ class eMELA_info(ConfigFile): + + + +- ++ + + def log_and_update(self, banner, card, par, v): + """update the card parameter par to value v +diff --git b/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py a/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py +index fb7efa87c..cc842aa50 100755 +--- b/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py ++++ a/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py +@@ -2,18 +2,18 @@ + # + # Copyright (c) 2014 The MadGraph5_aMC@NLO Development team and Contributors + # +-# This file is a part of the MadGraph5_aMC@NLO project, an application which ++# This file is a part of the MadGraph5_aMC@NLO project, an application which + # automatically 
generates Feynman diagrams and matrix elements for arbitrary + # high-energy processes in the Standard Model and beyond. + # +-# It is subject to the MadGraph5_aMC@NLO license which should accompany this ++# It is subject to the MadGraph5_aMC@NLO license which should accompany this + # distribution. + # + # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch + # + ################################################################################ + """ A python file to replace the fortran script gen_ximprove. +- This script analyses the result of the survey/ previous refine and ++ This script analyses the result of the survey/ previous refine and + creates the jobs for the following script. + """ + from __future__ import division +@@ -66,77 +66,77 @@ pjoin = os.path.join + class gensym(object): + """a class to call the fortran gensym executable and handle it's output + in order to create the various job that are needed for the survey""" +- ++ + #convenient shortcut for the formatting of variable + @ staticmethod + def format_variable(*args): + return bannermod.ConfigFile.format_variable(*args) +- ++ + combining_job = 2 # number of channel by ajob +- splitted_grid = False ++ splitted_grid = False + min_iterations = 3 + mode= "survey" +- ++ + + def __init__(self, cmd, opt=None): +- ++ + try: + super(gensym, self).__init__(cmd, opt) + except TypeError: + pass +- +- # Run statistics, a dictionary of RunStatistics(), with ++ ++ # Run statistics, a dictionary of RunStatistics(), with + self.run_statistics = {} +- ++ + self.cmd = cmd + self.run_card = cmd.run_card + self.me_dir = cmd.me_dir +- +- ++ ++ + # dictionary to keep track of the precision when combining iteration + self.cross = collections.defaultdict(int) + self.abscross = collections.defaultdict(int) + self.sigma = collections.defaultdict(int) + self.chi2 = collections.defaultdict(int) +- ++ + self.splitted_grid = False + if self.cmd.proc_characteristics['loop_induced']: + nexternal = 
self.cmd.proc_characteristics['nexternal'] + self.splitted_grid = max(2, (nexternal-2)**2) + if hasattr(self.cmd, "opts") and self.cmd.opts['accuracy'] == 0.1: + self.cmd.opts['accuracy'] = 0.02 +- ++ + if isinstance(cmd.cluster, cluster.MultiCore) and self.splitted_grid > 1: + self.splitted_grid = int(cmd.cluster.nb_core**0.5) + if self.splitted_grid == 1 and cmd.cluster.nb_core >1: + self.splitted_grid = 2 +- ++ + #if the user defines it in the run_card: + if self.run_card['survey_splitting'] != -1: + self.splitted_grid = self.run_card['survey_splitting'] + if self.run_card['survey_nchannel_per_job'] != 1 and 'survey_nchannel_per_job' in self.run_card.user_set: +- self.combining_job = self.run_card['survey_nchannel_per_job'] ++ self.combining_job = self.run_card['survey_nchannel_per_job'] + elif self.run_card['hard_survey'] > 1: + self.combining_job = 1 +- +- ++ ++ + self.splitted_Pdir = {} + self.splitted_for_dir = lambda x,y: self.splitted_grid + self.combining_job_for_Pdir = lambda x: self.combining_job + self.lastoffset = {} +- ++ + done_warning_zero_coupling = False + def get_helicity(self, to_submit=True, clean=True): + """launch a single call to madevent to get the list of non zero helicity""" +- +- self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', ++ ++ self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + 'subproc.mg'))] + subproc = self.subproc + P_zero_result = [] + nb_tot_proc = len(subproc) +- job_list = {} +- +- ++ job_list = {} ++ ++ + for nb_proc,subdir in enumerate(subproc): + self.cmd.update_status('Compiling for process %s/%s.' 
% \ + (nb_proc+1,nb_tot_proc), level=None) +@@ -154,7 +154,7 @@ class gensym(object): + p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, cwd=Pdir) + #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts +- ++ + (stdout, _) = p.communicate(''.encode()) + stdout = stdout.decode('ascii',errors='ignore') + if stdout: +@@ -166,11 +166,11 @@ class gensym(object): + if os.path.exists(pjoin(self.me_dir, 'error')): + os.remove(pjoin(self.me_dir, 'error')) + continue # bypass bad process +- ++ + self.cmd.compile(['madevent_forhel'], cwd=Pdir) + if not os.path.exists(pjoin(Pdir, 'madevent_forhel')): +- raise Exception('Error make madevent_forhel not successful') +- ++ raise Exception('Error make madevent_forhel not successful') ++ + if not os.path.exists(pjoin(Pdir, 'Hel')): + os.mkdir(pjoin(Pdir, 'Hel')) + ff = open(pjoin(Pdir, 'Hel', 'input_app.txt'),'w') +@@ -180,15 +180,15 @@ class gensym(object): + try: + os.remove(pjoin(Pdir, 'Hel','results.dat')) + except Exception: +- pass ++ pass + # Launch gensym +- p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, ++ p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, cwd=pjoin(Pdir,'Hel'), shell=True) + #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts + (stdout, _) = p.communicate(" ".encode()) + stdout = stdout.decode('ascii',errors='ignore') + if os.path.exists(pjoin(self.me_dir, 'error')): +- raise Exception(pjoin(self.me_dir,'error')) ++ raise Exception(pjoin(self.me_dir,'error')) + # note a continue is not enough here, we have in top to link + # the matrixX_optim.f to matrixX_orig.f to let the code to work + # after this error. 
+@@ -203,7 +203,7 @@ class gensym(object): + zero_gc = list() + all_zampperhel = set() + all_bad_amps_perhel = set() +- ++ + for line in stdout.splitlines(): + if "=" not in line and ":" not in line: + continue +@@ -229,22 +229,22 @@ class gensym(object): + "%s\n" % (' '.join(zero_gc)) +\ + "This will slow down the computation. Please consider using restricted model:\n" +\ + "https://answers.launchpad.net/mg5amcnlo/+faq/2312") +- +- ++ ++ + all_good_hels = collections.defaultdict(list) + for me_index, hel in all_hel: +- all_good_hels[me_index].append(int(hel)) +- ++ all_good_hels[me_index].append(int(hel)) ++ + #print(all_hel) + if self.run_card['hel_zeroamp']: + all_bad_amps = collections.defaultdict(list) + for me_index, amp in all_zamp: + all_bad_amps[me_index].append(int(amp)) +- ++ + all_bad_amps_perhel = collections.defaultdict(list) + for me_index, hel, amp in all_zampperhel: +- all_bad_amps_perhel[me_index].append((int(hel),int(amp))) +- ++ all_bad_amps_perhel[me_index].append((int(hel),int(amp))) ++ + elif all_zamp: + nb_zero = sum(int(a[1]) for a in all_zamp) + if zero_gc: +@@ -254,7 +254,7 @@ class gensym(object): + else: + logger.warning("The optimization detected that you have %i zero matrix-element for this SubProcess: %s.\n" % nb_zero +\ + "This part can optimize if you set the flag hel_zeroamp to True in the run_card.") +- ++ + #check if we need to do something and write associate information" + data = [all_hel, all_zamp, all_bad_amps_perhel] + if not self.run_card['hel_zeroamp']: +@@ -266,14 +266,14 @@ class gensym(object): + old_data = open(pjoin(Pdir,'Hel','selection')).read() + if old_data == data: + continue +- +- ++ ++ + with open(pjoin(Pdir,'Hel','selection'),'w') as fsock: +- fsock.write(data) +- +- ++ fsock.write(data) ++ ++ + for matrix_file in misc.glob('matrix*orig.f', Pdir): +- ++ + split_file = matrix_file.split('/') + me_index = split_file[-1][len('matrix'):-len('_orig.f')] + +@@ -289,11 +289,11 @@ class gensym(object): + #good_hels = 
sorted(list(good_hels)) + good_hels = [str(x) for x in sorted(all_good_hels[me_index])] + if self.run_card['hel_zeroamp']: +- ++ + bad_amps = [str(x) for x in sorted(all_bad_amps[me_index])] + bad_amps_perhel = [x for x in sorted(all_bad_amps_perhel[me_index])] + else: +- bad_amps = [] ++ bad_amps = [] + bad_amps_perhel = [] + if __debug__: + mtext = open(matrix_file).read() +@@ -310,7 +310,7 @@ class gensym(object): + + recycler.set_input(matrix_file) + recycler.set_output(out_file) +- recycler.set_template(templ_file) ++ recycler.set_template(templ_file) + recycler.generate_output_file() + del recycler + +@@ -321,19 +321,19 @@ class gensym(object): + + return {}, P_zero_result + +- ++ + def launch(self, to_submit=True, clean=True): + """ """ + + if not hasattr(self, 'subproc'): +- self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', ++ self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + 'subproc.mg'))] + subproc = self.subproc +- ++ + P_zero_result = [] # check the number of times where they are no phase-space +- ++ + nb_tot_proc = len(subproc) +- job_list = {} ++ job_list = {} + for nb_proc,subdir in enumerate(subproc): + self.cmd.update_status('Compiling for process %s/%s.
(previous processes already running)' % \ + (nb_proc+1,nb_tot_proc), level=None) +@@ -341,7 +341,7 @@ class gensym(object): + subdir = subdir.strip() + Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) + logger.info(' %s ' % subdir) +- ++ + # clean previous run + if clean: + for match in misc.glob('*ajob*', Pdir): +@@ -349,17 +349,17 @@ class gensym(object): + os.remove(match) + for match in misc.glob('G*', Pdir): + if os.path.exists(pjoin(match,'results.dat')): +- os.remove(pjoin(match, 'results.dat')) ++ os.remove(pjoin(match, 'results.dat')) + if os.path.exists(pjoin(match, 'ftn25')): +- os.remove(pjoin(match, 'ftn25')) +- ++ os.remove(pjoin(match, 'ftn25')) ++ + #compile gensym + self.cmd.compile(['gensym'], cwd=Pdir) + if not os.path.exists(pjoin(Pdir, 'gensym')): +- raise Exception('Error make gensym not successful') +- ++ raise Exception('Error make gensym not successful') ++ + # Launch gensym +- p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, ++ p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, cwd=Pdir) + #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts + (stdout, _) = p.communicate(''.encode()) +@@ -367,8 +367,8 @@ class gensym(object): + if os.path.exists(pjoin(self.me_dir,'error')): + files.mv(pjoin(self.me_dir,'error'), pjoin(Pdir,'ajob.no_ps.log')) + P_zero_result.append(subdir) +- continue +- ++ continue ++ + jobs = stdout.split() + job_list[Pdir] = jobs + try: +@@ -386,26 +386,38 @@ class gensym(object): + continue + else: + if done: +- raise Exception('Parsing error in gensym: %s' % stdout) +- job_list[Pdir] = l.split() ++ raise Exception('Parsing error in gensym: %s' % stdout) ++ job_list[Pdir] = l.split() + done = True + if not done: + raise Exception('Parsing error in gensym: %s' % stdout) +- +- self.cmd.compile(['madevent'], cwd=Pdir) ++ ++ cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py ++ logger.info("Building madevent in 
madevent_interface.py with '%s' matrix elements"%cudacpp_backend) ++ if cudacpp_backend == 'FORTRAN': ++ self.cmd.compile(['madevent_fortran_link'], cwd=Pdir) ++ elif cudacpp_backend == 'CPP': ++ self.cmd.compile(['madevent_cpp_link'], cwd=Pdir) ++ elif cudacpp_backend == 'CUDA': ++ self.cmd.compile(['madevent_cuda_link'], cwd=Pdir) ++ else: ++ raise Exception("Invalid cudacpp_backend='%s': only 'FORTRAN', 'CPP', 'CUDA' are supported") ++ ###logger.info("Building madevent with ALL(FORTRAN/CPP/CUDA) matrix elements (cudacpp_backend=%s)"%cudacpp_backend) ++ ###self.cmd.compile(['all'], cwd=Pdir) ++ + if to_submit: + self.submit_to_cluster(job_list) + job_list = {} +- ++ + return job_list, P_zero_result +- ++ + def resubmit(self, min_precision=1.0, resubmit_zero=False): + """collect the result of the current run and relaunch each channel +- not completed or optionally a completed one with a precision worse than ++ not completed or optionally a completed one with a precision worse than + a threshold (and/or the zero result channel)""" +- ++ + job_list, P_zero_result = self.launch(to_submit=False, clean=False) +- ++ + for P , jobs in dict(job_list).items(): + misc.sprint(jobs) + to_resub = [] +@@ -422,7 +434,7 @@ class gensym(object): + elif max(one_result.xerru, one_result.xerrc)/one_result.xsec > min_precision: + to_resub.append(job) + else: +- to_resub.append(job) ++ to_resub.append(job) + if to_resub: + for G in to_resub: + try: +@@ -430,19 +442,19 @@ class gensym(object): + except Exception as error: + misc.sprint(error) + pass +- misc.sprint(to_resub) ++ misc.sprint(to_resub) + self.submit_to_cluster({P: to_resub}) +- +- +- +- +- +- +- +- +- +- +- ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + def submit_to_cluster(self, job_list): + """ """ + +@@ -455,7 +467,7 @@ class gensym(object): + nexternal = self.cmd.proc_characteristics['nexternal'] + current = open(pjoin(path, "nexternal.inc")).read() + ext = re.search(r"PARAMETER \(NEXTERNAL=(\d+)\)", current).group(1) +- ++ + if 
self.run_card['job_strategy'] == 2: + self.splitted_grid = 2 + if nexternal == int(ext): +@@ -486,18 +498,18 @@ class gensym(object): + return self.submit_to_cluster_no_splitting(job_list) + else: + return self.submit_to_cluster_splitted(job_list) +- +- ++ ++ + def submit_to_cluster_no_splitting(self, job_list): + """submit the survey without the parralelization. + This is the old mode which is still usefull in single core""" +- +- # write the template file for the parameter file ++ ++ # write the template file for the parameter file + self.write_parameter(parralelization=False, Pdirs=list(job_list.keys())) +- +- ++ ++ + # launch the job with the appropriate grouping +- for Pdir, jobs in job_list.items(): ++ for Pdir, jobs in job_list.items(): + jobs = list(jobs) + i=0 + while jobs: +@@ -506,16 +518,16 @@ class gensym(object): + for _ in range(self.combining_job_for_Pdir(Pdir)): + if jobs: + to_submit.append(jobs.pop(0)) +- ++ + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), + argument=to_submit, + cwd=pjoin(self.me_dir,'SubProcesses' , Pdir)) + +- ++ + def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): + """prepare the input_file for submitting the channel""" + +- ++ + if 'SubProcesses' not in Pdir: + Pdir = pjoin(self.me_dir, 'SubProcesses', Pdir) + +@@ -523,8 +535,8 @@ class gensym(object): + self.splitted_Pdir[(Pdir, G)] = int(nb_job) + + +- # 1. write the new input_app.txt +- run_card = self.cmd.run_card ++ # 1. write the new input_app.txt ++ run_card = self.cmd.run_card + options = {'event' : submit_ps, + 'maxiter': 1, + 'miniter': 1, +@@ -533,29 +545,29 @@ class gensym(object): + else run_card['nhel'], + 'gridmode': -2, + 'channel' : G +- } +- ++ } ++ + Gdir = pjoin(Pdir, 'G%s' % G) +- self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) +- ++ self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) ++ + # 2. check that ftn25 exists. 
+- assert os.path.exists(pjoin(Gdir, "ftn25")) +- +- ++ assert os.path.exists(pjoin(Gdir, "ftn25")) ++ ++ + # 3. Submit the new jobs + #call back function +- packet = cluster.Packet((Pdir, G, step+1), ++ packet = cluster.Packet((Pdir, G, step+1), + self.combine_iteration, + (Pdir, G, step+1)) +- ++ + if step ==0: +- self.lastoffset[(Pdir, G)] = 0 +- +- # resubmit the new jobs ++ self.lastoffset[(Pdir, G)] = 0 ++ ++ # resubmit the new jobs + for i in range(int(nb_job)): + name = "G%s_%s" % (G,i+1) + self.lastoffset[(Pdir, G)] += 1 +- offset = self.lastoffset[(Pdir, G)] ++ offset = self.lastoffset[(Pdir, G)] + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'refine_splitted.sh'), + argument=[name, 'G%s'%G, offset], + cwd= Pdir, +@@ -563,9 +575,9 @@ class gensym(object): + + + def submit_to_cluster_splitted(self, job_list): +- """ submit the version of the survey with splitted grid creation +- """ +- ++ """ submit the version of the survey with splitted grid creation ++ """ ++ + #if self.splitted_grid <= 1: + # return self.submit_to_cluster_no_splitting(job_list) + +@@ -580,7 +592,7 @@ class gensym(object): + + for job in jobs: + packet = cluster.Packet((Pdir, job, 1), self.combine_iteration, (Pdir, job, 1)) +- for i in range(self.splitted_for_dir(Pdir, job)): ++ for i in range(self.splitted_for_dir(Pdir, job)): + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), + argument=[i+1, job], + cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), +@@ -589,15 +601,15 @@ class gensym(object): + def combine_iteration(self, Pdir, G, step): + + grid_calculator, cross, error = self.combine_grid(Pdir, G, step) +- +- # Compute the number of events used for this run. ++ ++ # Compute the number of events used for this run. + nb_events = grid_calculator.target_evt + + Gdirs = [] #build the the list of directory + for i in range(self.splitted_for_dir(Pdir, G)): + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + Gdirs.append(path) +- ++ + # 4. 
make the submission of the next iteration + # Three cases - less than 3 iteration -> continue + # - more than 3 and less than 5 -> check error +@@ -615,15 +627,15 @@ class gensym(object): + need_submit = False + else: + need_submit = True +- ++ + elif step >= self.cmd.opts['iterations']: + need_submit = False + elif self.cmd.opts['accuracy'] < 0: + #check for luminosity + raise Exception("Not Implemented") + elif self.abscross[(Pdir,G)] == 0: +- need_submit = False +- else: ++ need_submit = False ++ else: + across = self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) + tot_across = self.get_current_axsec() + if across == 0: +@@ -634,20 +646,20 @@ class gensym(object): + need_submit = True + else: + need_submit = False +- +- ++ ++ + if cross: + grid_calculator.write_grid_for_submission(Pdir,G, + self.splitted_for_dir(Pdir, G), + nb_events,mode=self.mode, + conservative_factor=5.0) +- +- xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) +- if float(cross)!=0.0 and float(error)!=0.0 else 8) ++ ++ xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) ++ if float(cross)!=0.0 and float(error)!=0.0 else 8) + if need_submit: + message = "%%s/G%%s is at %%%s +- %%.3g pb. 
Now submitting iteration #%s."%(xsec_format, step+1) + logger.info(message%\ +- (os.path.basename(Pdir), G, float(cross), ++ (os.path.basename(Pdir), G, float(cross), + float(error)*float(cross))) + self.resubmit_survey(Pdir,G, Gdirs, step) + elif cross: +@@ -658,26 +670,26 @@ class gensym(object): + newGpath = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) + if not os.path.exists(newGpath): + os.mkdir(newGpath) +- ++ + # copy the new grid: +- files.cp(pjoin(Gdirs[0], 'ftn25'), ++ files.cp(pjoin(Gdirs[0], 'ftn25'), + pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, 'ftn26')) +- ++ + # copy the events + fsock = open(pjoin(newGpath, 'events.lhe'), 'w') + for Gdir in Gdirs: +- fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) +- ++ fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) ++ + # copy one log +- files.cp(pjoin(Gdirs[0], 'log.txt'), ++ files.cp(pjoin(Gdirs[0], 'log.txt'), + pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G)) +- +- ++ ++ + # create the appropriate results.dat + self.write_results(grid_calculator, cross, error, Pdir, G, step) + else: + logger.info("Survey finished for %s/G%s [0 cross]", os.path.basename(Pdir),G) +- ++ + Gdir = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) + if not os.path.exists(Gdir): + os.mkdir(Gdir) +@@ -685,21 +697,21 @@ class gensym(object): + files.cp(pjoin(Gdirs[0], 'log.txt'), Gdir) + # create the appropriate results.dat + self.write_results(grid_calculator, cross, error, Pdir, G, step) +- ++ + return 0 + + def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): + """ exclude_sub_jobs is to remove some of the subjobs if a numerical + issue is detected in one of them. Warning is issue when this occurs. + """ +- ++ + # 1. 
create an object to combine the grid information and fill it + grid_calculator = combine_grid.grid_information(self.run_card['nhel']) +- ++ + for i in range(self.splitted_for_dir(Pdir, G)): + if i in exclude_sub_jobs: + continue +- path = pjoin(Pdir, "G%s_%s" % (G, i+1)) ++ path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + fsock = misc.mult_try_open(pjoin(path, 'results.dat')) + one_result = grid_calculator.add_results_information(fsock) + fsock.close() +@@ -711,9 +723,9 @@ class gensym(object): + fsock.close() + os.remove(pjoin(path, 'results.dat')) + #os.remove(pjoin(path, 'grid_information')) +- +- +- ++ ++ ++ + #2. combine the information about the total crossection / error + # start by keep the interation in memory + cross, across, sigma = grid_calculator.get_cross_section() +@@ -724,12 +736,12 @@ class gensym(object): + if maxwgt: + nunwgt = grid_calculator.get_nunwgt(maxwgt) + # Make sure not to apply the security below during the first step of the +- # survey. Also, disregard channels with a contribution relative to the ++ # survey. Also, disregard channels with a contribution relative to the + # total cross-section smaller than 1e-8 since in this case it is unlikely + # that this channel will need more than 1 event anyway. 
+ apply_instability_security = False + rel_contrib = 0.0 +- if (self.__class__ != gensym or step > 1): ++ if (self.__class__ != gensym or step > 1): + Pdir_across = 0.0 + Gdir_across = 0.0 + for (mPdir,mG) in self.abscross.keys(): +@@ -738,7 +750,7 @@ class gensym(object): + (self.sigma[(mPdir,mG)]+1e-99)) + if mG == G: + Gdir_across += (self.abscross[(mPdir,mG)]/ +- (self.sigma[(mPdir,mG)]+1e-99)) ++ (self.sigma[(mPdir,mG)]+1e-99)) + rel_contrib = abs(Gdir_across/(Pdir_across+1e-99)) + if rel_contrib > (1.0e-8) and \ + nunwgt < 2 and len(grid_calculator.results) > 1: +@@ -758,14 +770,14 @@ For offline investigation, the problematic discarded events are stored in: + exclude_sub_jobs = list(exclude_sub_jobs) + exclude_sub_jobs.append(th_maxwgt[-1][1]) + grid_calculator.results.run_statistics['skipped_subchannel'] += 1 +- ++ + # Add some monitoring of the problematic events +- gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) ++ gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) + if os.path.isfile(pjoin(gPath,'events.lhe')): + lhe_file = lhe_parser.EventFile(pjoin(gPath,'events.lhe')) + discardedPath = pjoin(Pdir,'DiscardedUnstableEvents') + if not os.path.exists(discardedPath): +- os.mkdir(discardedPath) ++ os.mkdir(discardedPath) + if os.path.isdir(discardedPath): + # Keep only the event with a maximum weight, as it surely + # is the problematic one. 
+@@ -778,10 +790,10 @@ For offline investigation, the problematic discarded events are stored in: + lhe_file.close() + evtRecord.write(pjoin(gPath,'events.lhe').read()) + evtRecord.close() +- ++ + return self.combine_grid(Pdir, G, step, exclude_sub_jobs) + +- ++ + if across !=0: + if sigma != 0: + self.cross[(Pdir,G)] += cross**3/sigma**2 +@@ -802,10 +814,10 @@ For offline investigation, the problematic discarded events are stored in: + self.chi2[(Pdir,G)] = 0 + cross = self.cross[(Pdir,G)] + error = 0 +- ++ + else: + error = 0 +- ++ + grid_calculator.results.compute_values(update_statistics=True) + if (str(os.path.basename(Pdir)), G) in self.run_statistics: + self.run_statistics[(str(os.path.basename(Pdir)), G)]\ +@@ -813,8 +825,8 @@ For offline investigation, the problematic discarded events are stored in: + else: + self.run_statistics[(str(os.path.basename(Pdir)), G)] = \ + grid_calculator.results.run_statistics +- +- self.warnings_from_statistics(G, grid_calculator.results.run_statistics) ++ ++ self.warnings_from_statistics(G, grid_calculator.results.run_statistics) + stats_msg = grid_calculator.results.run_statistics.nice_output( + '/'.join([os.path.basename(Pdir),'G%s'%G])) + +@@ -824,7 +836,7 @@ For offline investigation, the problematic discarded events are stored in: + # Clean up grid_information to avoid border effects in case of a crash + for i in range(self.splitted_for_dir(Pdir, G)): + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) +- try: ++ try: + os.remove(pjoin(path, 'grid_information')) + except OSError as oneerror: + if oneerror.errno != 2: +@@ -838,7 +850,7 @@ For offline investigation, the problematic discarded events are stored in: + return + + EPS_fraction = float(stats['exceptional_points'])/stats['n_madloop_calls'] +- ++ + msg = "Channel %s has encountered a fraction of %.3g\n"+ \ + "of numerically unstable loop matrix element computations\n"+\ + "(which could not be rescued using quadruple precision).\n"+\ +@@ -849,16 +861,16 @@ For offline 
investigation, the problematic discarded events are stored in: + elif EPS_fraction > 0.01: + logger.critical((msg%(G,EPS_fraction)).replace('might', 'can')) + raise Exception((msg%(G,EPS_fraction)).replace('might', 'can')) +- ++ + def get_current_axsec(self): +- ++ + across = 0 + for (Pdir,G) in self.abscross: + across += self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) + return across +- ++ + def write_results(self, grid_calculator, cross, error, Pdir, G, step): +- ++ + #compute the value + if cross == 0: + abscross,nw, luminosity = 0, 0, 0 +@@ -876,7 +888,7 @@ For offline investigation, the problematic discarded events are stored in: + maxwgt = grid_calculator.get_max_wgt() + nunwgt = grid_calculator.get_nunwgt() + luminosity = nunwgt/cross +- ++ + #format the results.dat + def fstr(nb): + data = '%E' % nb +@@ -885,20 +897,20 @@ For offline investigation, the problematic discarded events are stored in: + power = int(power) + 1 + return '%.5fE%+03i' %(nb,power) + line = '%s %s %s %i %i %i %i %s %s %s %s 0.0 0\n' % \ +- (fstr(cross), fstr(error*cross), fstr(error*cross), ++ (fstr(cross), fstr(error*cross), fstr(error*cross), + nevents, nw, maxit,nunwgt, + fstr(luminosity), fstr(wgt), fstr(abscross), fstr(maxwgt)) +- ++ + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, +- 'results.dat'),'w') ++ 'results.dat'),'w') + fsock.writelines(line) + fsock.close() +- ++ + def resubmit_survey(self, Pdir, G, Gdirs, step): + """submit the next iteration of the survey""" + + # 1. 
write the new input_app.txt to double the number of points +- run_card = self.cmd.run_card ++ run_card = self.cmd.run_card + options = {'event' : 2**(step) * self.cmd.opts['points'] / self.splitted_grid, + 'maxiter': 1, + 'miniter': 1, +@@ -907,18 +919,18 @@ For offline investigation, the problematic discarded events are stored in: + else run_card['nhel'], + 'gridmode': -2, + 'channel' : '' +- } +- ++ } ++ + if int(options['helicity']) == 1: + options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) +- ++ + for Gdir in Gdirs: +- self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) +- +- ++ self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) ++ ++ + #2. resubmit the new jobs + packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, \ +- (Pdir, G, step+1)) ++ (Pdir, G, step+1)) + nb_step = len(Gdirs) * (step+1) + for i,subdir in enumerate(Gdirs): + subdir = subdir.rsplit('_',1)[1] +@@ -926,34 +938,34 @@ For offline investigation, the problematic discarded events are stored in: + offset = nb_step+i+1 + offset=str(offset) + tag = "%s.%s" % (subdir, offset) +- ++ + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), + argument=[tag, G], + cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), + packet_member=packet) +- ++ + + + + def write_parameter_file(self, path, options): + """ """ +- ++ + template =""" %(event)s %(maxiter)s %(miniter)s !Number of events and max and min iterations + %(accuracy)s !Accuracy + %(gridmode)s !Grid Adjustment 0=none, 2=adjust + 1 !Suppress Amplitude 1=yes + %(helicity)s !Helicity Sum/event 0=exact +- %(channel)s """ ++ %(channel)s """ + options['event'] = int(options['event']) + open(path, 'w').write(template % options) + +- +- ++ ++ + def write_parameter(self, parralelization, Pdirs=None): + """Write the parameter of the survey run""" + + run_card = self.cmd.run_card +- ++ + options = {'event' : self.cmd.opts['points'], + 'maxiter': self.cmd.opts['iterations'], + 
'miniter': self.min_iterations, +@@ -963,36 +975,36 @@ For offline investigation, the problematic discarded events are stored in: + 'gridmode': 2, + 'channel': '' + } +- ++ + if int(options['helicity'])== 1: + options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) +- ++ + if parralelization: + options['gridmode'] = -2 + options['maxiter'] = 1 #this is automatic in dsample anyway + options['miniter'] = 1 #this is automatic in dsample anyway + options['event'] /= self.splitted_grid +- ++ + if not Pdirs: + Pdirs = self.subproc +- ++ + for Pdir in Pdirs: +- path =pjoin(Pdir, 'input_app.txt') ++ path =pjoin(Pdir, 'input_app.txt') + self.write_parameter_file(path, options) + +- +- +-class gen_ximprove(object): +- +- ++ ++ ++class gen_ximprove(object): ++ ++ + # some hardcoded value which impact the generation + gen_events_security = 1.2 # multiply the number of requested event by this number for security + combining_job = 0 # allow to run multiple channel in sequence +- max_request_event = 1000 # split jobs if a channel if it needs more than that ++ max_request_event = 1000 # split jobs if a channel if it needs more than that + max_event_in_iter = 5000 + min_event_in_iter = 1000 +- max_splitting = 130 # maximum duplication of a given channel +- min_iter = 3 ++ max_splitting = 130 # maximum duplication of a given channel ++ min_iter = 3 + max_iter = 9 + keep_grid_for_refine = False # only apply if needed to split the job + +@@ -1010,7 +1022,7 @@ class gen_ximprove(object): + return super(gen_ximprove, cls).__new__(gen_ximprove_gridpack) + elif cls.force_class == 'loop_induced': + return super(gen_ximprove, cls).__new__(gen_ximprove_share) +- ++ + if cmd.proc_characteristics['loop_induced']: + return super(gen_ximprove, cls).__new__(gen_ximprove_share) + elif gen_ximprove.format_variable(cmd.run_card['gridpack'], bool): +@@ -1019,31 +1031,31 @@ class gen_ximprove(object): + return super(gen_ximprove, cls).__new__(gen_ximprove_share) + 
else: + return super(gen_ximprove, cls).__new__(gen_ximprove_v4) +- +- ++ ++ + def __init__(self, cmd, opt=None): +- ++ + try: + super(gen_ximprove, self).__init__(cmd, opt) + except TypeError: + pass +- ++ + self.run_statistics = {} + self.cmd = cmd + self.run_card = cmd.run_card + run_card = self.run_card + self.me_dir = cmd.me_dir +- ++ + #extract from the run_card the information that we need. + self.gridpack = run_card['gridpack'] + self.nhel = run_card['nhel'] + if "nhel_refine" in run_card: + self.nhel = run_card["nhel_refine"] +- ++ + if self.run_card['refine_evt_by_job'] != -1: + self.max_request_event = run_card['refine_evt_by_job'] +- +- ++ ++ + # Default option for the run + self.gen_events = True + self.parralel = False +@@ -1054,7 +1066,7 @@ class gen_ximprove(object): + # parameter for the gridpack run + self.nreq = 2000 + self.iseed = 4321 +- ++ + # placeholder for information + self.results = 0 #updated in launch/update_html + +@@ -1062,16 +1074,16 @@ class gen_ximprove(object): + self.configure(opt) + elif isinstance(opt, bannermod.GridpackCard): + self.configure_gridpack(opt) +- ++ + def __call__(self): + return self.launch() +- ++ + def launch(self): +- """running """ +- ++ """running """ ++ + #start the run + self.handle_seed() +- self.results = sum_html.collect_result(self.cmd, ++ self.results = sum_html.collect_result(self.cmd, + main_dir=pjoin(self.cmd.me_dir,'SubProcesses')) #main_dir is for gridpack readonly mode + if self.gen_events: + # We run to provide a given number of events +@@ -1083,15 +1095,15 @@ class gen_ximprove(object): + + def configure(self, opt): + """Defines some parameter of the run""" +- ++ + for key, value in opt.items(): + if key in self.__dict__: + targettype = type(getattr(self, key)) + setattr(self, key, self.format_variable(value, targettype, key)) + else: + raise Exception('%s not define' % key) +- +- ++ ++ + # special treatment always do outside the loop to avoid side effect + if 'err_goal' in opt: + if 
self.err_goal < 1: +@@ -1101,24 +1113,24 @@ class gen_ximprove(object): + logger.info("Generating %s unweighted events." % self.err_goal) + self.gen_events = True + self.err_goal = self.err_goal * self.gen_events_security # security +- ++ + def handle_seed(self): + """not needed but for gridpack --which is not handle here for the moment""" + return +- +- ++ ++ + def find_job_for_event(self): + """return the list of channel that need to be improved""" +- ++ + assert self.err_goal >=1 + self.err_goal = int(self.err_goal) +- +- goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 ++ ++ goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 + logger.info('Effective Luminosity %s pb^-1', goal_lum) +- ++ + all_channels = sum([list(P) for P in self.results],[]) +- all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) +- ++ all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) ++ + to_refine = [] + for C in all_channels: + if C.get('axsec') == 0: +@@ -1129,61 +1141,61 @@ class gen_ximprove(object): + elif C.get('xerr') > max(C.get('axsec'), + (1/(100*math.sqrt(self.err_goal)))*all_channels[-1].get('axsec')): + to_refine.append(C) +- +- logger.info('need to improve %s channels' % len(to_refine)) ++ ++ logger.info('need to improve %s channels' % len(to_refine)) + return goal_lum, to_refine + + def update_html(self): + """update the html from this object since it contains all the information""" +- ++ + + run = self.cmd.results.current['run_name'] + if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): + os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) +- ++ + unit = self.cmd.results.unit +- P_text = "" +- if self.results: +- Presults = self.results ++ P_text = "" ++ if self.results: ++ Presults = self.results + else: + self.results = sum_html.collect_result(self.cmd, None) + Presults = self.results +- ++ + for P_comb in Presults: +- P_text += P_comb.get_html(run, unit, self.cmd.me_dir) +- +- 
Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) +- ++ P_text += P_comb.get_html(run, unit, self.cmd.me_dir) ++ ++ Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) ++ + fsock = open(pjoin(self.cmd.me_dir, 'HTML', run, 'results.html'),'w') + fsock.write(sum_html.results_header) + fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) +- fsock.write('%s
' % P_text) +- ++ fsock.write('%s ' % P_text) ++ + self.cmd.results.add_detail('cross', Presults.xsec) +- self.cmd.results.add_detail('error', Presults.xerru) +- +- return Presults.xsec, Presults.xerru ++ self.cmd.results.add_detail('error', Presults.xerru) ++ ++ return Presults.xsec, Presults.xerru ++ + +- + class gen_ximprove_v4(gen_ximprove): +- ++ + # some hardcoded value which impact the generation + gen_events_security = 1.2 # multiply the number of requested event by this number for security + combining_job = 0 # allow to run multiple channel in sequence +- max_request_event = 1000 # split jobs if a channel if it needs more than that ++ max_request_event = 1000 # split jobs if a channel if it needs more than that + max_event_in_iter = 5000 + min_event_in_iter = 1000 +- max_splitting = 130 # maximum duplication of a given channel +- min_iter = 3 ++ max_splitting = 130 # maximum duplication of a given channel ++ min_iter = 3 + max_iter = 9 + keep_grid_for_refine = False # only apply if needed to split the job + + + +- def __init__(self, cmd, opt=None): +- ++ def __init__(self, cmd, opt=None): ++ + super(gen_ximprove_v4, self).__init__(cmd, opt) +- ++ + if cmd.opts['accuracy'] < cmd._survey_options['accuracy'][1]: + self.increase_precision(cmd._survey_options['accuracy'][1]/cmd.opts['accuracy']) + +@@ -1191,7 +1203,7 @@ class gen_ximprove_v4(gen_ximprove): + + for path in misc.glob(pjoin('*', '*','multijob.dat'), pjoin(self.me_dir, 'SubProcesses')): + open(path,'w').write('0\n') +- ++ + def write_multijob(self, Channel, nb_split): + """ """ + if nb_split <=1: +@@ -1199,7 +1211,7 @@ class gen_ximprove_v4(gen_ximprove): + f = open(pjoin(self.me_dir, 'SubProcesses', Channel.get('name'), 'multijob.dat'), 'w') + f.write('%i\n' % nb_split) + f.close() +- ++ + def increase_precision(self, rate=3): + #misc.sprint(rate) + if rate < 3: +@@ -1210,25 +1222,25 @@ class gen_ximprove_v4(gen_ximprove): + rate = rate -2 + self.max_event_in_iter = int((rate+1) * 10000) + 
self.min_events = int(rate+2) * 2500 +- self.gen_events_security = 1 + 0.1 * (rate+2) +- ++ self.gen_events_security = 1 + 0.1 * (rate+2) ++ + if int(self.nhel) == 1: + self.min_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//3) + self.max_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//2) + +- +- ++ ++ + alphabet = "abcdefghijklmnopqrstuvwxyz" + def get_job_for_event(self): + """generate the script in order to generate a given number of event""" + # correspond to write_gen in the fortran version +- +- ++ ++ + goal_lum, to_refine = self.find_job_for_event() + + #reset the potential multijob of previous run + self.reset_multijob() +- ++ + jobs = [] # list of the refine if some job are split is list of + # dict with the parameter of the run. + +@@ -1245,17 +1257,17 @@ class gen_ximprove_v4(gen_ximprove): + else: + for i in range(len(to_refine) //3): + new_order.append(to_refine[i]) +- new_order.append(to_refine[-2*i-1]) ++ new_order.append(to_refine[-2*i-1]) + new_order.append(to_refine[-2*i-2]) + if len(to_refine) % 3 == 1: +- new_order.append(to_refine[i+1]) ++ new_order.append(to_refine[i+1]) + elif len(to_refine) % 3 == 2: +- new_order.append(to_refine[i+2]) ++ new_order.append(to_refine[i+2]) + #ensure that the reordering is done nicely + assert set([id(C) for C in to_refine]) == set([id(C) for C in new_order]) +- to_refine = new_order +- +- ++ to_refine = new_order ++ ++ + # loop over the channel to refine + for C in to_refine: + #1. Compute the number of points are needed to reach target +@@ -1267,7 +1279,7 @@ class gen_ximprove_v4(gen_ximprove): + nb_split = self.max_splitting + nb_split=max(1, nb_split) + +- ++ + #2. 
estimate how many points we need in each iteration + if C.get('nunwgt') > 0: + nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) +@@ -1284,21 +1296,21 @@ class gen_ximprove_v4(gen_ximprove): + nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) + logger.debug("%s : need %s event. Need %s split job of %s points", C.name, needed_event, nb_split, nevents) + +- ++ + # write the multi-job information + self.write_multijob(C, nb_split) +- ++ + packet = cluster.Packet((C.parent_name, C.name), + combine_runs.CombineRuns, + (pjoin(self.me_dir, 'SubProcesses', C.parent_name)), + {"subproc": C.name, "nb_split":nb_split}) +- +- ++ ++ + #create the info dict assume no splitting for the default + info = {'name': self.cmd.results.current['run_name'], + 'script_name': 'unknown', + 'directory': C.name, # need to be change for splitted job +- 'P_dir': C.parent_name, ++ 'P_dir': C.parent_name, + 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), + 'offset': 1, # need to be change for splitted job + 'nevents': nevents, +@@ -1309,7 +1321,7 @@ class gen_ximprove_v4(gen_ximprove): + 'channel': C.name.replace('G',''), + 'grid_refinment' : 0, #no refinment of the grid + 'base_directory': '', #should be change in splitted job if want to keep the grid +- 'packet': packet, ++ 'packet': packet, + } + + if nb_split == 1: +@@ -1322,19 +1334,19 @@ class gen_ximprove_v4(gen_ximprove): + if self.keep_grid_for_refine: + new_info['base_directory'] = info['directory'] + jobs.append(new_info) +- +- self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) +- ++ ++ self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) ++ + + def create_ajob(self, template, jobs, write_dir=None): + """create the ajob""" +- ++ + if not jobs: + return + + if not write_dir: + write_dir = pjoin(self.me_dir, 'SubProcesses') +- ++ + #filter the job according to their SubProcess directory # no mix submition + P2job= 
collections.defaultdict(list) + for j in jobs: +@@ -1343,11 +1355,11 @@ class gen_ximprove_v4(gen_ximprove): + for P in P2job.values(): + self.create_ajob(template, P, write_dir) + return +- +- ++ ++ + #Here we can assume that all job are for the same directory. + path = pjoin(write_dir, jobs[0]['P_dir']) +- ++ + template_text = open(template, 'r').read() + # special treatment if needed to combine the script + # computes how many submition miss one job +@@ -1372,8 +1384,8 @@ class gen_ximprove_v4(gen_ximprove): + skip1=0 + combining_job =1 + nb_sub = len(jobs) +- +- ++ ++ + nb_use = 0 + for i in range(nb_sub): + script_number = i+1 +@@ -1392,14 +1404,14 @@ class gen_ximprove_v4(gen_ximprove): + info["base_directory"] = "./" + fsock.write(template_text % info) + nb_use += nb_job +- ++ + fsock.close() + return script_number + + def get_job_for_precision(self): + """create the ajob to achieve a give precision on the total cross-section""" + +- ++ + assert self.err_goal <=1 + xtot = abs(self.results.xsec) + logger.info("Working on precision: %s %%" %(100*self.err_goal)) +@@ -1416,46 +1428,46 @@ class gen_ximprove_v4(gen_ximprove): + rerr *=rerr + if not len(to_refine): + return +- +- # change limit since most don't contribute ++ ++ # change limit since most don't contribute + limit = math.sqrt((self.err_goal * xtot)**2 - rerr/math.sqrt(len(to_refine))) + for C in to_refine[:]: + cerr = C.mfactor*(C.xerru + len(to_refine)*C.xerrc) + if cerr < limit: + to_refine.remove(C) +- ++ + # all the channel are now selected. create the channel information + logger.info('need to improve %s channels' % len(to_refine)) + +- ++ + jobs = [] # list of the refine if some job are split is list of + # dict with the parameter of the run. + + # loop over the channel to refine + for C in to_refine: +- ++ + #1. 
Determine how many events we need in each iteration + yerr = C.mfactor*(C.xerru+len(to_refine)*C.xerrc) + nevents = 0.2*C.nevents*(yerr/limit)**2 +- ++ + nb_split = int((nevents*(C.nunwgt/C.nevents)/self.max_request_event/ (2**self.min_iter-1))**(2/3)) + nb_split = max(nb_split, 1) +- # **(2/3) to slow down the increase in number of jobs ++ # **(2/3) to slow down the increase in number of jobs + if nb_split > self.max_splitting: + nb_split = self.max_splitting +- ++ + if nb_split >1: + nevents = nevents / nb_split + self.write_multijob(C, nb_split) + # forbid too low/too large value + nevents = min(self.min_event_in_iter, max(self.max_event_in_iter, nevents)) +- +- ++ ++ + #create the info dict assume no splitting for the default + info = {'name': self.cmd.results.current['run_name'], + 'script_name': 'unknown', + 'directory': C.name, # need to be change for splitted job +- 'P_dir': C.parent_name, ++ 'P_dir': C.parent_name, + 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), + 'offset': 1, # need to be change for splitted job + 'nevents': nevents, +@@ -1475,38 +1487,38 @@ class gen_ximprove_v4(gen_ximprove): + new_info['offset'] = i+1 + new_info['directory'] += self.alphabet[i % 26] + str((i+1)//26) + jobs.append(new_info) +- self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) +- ++ self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) ++ + def update_html(self): + """update the html from this object since it contains all the information""" +- ++ + + run = self.cmd.results.current['run_name'] + if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): + os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) +- ++ + unit = self.cmd.results.unit +- P_text = "" +- if self.results: +- Presults = self.results ++ P_text = "" ++ if self.results: ++ Presults = self.results + else: + self.results = sum_html.collect_result(self.cmd, None) + Presults = self.results +- ++ + for P_comb in Presults: +- P_text += P_comb.get_html(run, 
unit, self.cmd.me_dir) +- +- Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) +- ++ P_text += P_comb.get_html(run, unit, self.cmd.me_dir) ++ ++ Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) ++ + fsock = open(pjoin(self.cmd.me_dir, 'HTML', run, 'results.html'),'w') + fsock.write(sum_html.results_header) + fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) +- fsock.write('%s
' % P_text) +- ++ fsock.write('%s ' % P_text) ++ + self.cmd.results.add_detail('cross', Presults.xsec) +- self.cmd.results.add_detail('error', Presults.xerru) +- +- return Presults.xsec, Presults.xerru ++ self.cmd.results.add_detail('error', Presults.xerru) ++ ++ return Presults.xsec, Presults.xerru + + + +@@ -1516,27 +1528,27 @@ class gen_ximprove_v4_nogridupdate(gen_ximprove_v4): + # some hardcoded value which impact the generation + gen_events_security = 1.1 # multiply the number of requested event by this number for security + combining_job = 0 # allow to run multiple channel in sequence +- max_request_event = 400 # split jobs if a channel if it needs more than that ++ max_request_event = 400 # split jobs if a channel if it needs more than that + max_event_in_iter = 500 + min_event_in_iter = 250 +- max_splitting = 260 # maximum duplication of a given channel +- min_iter = 2 ++ max_splitting = 260 # maximum duplication of a given channel ++ min_iter = 2 + max_iter = 6 + keep_grid_for_refine = True + + +- def __init__(self, cmd, opt=None): +- ++ def __init__(self, cmd, opt=None): ++ + gen_ximprove.__init__(cmd, opt) +- ++ + if cmd.proc_characteristics['loopinduced'] and \ + cmd.proc_characteristics['nexternal'] > 2: + self.increase_parralelization(cmd.proc_characteristics['nexternal']) +- ++ + def increase_parralelization(self, nexternal): + +- self.max_splitting = 1000 +- ++ self.max_splitting = 1000 ++ + if self.run_card['refine_evt_by_job'] != -1: + pass + elif nexternal == 3: +@@ -1551,27 +1563,27 @@ class gen_ximprove_v4_nogridupdate(gen_ximprove_v4): + class gen_ximprove_share(gen_ximprove, gensym): + """Doing the refine in multicore. Each core handle a couple of PS point.""" + +- nb_ps_by_job = 2000 ++ nb_ps_by_job = 2000 + mode = "refine" + gen_events_security = 1.15 + # Note the real security is lower since we stop the jobs if they are at 96% + # of this target. 
+ + def __init__(self, *args, **opts): +- ++ + super(gen_ximprove_share, self).__init__(*args, **opts) + self.generated_events = {} + self.splitted_for_dir = lambda x,y : self.splitted_Pdir[(x,y)] +- ++ + + def get_job_for_event(self): + """generate the script in order to generate a given number of event""" + # correspond to write_gen in the fortran version +- ++ + + goal_lum, to_refine = self.find_job_for_event() + self.goal_lum = goal_lum +- ++ + # loop over the channel to refine to find the number of PS point to launch + total_ps_points = 0 + channel_to_ps_point = [] +@@ -1581,7 +1593,7 @@ class gen_ximprove_share(gen_ximprove, gensym): + os.remove(pjoin(self.me_dir, "SubProcesses",C.parent_name, C.name, "events.lhe")) + except: + pass +- ++ + #1. Compute the number of points are needed to reach target + needed_event = goal_lum*C.get('axsec') + if needed_event == 0: +@@ -1597,18 +1609,18 @@ class gen_ximprove_share(gen_ximprove, gensym): + nb_split = 1 + if nb_split > self.max_splitting: + nb_split = self.max_splitting +- nevents = self.max_event_in_iter * self.max_splitting ++ nevents = self.max_event_in_iter * self.max_splitting + else: + nevents = self.max_event_in_iter * nb_split + + if nevents > self.max_splitting*self.max_event_in_iter: + logger.warning("Channel %s/%s has a very low efficiency of unweighting. 
Might not be possible to reach target" % \ + (C.name, C.parent_name)) +- nevents = self.max_event_in_iter * self.max_splitting +- +- total_ps_points += nevents +- channel_to_ps_point.append((C, nevents)) +- ++ nevents = self.max_event_in_iter * self.max_splitting ++ ++ total_ps_points += nevents ++ channel_to_ps_point.append((C, nevents)) ++ + if self.cmd.options["run_mode"] == 1: + if self.cmd.options["cluster_size"]: + nb_ps_by_job = total_ps_points /int(self.cmd.options["cluster_size"]) +@@ -1622,7 +1634,7 @@ class gen_ximprove_share(gen_ximprove, gensym): + nb_ps_by_job = total_ps_points / self.cmd.options["nb_core"] + else: + nb_ps_by_job = self.nb_ps_by_job +- ++ + nb_ps_by_job = int(max(nb_ps_by_job, 500)) + + for C, nevents in channel_to_ps_point: +@@ -1636,20 +1648,20 @@ class gen_ximprove_share(gen_ximprove, gensym): + self.create_resubmit_one_iter(C.parent_name, C.name[1:], submit_ps, nb_job, step=0) + needed_event = goal_lum*C.get('xsec') + logger.debug("%s/%s : need %s event. Need %s split job of %s points", C.parent_name, C.name, needed_event, nb_job, submit_ps) +- +- ++ ++ + def combine_iteration(self, Pdir, G, step): +- ++ + grid_calculator, cross, error = self.combine_grid(Pdir, G, step) +- ++ + # collect all the generated_event + Gdirs = [] #build the the list of directory + for i in range(self.splitted_for_dir(Pdir, G)): + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + Gdirs.append(path) + assert len(grid_calculator.results) == len(Gdirs) == self.splitted_for_dir(Pdir, G) +- +- ++ ++ + # Check how many events are going to be kept after un-weighting. 
+ needed_event = cross * self.goal_lum + if needed_event == 0: +@@ -1659,19 +1671,19 @@ class gen_ximprove_share(gen_ximprove, gensym): + if self.err_goal >=1: + if needed_event > self.gen_events_security * self.err_goal: + needed_event = int(self.gen_events_security * self.err_goal) +- ++ + if (Pdir, G) in self.generated_events: + old_nunwgt, old_maxwgt = self.generated_events[(Pdir, G)] + else: + old_nunwgt, old_maxwgt = 0, 0 +- ++ + if old_nunwgt == 0 and os.path.exists(pjoin(Pdir,"G%s" % G, "events.lhe")): + # possible for second refine. + lhe = lhe_parser.EventFile(pjoin(Pdir,"G%s" % G, "events.lhe")) + old_nunwgt = lhe.unweight(None, trunc_error=0.005, log_level=0) + old_maxwgt = lhe.max_wgt +- +- ++ ++ + + maxwgt = max(grid_calculator.get_max_wgt(), old_maxwgt) + new_evt = grid_calculator.get_nunwgt(maxwgt) +@@ -1683,35 +1695,35 @@ class gen_ximprove_share(gen_ximprove, gensym): + one_iter_nb_event = max(grid_calculator.get_nunwgt(),1) + drop_previous_iteration = False + # compare the number of events to generate if we discard the previous iteration +- n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) ++ n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + n_target_combined = (needed_event-nunwgt) / efficiency + if n_target_one_iter < n_target_combined: + # the last iteration alone has more event that the combine iteration. +- # it is therefore interesting to drop previous iteration. ++ # it is therefore interesting to drop previous iteration. 
+ drop_previous_iteration = True + nunwgt = one_iter_nb_event + maxwgt = grid_calculator.get_max_wgt() + new_evt = nunwgt +- efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) +- ++ efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) ++ + try: + if drop_previous_iteration: + raise IOError + output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'a') + except IOError: + output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'w') +- ++ + misc.call(["cat"] + [pjoin(d, "events.lhe") for d in Gdirs], + stdout=output_file) + output_file.close() + # For large number of iteration. check the number of event by doing the + # real unweighting. +- if nunwgt < 0.6 * needed_event and step > self.min_iter: ++ if nunwgt < 0.6 * needed_event and step > self.min_iter: + lhe = lhe_parser.EventFile(output_file.name) + old_nunwgt =nunwgt + nunwgt = lhe.unweight(None, trunc_error=0.01, log_level=0) +- +- ++ ++ + self.generated_events[(Pdir, G)] = (nunwgt, maxwgt) + + # misc.sprint("Adding %s event to %s. 
Currently at %s" % (new_evt, G, nunwgt)) +@@ -1730,21 +1742,21 @@ class gen_ximprove_share(gen_ximprove, gensym): + nevents = grid_calculator.results[0].nevents + if nevents == 0: # possible if some integral returns 0 + nevents = max(g.nevents for g in grid_calculator.results) +- ++ + need_ps_point = (needed_event - nunwgt)/(efficiency+1e-99) +- need_job = need_ps_point // nevents + 1 +- ++ need_job = need_ps_point // nevents + 1 ++ + if step < self.min_iter: + # This is normal but check if we are on the good track +- job_at_first_iter = nb_split_before/2**(step-1) ++ job_at_first_iter = nb_split_before/2**(step-1) + expected_total_job = job_at_first_iter * (2**self.min_iter-1) + done_job = job_at_first_iter * (2**step-1) + expected_remaining_job = expected_total_job - done_job + +- logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) ++ logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) + # increase if needed but not too much + need_job = min(need_job, expected_remaining_job*1.25) +- ++ + nb_job = (need_job-0.5)//(2**(self.min_iter-step)-1) + 1 + nb_job = max(1, nb_job) + grid_calculator.write_grid_for_submission(Pdir,G, +@@ -1756,7 +1768,7 @@ class gen_ximprove_share(gen_ximprove, gensym): + nb_job, step)) + self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) + #self.create_job(Pdir, G, nb_job, nevents, step) +- ++ + elif step < self.max_iter: + if step + 1 == self.max_iter: + need_job = 1.20 * need_job # avoid to have just too few event. +@@ -1765,21 +1777,21 @@ class gen_ximprove_share(gen_ximprove, gensym): + grid_calculator.write_grid_for_submission(Pdir,G, + self.splitted_for_dir(Pdir, G), nb_job*nevents ,mode=self.mode, + conservative_factor=self.max_iter) +- +- ++ ++ + logger.info("%s/G%s is at %i/%i ('%.2g%%') event. Resubmit %i job at iteration %i." 
\ + % (os.path.basename(Pdir), G, int(nunwgt),int(needed_event)+1, + (float(nunwgt)/needed_event)*100.0 if needed_event>0.0 else 0.0, + nb_job, step)) + self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) +- +- ++ ++ + + return 0 +- +- ++ ++ + def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency): +- ++ + #compute the value + if cross == 0: + abscross,nw, luminosity = 0, 0, 0 +@@ -1795,7 +1807,7 @@ class gen_ximprove_share(gen_ximprove, gensym): + nevents = nunwgt + # make the unweighting to compute the number of events: + luminosity = nunwgt/cross +- ++ + #format the results.dat + def fstr(nb): + data = '%E' % nb +@@ -1804,23 +1816,23 @@ class gen_ximprove_share(gen_ximprove, gensym): + power = int(power) + 1 + return '%.5fE%+03i' %(nb,power) + line = '%s %s %s %i %i %i %i %s %s %s 0.0 0.0 0\n' % \ +- (fstr(cross), fstr(error*cross), fstr(error*cross), ++ (fstr(cross), fstr(error*cross), fstr(error*cross), + nevents, nw, maxit,nunwgt, + fstr(luminosity), fstr(wgt), fstr(abscross)) +- ++ + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, +- 'results.dat'),'w') ++ 'results.dat'),'w') + fsock.writelines(line) + fsock.close() + +- +- +- ++ ++ ++ + class gen_ximprove_gridpack(gen_ximprove_v4): +- +- min_iter = 1 ++ ++ min_iter = 1 + max_iter = 13 +- max_request_event = 1e12 # split jobs if a channel if it needs more than that ++ max_request_event = 1e12 # split jobs if a channel if it needs more than that + max_event_in_iter = 4000 + min_event_in_iter = 500 + combining_job = sys.maxsize +@@ -1832,7 +1844,7 @@ class gen_ximprove_gridpack(gen_ximprove_v4): + return super(gen_ximprove_gridpack, cls).__new__(cls, *args, **opts) + + def __init__(self, *args, **opts): +- ++ + self.ngran = -1 + self.gscalefact = {} + self.readonly = False +@@ -1843,23 +1855,23 @@ class gen_ximprove_gridpack(gen_ximprove_v4): + self.readonly = opts['readonly'] + super(gen_ximprove_gridpack,self).__init__(*args, **opts) + if self.ngran == 
-1: +- self.ngran = 1 +- ++ self.ngran = 1 ++ + def find_job_for_event(self): + """return the list of channel that need to be improved""" + import random +- ++ + assert self.err_goal >=1 + self.err_goal = int(self.err_goal) + self.gscalefact = {} +- ++ + xtot = self.results.axsec +- goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 ++ goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 + # logger.info('Effective Luminosity %s pb^-1', goal_lum) +- ++ + all_channels = sum([list(P) for P in self.results],[]) + all_channels.sort(key=lambda x : x.get('luminosity'), reverse=True) +- ++ + to_refine = [] + for C in all_channels: + tag = C.get('name') +@@ -1873,27 +1885,27 @@ class gen_ximprove_gridpack(gen_ximprove_v4): + #need to generate events + logger.debug('request events for ', C.get('name'), 'cross=', + C.get('axsec'), 'needed events = ', goal_lum * C.get('axsec')) +- to_refine.append(C) +- +- logger.info('need to improve %s channels' % len(to_refine)) ++ to_refine.append(C) ++ ++ logger.info('need to improve %s channels' % len(to_refine)) + return goal_lum, to_refine + + def get_job_for_event(self): + """generate the script in order to generate a given number of event""" + # correspond to write_gen in the fortran version +- +- ++ ++ + goal_lum, to_refine = self.find_job_for_event() + + jobs = [] # list of the refine if some job are split is list of + # dict with the parameter of the run. +- ++ + # loop over the channel to refine + for C in to_refine: + #1. Compute the number of points are needed to reach target + needed_event = max(goal_lum*C.get('axsec'), self.ngran) + nb_split = 1 +- ++ + #2. estimate how many points we need in each iteration + if C.get('nunwgt') > 0: + nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) +@@ -1908,13 +1920,13 @@ class gen_ximprove_gridpack(gen_ximprove_v4): + # forbid too low/too large value + nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) + logger.debug("%s : need %s event. 
Need %s split job of %s points", C.name, needed_event, nb_split, nevents) +- ++ + + #create the info dict assume no splitting for the default + info = {'name': self.cmd.results.current['run_name'], + 'script_name': 'unknown', + 'directory': C.name, # need to be change for splitted job +- 'P_dir': os.path.basename(C.parent_name), ++ 'P_dir': os.path.basename(C.parent_name), + 'offset': 1, # need to be change for splitted job + 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), + 'nevents': nevents, #int(nevents*self.gen_events_security)+1, +@@ -1926,7 +1938,7 @@ class gen_ximprove_gridpack(gen_ximprove_v4): + 'channel': C.name.replace('G',''), + 'grid_refinment' : 0, #no refinment of the grid + 'base_directory': '', #should be change in splitted job if want to keep the grid +- 'packet': None, ++ 'packet': None, + } + + if self.readonly: +@@ -1934,11 +1946,11 @@ class gen_ximprove_gridpack(gen_ximprove_v4): + info['base_directory'] = basedir + + jobs.append(info) +- + +- write_dir = '.' if self.readonly else None +- self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) +- ++ ++ write_dir = '.' if self.readonly else None ++ self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) ++ + done = [] + for j in jobs: + if j['P_dir'] in done: +@@ -1955,22 +1967,22 @@ class gen_ximprove_gridpack(gen_ximprove_v4): + write_dir = '.' 
if self.readonly else pjoin(self.me_dir, 'SubProcesses') + + self.check_events(goal_lum, to_refine, jobs, write_dir) +- ++ + def check_events(self, goal_lum, to_refine, jobs, Sdir): + """check that we get the number of requested events if not resubmit.""" +- ++ + new_jobs = [] +- ++ + for C, job_info in zip(to_refine, jobs): +- P = job_info['P_dir'] ++ P = job_info['P_dir'] + G = job_info['channel'] + axsec = C.get('axsec') +- requested_events= job_info['requested_event'] +- ++ requested_events= job_info['requested_event'] ++ + + new_results = sum_html.OneResult((P,G)) + new_results.read_results(pjoin(Sdir,P, 'G%s'%G, 'results.dat')) +- ++ + # need to resubmit? + if new_results.get('nunwgt') < requested_events: + pwd = pjoin(os.getcwd(),job_info['P_dir'],'G%s'%G) if self.readonly else \ +@@ -1980,10 +1992,10 @@ class gen_ximprove_gridpack(gen_ximprove_v4): + job_info['offset'] += 1 + new_jobs.append(job_info) + files.mv(pjoin(pwd, 'events.lhe'), pjoin(pwd, 'events.lhe.previous')) +- ++ + if new_jobs: +- self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) +- ++ self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) ++ + done = [] + for j in new_jobs: + if j['P_dir'] in done: +@@ -2003,9 +2015,9 @@ class gen_ximprove_gridpack(gen_ximprove_v4): + files.put_at_end(pjoin(pwd, 'events.lhe'),pjoin(pwd, 'events.lhe.previous')) + + return self.check_events(goal_lum, to_refine, new_jobs, Sdir) +- +- +- + +- ++ ++ ++ ++ + +diff --git b/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py a/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py +index 8c509e83f..8abba3f33 100755 +--- b/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py ++++ a/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py +@@ -2,11 +2,11 @@ + # + # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors + # +-# This file is a part of the MadGraph5_aMC@NLO project, an application which ++# This file is 
a part of the MadGraph5_aMC@NLO project, an application which + # automatically generates Feynman diagrams and matrix elements for arbitrary + # high-energy processes in the Standard Model and beyond. + # +-# It is subject to the MadGraph5_aMC@NLO license which should accompany this ++# It is subject to the MadGraph5_aMC@NLO license which should accompany this + # distribution. + # + # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch +@@ -53,10 +53,10 @@ pjoin = os.path.join + # Special logger for the Cmd Interface + logger = logging.getLogger('madevent.stdout') # -> stdout + logger_stderr = logging.getLogger('madevent.stderr') # ->stderr +- ++ + try: + import madgraph +-except ImportError as error: ++except ImportError as error: + # import from madevent directory + MADEVENT = True + import internal.extended_cmd as cmd +@@ -92,7 +92,7 @@ else: + import madgraph.various.lhe_parser as lhe_parser + # import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code + import models.check_param_card as check_param_card +- from madgraph.iolibs.files import ln ++ from madgraph.iolibs.files import ln + from madgraph import InvalidCmd, MadGraph5Error, MG5DIR, ReadWrite + + +@@ -113,10 +113,10 @@ class CmdExtended(common_run.CommonRunCmd): + next_possibility = { + 'start': [], + } +- ++ + debug_output = 'ME5_debug' + error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n' +- error_debug += 'More information is found in \'%(debug)s\'.\n' ++ error_debug += 'More information is found in \'%(debug)s\'.\n' + error_debug += 'Please attach this file to your report.' 
+ + config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n' +@@ -124,18 +124,18 @@ class CmdExtended(common_run.CommonRunCmd): + + keyboard_stop_msg = """stopping all operation + in order to quit MadGraph5_aMC@NLO please enter exit""" +- ++ + # Define the Error + InvalidCmd = InvalidCmd + ConfigurationError = MadGraph5Error + + def __init__(self, me_dir, options, *arg, **opt): + """Init history and line continuation""" +- ++ + # Tag allowing/forbiding question + self.force = False +- +- # If possible, build an info line with current version number ++ ++ # If possible, build an info line with current version number + # and date, from the VERSION text file + info = misc.get_pkg_info() + info_line = "" +@@ -150,7 +150,7 @@ class CmdExtended(common_run.CommonRunCmd): + else: + version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip() + info_line = "#* VERSION %s %s *\n" % \ +- (version, (24 - len(version)) * ' ') ++ (version, (24 - len(version)) * ' ') + + # Create a header for the history file. + # Remember to fill in time at writeout time! 
+@@ -177,7 +177,7 @@ class CmdExtended(common_run.CommonRunCmd): + '#* run as ./bin/madevent.py filename *\n' + \ + '#* *\n' + \ + '#************************************************************\n' +- ++ + if info_line: + info_line = info_line[1:] + +@@ -203,11 +203,11 @@ class CmdExtended(common_run.CommonRunCmd): + "* *\n" + \ + "************************************************************") + super(CmdExtended, self).__init__(me_dir, options, *arg, **opt) +- ++ + def get_history_header(self): +- """return the history header""" ++ """return the history header""" + return self.history_header % misc.get_time_info() +- ++ + def stop_on_keyboard_stop(self): + """action to perform to close nicely on a keyboard interupt""" + try: +@@ -219,20 +219,20 @@ class CmdExtended(common_run.CommonRunCmd): + self.add_error_log_in_html(KeyboardInterrupt) + except: + pass +- ++ + def postcmd(self, stop, line): + """ Update the status of the run for finishing interactive command """ +- +- stop = super(CmdExtended, self).postcmd(stop, line) ++ ++ stop = super(CmdExtended, self).postcmd(stop, line) + # relaxing the tag forbidding question + self.force = False +- ++ + if not self.use_rawinput: + return stop +- ++ + if self.results and not self.results.current: + return stop +- ++ + arg = line.split() + if len(arg) == 0: + return stop +@@ -240,41 +240,41 @@ class CmdExtended(common_run.CommonRunCmd): + return stop + if isinstance(self.results.status, str) and self.results.status == 'Stop by the user': + self.update_status('%s Stop by the user' % arg[0], level=None, error=True) +- return stop ++ return stop + elif not self.results.status: + return stop + elif str(arg[0]) in ['exit','quit','EOF']: + return stop +- ++ + try: +- self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], ++ self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], + level=None, error=True) + except Exception: + misc.sprint('update_status fails') + pass +- +- ++ ++ + def nice_user_error(self, error, line): + """If a ME run is currently running add a link in the html output""" + + self.add_error_log_in_html() +- return cmd.Cmd.nice_user_error(self, error, line) +- ++ return cmd.Cmd.nice_user_error(self, error, line) ++ + def nice_config_error(self, error, line): + """If a ME run is currently running add a link in the html output""" + + self.add_error_log_in_html() + stop = cmd.Cmd.nice_config_error(self, error, line) +- +- ++ ++ + try: + debug_file = open(self.debug_output, 'a') + debug_file.write(open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat'))) + debug_file.close() + except: +- pass ++ pass + return stop +- ++ + + def nice_error_handling(self, error, line): + """If a ME run is currently running add a link in the html output""" +@@ -294,7 +294,7 @@ class CmdExtended(common_run.CommonRunCmd): + proc_card = pjoin(self.me_dir,'Cards','proc_card_mg5.dat') + if os.path.exists(proc_card): + self.banner.add(proc_card) +- ++ + out_dir = pjoin(self.me_dir, 'Events', self.run_name) + if not os.path.isdir(out_dir): + os.mkdir(out_dir) +@@ -307,7 +307,7 @@ class CmdExtended(common_run.CommonRunCmd): + else: + pass + else: +- self.add_error_log_in_html() ++ self.add_error_log_in_html() + stop = cmd.Cmd.nice_error_handling(self, error, line) + try: + debug_file = open(self.debug_output, 'a') +@@ -316,14 +316,14 @@ class CmdExtended(common_run.CommonRunCmd): + except: + pass + return stop +- +- ++ ++ + #=============================================================================== + # HelpToCmd + #=============================================================================== + class HelpToCmd(object): + """ The Series of help routine for the MadEventCmd""" +- ++ + def help_pythia(self): + logger.info("syntax: pythia [RUN] [--run_options]") + logger.info("-- run pythia on RUN (current one by 
default)") +@@ -352,29 +352,29 @@ class HelpToCmd(object): + logger.info(" Path should be the path of a valid banner.") + logger.info(" RUN should be the name of a run of the current directory") + self.run_options_help([('-f','answer all question by default'), +- ('--name=X', 'Define the name associated with the new run')]) +- ++ ('--name=X', 'Define the name associated with the new run')]) ++ + def help_open(self): + logger.info("syntax: open FILE ") + logger.info("-- open a file with the appropriate editor.") + logger.info(' If FILE belongs to index.html, param_card.dat, run_card.dat') + logger.info(' the path to the last created/used directory is used') + logger.info(' The program used to open those files can be chosen in the') +- logger.info(' configuration file ./input/mg5_configuration.txt') +- +- ++ logger.info(' configuration file ./input/mg5_configuration.txt') ++ ++ + def run_options_help(self, data): + if data: + logger.info('-- local options:') + for name, info in data: + logger.info(' %s : %s' % (name, info)) +- ++ + logger.info("-- session options:") +- logger.info(" Note that those options will be kept for the current session") ++ logger.info(" Note that those options will be kept for the current session") + logger.info(" --cluster : Submit to the cluster. 
Current cluster: %s" % self.options['cluster_type']) + logger.info(" --multicore : Run in multi-core configuration") + logger.info(" --nb_core=X : limit the number of core to use to X.") +- ++ + + def help_generate_events(self): + logger.info("syntax: generate_events [run_name] [options]",) +@@ -398,16 +398,16 @@ class HelpToCmd(object): + logger.info(" -f : Bypass the edition of MadLoopParams.dat.",'$MG:color:BLUE') + logger.info(" -r : Refresh of the existing filters (erasing them if already present).",'$MG:color:BLUE') + logger.info(" --nPS= : Specify how many phase-space points should be tried to set up the filters.",'$MG:color:BLUE') +- ++ + + + def help_calculate_decay_widths(self): +- ++ + if self.ninitial != 1: + logger.warning("This command is only valid for processes of type A > B C.") + logger.warning("This command can not be run in current context.") + logger.warning("") +- ++ + logger.info("syntax: calculate_decay_widths [run_name] [options])") + logger.info("-- Calculate decay widths and enter widths and BRs in param_card") + logger.info(" for a series of processes of type A > B C ...") +@@ -428,8 +428,8 @@ class HelpToCmd(object): + logger.info("-- evaluate the different channel associate to the process") + self.run_options_help([("--" + key,value[-1]) for (key,value) in \ + self._survey_options.items()]) +- +- ++ ++ + def help_restart_gridpack(self): + logger.info("syntax: restart_gridpack --precision= --restart_zero") + +@@ -439,14 +439,14 @@ class HelpToCmd(object): + logger.info("syntax: launch [run_name] [options])") + logger.info(" --alias for either generate_events/calculate_decay_widths") + logger.info(" depending of the number of particles in the initial state.") +- ++ + if self.ninitial == 1: + logger.info("For this directory this is equivalent to calculate_decay_widths") + self.help_calculate_decay_widths() + else: + logger.info("For this directory this is equivalent to $generate_events") + self.help_generate_events() +- ++ + def 
help_refine(self): + logger.info("syntax: refine require_precision [max_channel] [--run_options]") + logger.info("-- refine the LAST run to achieve a given precision.") +@@ -454,14 +454,14 @@ class HelpToCmd(object): + logger.info(' or the required relative error') + logger.info(' max_channel:[5] maximal number of channel per job') + self.run_options_help([]) +- ++ + def help_combine_events(self): + """ """ + logger.info("syntax: combine_events [run_name] [--tag=tag_name] [--run_options]") + logger.info("-- Combine the last run in order to write the number of events") + logger.info(" asked in the run_card.") + self.run_options_help([]) +- ++ + def help_store_events(self): + """ """ + logger.info("syntax: store_events [--run_options]") +@@ -481,7 +481,7 @@ class HelpToCmd(object): + logger.info("syntax: import command PATH") + logger.info("-- Execute the command present in the file") + self.run_options_help([]) +- ++ + def help_syscalc(self): + logger.info("syntax: syscalc [RUN] [%s] [-f | --tag=]" % '|'.join(self._plot_mode)) + logger.info("-- calculate systematics information for the RUN (current run by default)") +@@ -506,18 +506,18 @@ class AskRun(cmd.ControlSwitch): + ('madspin', 'Decay onshell particles'), + ('reweight', 'Add weights to events for new hypp.') + ] +- ++ + def __init__(self, question, line_args=[], mode=None, force=False, + *args, **opt): +- ++ + self.check_available_module(opt['mother_interface'].options) + self.me_dir = opt['mother_interface'].me_dir + super(AskRun,self).__init__(self.to_control, opt['mother_interface'], + *args, **opt) +- +- ++ ++ + def check_available_module(self, options): +- ++ + self.available_module = set() + if options['pythia-pgs_path']: + self.available_module.add('PY6') +@@ -540,32 +540,32 @@ class AskRun(cmd.ControlSwitch): + self.available_module.add('Rivet') + else: + logger.warning("Rivet program installed but no parton shower with hepmc output detected.\n Please install pythia8") +- ++ + if not MADEVENT or 
('mg5_path' in options and options['mg5_path']): + self.available_module.add('MadSpin') + if misc.has_f2py() or options['f2py_compiler']: + self.available_module.add('reweight') + +-# old mode to activate the shower ++# old mode to activate the shower + def ans_parton(self, value=None): + """None: means that the user type 'pythia' + value: means that the user type pythia=value""" +- ++ + if value is None: + self.set_all_off() + else: + logger.warning('Invalid command: parton=%s' % value) +- +- ++ ++ + # +-# HANDLING SHOWER ++# HANDLING SHOWER + # + def get_allowed_shower(self): + """return valid entry for the shower switch""" +- ++ + if hasattr(self, 'allowed_shower'): + return self.allowed_shower +- ++ + self.allowed_shower = [] + if 'PY6' in self.available_module: + self.allowed_shower.append('Pythia6') +@@ -574,9 +574,9 @@ class AskRun(cmd.ControlSwitch): + if self.allowed_shower: + self.allowed_shower.append('OFF') + return self.allowed_shower +- ++ + def set_default_shower(self): +- ++ + if 'PY6' in self.available_module and\ + os.path.exists(pjoin(self.me_dir,'Cards','pythia_card.dat')): + self.switch['shower'] = 'Pythia6' +@@ -590,10 +590,10 @@ class AskRun(cmd.ControlSwitch): + + def check_value_shower(self, value): + """check an entry is valid. return the valid entry in case of shortcut""" +- ++ + if value in self.get_allowed_shower(): + return True +- ++ + value =value.lower() + if value in ['py6','p6','pythia_6'] and 'PY6' in self.available_module: + return 'Pythia6' +@@ -601,13 +601,13 @@ class AskRun(cmd.ControlSwitch): + return 'Pythia8' + else: + return False +- +- +-# old mode to activate the shower ++ ++ ++# old mode to activate the shower + def ans_pythia(self, value=None): + """None: means that the user type 'pythia' + value: means that the user type pythia=value""" +- ++ + if 'PY6' not in self.available_module: + logger.info('pythia-pgs not available. 
Ignore commmand') + return +@@ -621,13 +621,13 @@ class AskRun(cmd.ControlSwitch): + self.set_switch('shower', 'OFF') + else: + logger.warning('Invalid command: pythia=%s' % value) +- +- ++ ++ + def consistency_shower_detector(self, vshower, vdetector): + """consistency_XX_YY(val_XX, val_YY) + -> XX is the new key set by the user to a new value val_XX + -> YY is another key +- -> return value should be None or "replace_YY" ++ -> return value should be None or "replace_YY" + """ + + if vshower == 'OFF': +@@ -635,35 +635,35 @@ class AskRun(cmd.ControlSwitch): + return 'OFF' + if vshower == 'Pythia8' and vdetector == 'PGS': + return 'OFF' +- ++ + return None + + +- ++ + # + # HANDLING DETECTOR + # + def get_allowed_detector(self): + """return valid entry for the switch""" +- ++ + if hasattr(self, 'allowed_detector'): +- return self.allowed_detector +- ++ return self.allowed_detector ++ + self.allowed_detector = [] + if 'PGS' in self.available_module: + self.allowed_detector.append('PGS') + if 'Delphes' in self.available_module: + self.allowed_detector.append('Delphes') + +- ++ + if self.allowed_detector: + self.allowed_detector.append('OFF') +- return self.allowed_detector ++ return self.allowed_detector + + def set_default_detector(self): +- ++ + self.set_default_shower() #ensure that this one is called first! +- ++ + if 'PGS' in self.available_module and self.switch['shower'] == 'Pythia6'\ + and os.path.exists(pjoin(self.me_dir,'Cards','pgs_card.dat')): + self.switch['detector'] = 'PGS' +@@ -674,16 +674,16 @@ class AskRun(cmd.ControlSwitch): + self.switch['detector'] = 'OFF' + else: + self.switch['detector'] = 'Not Avail.' +- +-# old mode to activate pgs ++ ++# old mode to activate pgs + def ans_pgs(self, value=None): + """None: means that the user type 'pgs' +- value: means that the user type pgs=value""" +- ++ value: means that the user type pgs=value""" ++ + if 'PGS' not in self.available_module: + logger.info('pythia-pgs not available. 
Ignore commmand') + return +- ++ + if value is None: + self.set_all_off() + self.switch['shower'] = 'Pythia6' +@@ -696,16 +696,16 @@ class AskRun(cmd.ControlSwitch): + else: + logger.warning('Invalid command: pgs=%s' % value) + +- ++ + # old mode to activate Delphes + def ans_delphes(self, value=None): + """None: means that the user type 'delphes' +- value: means that the user type delphes=value""" +- ++ value: means that the user type delphes=value""" ++ + if 'Delphes' not in self.available_module: + logger.warning('Delphes not available. Ignore commmand') + return +- ++ + if value is None: + self.set_all_off() + if 'PY6' in self.available_module: +@@ -718,15 +718,15 @@ class AskRun(cmd.ControlSwitch): + elif value == 'off': + self.set_switch('detector', 'OFF') + else: +- logger.warning('Invalid command: pgs=%s' % value) ++ logger.warning('Invalid command: pgs=%s' % value) + + def consistency_detector_shower(self,vdetector, vshower): + """consistency_XX_YY(val_XX, val_YY) + -> XX is the new key set by the user to a new value val_XX + -> YY is another key +- -> return value should be None or "replace_YY" ++ -> return value should be None or "replace_YY" + """ +- ++ + if vdetector == 'PGS' and vshower != 'Pythia6': + return 'Pythia6' + if vdetector == 'Delphes' and vshower not in ['Pythia6', 'Pythia8']: +@@ -744,28 +744,28 @@ class AskRun(cmd.ControlSwitch): + # + def get_allowed_analysis(self): + """return valid entry for the shower switch""" +- ++ + if hasattr(self, 'allowed_analysis'): + return self.allowed_analysis +- ++ + self.allowed_analysis = [] + if 'ExRoot' in self.available_module: + self.allowed_analysis.append('ExRoot') + if 'MA4' in self.available_module: + self.allowed_analysis.append('MadAnalysis4') + if 'MA5' in self.available_module: +- self.allowed_analysis.append('MadAnalysis5') ++ self.allowed_analysis.append('MadAnalysis5') + if 'Rivet' in self.available_module: +- self.allowed_analysis.append('Rivet') +- ++ 
self.allowed_analysis.append('Rivet') ++ + if self.allowed_analysis: + self.allowed_analysis.append('OFF') +- ++ + return self.allowed_analysis +- ++ + def check_analysis(self, value): + """check an entry is valid. return the valid entry in case of shortcut""" +- ++ + if value in self.get_allowed_analysis(): + return True + if value.lower() in ['ma4', 'madanalysis4', 'madanalysis_4','4']: +@@ -786,30 +786,30 @@ class AskRun(cmd.ControlSwitch): + """consistency_XX_YY(val_XX, val_YY) + -> XX is the new key set by the user to a new value val_XX + -> YY is another key +- -> return value should be None or "replace_YY" ++ -> return value should be None or "replace_YY" + """ + + if vshower != 'Pythia8' and vanalysis == 'Rivet': + return 'OFF' #new value for analysis +- ++ + return None +- ++ + def consistency_analysis_shower(self, vanalysis, vshower): + """consistency_XX_YY(val_XX, val_YY) + -> XX is the new key set by the user to a new value val_XX + -> YY is another key +- -> return value should be None or "replace_YY" ++ -> return value should be None or "replace_YY" + """ + + if vshower != 'Pythia8' and vanalysis == 'Rivet': + return 'Pythia8' #new value for analysis +- ++ + return None + + + def set_default_analysis(self): + """initialise the switch for analysis""" +- ++ + if 'MA4' in self.available_module and \ + os.path.exists(pjoin(self.me_dir,'Cards','plot_card.dat')): + self.switch['analysis'] = 'MadAnalysis4' +@@ -818,46 +818,46 @@ class AskRun(cmd.ControlSwitch): + or os.path.exists(pjoin(self.me_dir,'Cards', 'madanalysis5_hadron_card.dat'))): + self.switch['analysis'] = 'MadAnalysis5' + elif 'ExRoot' in self.available_module: +- self.switch['analysis'] = 'ExRoot' +- elif self.get_allowed_analysis(): ++ self.switch['analysis'] = 'ExRoot' ++ elif self.get_allowed_analysis(): + self.switch['analysis'] = 'OFF' + else: + self.switch['analysis'] = 'Not Avail.' 
+- ++ + # + # MADSPIN handling + # + def get_allowed_madspin(self): + """ ON|OFF|onshell """ +- ++ + if hasattr(self, 'allowed_madspin'): + return self.allowed_madspin +- ++ + self.allowed_madspin = [] + if 'MadSpin' in self.available_module: + self.allowed_madspin = ['OFF',"ON",'onshell',"full"] + return self.allowed_madspin +- ++ + def check_value_madspin(self, value): + """handle alias and valid option not present in get_allowed_madspin""" +- ++ + if value.upper() in self.get_allowed_madspin(): + return True + elif value.lower() in self.get_allowed_madspin(): + return True +- ++ + if 'MadSpin' not in self.available_module: + return False +- ++ + if value.lower() in ['madspin', 'full']: + return 'full' + elif value.lower() in ['none']: + return 'none' +- +- ++ ++ + def set_default_madspin(self): + """initialise the switch for madspin""" +- ++ + if 'MadSpin' in self.available_module: + if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): + self.switch['madspin'] = 'ON' +@@ -865,10 +865,10 @@ class AskRun(cmd.ControlSwitch): + self.switch['madspin'] = 'OFF' + else: + self.switch['madspin'] = 'Not Avail.' 
+- ++ + def get_cardcmd_for_madspin(self, value): + """set some command to run before allowing the user to modify the cards.""" +- ++ + if value == 'onshell': + return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"] + elif value in ['full', 'madspin']: +@@ -877,36 +877,36 @@ class AskRun(cmd.ControlSwitch): + return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"] + else: + return [] +- ++ + # + # ReWeight handling + # + def get_allowed_reweight(self): + """ return the list of valid option for reweight=XXX """ +- ++ + if hasattr(self, 'allowed_reweight'): + return getattr(self, 'allowed_reweight') +- ++ + if 'reweight' not in self.available_module: + self.allowed_reweight = [] + return + self.allowed_reweight = ['OFF', 'ON'] +- ++ + # check for plugin mode + plugin_path = self.mother_interface.plugin_path + opts = misc.from_plugin_import(plugin_path, 'new_reweight', warning=False) + self.allowed_reweight += opts +- ++ + def set_default_reweight(self): + """initialise the switch for reweight""" +- ++ + if 'reweight' in self.available_module: + if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')): + self.switch['reweight'] = 'ON' + else: + self.switch['reweight'] = 'OFF' + else: +- self.switch['reweight'] = 'Not Avail.' ++ self.switch['reweight'] = 'Not Avail.' 
+ + #=============================================================================== + # CheckValidForCmd +@@ -916,14 +916,14 @@ class CheckValidForCmd(object): + + def check_banner_run(self, args): + """check the validity of line""" +- ++ + if len(args) == 0: + self.help_banner_run() + raise self.InvalidCmd('banner_run requires at least one argument.') +- ++ + tag = [a[6:] for a in args if a.startswith('--tag=')] +- +- ++ ++ + if os.path.exists(args[0]): + type ='banner' + format = self.detect_card_type(args[0]) +@@ -931,7 +931,7 @@ class CheckValidForCmd(object): + raise self.InvalidCmd('The file is not a valid banner.') + elif tag: + args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ +- (args[0], tag)) ++ (args[0], tag)) + if not os.path.exists(args[0]): + raise self.InvalidCmd('No banner associates to this name and tag.') + else: +@@ -939,7 +939,7 @@ class CheckValidForCmd(object): + type = 'run' + banners = misc.glob('*_banner.txt', pjoin(self.me_dir,'Events', args[0])) + if not banners: +- raise self.InvalidCmd('No banner associates to this name.') ++ raise self.InvalidCmd('No banner associates to this name.') + elif len(banners) == 1: + args[0] = banners[0] + else: +@@ -947,8 +947,8 @@ class CheckValidForCmd(object): + tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners] + tag = self.ask('which tag do you want to use?', tags[0], tags) + args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ +- (args[0], tag)) +- ++ (args[0], tag)) ++ + run_name = [arg[7:] for arg in args if arg.startswith('--name=')] + if run_name: + try: +@@ -970,14 +970,14 @@ class CheckValidForCmd(object): + except Exception: + pass + self.set_run_name(name) +- ++ + def check_history(self, args): + """check the validity of line""" +- ++ + if len(args) > 1: + self.help_history() + raise self.InvalidCmd('\"history\" command takes at most one argument') +- ++ + if not len(args): + return + elif args[0] != 'clean': +@@ -985,16 +985,16 @@ class 
CheckValidForCmd(object): + if dirpath and not os.path.exists(dirpath) or \ + os.path.isdir(args[0]): + raise self.InvalidCmd("invalid path %s " % dirpath) +- ++ + def check_save(self, args): + """ check the validity of the line""" +- ++ + if len(args) == 0: + args.append('options') + + if args[0] not in self._save_opts: + raise self.InvalidCmd('wrong \"save\" format') +- ++ + if args[0] != 'options' and len(args) != 2: + self.help_save() + raise self.InvalidCmd('wrong \"save\" format') +@@ -1003,7 +1003,7 @@ class CheckValidForCmd(object): + if not os.path.exists(basename): + raise self.InvalidCmd('%s is not a valid path, please retry' % \ + args[1]) +- ++ + if args[0] == 'options': + has_path = None + for arg in args[1:]: +@@ -1024,9 +1024,9 @@ class CheckValidForCmd(object): + has_path = True + if not has_path: + if '--auto' in arg and self.options['mg5_path']: +- args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) ++ args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) + else: +- args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) ++ args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) + + def check_set(self, args): + """ check the validity of the line""" +@@ -1039,20 +1039,20 @@ class CheckValidForCmd(object): + self.help_set() + raise self.InvalidCmd('Possible options for set are %s' % \ + self._set_options) +- ++ + if args[0] in ['stdout_level']: + if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \ + and not args[1].isdigit(): + raise self.InvalidCmd('output_level needs ' + \ +- 'a valid level') +- ++ 'a valid level') ++ + if args[0] in ['timeout']: + if not args[1].isdigit(): +- raise self.InvalidCmd('timeout values should be a integer') +- ++ raise self.InvalidCmd('timeout values should be a integer') ++ + def check_open(self, args): + """ check the validity of the line """ +- ++ + if len(args) != 1: + self.help_open() + raise self.InvalidCmd('OPEN command 
requires exactly one argument') +@@ -1069,7 +1069,7 @@ class CheckValidForCmd(object): + raise self.InvalidCmd('No MadEvent path defined. Unable to associate this name to a file') + else: + return True +- ++ + path = self.me_dir + if os.path.isfile(os.path.join(path,args[0])): + args[0] = os.path.join(path,args[0]) +@@ -1078,7 +1078,7 @@ class CheckValidForCmd(object): + elif os.path.isfile(os.path.join(path,'HTML',args[0])): + args[0] = os.path.join(path,'HTML',args[0]) + # special for card with _default define: copy the default and open it +- elif '_card.dat' in args[0]: ++ elif '_card.dat' in args[0]: + name = args[0].replace('_card.dat','_card_default.dat') + if os.path.isfile(os.path.join(path,'Cards', name)): + files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0])) +@@ -1086,13 +1086,13 @@ class CheckValidForCmd(object): + else: + raise self.InvalidCmd('No default path for this file') + elif not os.path.isfile(args[0]): +- raise self.InvalidCmd('No default path for this file') +- ++ raise self.InvalidCmd('No default path for this file') ++ + def check_initMadLoop(self, args): + """ check initMadLoop command arguments are valid.""" +- ++ + opt = {'refresh': False, 'nPS': None, 'force': False} +- ++ + for arg in args: + if arg in ['-r','--refresh']: + opt['refresh'] = True +@@ -1105,14 +1105,14 @@ class CheckValidForCmd(object): + except ValueError: + raise InvalidCmd("The number of attempts specified "+ + "'%s' is not a valid integer."%n_attempts) +- ++ + return opt +- ++ + def check_treatcards(self, args): + """check that treatcards arguments are valid + [param|run|all] [--output_dir=] [--param_card=] [--run_card=] + """ +- ++ + opt = {'output_dir':pjoin(self.me_dir,'Source'), + 'param_card':pjoin(self.me_dir,'Cards','param_card.dat'), + 'run_card':pjoin(self.me_dir,'Cards','run_card.dat'), +@@ -1129,14 +1129,14 @@ class CheckValidForCmd(object): + if os.path.isfile(value): + card_name = self.detect_card_type(value) + if card_name != 
key: +- raise self.InvalidCmd('Format for input file detected as %s while expecting %s' ++ raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + % (card_name, key)) + opt[key] = value + elif os.path.isfile(pjoin(self.me_dir,value)): + card_name = self.detect_card_type(pjoin(self.me_dir,value)) + if card_name != key: +- raise self.InvalidCmd('Format for input file detected as %s while expecting %s' +- % (card_name, key)) ++ raise self.InvalidCmd('Format for input file detected as %s while expecting %s' ++ % (card_name, key)) + opt[key] = value + else: + raise self.InvalidCmd('No such file: %s ' % value) +@@ -1154,14 +1154,14 @@ class CheckValidForCmd(object): + else: + self.help_treatcards() + raise self.InvalidCmd('Unvalid argument %s' % arg) +- +- return mode, opt +- +- ++ ++ return mode, opt ++ ++ + def check_survey(self, args, cmd='survey'): + """check that the argument for survey are valid""" +- +- ++ ++ + self.opts = dict([(key,value[1]) for (key,value) in \ + self._survey_options.items()]) + +@@ -1183,41 +1183,41 @@ class CheckValidForCmd(object): + self.help_survey() + raise self.InvalidCmd('Too many argument for %s command' % cmd) + elif not args: +- # No run name assigned -> assigned one automaticaly ++ # No run name assigned -> assigned one automaticaly + self.set_run_name(self.find_available_run_name(self.me_dir)) + else: + self.set_run_name(args[0], None,'parton', True) + args.pop(0) +- ++ + return True + + def check_generate_events(self, args): + """check that the argument for generate_events are valid""" +- ++ + run = None + if args and args[-1].startswith('--laststep='): + run = args[-1].split('=')[-1] + if run not in ['auto','parton', 'pythia', 'pgs', 'delphes']: + self.help_generate_events() + raise self.InvalidCmd('invalid %s argument'% args[-1]) +- if run != 'parton' and not self.options['pythia-pgs_path']: +- raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. 
++ if run != 'parton' and not self.options['pythia-pgs_path']: ++ raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + To do so type: \'install pythia-pgs\' in the mg5 interface''') + if run == 'delphes' and not self.options['delphes_path']: +- raise self.InvalidCmd('''delphes not install. Please install this package first. ++ raise self.InvalidCmd('''delphes not install. Please install this package first. + To do so type: \'install Delphes\' in the mg5 interface''') + del args[-1] + +- ++ + #if len(args) > 1: + # self.help_generate_events() + # raise self.InvalidCmd('Too many argument for generate_events command: %s' % cmd) +- ++ + return run + + def check_calculate_decay_widths(self, args): + """check that the argument for calculate_decay_widths are valid""" +- ++ + if self.ninitial != 1: + raise self.InvalidCmd('Can only calculate decay widths for decay processes A > B C ...') + +@@ -1232,7 +1232,7 @@ class CheckValidForCmd(object): + if len(args) > 1: + self.help_calculate_decay_widths() + raise self.InvalidCmd('Too many argument for calculate_decay_widths command: %s' % cmd) +- ++ + return accuracy + + +@@ -1241,25 +1241,25 @@ class CheckValidForCmd(object): + """check that the argument for survey are valid""" + + run = None +- ++ + if not len(args): + self.help_multi_run() + raise self.InvalidCmd("""multi_run command requires at least one argument for + the number of times that it call generate_events command""") +- ++ + if args[-1].startswith('--laststep='): + run = args[-1].split('=')[-1] + if run not in ['parton', 'pythia', 'pgs', 'delphes']: + self.help_multi_run() + raise self.InvalidCmd('invalid %s argument'% args[-1]) +- if run != 'parton' and not self.options['pythia-pgs_path']: +- raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. ++ if run != 'parton' and not self.options['pythia-pgs_path']: ++ raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. 
+ To do so type: \'install pythia-pgs\' in the mg5 interface''') + if run == 'delphes' and not self.options['delphes_path']: +- raise self.InvalidCmd('''delphes not install. Please install this package first. ++ raise self.InvalidCmd('''delphes not install. Please install this package first. + To do so type: \'install Delphes\' in the mg5 interface''') + del args[-1] +- ++ + + elif not args[0].isdigit(): + self.help_multi_run() +@@ -1267,7 +1267,7 @@ class CheckValidForCmd(object): + #pass nb run to an integer + nb_run = args.pop(0) + args.insert(0, int(nb_run)) +- ++ + + return run + +@@ -1284,7 +1284,7 @@ class CheckValidForCmd(object): + self.help_refine() + raise self.InvalidCmd('require_precision argument is require for refine cmd') + +- ++ + if not self.run_name: + if self.results.lastrun: + self.set_run_name(self.results.lastrun) +@@ -1296,17 +1296,17 @@ class CheckValidForCmd(object): + else: + try: + [float(arg) for arg in args] +- except ValueError: +- self.help_refine() ++ except ValueError: ++ self.help_refine() + raise self.InvalidCmd('refine arguments are suppose to be number') +- ++ + return True +- ++ + def check_combine_events(self, arg): + """ Check the argument for the combine events command """ +- ++ + tag = [a for a in arg if a.startswith('--tag=')] +- if tag: ++ if tag: + arg.remove(tag[0]) + tag = tag[0][6:] + elif not self.run_tag: +@@ -1314,53 +1314,53 @@ class CheckValidForCmd(object): + else: + tag = self.run_tag + self.run_tag = tag +- ++ + if len(arg) > 1: + self.help_combine_events() + raise self.InvalidCmd('Too many argument for combine_events command') +- ++ + if len(arg) == 1: + self.set_run_name(arg[0], self.run_tag, 'parton', True) +- ++ + if not self.run_name: + if not self.results.lastrun: + raise self.InvalidCmd('No run_name currently define. 
Unable to run combine') + else: + self.set_run_name(self.results.lastrun) +- ++ + return True +- ++ + def check_pythia(self, args): + """Check the argument for pythia command +- syntax: pythia [NAME] ++ syntax: pythia [NAME] + Note that other option are already removed at this point + """ +- ++ + mode = None + laststep = [arg for arg in args if arg.startswith('--laststep=')] + if laststep and len(laststep)==1: + mode = laststep[0].split('=')[-1] + if mode not in ['auto', 'pythia', 'pgs', 'delphes']: + self.help_pythia() +- raise self.InvalidCmd('invalid %s argument'% args[-1]) ++ raise self.InvalidCmd('invalid %s argument'% args[-1]) + elif laststep: + raise self.InvalidCmd('only one laststep argument is allowed') +- ++ + if not self.options['pythia-pgs_path']: + logger.info('Retry to read configuration file to find pythia-pgs path') + self.set_configuration() +- ++ + if not self.options['pythia-pgs_path'] or not \ + os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): + error_msg = 'No valid pythia-pgs path set.\n' + error_msg += 'Please use the set command to define the path and retry.\n' + error_msg += 'You can also define it in the configuration file.\n' + raise self.InvalidCmd(error_msg) +- +- +- ++ ++ ++ + tag = [a for a in args if a.startswith('--tag=')] +- if tag: ++ if tag: + args.remove(tag[0]) + tag = tag[0][6:] + +@@ -1368,8 +1368,8 @@ class CheckValidForCmd(object): + if self.results.lastrun: + args.insert(0, self.results.lastrun) + else: +- raise self.InvalidCmd('No run name currently define. Please add this information.') +- ++ raise self.InvalidCmd('No run name currently define. 
Please add this information.') ++ + if len(args) >= 1: + if args[0] != self.run_name and\ + not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): +@@ -1388,21 +1388,21 @@ class CheckValidForCmd(object): + files.ln(input_file, os.path.dirname(output_file)) + else: + misc.gunzip(input_file, keep=True, stdout=output_file) +- ++ + args.append(mode) +- ++ + def check_pythia8(self, args): + """Check the argument for pythia command +- syntax: pythia8 [NAME] ++ syntax: pythia8 [NAME] + Note that other option are already removed at this point +- """ ++ """ + mode = None + laststep = [arg for arg in args if arg.startswith('--laststep=')] + if laststep and len(laststep)==1: + mode = laststep[0].split('=')[-1] + if mode not in ['auto', 'pythia','pythia8','delphes']: + self.help_pythia8() +- raise self.InvalidCmd('invalid %s argument'% args[-1]) ++ raise self.InvalidCmd('invalid %s argument'% args[-1]) + elif laststep: + raise self.InvalidCmd('only one laststep argument is allowed') + +@@ -1410,7 +1410,7 @@ class CheckValidForCmd(object): + if not self.options['pythia8_path']: + logger.info('Retry reading configuration file to find pythia8 path') + self.set_configuration() +- ++ + if not self.options['pythia8_path'] or not \ + os.path.exists(pjoin(self.options['pythia8_path'],'bin','pythia8-config')): + error_msg = 'No valid pythia8 path set.\n' +@@ -1421,7 +1421,7 @@ class CheckValidForCmd(object): + raise self.InvalidCmd(error_msg) + + tag = [a for a in args if a.startswith('--tag=')] +- if tag: ++ if tag: + args.remove(tag[0]) + tag = tag[0][6:] + +@@ -1430,11 +1430,11 @@ class CheckValidForCmd(object): + args.insert(0, self.results.lastrun) + else: + raise self.InvalidCmd('No run name currently define. 
'+ +- 'Please add this information.') +- ++ 'Please add this information.') ++ + if len(args) >= 1: + if args[0] != self.run_name and\ +- not os.path.exists(pjoin(self.me_dir,'Events',args[0], ++ not os.path.exists(pjoin(self.me_dir,'Events',args[0], + 'unweighted_events.lhe.gz')): + raise self.InvalidCmd('No events file corresponding to %s run. ' + % args[0]) +@@ -1451,9 +1451,9 @@ class CheckValidForCmd(object): + else: + raise self.InvalidCmd('No event file corresponding to %s run. ' + % self.run_name) +- ++ + args.append(mode) +- ++ + def check_remove(self, args): + """Check that the remove command is valid""" + +@@ -1484,33 +1484,33 @@ class CheckValidForCmd(object): + + madir = self.options['madanalysis_path'] + td = self.options['td_path'] +- ++ + if not madir or not td: + logger.info('Retry to read configuration file to find madanalysis/td') + self.set_configuration() + + madir = self.options['madanalysis_path'] +- td = self.options['td_path'] +- ++ td = self.options['td_path'] ++ + if not madir: + error_msg = 'No valid MadAnalysis path set.\n' + error_msg += 'Please use the set command to define the path and retry.\n' + error_msg += 'You can also define it in the configuration file.\n' +- raise self.InvalidCmd(error_msg) ++ raise self.InvalidCmd(error_msg) + if not td: + error_msg = 'No valid td path set.\n' + error_msg += 'Please use the set command to define the path and retry.\n' + error_msg += 'You can also define it in the configuration file.\n' +- raise self.InvalidCmd(error_msg) +- ++ raise self.InvalidCmd(error_msg) ++ + if len(args) == 0: + if not hasattr(self, 'run_name') or not self.run_name: + self.help_plot() +- raise self.InvalidCmd('No run name currently define. Please add this information.') ++ raise self.InvalidCmd('No run name currently define. 
Please add this information.') + args.append('all') + return + +- ++ + if args[0] not in self._plot_mode: + self.set_run_name(args[0], level='plot') + del args[0] +@@ -1518,45 +1518,45 @@ class CheckValidForCmd(object): + args.append('all') + elif not self.run_name: + self.help_plot() +- raise self.InvalidCmd('No run name currently define. Please add this information.') +- ++ raise self.InvalidCmd('No run name currently define. Please add this information.') ++ + for arg in args: + if arg not in self._plot_mode and arg != self.run_name: + self.help_plot() +- raise self.InvalidCmd('unknown options %s' % arg) +- ++ raise self.InvalidCmd('unknown options %s' % arg) ++ + def check_syscalc(self, args): + """Check the argument for the syscalc command + syscalc run_name modes""" + + scdir = self.options['syscalc_path'] +- ++ + if not scdir: + logger.info('Retry to read configuration file to find SysCalc') + self.set_configuration() + + scdir = self.options['syscalc_path'] +- ++ + if not scdir: + error_msg = 'No valid SysCalc path set.\n' + error_msg += 'Please use the set command to define the path and retry.\n' + error_msg += 'You can also define it in the configuration file.\n' + error_msg += 'Please note that you need to compile SysCalc first.' +- raise self.InvalidCmd(error_msg) +- ++ raise self.InvalidCmd(error_msg) ++ + if len(args) == 0: + if not hasattr(self, 'run_name') or not self.run_name: + self.help_syscalc() +- raise self.InvalidCmd('No run name currently defined. Please add this information.') ++ raise self.InvalidCmd('No run name currently defined. 
Please add this information.') + args.append('all') + return + + #deal options + tag = [a for a in args if a.startswith('--tag=')] +- if tag: ++ if tag: + args.remove(tag[0]) + tag = tag[0][6:] +- ++ + if args[0] not in self._syscalc_mode: + self.set_run_name(args[0], tag=tag, level='syscalc') + del args[0] +@@ -1564,61 +1564,61 @@ class CheckValidForCmd(object): + args.append('all') + elif not self.run_name: + self.help_syscalc() +- raise self.InvalidCmd('No run name currently defined. Please add this information.') ++ raise self.InvalidCmd('No run name currently defined. Please add this information.') + elif tag and tag != self.run_tag: + self.set_run_name(self.run_name, tag=tag, level='syscalc') +- ++ + for arg in args: + if arg not in self._syscalc_mode and arg != self.run_name: + self.help_syscalc() +- raise self.InvalidCmd('unknown options %s' % arg) ++ raise self.InvalidCmd('unknown options %s' % arg) + + if self.run_card['use_syst'] not in self.true: + raise self.InvalidCmd('Run %s does not include ' % self.run_name + \ + 'systematics information needed for syscalc.') +- +- ++ ++ + def check_pgs(self, arg, no_default=False): + """Check the argument for pythia command +- syntax is "pgs [NAME]" ++ syntax is "pgs [NAME]" + Note that other option are already remove at this point + """ +- ++ + # If not pythia-pgs path + if not self.options['pythia-pgs_path']: + logger.info('Retry to read configuration file to find pythia-pgs path') + self.set_configuration() +- ++ + if not self.options['pythia-pgs_path'] or not \ + os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): + error_msg = 'No valid pythia-pgs path set.\n' + error_msg += 'Please use the set command to define the path and retry.\n' + error_msg += 'You can also define it in the configuration file.\n' +- raise self.InvalidCmd(error_msg) +- ++ raise self.InvalidCmd(error_msg) ++ + tag = [a for a in arg if a.startswith('--tag=')] +- if tag: ++ if tag: + arg.remove(tag[0]) + tag = tag[0][6:] +- +- ++ 
++ + if len(arg) == 0 and not self.run_name: + if self.results.lastrun: + arg.insert(0, self.results.lastrun) + else: +- raise self.InvalidCmd('No run name currently define. Please add this information.') +- ++ raise self.InvalidCmd('No run name currently define. Please add this information.') ++ + if len(arg) == 1 and self.run_name == arg[0]: + arg.pop(0) +- ++ + if not len(arg) and \ + not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): + if not no_default: + self.help_pgs() + raise self.InvalidCmd('''No file file pythia_events.hep currently available + Please specify a valid run_name''') +- +- lock = None ++ ++ lock = None + if len(arg) == 1: + prev_tag = self.set_run_name(arg[0], tag, 'pgs') + if not os.path.exists(pjoin(self.me_dir,'Events',self.run_name,'%s_pythia_events.hep.gz' % prev_tag)): +@@ -1626,25 +1626,25 @@ class CheckValidForCmd(object): + else: + input_file = pjoin(self.me_dir,'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag) + output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep') +- lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), ++ lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), + argument=['-c', input_file]) + + else: +- if tag: ++ if tag: + self.run_card['run_tag'] = tag + self.set_run_name(self.run_name, tag, 'pgs') +- +- return lock ++ ++ return lock + + def check_display(self, args): + """check the validity of line + syntax is "display XXXXX" + """ +- ++ + if len(args) < 1 or args[0] not in self._display_opts: + self.help_display() + raise self.InvalidCmd +- ++ + if args[0] == 'variable' and len(args) !=2: + raise self.InvalidCmd('variable need a variable name') + +@@ -1654,39 +1654,39 @@ class CheckValidForCmd(object): + + def check_import(self, args): + """check the validity of line""" +- ++ + if not args: + self.help_import() + raise self.InvalidCmd('wrong \"import\" format') +- ++ + if args[0] != 'command': + args.insert(0,'command') +- +- ++ ++ + if 
not len(args) == 2 or not os.path.exists(args[1]): + raise self.InvalidCmd('PATH is mandatory for import command\n') +- ++ + + #=============================================================================== + # CompleteForCmd + #=============================================================================== + class CompleteForCmd(CheckValidForCmd): + """ The Series of help routine for the MadGraphCmd""" +- +- ++ ++ + def complete_banner_run(self, text, line, begidx, endidx, formatting=True): + "Complete the banner run command" + try: +- +- ++ ++ + args = self.split_arg(line[0:begidx], error=False) +- ++ + if args[-1].endswith(os.path.sep): + return self.path_completion(text, + os.path.join('.',*[a for a in args \ +- if a.endswith(os.path.sep)])) +- +- ++ if a.endswith(os.path.sep)])) ++ ++ + if len(args) > 1: + # only options are possible + tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events' , args[1])) +@@ -1697,9 +1697,9 @@ class CompleteForCmd(CheckValidForCmd): + else: + return self.list_completion(text, tags) + return self.list_completion(text, tags +['--name=','-f'], line) +- ++ + # First argument +- possibilites = {} ++ possibilites = {} + + comp = self.path_completion(text, os.path.join('.',*[a for a in args \ + if a.endswith(os.path.sep)])) +@@ -1711,10 +1711,10 @@ class CompleteForCmd(CheckValidForCmd): + run_list = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) + run_list = [n.rsplit('/',2)[1] for n in run_list] + possibilites['RUN Name'] = self.list_completion(text, run_list) +- ++ + return self.deal_multiple_categories(possibilites, formatting) +- +- ++ ++ + except Exception as error: + print(error) + +@@ -1732,12 +1732,12 @@ class CompleteForCmd(CheckValidForCmd): + + if len(args) == 1: + return self.path_completion(text) +- +- def complete_open(self, text, line, begidx, endidx): ++ ++ def complete_open(self, text, line, begidx, endidx): + """ complete the open command """ + + args = 
self.split_arg(line[0:begidx]) +- ++ + # Directory continuation + if os.path.sep in args[-1] + text: + return self.path_completion(text, +@@ -1751,10 +1751,10 @@ class CompleteForCmd(CheckValidForCmd): + if os.path.isfile(os.path.join(path,'README')): + possibility.append('README') + if os.path.isdir(os.path.join(path,'Cards')): +- possibility += [f for f in os.listdir(os.path.join(path,'Cards')) ++ possibility += [f for f in os.listdir(os.path.join(path,'Cards')) + if f.endswith('.dat')] + if os.path.isdir(os.path.join(path,'HTML')): +- possibility += [f for f in os.listdir(os.path.join(path,'HTML')) ++ possibility += [f for f in os.listdir(os.path.join(path,'HTML')) + if f.endswith('.html') and 'default' not in f] + else: + possibility.extend(['./','../']) +@@ -1763,7 +1763,7 @@ class CompleteForCmd(CheckValidForCmd): + if os.path.exists('MG5_debug'): + possibility.append('MG5_debug') + return self.list_completion(text, possibility) +- ++ + def complete_set(self, text, line, begidx, endidx): + "Complete the set command" + +@@ -1784,27 +1784,27 @@ class CompleteForCmd(CheckValidForCmd): + elif len(args) >2 and args[-1].endswith(os.path.sep): + return self.path_completion(text, + os.path.join('.',*[a for a in args if a.endswith(os.path.sep)]), +- only_dirs = True) +- ++ only_dirs = True) ++ + def complete_survey(self, text, line, begidx, endidx): + """ Complete the survey command """ +- ++ + if line.endswith('nb_core=') and not text: + import multiprocessing + max = multiprocessing.cpu_count() + return [str(i) for i in range(2,max+1)] +- ++ + return self.list_completion(text, self._run_options, line) +- ++ + complete_refine = complete_survey + complete_combine_events = complete_survey + complite_store = complete_survey + complete_generate_events = complete_survey + complete_create_gridpack = complete_survey +- ++ + def complete_generate_events(self, text, line, begidx, endidx): + """ Complete the generate events""" +- ++ + if line.endswith('nb_core=') and not text: 
+ import multiprocessing + max = multiprocessing.cpu_count() +@@ -1813,17 +1813,17 @@ class CompleteForCmd(CheckValidForCmd): + return ['parton','pythia','pgs','delphes'] + elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': + return self.list_completion(text,['parton','pythia','pgs','delphes'],line) +- ++ + opts = self._run_options + self._generate_options + return self.list_completion(text, opts, line) + + + def complete_initMadLoop(self, text, line, begidx, endidx): + "Complete the initMadLoop command" +- ++ + numbers = [str(i) for i in range(10)] + opts = ['-f','-r','--nPS='] +- ++ + args = self.split_arg(line[0:begidx], error=False) + if len(line) >=6 and line[begidx-6:begidx]=='--nPS=': + return self.list_completion(text, numbers, line) +@@ -1840,18 +1840,18 @@ class CompleteForCmd(CheckValidForCmd): + + def complete_calculate_decay_widths(self, text, line, begidx, endidx): + """ Complete the calculate_decay_widths command""" +- ++ + if line.endswith('nb_core=') and not text: + import multiprocessing + max = multiprocessing.cpu_count() + return [str(i) for i in range(2,max+1)] +- ++ + opts = self._run_options + self._calculate_decay_options + return self.list_completion(text, opts, line) +- ++ + def complete_display(self, text, line, begidx, endidx): +- """ Complete the display command""" +- ++ """ Complete the display command""" ++ + args = self.split_arg(line[0:begidx], error=False) + if len(args) >= 2 and args[1] =='results': + start = line.find('results') +@@ -1860,44 +1860,44 @@ class CompleteForCmd(CheckValidForCmd): + + def complete_multi_run(self, text, line, begidx, endidx): + """complete multi run command""" +- ++ + args = self.split_arg(line[0:begidx], error=False) + if len(args) == 1: + data = [str(i) for i in range(0,20)] + return self.list_completion(text, data, line) +- ++ + if line.endswith('run=') and not text: + return ['parton','pythia','pgs','delphes'] + elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': 
+ return self.list_completion(text,['parton','pythia','pgs','delphes'],line) +- ++ + opts = self._run_options + self._generate_options + return self.list_completion(text, opts, line) +- +- +- ++ ++ ++ + if line.endswith('nb_core=') and not text: + import multiprocessing + max = multiprocessing.cpu_count() + return [str(i) for i in range(2,max+1)] + opts = self._run_options + self._generate_options + return self.list_completion(text, opts, line) +- ++ + def complete_plot(self, text, line, begidx, endidx): + """ Complete the plot command """ +- ++ + args = self.split_arg(line[0:begidx], error=False) + if len(args) > 1: + return self.list_completion(text, self._plot_mode) + else: + return self.list_completion(text, self._plot_mode + list(self.results.keys())) +- ++ + def complete_syscalc(self, text, line, begidx, endidx, formatting=True): + """ Complete the syscalc command """ +- ++ + output = {} + args = self.split_arg(line[0:begidx], error=False) +- ++ + if len(args) <=1: + output['RUN_NAME'] = self.list_completion(list(self.results.keys())) + output['MODE'] = self.list_completion(text, self._syscalc_mode) +@@ -1907,12 +1907,12 @@ class CompleteForCmd(CheckValidForCmd): + if run in self.results: + tags = ['--tag=%s' % tag['tag'] for tag in self.results[run]] + output['options'] += tags +- ++ + return self.deal_multiple_categories(output, formatting) +- ++ + def complete_remove(self, text, line, begidx, endidx): + """Complete the remove command """ +- ++ + args = self.split_arg(line[0:begidx], error=False) + if len(args) > 1 and (text.startswith('--t')): + run = args[1] +@@ -1932,8 +1932,8 @@ class CompleteForCmd(CheckValidForCmd): + data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) + data = [n.rsplit('/',2)[1] for n in data] + return self.list_completion(text, ['all'] + data) +- +- ++ ++ + def complete_shower(self,text, line, begidx, endidx): + "Complete the shower command" + args = self.split_arg(line[0:begidx], error=False) +@@ -1941,7 
+1941,7 @@ class CompleteForCmd(CheckValidForCmd): + return self.list_completion(text, self._interfaced_showers) + elif len(args)>1 and args[1] in self._interfaced_showers: + return getattr(self, 'complete_%s' % text)\ +- (text, args[1],line.replace(args[0]+' ',''), ++ (text, args[1],line.replace(args[0]+' ',''), + begidx-len(args[0])-1, endidx-len(args[0])-1) + + def complete_pythia8(self,text, line, begidx, endidx): +@@ -1955,11 +1955,11 @@ class CompleteForCmd(CheckValidForCmd): + if not self.run_name: + return tmp1 + else: +- tmp2 = self.list_completion(text, self._run_options + ['-f', ++ tmp2 = self.list_completion(text, self._run_options + ['-f', + '--no_default', '--tag='], line) + return tmp1 + tmp2 + elif line[-1] != '=': +- return self.list_completion(text, self._run_options + ['-f', ++ return self.list_completion(text, self._run_options + ['-f', + '--no_default','--tag='], line) + + def complete_madanalysis5_parton(self,text, line, begidx, endidx): +@@ -1978,19 +1978,19 @@ class CompleteForCmd(CheckValidForCmd): + else: + tmp2 = self.list_completion(text, ['-f', + '--MA5_stdout_lvl=','--no_default','--tag='], line) +- return tmp1 + tmp2 ++ return tmp1 + tmp2 + elif '--MA5_stdout_lvl=' in line and not any(arg.startswith( + '--MA5_stdout_lvl=') for arg in args): +- return self.list_completion(text, +- ['--MA5_stdout_lvl=%s'%opt for opt in ++ return self.list_completion(text, ++ ['--MA5_stdout_lvl=%s'%opt for opt in + ['logging.INFO','logging.DEBUG','logging.WARNING', + 'logging.CRITICAL','90']], line) + else: +- return self.list_completion(text, ['-f', ++ return self.list_completion(text, ['-f', + '--MA5_stdout_lvl=','--no_default','--tag='], line) + + def complete_pythia(self,text, line, begidx, endidx): +- "Complete the pythia command" ++ "Complete the pythia command" + args = self.split_arg(line[0:begidx], error=False) + + if len(args) == 1: +@@ -2001,16 +2001,16 @@ class CompleteForCmd(CheckValidForCmd): + if not self.run_name: + return tmp1 + else: +- 
tmp2 = self.list_completion(text, self._run_options + ['-f', ++ tmp2 = self.list_completion(text, self._run_options + ['-f', + '--no_default', '--tag='], line) + return tmp1 + tmp2 + elif line[-1] != '=': +- return self.list_completion(text, self._run_options + ['-f', ++ return self.list_completion(text, self._run_options + ['-f', + '--no_default','--tag='], line) + + def complete_pgs(self,text, line, begidx, endidx): + "Complete the pythia command" +- args = self.split_arg(line[0:begidx], error=False) ++ args = self.split_arg(line[0:begidx], error=False) + if len(args) == 1: + #return valid run_name + data = misc.glob(pjoin('*', '*_pythia_events.hep.gz'), pjoin(self.me_dir, 'Events')) +@@ -2019,23 +2019,23 @@ class CompleteForCmd(CheckValidForCmd): + if not self.run_name: + return tmp1 + else: +- tmp2 = self.list_completion(text, self._run_options + ['-f', ++ tmp2 = self.list_completion(text, self._run_options + ['-f', + '--tag=' ,'--no_default'], line) +- return tmp1 + tmp2 ++ return tmp1 + tmp2 + else: +- return self.list_completion(text, self._run_options + ['-f', ++ return self.list_completion(text, self._run_options + ['-f', + '--tag=','--no_default'], line) + +- complete_delphes = complete_pgs +- complete_rivet = complete_pgs ++ complete_delphes = complete_pgs ++ complete_rivet = complete_pgs + + #=============================================================================== + # MadEventCmd + #=============================================================================== + class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCmd): + +- """The command line processor of Mad Graph""" +- ++ """The command line processor of Mad Graph""" ++ + + LO = True + # Truth values +@@ -2063,7 +2063,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + cluster_mode = 0 + queue = 'madgraph' + nb_core = None +- ++ + next_possibility = { + 'start': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]', + 
'calculate_decay_widths [OPTIONS]', +@@ -2080,9 +2080,9 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + 'pgs': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'], + 'delphes' : ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'] + } +- ++ + asking_for_run = AskRun +- ++ + ############################################################################ + def __init__(self, me_dir = None, options={}, *completekey, **stdin): + """ add information to the cmd """ +@@ -2095,16 +2095,16 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + if self.web: + os.system('touch %s' % pjoin(self.me_dir,'Online')) + +- self.load_results_db() ++ self.load_results_db() + self.results.def_web_mode(self.web) + + self.Gdirs = None +- ++ + self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) + self.configured = 0 # time for reading the card + self._options = {} # for compatibility with extended_cmd +- +- ++ ++ + def pass_in_web_mode(self): + """configure web data""" + self.web = True +@@ -2113,22 +2113,22 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + if os.environ['MADGRAPH_BASE']: + self.options['mg5_path'] = pjoin(os.environ['MADGRAPH_BASE'],'MG5') + +- ############################################################################ ++ ############################################################################ + def check_output_type(self, path): + """ Check that the output path is a valid madevent directory """ +- ++ + bin_path = os.path.join(path,'bin') + if os.path.isfile(os.path.join(bin_path,'generate_events')): + return True +- else: ++ else: + return False + + ############################################################################ + def set_configuration(self, amcatnlo=False, final=True, **opt): +- """assign all configuration variable from file ++ """assign all configuration variable from file + loop over the different config file if config_file not define """ +- 
+- super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, ++ ++ super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, + final=final, **opt) + + if not final: +@@ -2171,24 +2171,24 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + if not os.path.exists(pjoin(path, 'sys_calc')): + logger.info("No valid SysCalc path found") + continue +- # No else since the next line reinitialize the option to the ++ # No else since the next line reinitialize the option to the + #previous value anyway + self.options[key] = os.path.realpath(path) + continue + else: + self.options[key] = None +- +- ++ ++ + return self.options + + ############################################################################ +- def do_banner_run(self, line): ++ def do_banner_run(self, line): + """Make a run from the banner file""" +- ++ + args = self.split_arg(line) + #check the validity of the arguments +- self.check_banner_run(args) +- ++ self.check_banner_run(args) ++ + # Remove previous cards + for name in ['delphes_trigger.dat', 'delphes_card.dat', + 'pgs_card.dat', 'pythia_card.dat', 'madspin_card.dat', +@@ -2197,20 +2197,20 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + os.remove(pjoin(self.me_dir, 'Cards', name)) + except Exception: + pass +- ++ + banner_mod.split_banner(args[0], self.me_dir, proc_card=False) +- ++ + # Check if we want to modify the run + if not self.force: + ans = self.ask('Do you want to modify the Cards?', 'n', ['y','n']) + if ans == 'n': + self.force = True +- ++ + # Call Generate events + self.exec_cmd('generate_events %s %s' % (self.run_name, self.force and '-f' or '')) +- +- +- ++ ++ ++ + ############################################################################ + def do_display(self, line, output=sys.stdout): + """Display current internal status""" +@@ -2223,7 +2223,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + #return valid run_name + data = 
misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) + data = [n.rsplit('/',2)[1:] for n in data] +- ++ + if data: + out = {} + for name, tag in data: +@@ -2235,11 +2235,11 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + print('the runs available are:') + for run_name, tags in out.items(): + print(' run: %s' % run_name) +- print(' tags: ', end=' ') ++ print(' tags: ', end=' ') + print(', '.join(tags)) + else: + print('No run detected.') +- ++ + elif args[0] == 'options': + outstr = " Run Options \n" + outstr += " ----------- \n" +@@ -2260,8 +2260,8 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + if value == default: + outstr += " %25s \t:\t%s\n" % (key,value) + else: +- outstr += " %25s \t:\t%s (user set)\n" % (key,value) +- outstr += "\n" ++ outstr += " %25s \t:\t%s (user set)\n" % (key,value) ++ outstr += "\n" + outstr += " Configuration Options \n" + outstr += " --------------------- \n" + for key, default in self.options_configuration.items(): +@@ -2275,15 +2275,15 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + self.do_print_results(' '.join(args[1:])) + else: + super(MadEventCmd, self).do_display(line, output) +- ++ + def do_save(self, line, check=True, to_keep={}): +- """Not in help: Save information to file""" ++ """Not in help: Save information to file""" + + args = self.split_arg(line) + # Check argument validity + if check: + self.check_save(args) +- ++ + if args[0] == 'options': + # First look at options which should be put in MG5DIR/input + to_define = {} +@@ -2295,7 +2295,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + for key, default in self.options_madevent.items(): + if self.options[key] != self.options_madevent[key]: + to_define[key] = self.options[key] +- ++ + if '--all' in args: + for key, default in self.options_madgraph.items(): + if self.options[key] != 
self.options_madgraph[key]: +@@ -2312,12 +2312,12 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + filepath = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') + basefile = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') + basedir = self.me_dir +- ++ + if to_keep: + to_define = to_keep + self.write_configuration(filepath, basefile, basedir, to_define) +- +- ++ ++ + + + def do_edit_cards(self, line): +@@ -2326,80 +2326,80 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + # Check argument's validity + mode = self.check_generate_events(args) + self.ask_run_configuration(mode) +- ++ + return + + ############################################################################ +- ++ + ############################################################################ + def do_restart_gridpack(self, line): + """ syntax restart_gridpack --precision=1.0 --restart_zero + collect the result of the current run and relaunch each channel +- not completed or optionally a completed one with a precision worse than ++ not completed or optionally a completed one with a precision worse than + a threshold (and/or the zero result channel)""" +- +- ++ ++ + args = self.split_arg(line) + # Check argument's validity + self.check_survey(args) +- ++ + # initialize / remove lhapdf mode + #self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) + #self.configure_directory() +- ++ + gensym = gen_ximprove.gensym(self) +- ++ + min_precision = 1.0 + resubmit_zero=False + if '--precision=' in line: + s = line.index('--precision=') + len('--precision=') + arg=line[s:].split(1)[0] + min_precision = float(arg) +- ++ + if '--restart_zero' in line: + resubmit_zero = True +- +- ++ ++ + gensym.resubmit(min_precision, resubmit_zero) + self.monitor(run_type='All jobs submitted for gridpack', html=True) + + #will be done during the refine (more precisely in gen_ximprove) + cross, error = 
sum_html.make_all_html_results(self) + self.results.add_detail('cross', cross) +- self.results.add_detail('error', error) ++ self.results.add_detail('error', error) + self.exec_cmd("print_results %s" % self.run_name, +- errorhandling=False, printcmd=False, precmd=False, postcmd=False) +- ++ errorhandling=False, printcmd=False, precmd=False, postcmd=False) ++ + self.results.add_detail('run_statistics', dict(gensym.run_statistics)) + +- ++ + #self.exec_cmd('combine_events', postcmd=False) + #self.exec_cmd('store_events', postcmd=False) + self.exec_cmd('decay_events -from_cards', postcmd=False) + self.exec_cmd('create_gridpack', postcmd=False) +- +- + +- ############################################################################ ++ ++ ++ ############################################################################ + + ############################################################################ + def do_generate_events(self, line): + """Main Commands: launch the full chain """ +- ++ + self.banner = None + self.Gdirs = None +- ++ + args = self.split_arg(line) + # Check argument's validity + mode = self.check_generate_events(args) + switch_mode = self.ask_run_configuration(mode, args) + if not args: +- # No run name assigned -> assigned one automaticaly ++ # No run name assigned -> assigned one automaticaly + self.set_run_name(self.find_available_run_name(self.me_dir), None, 'parton') + else: + self.set_run_name(args[0], None, 'parton', True) + args.pop(0) +- ++ + self.run_generate_events(switch_mode, args) + + self.postprocessing() +@@ -2420,8 +2420,8 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + + def rivet_postprocessing(self, rivet_config, postprocess_RIVET, postprocess_CONTUR): + +- # Check number of Rivet jobs to run +- run_dirs = [pjoin(self.me_dir, 'Events',run_name) ++ # Check number of Rivet jobs to run ++ run_dirs = [pjoin(self.me_dir, 'Events',run_name) + for run_name in self.postprocessing_dirs] + + nb_rivet = 
len(run_dirs) +@@ -2550,10 +2550,10 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm + + wrapper = open(pjoin(self.me_dir, "Analysis", "contur", "run_contur.sh"), "w") + wrapper.write(set_env) +- ++ + wrapper.write('{0}\n'.format(contur_cmd)) + wrapper.close() +- ++ + misc.call(["run_contur.sh"], cwd=(pjoin(self.me_dir, "Analysis", "contur"))) + + logger.info("Contur outputs are stored in {0}".format(pjoin(self.me_dir, "Analysis", "contur","conturPlot"))) +@@ -2572,7 +2572,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + self.do_set('run_mode 2') + self.do_set('nb_core 1') + +- if self.run_card['gridpack'] in self.true: ++ if self.run_card['gridpack'] in self.true: + # Running gridpack warmup + gridpack_opts=[('accuracy', 0.01), + ('points', 2000), +@@ -2593,7 +2593,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + # Regular run mode + logger.info('Generating %s events with run name %s' % + (self.run_card['nevents'], self.run_name)) +- ++ + self.exec_cmd('survey %s %s' % (self.run_name,' '.join(args)), + postcmd=False) + nb_event = self.run_card['nevents'] +@@ -2601,7 +2601,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + self.exec_cmd('refine %s' % nb_event, postcmd=False) + if not float(self.results.current['cross']): + # Zero cross-section. Try to guess why +- text = '''Survey return zero cross section. ++ text = '''Survey return zero cross section. + Typical reasons are the following: + 1) A massive s-channel particle has a width set to zero. 
+ 2) The pdf are zero for at least one of the initial state particles +@@ -2613,17 +2613,17 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + raise ZeroResult('See https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/FAQ-General-14') + else: + bypass_run = True +- ++ + #we can bypass the following if scan and first result is zero + if not bypass_run: + self.exec_cmd('refine %s --treshold=%s' % (nb_event,self.run_card['second_refine_treshold']) + , postcmd=False) +- ++ + self.exec_cmd('combine_events', postcmd=False,printcmd=False) + self.print_results_in_shell(self.results.current) + + if self.run_card['use_syst']: +- if self.run_card['systematics_program'] == 'auto': ++ if self.run_card['systematics_program'] == 'auto': + scdir = self.options['syscalc_path'] + if not scdir or not os.path.exists(scdir): + to_use = 'systematics' +@@ -2634,26 +2634,26 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + else: + logger.critical('Unvalid options for systematics_program: bypass computation of systematics variations.') + to_use = 'none' +- ++ + if to_use == 'systematics': + if self.run_card['systematics_arguments'] != ['']: + self.exec_cmd('systematics %s %s ' % (self.run_name, +- ' '.join(self.run_card['systematics_arguments'])), ++ ' '.join(self.run_card['systematics_arguments'])), + postcmd=False, printcmd=False) + else: + self.exec_cmd('systematics %s --from_card' % self.run_name, +- postcmd=False,printcmd=False) ++ postcmd=False,printcmd=False) + elif to_use == 'syscalc': + self.run_syscalc('parton') +- +- +- self.create_plot('parton') +- self.exec_cmd('store_events', postcmd=False) ++ ++ ++ self.create_plot('parton') ++ self.exec_cmd('store_events', postcmd=False) + if self.run_card['boost_event'].strip() and self.run_card['boost_event'] != 'False': + self.boost_events() +- +- +- self.exec_cmd('reweight -from_cards', postcmd=False) ++ ++ ++ self.exec_cmd('reweight -from_cards', postcmd=False) + 
self.exec_cmd('decay_events -from_cards', postcmd=False) + if self.run_card['time_of_flight']>=0: + self.exec_cmd("add_time_of_flight --threshold=%s" % self.run_card['time_of_flight'] ,postcmd=False) +@@ -2664,43 +2664,43 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + self.create_root_file(input , output) + + self.exec_cmd('madanalysis5_parton --no_default', postcmd=False, printcmd=False) +- # shower launches pgs/delphes if needed ++ # shower launches pgs/delphes if needed + self.exec_cmd('shower --no_default', postcmd=False, printcmd=False) + self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) + self.exec_cmd('rivet --no_default', postcmd=False, printcmd=False) + self.store_result() +- +- if self.allow_notification_center: +- misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), +- '%s: %s +- %s ' % (self.results.current['run_name'], ++ ++ if self.allow_notification_center: ++ misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), ++ '%s: %s +- %s ' % (self.results.current['run_name'], + self.results.current['cross'], + self.results.current['error'])) +- ++ + def boost_events(self): +- ++ + if not self.run_card['boost_event']: + return +- ++ + if self.run_card['boost_event'].startswith('lambda'): + if not isinstance(self, cmd.CmdShell): + raise Exception("boost not allowed online") + filter = eval(self.run_card['boost_event']) + else: + raise Exception +- ++ + path = [pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz'), + pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'), + pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'), + pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')] +- ++ + for p in path: + if os.path.exists(p): + event_path = p + break + else: + raise Exception("fail to find event file for the boost") +- +- ++ ++ + lhe = lhe_parser.EventFile(event_path) + with misc.TMP_directory() as tmp_dir: + 
output = lhe_parser.EventFile(pjoin(tmp_dir, os.path.basename(event_path)), 'w') +@@ -2711,28 +2711,28 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + event.boost(filter) + #write this modify event + output.write(str(event)) +- output.write('\n') ++ output.write('\n') + lhe.close() +- files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) +- +- +- +- +- ++ files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) ++ ++ ++ ++ ++ + def do_initMadLoop(self,line): +- """Compile and run MadLoop for a certain number of PS point so as to ++ """Compile and run MadLoop for a certain number of PS point so as to + initialize MadLoop (setup the zero helicity and loop filter.)""" +- ++ + args = line.split() + # Check argument's validity + options = self.check_initMadLoop(args) +- ++ + if not options['force']: + self.ask_edit_cards(['MadLoopParams.dat'], mode='fixed', plot=False) + self.exec_cmd('treatcards loop --no_MadLoopInit') + + if options['refresh']: +- for filter in misc.glob('*Filter*', ++ for filter in misc.glob('*Filter*', + pjoin(self.me_dir,'SubProcesses','MadLoop5_resources')): + logger.debug("Resetting filter '%s'."%os.path.basename(filter)) + os.remove(filter) +@@ -2753,14 +2753,14 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + + def do_launch(self, line, *args, **opt): + """Main Commands: exec generate_events for 2>N and calculate_width for 1>N""" +- ++ + if self.ninitial == 1: + logger.info("Note that since 2.3. 
The launch for 1>N pass in event generation\n"+ + " To have the previous behavior use the calculate_decay_widths function") + # self.do_calculate_decay_widths(line, *args, **opt) + #else: + self.do_generate_events(line, *args, **opt) +- ++ + def print_results_in_shell(self, data): + """Have a nice results prints in the shell, + data should be of type: gen_crossxhtml.OneTagResults""" +@@ -2770,7 +2770,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + + if data['run_statistics']: + globalstat = sum_html.RunStatistics() +- ++ + logger.info(" " ) + logger.debug(" === Run statistics summary ===") + for key, value in data['run_statistics'].items(): +@@ -2786,13 +2786,13 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + logger.warning(globalstat.get_warning_text()) + logger.info(" ") + +- ++ + logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) +- ++ + total_time = int(sum(_['cumulative_timing'] for _ in data['run_statistics'].values())) + if total_time > 0: + logger.info(" Cumulative sequential time for this run: %s"%misc.format_time(total_time)) +- ++ + if self.ninitial == 1: + logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) + else: +@@ -2810,18 +2810,18 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + if len(split)!=3: + continue + scale, cross, error = split +- cross_sections[float(scale)] = (float(cross), float(error)) ++ cross_sections[float(scale)] = (float(cross), float(error)) + if len(cross_sections)>0: + logger.info(' Pythia8 merged cross-sections are:') + for scale in sorted(cross_sections.keys()): + logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ + (scale,cross_sections[scale][0],cross_sections[scale][1])) +- ++ + else: + if self.ninitial == 1: + logger.info(" Matched width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) + else: +- 
logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) ++ logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) + logger.info(" Nb of events after matching/merging : %d" % int(data['nb_event_pythia'])) + if self.run_card['use_syst'] in self.true and \ + (int(self.run_card['ickkw'])==1 or self.run_card['ktdurham']>0.0 +@@ -2838,9 +2838,9 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + data should be of type: gen_crossxhtml.OneTagResults""" + if not data: + return +- ++ + fsock = open(path, mode) +- ++ + if data['run_statistics']: + logger.debug(" === Run statistics summary ===") + for key, value in data['run_statistics'].items(): +@@ -2851,7 +2851,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + if format == "full": + fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ + (data['run_name'],data['tag'], os.path.basename(self.me_dir))) +- ++ + if self.ninitial == 1: + fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) + else: +@@ -2861,20 +2861,20 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + if self.ninitial == 1: + fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) + else: +- fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) ++ fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) + fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) + fsock.write(" \n" ) + elif format == "short": + if mode == "w": + fsock.write("# run_name tag cross error Nb_event cross_after_matching nb_event_after matching\n") +- ++ + if data['cross_pythia'] and data['nb_event_pythia']: + text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s 
%(cross_pythia)s %(nb_event_pythia)s\n" + else: + text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s\n" + fsock.write(text % data) +- +- ############################################################################ ++ ++ ############################################################################ + def do_calculate_decay_widths(self, line): + """Main Commands: launch decay width calculation and automatic inclusion of + calculated widths and BRs in the param_card.""" +@@ -2887,21 +2887,21 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + self.Gdirs = None + + if not args: +- # No run name assigned -> assigned one automaticaly ++ # No run name assigned -> assigned one automaticaly + self.set_run_name(self.find_available_run_name(self.me_dir)) + else: + self.set_run_name(args[0], reload_card=True) + args.pop(0) + + self.configure_directory() +- ++ + # Running gridpack warmup + opts=[('accuracy', accuracy), # default 0.01 + ('points', 1000), + ('iterations',9)] + + logger.info('Calculating decay widths with run name %s' % self.run_name) +- ++ + self.exec_cmd('survey %s %s' % \ + (self.run_name, + " ".join(['--' + opt + '=' + str(val) for (opt,val) \ +@@ -2910,26 +2910,26 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + self.refine_mode = "old" # specify how to combine event + self.exec_cmd('combine_events', postcmd=False) + self.exec_cmd('store_events', postcmd=False) +- ++ + self.collect_decay_widths() + self.print_results_in_shell(self.results.current) +- self.update_status('calculate_decay_widths done', +- level='parton', makehtml=False) ++ self.update_status('calculate_decay_widths done', ++ level='parton', makehtml=False) ++ + +- + ############################################################################ + def collect_decay_widths(self): +- """ Collect the decay widths and calculate BRs for all particles, and put +- in param_card form. 
++ """ Collect the decay widths and calculate BRs for all particles, and put ++ in param_card form. + """ +- ++ + particle_dict = {} # store the results + run_name = self.run_name + + # Looping over the Subprocesses + for P_path in SubProcesses.get_subP(self.me_dir): + ids = SubProcesses.get_subP_ids(P_path) +- # due to grouping we need to compute the ratio factor for the ++ # due to grouping we need to compute the ratio factor for the + # ungroup resutls (that we need here). Note that initial particles + # grouping are not at the same stage as final particle grouping + nb_output = len(ids) / (len(set([p[0] for p in ids]))) +@@ -2940,30 +2940,30 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + particle_dict[particles[0]].append([particles[1:], result/nb_output]) + except KeyError: + particle_dict[particles[0]] = [[particles[1:], result/nb_output]] +- ++ + self.update_width_in_param_card(particle_dict, + initial = pjoin(self.me_dir, 'Cards', 'param_card.dat'), + output=pjoin(self.me_dir, 'Events', run_name, "param_card.dat")) +- ++ + @staticmethod + def update_width_in_param_card(decay_info, initial=None, output=None): + # Open the param_card.dat and insert the calculated decays and BRs +- ++ + if not output: + output = initial +- ++ + param_card_file = open(initial) + param_card = param_card_file.read().split('\n') + param_card_file.close() + + decay_lines = [] + line_number = 0 +- # Read and remove all decays from the param_card ++ # Read and remove all decays from the param_card + while line_number < len(param_card): + line = param_card[line_number] + if line.lower().startswith('decay'): +- # Read decay if particle in decay_info +- # DECAY 6 1.455100e+00 ++ # Read decay if particle in decay_info ++ # DECAY 6 1.455100e+00 + line = param_card.pop(line_number) + line = line.split() + particle = 0 +@@ -2996,7 +2996,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + break + 
line=param_card[line_number] + if particle and particle not in decay_info: +- # No decays given, only total width ++ # No decays given, only total width + decay_info[particle] = [[[], width]] + else: # Not decay + line_number += 1 +@@ -3004,7 +3004,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + while not param_card[-1] or param_card[-1].startswith('#'): + param_card.pop(-1) + +- # Append calculated and read decays to the param_card ++ # Append calculated and read decays to the param_card + param_card.append("#\n#*************************") + param_card.append("# Decay widths *") + param_card.append("#*************************") +@@ -3018,7 +3018,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + param_card.append("# BR NDA ID1 ID2 ...") + brs = [[(val[1]/width).real, val[0]] for val in decay_info[key] if val[1]] + for val in sorted(brs, reverse=True): +- param_card.append(" %e %i %s # %s" % ++ param_card.append(" %e %i %s # %s" % + (val[0].real, len(val[1]), + " ".join([str(v) for v in val[1]]), + val[0] * width +@@ -3031,7 +3031,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + + ############################################################################ + def do_multi_run(self, line): +- ++ + args = self.split_arg(line) + # Check argument's validity + mode = self.check_multi_run(args) +@@ -3047,7 +3047,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + self.check_param_card(path, run=False) + #store it locally to avoid relaunch + param_card_iterator, self.param_card_iterator = self.param_card_iterator, [] +- ++ + crossoversig = 0 + inv_sq_err = 0 + nb_event = 0 +@@ -3055,8 +3055,8 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + self.nb_refine = 0 + self.exec_cmd('generate_events %s_%s -f' % (main_name, i), postcmd=False) + # Update collected value +- nb_event += 
int(self.results[self.run_name][-1]['nb_event']) +- self.results.add_detail('nb_event', nb_event , run=main_name) ++ nb_event += int(self.results[self.run_name][-1]['nb_event']) ++ self.results.add_detail('nb_event', nb_event , run=main_name) + cross = self.results[self.run_name][-1]['cross'] + error = self.results[self.run_name][-1]['error'] + 1e-99 + crossoversig+=cross/error**2 +@@ -3070,7 +3070,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + os.mkdir(pjoin(self.me_dir,'Events', self.run_name)) + except Exception: + pass +- os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' ++ os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' + % {'bin': self.dirbin, 'event': pjoin(self.me_dir,'Events'), + 'name': self.run_name}) + +@@ -3084,19 +3084,19 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + + self.create_root_file('%s/unweighted_events.lhe' % self.run_name, + '%s/unweighted_events.root' % self.run_name) +- +- path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") ++ ++ path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") + self.create_plot('parton', path, + pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') + ) +- + +- if not os.path.exists('%s.gz' % path): ++ ++ if not os.path.exists('%s.gz' % path): + misc.gzip(path) + + self.update_status('', level='parton') +- self.print_results_in_shell(self.results.current) +- ++ self.print_results_in_shell(self.results.current) ++ + cpath = pjoin(self.me_dir,'Cards','param_card.dat') + if param_card_iterator: + +@@ -3112,21 +3112,21 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + path = pjoin(self.me_dir, 'Events','scan_%s.txt' % scan_name) + logger.info("write all 
cross-section results in %s" % path, '$MG:BOLD') + param_card_iterator.write_summary(path) +- + +- ############################################################################ ++ ++ ############################################################################ + def do_treatcards(self, line, mode=None, opt=None): + """Advanced commands: create .inc files from param_card.dat/run_card.dat""" + + if not mode and not opt: + args = self.split_arg(line) + mode, opt = self.check_treatcards(args) +- ++ + # To decide whether to refresh MadLoop's helicity filters, it is necessary + # to check if the model parameters where modified or not, before doing +- # anything else. ++ # anything else. + need_MadLoopFilterUpdate = False +- # Just to record what triggered the reinitialization of MadLoop for a ++ # Just to record what triggered the reinitialization of MadLoop for a + # nice debug message. + type_of_change = '' + if not opt['forbid_MadLoopInit'] and self.proc_characteristics['loop_induced'] \ +@@ -3137,10 +3137,10 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + (os.path.getmtime(paramDat)-os.path.getmtime(paramInc)) > 0.0: + need_MadLoopFilterUpdate = True + type_of_change = 'model' +- ++ + ML_in = pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat') + ML_out = pjoin(self.me_dir,"SubProcesses", +- "MadLoop5_resources", "MadLoopParams.dat") ++ "MadLoop5_resources", "MadLoopParams.dat") + if (not os.path.isfile(ML_in)) or (not os.path.isfile(ML_out)) or \ + (os.path.getmtime(ML_in)-os.path.getmtime(ML_out)) > 0.0: + need_MadLoopFilterUpdate = True +@@ -3148,7 +3148,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + + #check if no 'Auto' are present in the file + self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) +- ++ + if mode in ['param', 'all']: + model = self.find_model_name() + tmp_model = os.path.basename(model) +@@ -3160,9 +3160,9 @@ Beware that MG5aMC now changes your runtime 
options to a multi-core mode with on + check_param_card.check_valid_param_card(mg5_param) + opt['param_card'] = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') + else: +- check_param_card.check_valid_param_card(opt['param_card']) +- +- logger.debug('write compile file for card: %s' % opt['param_card']) ++ check_param_card.check_valid_param_card(opt['param_card']) ++ ++ logger.debug('write compile file for card: %s' % opt['param_card']) + param_card = check_param_card.ParamCard(opt['param_card']) + outfile = pjoin(opt['output_dir'], 'param_card.inc') + ident_card = pjoin(self.me_dir,'Cards','ident_card.dat') +@@ -3185,10 +3185,10 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + devnull.close() + default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat') + +- need_mp = self.proc_characteristics['loop_induced'] ++ need_mp = self.proc_characteristics['loop_induced'] + param_card.write_inc_file(outfile, ident_card, default, need_mp=need_mp) +- +- ++ ++ + if mode in ['run', 'all']: + if not hasattr(self, 'run_card'): + run_card = banner_mod.RunCard(opt['run_card'], path=pjoin(self.me_dir, 'Cards', 'run_card.dat')) +@@ -3202,7 +3202,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + run_card['lpp2'] = 0 + run_card['ebeam1'] = 0 + run_card['ebeam2'] = 0 +- ++ + # Ensure that the bias parameters has all the required input from the + # run_card + if run_card['bias_module'].lower() not in ['dummy','none']: +@@ -3219,7 +3219,7 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + mandatory_file,run_card['bias_module'])) + misc.copytree(run_card['bias_module'], pjoin(self.me_dir,'Source','BIAS', + os.path.basename(run_card['bias_module']))) +- ++ + #check expected parameters for the module. 
+ default_bias_parameters = {} + start, last = False,False +@@ -3244,50 +3244,50 @@ Beware that MG5aMC now changes your runtime options to a multi-core mode with on + for pair in line.split(','): + if not pair.strip(): + continue +- x,y =pair.split(':') ++ x,y =pair.split(':') + x=x.strip() + if x.startswith(('"',"'")) and x.endswith(x[0]): +- x = x[1:-1] ++ x = x[1:-1] + default_bias_parameters[x] = y + elif ':' in line: + x,y = line.split(':') + x = x.strip() + if x.startswith(('"',"'")) and x.endswith(x[0]): +- x = x[1:-1] ++ x = x[1:-1] + default_bias_parameters[x] = y + for key,value in run_card['bias_parameters'].items(): + if key not in default_bias_parameters: + logger.warning('%s not supported by the bias module. We discard this entry.', key) + else: + default_bias_parameters[key] = value +- run_card['bias_parameters'] = default_bias_parameters +- +- +- # Finally write the include file ++ run_card['bias_parameters'] = default_bias_parameters ++ ++ ++ # Finally write the include file + run_card.write_include_file(opt['output_dir']) +- ++ + + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: +- self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, ++ self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, + 'Cards', 'MadLoopParams.dat')) + # The writing out of MadLoop filter is potentially dangerous + # when running in multi-core with a central disk. So it is turned +- # off here. If these filters were not initialized then they will ++ # off here. If these filters were not initialized then they will + # have to be re-computed at the beginning of each run. + if 'WriteOutFilters' in self.MadLoopparam.user_set and \ + self.MadLoopparam.get('WriteOutFilters'): + logger.info( +-"""You chose to have MadLoop writing out filters. ++"""You chose to have MadLoop writing out filters. 
+ Beware that this can be dangerous for local multicore runs.""") + self.MadLoopparam.set('WriteOutFilters',False, changeifuserset=False) +- ++ + # The conservative settings below for 'CTModeInit' and 'ZeroThres' + # help adress issues for processes like g g > h z, and g g > h g +- # where there are some helicity configuration heavily suppressed +- # (by several orders of magnitude) so that the helicity filter ++ # where there are some helicity configuration heavily suppressed ++ # (by several orders of magnitude) so that the helicity filter + # needs high numerical accuracy to correctly handle this spread in + # magnitude. Also, because one cannot use the Born as a reference +- # scale, it is better to force quadruple precision *for the ++ # scale, it is better to force quadruple precision *for the + # initialization points only*. This avoids numerical accuracy issues + # when setting up the helicity filters and does not significantly + # slow down the run. +@@ -3298,21 +3298,21 @@ Beware that this can be dangerous for local multicore runs.""") + + # It is a bit superficial to use the level 2 which tries to numerically + # map matching helicities (because of CP symmetry typically) together. +-# It is useless in the context of MC over helicities and it can ++# It is useless in the context of MC over helicities and it can + # potentially make the helicity double checking fail. + self.MadLoopparam.set('HelicityFilterLevel',1, changeifuserset=False) + + # To be on the safe side however, we ask for 4 consecutive matching + # helicity filters. + self.MadLoopparam.set('CheckCycle',4, changeifuserset=False) +- ++ + # For now it is tricky to have each channel performing the helicity + # double check. What we will end up doing is probably some kind + # of new initialization round at the beginning of each launch +- # command, to reset the filters. ++ # command, to reset the filters. 
+ self.MadLoopparam.set('DoubleCheckHelicityFilter',False, + changeifuserset=False) +- ++ + # Thanks to TIR recycling, TIR is typically much faster for Loop-induced + # processes when not doing MC over helicities, so that we place OPP last. + if not hasattr(self, 'run_card'): +@@ -3349,7 +3349,7 @@ Beware that this can be dangerous for local multicore runs.""") + logger.warning( + """You chose to also use a lorentz rotation for stability tests (see parameter NRotations_[DP|QP]). + Beware that, for optimization purposes, MadEvent uses manual TIR cache clearing which is not compatible +- with the lorentz rotation stability test. The number of these rotations to be used will be reset to ++ with the lorentz rotation stability test. The number of these rotations to be used will be reset to + zero by MadLoop. You can avoid this by changing the parameter 'FORCE_ML_HELICITY_SUM' int he matrix.f + files to be .TRUE. so that the sum over helicity configurations is performed within MadLoop (in which case + the helicity of final state particles cannot be speicfied in the LHE file.""") +@@ -3363,15 +3363,15 @@ Beware that this can be dangerous for local multicore runs.""") + # self.MadLoopparam.set('NRotations_DP',0,changeifuserset=False) + # Revert to the above to be slightly less robust but twice faster. + self.MadLoopparam.set('NRotations_DP',1,changeifuserset=False) +- self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) +- ++ self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) ++ + # Finally, the stability tests are slightly less reliable for process +- # with less or equal than 4 final state particles because the ++ # with less or equal than 4 final state particles because the + # accessible kinematic is very limited (i.e. lorentz rotations don't + # shuffle invariants numerics much). In these cases, we therefore + # increase the required accuracy to 10^-7. 
+ # This is important for getting g g > z z [QCD] working with a +- # ptheavy cut as low as 1 GeV. ++ # ptheavy cut as low as 1 GeV. + if self.proc_characteristics['nexternal']<=4: + if ('MLStabThres' in self.MadLoopparam.user_set and \ + self.MadLoopparam.get('MLStabThres')>1.0e-7): +@@ -3381,12 +3381,12 @@ Beware that this can be dangerous for local multicore runs.""") + than four external legs, so this is not recommended (especially not for g g > z z).""") + self.MadLoopparam.set('MLStabThres',1.0e-7,changeifuserset=False) + else: +- self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) ++ self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) + + #write the output file + self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses","MadLoop5_resources", + "MadLoopParams.dat")) +- ++ + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: + # Now Update MadLoop filters if necessary (if modifications were made to + # the model parameters). +@@ -3403,12 +3403,12 @@ Beware that this can be dangerous for local multicore runs.""") + elif not opt['forbid_MadLoopInit'] and \ + MadLoopInitializer.need_MadLoopInit(self.me_dir): + self.exec_cmd('initMadLoop -f') +- +- ############################################################################ ++ ++ ############################################################################ + def do_survey(self, line): + """Advanced commands: launch survey for the current process """ +- +- ++ ++ + args = self.split_arg(line) + # Check argument's validity + self.check_survey(args) +@@ -3416,7 +3416,7 @@ Beware that this can be dangerous for local multicore runs.""") + + if os.path.exists(pjoin(self.me_dir,'error')): + os.remove(pjoin(self.me_dir,'error')) +- ++ + self.configure_directory() + # Save original random number + self.random_orig = self.random +@@ -3435,9 +3435,9 @@ Beware that this can be dangerous for local multicore runs.""") + P_zero_result = [] # check the number of times where they are 
no phase-space + + # File for the loop (for loop induced) +- if os.path.exists(pjoin(self.me_dir,'SubProcesses', ++ if os.path.exists(pjoin(self.me_dir,'SubProcesses', + 'MadLoop5_resources')) and cluster.need_transfer(self.options): +- tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', ++ tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', + 'MadLoop5_resources.tar.gz'), 'w:gz', dereference=True) + tf.add(pjoin(self.me_dir,'SubProcesses','MadLoop5_resources'), + arcname='MadLoop5_resources') +@@ -3467,7 +3467,7 @@ Beware that this can be dangerous for local multicore runs.""") + except Exception as error: + logger.debug(error) + pass +- ++ + jobs, P_zero_result = ajobcreator.launch() + # Check if all or only some fails + if P_zero_result: +@@ -3481,60 +3481,60 @@ Beware that this can be dangerous for local multicore runs.""") + self.get_Gdir() + for P in P_zero_result: + self.Gdirs[0][pjoin(self.me_dir,'SubProcesses',P)] = [] +- ++ + self.monitor(run_type='All jobs submitted for survey', html=True) + if not self.history or 'survey' in self.history[-1] or self.ninitial ==1 or \ + self.run_card['gridpack']: + #will be done during the refine (more precisely in gen_ximprove) + cross, error = self.make_make_all_html_results() + self.results.add_detail('cross', cross) +- self.results.add_detail('error', error) ++ self.results.add_detail('error', error) + self.exec_cmd("print_results %s" % self.run_name, +- errorhandling=False, printcmd=False, precmd=False, postcmd=False) +- ++ errorhandling=False, printcmd=False, precmd=False, postcmd=False) ++ + self.results.add_detail('run_statistics', dict(ajobcreator.run_statistics)) + self.update_status('End survey', 'parton', makehtml=False) + + ############################################################################ + def pass_in_difficult_integration_mode(self, rate=1): + """be more secure for the integration to not miss it due to strong cut""" +- ++ + # improve survey options if default + if self.opts['points'] == 
self._survey_options['points'][1]: + self.opts['points'] = (rate+2) * self._survey_options['points'][1] + if self.opts['iterations'] == self._survey_options['iterations'][1]: + self.opts['iterations'] = 1 + rate + self._survey_options['iterations'][1] + if self.opts['accuracy'] == self._survey_options['accuracy'][1]: +- self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) +- ++ self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) ++ + # Modify run_config.inc in order to improve the refine + conf_path = pjoin(self.me_dir, 'Source','run_config.inc') + files.cp(conf_path, conf_path + '.bk') + # + text = open(conf_path).read() +- min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) +- ++ min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) ++ + text = re.sub('''\(min_events = \d+\)''', '(min_events = %i )' % min_evt, text) + text = re.sub('''\(max_events = \d+\)''', '(max_events = %i )' % max_evt, text) + fsock = open(conf_path, 'w') + fsock.write(text) + fsock.close() +- ++ + # Compile + for name in ['../bin/internal/gen_ximprove', 'all']: + self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) +- +- +- ############################################################################ ++ ++ ++ ############################################################################ + def do_refine(self, line): + """Advanced commands: launch survey for the current process """ +- devnull = open(os.devnull, 'w') ++ devnull = open(os.devnull, 'w') + self.nb_refine += 1 + args = self.split_arg(line) + treshold=None +- + +- ++ ++ + for a in args: + if a.startswith('--treshold='): + treshold = float(a.split('=',1)[1]) +@@ -3548,8 +3548,8 @@ Beware that this can be dangerous for local multicore runs.""") + break + # Check argument's validity + self.check_refine(args) +- +- refine_opt = {'err_goal': args[0], 'split_channels': True} ++ ++ refine_opt = {'err_goal': args[0], 'split_channels': True} + precision = args[0] + if len(args) == 2: + 
refine_opt['max_process']= args[1] +@@ -3560,15 +3560,15 @@ Beware that this can be dangerous for local multicore runs.""") + # Update random number + self.update_random() + self.save_random() +- ++ + if self.cluster_mode: + logger.info('Creating Jobs') + self.update_status('Refine results to %s' % precision, level=None) +- ++ + self.total_jobs = 0 +- subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', ++ subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + 'subproc.mg'))] +- ++ + # cleanning the previous job + for nb_proc,subdir in enumerate(subproc): + subdir = subdir.strip() +@@ -3589,14 +3589,14 @@ Beware that this can be dangerous for local multicore runs.""") + level = 5 + if value.has_warning(): + level = 10 +- logger.log(level, ++ logger.log(level, + value.nice_output(str('/'.join([key[0],'G%s'%key[1]]))). + replace(' statistics','')) + logger.debug(globalstat.nice_output('combined', no_warning=True)) +- ++ + if survey_statistics: + x_improve.run_statistics = survey_statistics +- ++ + x_improve.launch() # create the ajob for the refinment. 
+ if not self.history or 'refine' not in self.history[-1]: + cross, error = x_improve.update_html() #update html results for survey +@@ -3610,14 +3610,26 @@ Beware that this can be dangerous for local multicore runs.""") + subdir = subdir.strip() + Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) + bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) +- ++ + logger.info(' %s ' % subdir) +- ++ + if os.path.exists(pjoin(Pdir, 'ajob1')): +- self.compile(['madevent'], cwd=Pdir) +- ++ ++ cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py ++ logger.info("Building madevent in madevent_interface.py with '%s' matrix elements"%cudacpp_backend) ++ if cudacpp_backend == 'FORTRAN': ++ self.compile(['madevent_fortran_link'], cwd=Pdir) ++ elif cudacpp_backend == 'CPP': ++ self.compile(['madevent_cpp_link'], cwd=Pdir) ++ elif cudacpp_backend == 'CUDA': ++ self.compile(['madevent_cuda_link'], cwd=Pdir) ++ else: ++ raise Exception("Invalid cudacpp_backend='%s': only 'FORTRAN', 'CPP', 'CUDA' are supported") ++ ###logger.info("Building madevent with ALL (FORTRAN/CPP/CUDA) matrix elements (cudacpp_backend=%s)"%cudacpp_backend) ++ ###self.compile(['all'], cwd=Pdir) ++ + alljobs = misc.glob('ajob*', Pdir) +- ++ + #remove associated results.dat (ensure to not mix with all data) + Gre = re.compile("\s*j=(G[\d\.\w]+)") + for job in alljobs: +@@ -3625,49 +3637,49 @@ Beware that this can be dangerous for local multicore runs.""") + for Gdir in Gdirs: + if os.path.exists(pjoin(Pdir, Gdir, 'results.dat')): + os.remove(pjoin(Pdir, Gdir,'results.dat')) +- +- nb_tot = len(alljobs) ++ ++ nb_tot = len(alljobs) + self.total_jobs += nb_tot + for i, job in enumerate(alljobs): + job = os.path.basename(job) +- self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), +- run_type='Refine number %s on %s (%s/%s)' % ++ self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), ++ run_type='Refine number %s on %s (%s/%s)' % + (self.nb_refine, subdir, 
nb_proc+1, len(subproc))) + + +- self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, ++ self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, + html=True) +- ++ + self.update_status("Combining runs", level='parton') + try: + os.remove(pjoin(Pdir, 'combine_runs.log')) + except Exception: + pass +- ++ + if isinstance(x_improve, gen_ximprove.gen_ximprove_v4): + # the merge of the events.lhe is handle in the x_improve class +- # for splitted runs. (and partly in store_events). ++ # for splitted runs. (and partly in store_events). + combine_runs.CombineRuns(self.me_dir) + self.refine_mode = "old" + else: + self.refine_mode = "new" +- ++ + cross, error = self.make_make_all_html_results() + self.results.add_detail('cross', cross) + self.results.add_detail('error', error) + +- self.results.add_detail('run_statistics', ++ self.results.add_detail('run_statistics', + dict(self.results.get_detail('run_statistics'))) + + self.update_status('finish refine', 'parton', makehtml=False) + devnull.close() +- +- ############################################################################ ++ ++ ############################################################################ + def do_comine_iteration(self, line): + """Not in help: Combine a given iteration combine_iteration Pdir Gdir S|R step +- S is for survey ++ S is for survey + R is for refine +- step is the iteration number (not very critical)""" ++ step is the iteration number (not very critical)""" + + self.set_run_name("tmp") + self.configure_directory(html_opening=False) +@@ -3683,12 +3695,12 @@ Beware that this can be dangerous for local multicore runs.""") + gensym.combine_iteration(Pdir, Gdir, int(step)) + elif mode == "R": + refine = gen_ximprove.gen_ximprove_share(self) +- refine.combine_iteration(Pdir, Gdir, int(step)) +- +- ++ refine.combine_iteration(Pdir, Gdir, int(step)) ++ ++ ++ + +- +- ############################################################################ ++ 
############################################################################ + def do_combine_events(self, line): + """Advanced commands: Launch combine events""" + start=time.time() +@@ -3698,11 +3710,11 @@ Beware that this can be dangerous for local multicore runs.""") + self.check_combine_events(args) + self.update_status('Combining Events', level='parton') + +- ++ + if self.run_card['gridpack'] and isinstance(self, GridPackCmd): + return GridPackCmd.do_combine_events(self, line) + +- ++ + # Define The Banner + tag = self.run_card['run_tag'] + # Update the banner with the pythia card +@@ -3715,14 +3727,14 @@ Beware that this can be dangerous for local multicore runs.""") + self.banner.change_seed(self.random_orig) + if not os.path.exists(pjoin(self.me_dir, 'Events', self.run_name)): + os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) +- self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, ++ self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, + '%s_%s_banner.txt' % (self.run_name, tag))) +- + +- get_wgt = lambda event: event.wgt ++ ++ get_wgt = lambda event: event.wgt + AllEvent = lhe_parser.MultiEventFile() + AllEvent.banner = self.banner +- ++ + partials = 0 # if too many file make some partial unweighting + sum_xsec, sum_xerru, sum_axsec = 0,[],0 + Gdirs = self.get_Gdir() +@@ -3739,12 +3751,12 @@ Beware that this can be dangerous for local multicore runs.""") + os.remove(pjoin(Gdir, 'events.lhe')) + continue + +- AllEvent.add(pjoin(Gdir, 'events.lhe'), ++ AllEvent.add(pjoin(Gdir, 'events.lhe'), + result.get('xsec'), + result.get('xerru'), + result.get('axsec') + ) +- ++ + if len(AllEvent) >= 80: #perform a partial unweighting + AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), + get_wgt, log_level=5, trunc_error=1e-2, event_target=self.run_card['nevents']) +@@ -3753,13 +3765,13 @@ Beware that this can be dangerous for local multicore runs.""") + AllEvent.add(pjoin(self.me_dir, "Events", 
self.run_name, "partials%s.lhe.gz" % partials), + sum_xsec, + math.sqrt(sum(x**2 for x in sum_xerru)), +- sum_axsec) ++ sum_axsec) + partials +=1 +- ++ + if not hasattr(self,'proc_characteristic'): + self.proc_characteristic = self.get_characteristics() + if len(AllEvent) == 0: +- nb_event = 0 ++ nb_event = 0 + else: + nb_event = AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe.gz"), + get_wgt, trunc_error=1e-2, event_target=self.run_card['nevents'], +@@ -3779,22 +3791,22 @@ Beware that this can be dangerous for local multicore runs.""") + os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % i)) + except Exception: + os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe" % i)) +- ++ + self.results.add_detail('nb_event', nb_event) +- ++ + if self.run_card['bias_module'].lower() not in ['dummy', 'none'] and nb_event: + self.correct_bias() + elif self.run_card['custom_fcts']: + self.correct_bias() + logger.info("combination of events done in %s s ", time.time()-start) +- ++ + self.to_store.append('event') +- +- ############################################################################ ++ ++ ############################################################################ + def correct_bias(self): +- """check the first event and correct the weight by the bias ++ """check the first event and correct the weight by the bias + and correct the cross-section. +- If the event do not have the bias tag it means that the bias is ++ If the event do not have the bias tag it means that the bias is + one modifying the cross-section/shape so we have nothing to do + """ + +@@ -3822,7 +3834,7 @@ Beware that this can be dangerous for local multicore runs.""") + output.write('') + output.close() + lhe.close() +- ++ + # MODIFY THE BANNER i.e. 
INIT BLOCK + # ensure information compatible with normalisation choice + total_cross = sum(cross[key] for key in cross) +@@ -3834,8 +3846,8 @@ Beware that this can be dangerous for local multicore runs.""") + elif self.run_card['event_norm'] == 'unity': + total_cross = self.results.current['cross'] * total_cross / nb_event + for key in cross: +- cross[key] *= total_cross / nb_event +- ++ cross[key] *= total_cross / nb_event ++ + bannerfile = lhe_parser.EventFile(pjoin(self.me_dir, 'Events', self.run_name, '.banner.tmp.gz'),'w') + banner = banner_mod.Banner(lhe.banner) + banner.modify_init_cross(cross) +@@ -3850,12 +3862,12 @@ Beware that this can be dangerous for local multicore runs.""") + os.remove(lhe.name) + os.remove(bannerfile.name) + os.remove(output.name) +- +- ++ ++ + self.results.current['cross'] = total_cross + self.results.current['error'] = 0 +- +- ############################################################################ ++ ++ ############################################################################ + def do_store_events(self, line): + """Advanced commands: Launch store events""" + +@@ -3871,16 +3883,16 @@ Beware that this can be dangerous for local multicore runs.""") + if not os.path.exists(pjoin(self.me_dir, 'Events', run)): + os.mkdir(pjoin(self.me_dir, 'Events', run)) + if not os.path.exists(pjoin(self.me_dir, 'HTML', run)): +- os.mkdir(pjoin(self.me_dir, 'HTML', run)) +- ++ os.mkdir(pjoin(self.me_dir, 'HTML', run)) ++ + # 1) Store overall process information + #input = pjoin(self.me_dir, 'SubProcesses', 'results.dat') + #output = pjoin(self.me_dir, 'SubProcesses', '%s_results.dat' % run) +- #files.cp(input, output) ++ #files.cp(input, output) + + + # 2) Treat the files present in the P directory +- # Ensure that the number of events is different of 0 ++ # Ensure that the number of events is different of 0 + if self.results.current['nb_event'] == 0 and not self.run_card['gridpack']: + logger.warning("No event detected. No cleaning performed! 
This should allow to run:\n" + + " cd Subprocesses; ../bin/internal/combine_events\n"+ +@@ -3898,18 +3910,18 @@ Beware that this can be dangerous for local multicore runs.""") + # if os.path.exists(pjoin(G_path, 'results.dat')): + # input = pjoin(G_path, 'results.dat') + # output = pjoin(G_path, '%s_results.dat' % run) +- # files.cp(input, output) ++ # files.cp(input, output) + #except Exception: +- # continue ++ # continue + # Store log + try: + if os.path.exists(pjoin(G_path, 'log.txt')): + input = pjoin(G_path, 'log.txt') + output = pjoin(G_path, '%s_log.txt' % run) +- files.mv(input, output) ++ files.mv(input, output) + except Exception: + continue +- #try: ++ #try: + # # Grid + # for name in ['ftn26']: + # if os.path.exists(pjoin(G_path, name)): +@@ -3918,7 +3930,7 @@ Beware that this can be dangerous for local multicore runs.""") + # input = pjoin(G_path, name) + # output = pjoin(G_path, '%s_%s' % (run,name)) + # files.mv(input, output) +- # misc.gzip(pjoin(G_path, output), error=None) ++ # misc.gzip(pjoin(G_path, output), error=None) + #except Exception: + # continue + # Delete ftn25 to ensure reproducible runs +@@ -3928,11 +3940,11 @@ Beware that this can be dangerous for local multicore runs.""") + # 3) Update the index.html + self.gen_card_html() + +- ++ + # 4) Move the Files present in Events directory + E_path = pjoin(self.me_dir, 'Events') + O_path = pjoin(self.me_dir, 'Events', run) +- ++ + # The events file + for name in ['events.lhe', 'unweighted_events.lhe']: + finput = pjoin(E_path, name) +@@ -3948,30 +3960,30 @@ Beware that this can be dangerous for local multicore runs.""") + # os.remove(pjoin(O_path, '%s.gz' % name)) + # input = pjoin(E_path, name) + ## output = pjoin(O_path, name) +- ++ + + self.update_status('End Parton', level='parton', makehtml=False) + devnull.close() +- +- +- ############################################################################ ++ ++ ++ ############################################################################ + 
def do_create_gridpack(self, line): + """Advanced commands: Create gridpack from present run""" + + self.update_status('Creating gridpack', level='parton') + # compile gen_ximprove + misc.compile(['../bin/internal/gen_ximprove'], cwd=pjoin(self.me_dir, "Source")) +- ++ + Gdir = self.get_Gdir() + Pdir = set([os.path.dirname(G) for G in Gdir]) +- for P in Pdir: ++ for P in Pdir: + allG = misc.glob('G*', path=P) + for G in allG: + if pjoin(P, G) not in Gdir: + logger.debug('removing %s', pjoin(P,G)) + shutil.rmtree(pjoin(P,G)) +- +- ++ ++ + args = self.split_arg(line) + self.check_combine_events(args) + if not self.run_tag: self.run_tag = 'tag_1' +@@ -3984,13 +3996,13 @@ Beware that this can be dangerous for local multicore runs.""") + cwd=self.me_dir) + misc.call(['./bin/internal/clean'], cwd=self.me_dir) + misc.call(['./bin/internal/make_gridpack'], cwd=self.me_dir) +- files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), ++ files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), + pjoin(self.me_dir, '%s_gridpack.tar.gz' % self.run_name)) + os.system("sed -i.bak \"s/\s*.true.*=.*GridRun/ .false. = GridRun/g\" %s/Cards/grid_card.dat" \ + % self.me_dir) + self.update_status('gridpack created', level='gridpack') +- +- ############################################################################ ++ ++ ############################################################################ + def do_shower(self, line): + """launch the shower""" + +@@ -3998,7 +4010,7 @@ Beware that this can be dangerous for local multicore runs.""") + if len(args)>1 and args[0] in self._interfaced_showers: + chosen_showers = [args.pop(0)] + elif '--no_default' in line: +- # If '--no_default' was specified in the arguments, then only one ++ # If '--no_default' was specified in the arguments, then only one + # shower will be run, depending on which card is present. + # but we each of them are called. 
(each of them check if the file exists) + chosen_showers = list(self._interfaced_showers) +@@ -4009,9 +4021,9 @@ Beware that this can be dangerous for local multicore runs.""") + shower_priority = ['pythia8','pythia'] + chosen_showers = [sorted(chosen_showers,key=lambda sh: + shower_priority.index(sh) if sh in shower_priority else len(shower_priority)+1)[0]] +- ++ + for shower in chosen_showers: +- self.exec_cmd('%s %s'%(shower,' '.join(args)), ++ self.exec_cmd('%s %s'%(shower,' '.join(args)), + postcmd=False, printcmd=False) + + def do_madanalysis5_parton(self, line): +@@ -4027,11 +4039,11 @@ Beware that this can be dangerous for local multicore runs.""") + def mg5amc_py8_interface_consistency_warning(options): + """ Check the consistency of the mg5amc_py8_interface installed with + the current MG5 and Pythia8 versions. """ +- ++ + # All this is only relevant is Pythia8 is interfaced to MG5 + if not options['pythia8_path']: + return None +- ++ + if not options['mg5amc_py8_interface_path']: + return \ + """ +@@ -4041,7 +4053,7 @@ Beware that this can be dangerous for local multicore runs.""") + Consider installing the MG5_aMC-PY8 interface with the following command: + MG5_aMC>install mg5amc_py8_interface + """ +- ++ + mg5amc_py8_interface_path = options['mg5amc_py8_interface_path'] + py8_path = options['pythia8_path'] + # If the specified interface path is relative, make it absolut w.r.t MGDIR if +@@ -4050,7 +4062,7 @@ Beware that this can be dangerous for local multicore runs.""") + mg5amc_py8_interface_path = pjoin(MG5DIR,mg5amc_py8_interface_path) + py8_path = pjoin(MG5DIR,py8_path) + +- # Retrieve all the on-install and current versions ++ # Retrieve all the on-install and current versions + fsock = open(pjoin(mg5amc_py8_interface_path, 'MG5AMC_VERSION_ON_INSTALL')) + MG5_version_on_install = fsock.read().replace('\n','') + fsock.close() +@@ -4062,7 +4074,7 @@ Beware that this can be dangerous for local multicore runs.""") + MG5_curr_version 
=misc.get_pkg_info()['version'] + try: + p = subprocess.Popen(['./get_pythia8_version.py',py8_path], +- stdout=subprocess.PIPE, stderr=subprocess.PIPE, ++ stdout=subprocess.PIPE, stderr=subprocess.PIPE, + cwd=mg5amc_py8_interface_path) + (out, err) = p.communicate() + out = out.decode(errors='ignore').replace('\n','') +@@ -4072,37 +4084,37 @@ Beware that this can be dangerous for local multicore runs.""") + float(out) + except: + PY8_curr_version = None +- ++ + if not MG5_version_on_install is None and not MG5_curr_version is None: + if MG5_version_on_install != MG5_curr_version: + return \ + """ + The current version of MG5_aMC (v%s) is different than the one active when +- installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). ++ installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). + Please consider refreshing the installation of this interface with the command: + MG5_aMC>install mg5amc_py8_interface + """%(MG5_curr_version, MG5_version_on_install) +- ++ + if not PY8_version_on_install is None and not PY8_curr_version is None: + if PY8_version_on_install != PY8_curr_version: + return \ + """ + The current version of Pythia8 (v%s) is different than the one active when +- installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). ++ installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). + Please consider refreshing the installation of this interface with the command: + MG5_aMC>install mg5amc_py8_interface + """%(PY8_curr_version,PY8_version_on_install) +- ++ + return None + + def setup_Pythia8RunAndCard(self, PY8_Card, run_type): + """ Setup the Pythia8 Run environment and card. In particular all the process and run specific parameters + of the card are automatically set here. 
This function returns the path where HEPMC events will be output, + if any.""" +- ++ + HepMC_event_output = None + tag = self.run_tag +- ++ + PY8_Card.subruns[0].systemSet('Beams:LHEF',"unweighted_events.lhe.gz") + + hepmc_format = PY8_Card['HEPMCoutput:file'].lower() +@@ -4173,7 +4185,7 @@ already exists and is not a fifo file."""%fifo_path) + misc.mkfifo(fifo_path) + # Use defaultSet not to overwrite the current userSet status + PY8_Card.defaultSet('HEPMCoutput:file',fifo_path) +- HepMC_event_output=fifo_path ++ HepMC_event_output=fifo_path + elif hepmc_format in ['','/dev/null','None']: + logger.warning('User disabled the HepMC output of Pythia8.') + HepMC_event_output = None +@@ -4194,7 +4206,7 @@ already exists and is not a fifo file."""%fifo_path) + # only if it is not already user_set. + if PY8_Card['JetMatching:qCut']==-1.0: + PY8_Card.MadGraphSet('JetMatching:qCut',1.5*self.run_card['xqcut'], force=True) +- ++ + if PY8_Card['JetMatching:qCut']<(1.5*self.run_card['xqcut']): + logger.error( + 'The MLM merging qCut parameter you chose (%f) is less than '%PY8_Card['JetMatching:qCut']+ +@@ -4221,7 +4233,7 @@ already exists and is not a fifo file."""%fifo_path) + if PY8_Card['JetMatching:qCut'] not in qCutList: + qCutList.append(PY8_Card['JetMatching:qCut']) + PY8_Card.MadGraphSet('SysCalc:qCutList', qCutList, force=True) +- ++ + + if PY8_Card['SysCalc:qCutList']!='auto': + for scale in PY8_Card['SysCalc:qCutList']: +@@ -4232,7 +4244,7 @@ already exists and is not a fifo file."""%fifo_path) + "'sys_matchscale' in the run_card) is less than 1.5*xqcut, where xqcut is"+ + ' the run_card parameter (=%f)\n'%self.run_card['xqcut']+ + 'It would be better/safer to use a larger qCut or a smaller xqcut.') +- ++ + # Specific MLM settings + # PY8 should not implement the MLM veto since the driver should do it + # if merging scale variation is turned on +@@ -4282,18 +4294,18 @@ already exists and is not a fifo file."""%fifo_path) + CKKW_cut = 'ktdurham' + elif 
self.run_card['ptlund']>0.0 and self.run_card['ktdurham']<=0.0: + PY8_Card.subruns[0].MadGraphSet('Merging:doPTLundMerging',True) +- CKKW_cut = 'ptlund' ++ CKKW_cut = 'ptlund' + else: + raise InvalidCmd("*Either* the 'ptlund' or 'ktdurham' cut in "+\ + " the run_card must be turned on to activate CKKW(L) merging"+ + " with Pythia8, but *both* cuts cannot be turned on at the same time."+ + "\n ptlund=%f, ktdurham=%f."%(self.run_card['ptlund'],self.run_card['ktdurham'])) + +- ++ + # Automatically set qWeed to the CKKWL cut if not defined by the user. + if PY8_Card['SysCalc:qWeed']==-1.0: + PY8_Card.MadGraphSet('SysCalc:qWeed',self.run_card[CKKW_cut], force=True) +- ++ + # MadGraphSet sets the corresponding value (in system mode) + # only if it is not already user_set. + if PY8_Card['Merging:TMS']==-1.0: +@@ -4307,7 +4319,7 @@ already exists and is not a fifo file."""%fifo_path) + 'The CKKWl merging scale you chose (%f) is less than '%PY8_Card['Merging:TMS']+ + 'the %s cut specified in the run_card parameter (=%f).\n'%(CKKW_cut,self.run_card[CKKW_cut])+ + 'It is incorrect to use a smaller CKKWl scale than the generation-level %s cut!'%CKKW_cut) +- ++ + PY8_Card.MadGraphSet('TimeShower:pTmaxMatch',1) + PY8_Card.MadGraphSet('SpaceShower:pTmaxMatch',1) + PY8_Card.MadGraphSet('SpaceShower:rapidityOrder',False) +@@ -4369,7 +4381,7 @@ already exists and is not a fifo file."""%fifo_path) + + try: + import madgraph +- except ImportError: ++ except ImportError: + import internal.histograms as histograms + else: + import madgraph.various.histograms as histograms +@@ -4388,16 +4400,16 @@ already exists and is not a fifo file."""%fifo_path) + self.check_pythia8(args) + self.configure_directory(html_opening =False) + else: +- # initialize / remove lhapdf mode ++ # initialize / remove lhapdf mode + self.configure_directory(html_opening =False) +- self.check_pythia8(args) ++ self.check_pythia8(args) + + # Update the banner with the pythia card + if not self.banner or 
len(self.banner) <=1: + # Here the level keyword 'pythia' must not be changed to 'pythia8'. + self.banner = banner_mod.recover_banner(self.results, 'pythia') + +- # the args are modify and the last arg is always the mode ++ # the args are modify and the last arg is always the mode + if not no_default: + self.ask_pythia_run_configuration(args[-1], pythia_version=8, banner=self.banner) + +@@ -4413,7 +4425,7 @@ already exists and is not a fifo file."""%fifo_path) + #"Please use 'event_norm = average' in the run_card to avoid this problem.") + + +- ++ + if not self.options['mg5amc_py8_interface_path'] or not \ + os.path.exists(pjoin(self.options['mg5amc_py8_interface_path'], + 'MG5aMC_PY8_interface')): +@@ -4432,16 +4444,16 @@ Please install this tool with the following MG5_aMC command: + + # Again here 'pythia' is just a keyword for the simulation level. + self.update_status('\033[92mRunning Pythia8 [arXiv:1410.3012]\033[0m', 'pythia8') +- +- tag = self.run_tag ++ ++ tag = self.run_tag + # Now write Pythia8 card + # Start by reading, starting from the default one so that the 'user_set' + # tag are correctly set. +- PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', ++ PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', + 'pythia8_card_default.dat')) + PY8_Card.read(pjoin(self.me_dir, 'Cards', 'pythia8_card.dat'), + setter='user') +- ++ + run_type = 'default' + merged_run_types = ['MLM','CKKW'] + if int(self.run_card['ickkw'])==1: +@@ -4459,7 +4471,7 @@ Please install this tool with the following MG5_aMC command: + cmd_card = StringIO.StringIO() + PY8_Card.write(cmd_card,pjoin(self.me_dir,'Cards','pythia8_card_default.dat'), + direct_pythia_input=True) +- ++ + # Now setup the preamble to make sure that everything will use the locally + # installed tools (if present) even if the user did not add it to its + # environment variables. 
+@@ -4474,13 +4486,13 @@ Please install this tool with the following MG5_aMC command: + preamble = misc.get_HEPTools_location_setter( + pjoin(MG5DIR,'HEPTools'),'lib') + preamble += "\n unset PYTHIA8DATA\n" +- ++ + open(pythia_cmd_card,'w').write("""! + ! It is possible to run this card manually with: + ! %s %s + ! + """%(preamble+pythia_main,os.path.basename(pythia_cmd_card))+cmd_card.getvalue()) +- ++ + # launch pythia8 + pythia_log = pjoin(self.me_dir , 'Events', self.run_name , + '%s_pythia8.log' % tag) +@@ -4492,13 +4504,13 @@ Please install this tool with the following MG5_aMC command: + shell_exe = None + if os.path.exists('/usr/bin/env'): + shell_exe = '/usr/bin/env %s'%shell +- else: ++ else: + shell_exe = misc.which(shell) + if not shell_exe: + raise self.InvalidCmd('No s hell could be found in your environment.\n'+ + "Make sure that either '%s' is in your path or that the"%shell+\ + " command '/usr/bin/env %s' exists and returns a valid path."%shell) +- ++ + exe_cmd = "#!%s\n%s"%(shell_exe,' '.join( + [preamble+pythia_main, + os.path.basename(pythia_cmd_card)])) +@@ -4516,7 +4528,7 @@ Please install this tool with the following MG5_aMC command: + ( os.path.exists(HepMC_event_output) and \ + stat.S_ISFIFO(os.stat(HepMC_event_output).st_mode)) + startPY8timer = time.time() +- ++ + # Information that will be extracted from this PY8 run + PY8_extracted_information={ 'sigma_m':None, 'Nacc':None, 'Ntry':None, + 'cross_sections':{} } +@@ -4544,7 +4556,7 @@ You can follow PY8 run with the following command (in a separate terminal): + n_cores = max(int(self.options['cluster_size']),1) + elif self.options['run_mode']==2: + n_cores = max(int(self.cluster.nb_core),1) +- ++ + lhe_file_name = os.path.basename(PY8_Card.subruns[0]['Beams:LHEF']) + lhe_file = lhe_parser.EventFile(pjoin(self.me_dir,'Events', + self.run_name,PY8_Card.subruns[0]['Beams:LHEF'])) +@@ -4562,7 +4574,7 @@ You can follow PY8 run with the following command (in a separate terminal): + if 
self.options['run_mode']==2: + min_n_events_per_job = 100 + elif self.options['run_mode']==1: +- min_n_events_per_job = 1000 ++ min_n_events_per_job = 1000 + min_n_core = n_events//min_n_events_per_job + n_cores = max(min(min_n_core,n_cores),1) + +@@ -4572,8 +4584,8 @@ You can follow PY8 run with the following command (in a separate terminal): + logger.info('Follow Pythia8 shower by running the '+ + 'following command (in a separate terminal):\n tail -f %s'%pythia_log) + +- if self.options['run_mode']==2 and self.options['nb_core']>1: +- ret_code = self.cluster.launch_and_wait(wrapper_path, ++ if self.options['run_mode']==2 and self.options['nb_core']>1: ++ ret_code = self.cluster.launch_and_wait(wrapper_path, + argument= [], stdout= pythia_log, stderr=subprocess.STDOUT, + cwd=pjoin(self.me_dir,'Events',self.run_name)) + else: +@@ -4618,10 +4630,10 @@ You can follow PY8 run with the following command (in a separate terminal): + wrapper = open(wrapper_path,'w') + if self.options['cluster_temp_path'] is None: + exe_cmd = \ +-"""#!%s ++"""#!%s + ./%s PY8Card.dat >& PY8_log.txt + """ +- else: ++ else: + exe_cmd = \ + """#!%s + ln -s ./events_$1.lhe.gz ./events.lhe.gz +@@ -4651,21 +4663,21 @@ tar -czf split_$1.tar.gz split_$1 + # Set it as executable + st = os.stat(wrapper_path) + os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) +- ++ + # Split the .lhe event file, create event partition + partition=[n_available_events//n_cores]*n_cores + for i in range(n_available_events%n_cores): + partition[i] += 1 +- ++ + # Splitting according to the total number of events requested by the user + # Will be used to determine the number of events to indicate in the PY8 split cards. 
+ partition_for_PY8=[n_events//n_cores]*n_cores + for i in range(n_events%n_cores): + partition_for_PY8[i] += 1 +- +- logger.info('Splitting .lhe event file for PY8 parallelization...') +- n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) +- ++ ++ logger.info('Splitting .lhe event file for PY8 parallelization...') ++ n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) ++ + if n_splits!=len(partition): + raise MadGraph5Error('Error during lhe file splitting. Expected %d files but obtained %d.' + %(len(partition),n_splits)) +@@ -4678,7 +4690,7 @@ tar -czf split_$1.tar.gz split_$1 + # Add the necessary run content + shutil.move(pjoin(parallelization_dir,lhe_file.name+'_%d.lhe.gz'%split_id), + pjoin(parallelization_dir,split_files[-1])) +- ++ + logger.info('Submitting Pythia8 jobs...') + for i, split_file in enumerate(split_files): + # We must write a PY8Card tailored for each split so as to correct the normalization +@@ -4694,7 +4706,7 @@ tar -czf split_$1.tar.gz split_$1 + split_PY8_Card.write(pjoin(parallelization_dir,'PY8Card_%d.dat'%i), + pjoin(parallelization_dir,'PY8Card.dat'), add_missing=False) + in_files = [pjoin(parallelization_dir,os.path.basename(pythia_main)), +- pjoin(parallelization_dir,'PY8Card_%d.dat'%i), ++ pjoin(parallelization_dir,'PY8Card_%d.dat'%i), + pjoin(parallelization_dir,split_file)] + if self.options['cluster_temp_path'] is None: + out_files = [] +@@ -4706,35 +4718,35 @@ tar -czf split_$1.tar.gz split_$1 + if os.path.basename(in_file)==split_file: + ln(in_file,selected_cwd,name='events.lhe.gz') + elif os.path.basename(in_file).startswith('PY8Card'): +- ln(in_file,selected_cwd,name='PY8Card.dat') ++ ln(in_file,selected_cwd,name='PY8Card.dat') + else: +- ln(in_file,selected_cwd) ++ ln(in_file,selected_cwd) + in_files = [] + wrapper_path = os.path.basename(wrapper_path) + else: + out_files = ['split_%d.tar.gz'%i] + selected_cwd = parallelization_dir + +- 
self.cluster.submit2(wrapper_path, +- argument=[str(i)], cwd=selected_cwd, ++ self.cluster.submit2(wrapper_path, ++ argument=[str(i)], cwd=selected_cwd, + input_files=in_files, + output_files=out_files, + required_output=out_files) +- ++ + def wait_monitoring(Idle, Running, Done): + if Idle+Running+Done == 0: + return + logger.info('Pythia8 shower jobs: %d Idle, %d Running, %d Done [%s]'\ + %(Idle, Running, Done, misc.format_time(time.time() - startPY8timer))) + self.cluster.wait(parallelization_dir,wait_monitoring) +- ++ + logger.info('Merging results from the split PY8 runs...') + if self.options['cluster_temp_path']: + # Decompressing the output + for i, split_file in enumerate(split_files): + misc.call(['tar','-xzf','split_%d.tar.gz'%i],cwd=parallelization_dir) + os.remove(pjoin(parallelization_dir,'split_%d.tar.gz'%i)) +- ++ + # Now merge logs + pythia_log_file = open(pythia_log,'w') + n_added = 0 +@@ -4766,7 +4778,7 @@ tar -czf split_$1.tar.gz split_$1 + if n_added>0: + PY8_extracted_information['sigma_m'] /= float(n_added) + pythia_log_file.close() +- ++ + # djr plots + djr_HwU = None + n_added = 0 +@@ -4833,7 +4845,7 @@ tar -czf split_$1.tar.gz split_$1 + if not os.path.isfile(hepmc_file): + continue + all_hepmc_files.append(hepmc_file) +- ++ + if len(all_hepmc_files)>0: + hepmc_output = pjoin(self.me_dir,'Events',self.run_name,HepMC_event_output) + with misc.TMP_directory() as tmp_dir: +@@ -4848,8 +4860,8 @@ tar -czf split_$1.tar.gz split_$1 + break + header.close() + tail = open(pjoin(tmp_dir,'tail.hepmc'),'w') +- n_tail = 0 +- ++ n_tail = 0 ++ + for line in misc.reverse_readline(all_hepmc_files[-1]): + if line.startswith('HepMC::'): + n_tail += 1 +@@ -4859,7 +4871,7 @@ tar -czf split_$1.tar.gz split_$1 + tail.close() + if n_tail>1: + raise MadGraph5Error('HEPMC files should only have one trailing command.') +- ###################################################################### ++ ###################################################################### + 
# This is the most efficient way of putting together HEPMC's, *BUT* # + # WARNING: NEED TO RENDER THE CODE BELOW SAFE TOWARDS INJECTION # + ###################################################################### +@@ -4876,12 +4888,12 @@ tar -czf split_$1.tar.gz split_$1 + elif sys.platform == 'darwin': + # sed on MAC has slightly different synthax than on + os.system(' '.join(['sed','-i',"''","'%s;$d'"% +- (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) +- else: +- # other UNIX systems ++ (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) ++ else: ++ # other UNIX systems + os.system(' '.join(['sed','-i']+["-e '%id'"%(i+1) for i in range(n_head)]+ + ["-e '$d'",hepmc_file])) +- ++ + os.system(' '.join(['cat',pjoin(tmp_dir,'header.hepmc')]+all_hepmc_files+ + [pjoin(tmp_dir,'tail.hepmc'),'>',hepmc_output])) + +@@ -4903,12 +4915,12 @@ tar -czf split_$1.tar.gz split_$1 + 'Inclusive cross section:' not in '\n'.join(open(pythia_log,'r').readlines()[-20:]): + logger.warning('Fail to produce a pythia8 output. More info in \n %s'%pythia_log) + return +- ++ + # Plot for Pythia8 + successful = self.create_plot('Pythia8') + if not successful: + logger.warning('Failed to produce Pythia8 merging plots.') +- ++ + self.to_store.append('pythia8') + + # Study matched cross-sections +@@ -4919,7 +4931,7 @@ tar -czf split_$1.tar.gz split_$1 + if self.options['run_mode']==0 or (self.options['run_mode']==2 and self.options['nb_core']==1): + PY8_extracted_information['sigma_m'],PY8_extracted_information['Nacc'],\ + PY8_extracted_information['Ntry'] = self.parse_PY8_log_file( +- pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) ++ pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) + else: + logger.warning('Pythia8 cross-section could not be retreived.\n'+ + 'Try turning parallelization off by setting the option nb_core to 1. 
YYYYY') +@@ -4932,8 +4944,8 @@ tar -czf split_$1.tar.gz split_$1 + Ntry = PY8_extracted_information['Ntry'] + sigma_m = PY8_extracted_information['sigma_m'] + # Compute pythia error +- error = self.results[self.run_name].return_tag(self.run_tag)['error'] +- try: ++ error = self.results[self.run_name].return_tag(self.run_tag)['error'] ++ try: + error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) + except ZeroDivisionError: + # Cannot compute error +@@ -4954,31 +4966,31 @@ tar -czf split_$1.tar.gz split_$1 + else: + logger.warning('Pythia8 merged cross-sections could not be retreived.\n'+ + 'Try turning parallelization off by setting the option nb_core to 1.XXXXX') +- PY8_extracted_information['cross_sections'] = {} +- ++ PY8_extracted_information['cross_sections'] = {} ++ + cross_sections = PY8_extracted_information['cross_sections'] + if cross_sections: +- # Filter the cross_sections specified an keep only the ones ++ # Filter the cross_sections specified an keep only the ones + # with central parameters and a different merging scale + a_float_re = '[\+|-]?\d+(\.\d*)?([EeDd][\+|-]?\d+)?' 
+ central_merging_re = re.compile( + '^\s*Weight_MERGING\s*=\s*(?P%s)\s*$'%a_float_re, +- re.IGNORECASE) ++ re.IGNORECASE) + cross_sections = dict( + (float(central_merging_re.match(xsec).group('merging')),value) +- for xsec, value in cross_sections.items() if not ++ for xsec, value in cross_sections.items() if not + central_merging_re.match(xsec) is None) + central_scale = PY8_Card['JetMatching:qCut'] if \ + int(self.run_card['ickkw'])==1 else PY8_Card['Merging:TMS'] + if central_scale in cross_sections: + self.results.add_detail('cross_pythia8', cross_sections[central_scale][0]) + self.results.add_detail('error_pythia8', cross_sections[central_scale][1]) +- ++ + #logger.info('Pythia8 merged cross-sections are:') + #for scale in sorted(cross_sections.keys()): + # logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ +- # (scale,cross_sections[scale][0],cross_sections[scale][1])) +- ++ # (scale,cross_sections[scale][0],cross_sections[scale][1])) ++ + xsecs_file = open(pjoin(self.me_dir,'Events',self.run_name, + '%s_merged_xsecs.txt'%tag),'w') + if cross_sections: +@@ -4991,9 +5003,9 @@ tar -czf split_$1.tar.gz split_$1 + xsecs_file.write('Cross-sections could not be read from the'+\ + "XML node 'xsection' of the .dat file produced by Pythia8.") + xsecs_file.close() +- ++ + #Update the banner +- # We add directly the pythia command card because it has the full ++ # We add directly the pythia command card because it has the full + # information + self.banner.add(pythia_cmd_card) + +@@ -5010,13 +5022,13 @@ tar -czf split_$1.tar.gz split_$1 + if self.options['delphes_path']: + self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) + self.print_results_in_shell(self.results.current) +- ++ + def parse_PY8_log_file(self, log_file_path): + """ Parse a log file to extract number of event and cross-section. 
""" + pythiare = re.compile("Les Houches User Process\(es\)\s*\d+\s*\|\s*(?P\d+)\s*(?P\d+)\s*(?P\d+)\s*\|\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") + pythia_xsec_re = re.compile("Inclusive cross section\s*:\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") + sigma_m, Nacc, Ntry = None, None, None +- for line in misc.BackRead(log_file_path): ++ for line in misc.BackRead(log_file_path): + info = pythiare.search(line) + if not info: + # Also try to obtain the cross-section and error from the final xsec line of pythia8 log +@@ -5046,7 +5058,7 @@ tar -czf split_$1.tar.gz split_$1 + + raise self.InvalidCmd("Could not find cross-section and event number information "+\ + "in Pythia8 log\n '%s'."%log_file_path) +- ++ + def extract_cross_sections_from_DJR(self,djr_output): + """Extract cross-sections from a djr XML output.""" + import xml.dom.minidom as minidom +@@ -5063,11 +5075,11 @@ tar -czf split_$1.tar.gz split_$1 + [float(xsec.childNodes[0].data.split()[0]), + float(xsec.childNodes[0].data.split()[1])]) + for xsec in xsections) +- ++ + def do_pythia(self, line): + """launch pythia""" +- +- ++ ++ + # Check argument's validity + args = self.split_arg(line) + if '--no_default' in args: +@@ -5077,12 +5089,12 @@ tar -czf split_$1.tar.gz split_$1 + args.remove('--no_default') + else: + no_default = False +- ++ + if not self.run_name: + self.check_pythia(args) + self.configure_directory(html_opening =False) + else: +- # initialize / remove lhapdf mode ++ # initialize / remove lhapdf mode + self.configure_directory(html_opening =False) + self.check_pythia(args) + +@@ -5090,7 +5102,7 @@ tar -czf split_$1.tar.gz split_$1 + logger.error('pythia-pgs require event_norm to be on sum. 
Do not run pythia6') + return + +- # the args are modify and the last arg is always the mode ++ # the args are modify and the last arg is always the mode + if not no_default: + self.ask_pythia_run_configuration(args[-1]) + if self.options['automatic_html_opening']: +@@ -5102,35 +5114,35 @@ tar -czf split_$1.tar.gz split_$1 + self.banner = banner_mod.recover_banner(self.results, 'pythia') + + pythia_src = pjoin(self.options['pythia-pgs_path'],'src') +- ++ + self.results.add_detail('run_mode', 'madevent') + + self.update_status('Running Pythia', 'pythia') + try: + os.remove(pjoin(self.me_dir,'Events','pythia.done')) + except Exception: +- pass +- ++ pass ++ + ## LAUNCHING PYTHIA + # check that LHAPATH is define. + if not re.search(r'^\s*LHAPATH=%s/PDFsets' % pythia_src, +- open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), ++ open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), + re.M): + f = open(pjoin(self.me_dir,'Cards','pythia_card.dat'),'a') + f.write('\n LHAPATH=%s/PDFsets' % pythia_src) + f.close() + tag = self.run_tag + pythia_log = pjoin(self.me_dir, 'Events', self.run_name , '%s_pythia.log' % tag) +- #self.cluster.launch_and_wait('../bin/internal/run_pythia', ++ #self.cluster.launch_and_wait('../bin/internal/run_pythia', + # argument= [pythia_src], stdout= pythia_log, + # stderr=subprocess.STDOUT, + # cwd=pjoin(self.me_dir,'Events')) + output_files = ['pythia_events.hep'] + if self.run_card['use_syst']: + output_files.append('syst.dat') +- if self.run_card['ickkw'] == 1: ++ if self.run_card['ickkw'] == 1: + output_files += ['beforeveto.tree', 'xsecs.tree', 'events.tree'] +- ++ + os.environ['PDG_MASS_TBL'] = pjoin(pythia_src,'mass_width_2004.mc') + self.cluster.launch_and_wait(pjoin(pythia_src, 'pythia'), + input_files=[pjoin(self.me_dir, "Events", "unweighted_events.lhe"), +@@ -5140,23 +5152,23 @@ tar -czf split_$1.tar.gz split_$1 + stdout= pythia_log, + stderr=subprocess.STDOUT, + cwd=pjoin(self.me_dir,'Events')) +- ++ + + 
os.remove(pjoin(self.me_dir, "Events", "unweighted_events.lhe")) + + if not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): + logger.warning('Fail to produce pythia output. More info in \n %s' % pythia_log) + return +- ++ + self.to_store.append('pythia') +- ++ + # Find the matched cross-section + if int(self.run_card['ickkw']): + # read the line from the bottom of the file +- #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, ++ #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, + # '%s_pythia.log' % tag)) +- pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") +- for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, ++ pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") ++ for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, + '%s_pythia.log' % tag)): + info = pythiare.search(line) + if not info: +@@ -5176,16 +5188,16 @@ tar -czf split_$1.tar.gz split_$1 + self.results.add_detail('nb_event_pythia', Nacc) + #compute pythia error + error = self.results[self.run_name].return_tag(self.run_tag)['error'] +- if Nacc: ++ if Nacc: + error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) + else: + error_m = 10000 * sigma_m + # works both for fixed number of generated events and fixed accepted events + self.results.add_detail('error_pythia', error_m) +- break ++ break + + #pythia_log.close() +- ++ + pydir = pjoin(self.options['pythia-pgs_path'], 'src') + eradir = self.options['exrootanalysis_path'] + madir = self.options['madanalysis_path'] +@@ -5204,12 +5216,12 @@ tar -czf split_$1.tar.gz split_$1 + + # Creating LHE file + self.run_hep2lhe(banner_path) +- ++ + if int(self.run_card['ickkw']): + misc.gzip(pjoin(self.me_dir,'Events','beforeveto.tree'), +- stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) ++ 
stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) ++ + +- + if self.run_card['use_syst'] in self.true: + # Calculate syscalc info based on syst.dat + try: +@@ -5221,7 +5233,7 @@ tar -czf split_$1.tar.gz split_$1 + # Store syst.dat + misc.gzip(pjoin(self.me_dir,'Events', 'syst.dat'), + stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_syst.dat.gz')) +- ++ + # Store syscalc.dat + if os.path.exists(pjoin(self.me_dir, 'Events', 'syscalc.dat')): + filename = pjoin(self.me_dir, 'Events' ,self.run_name, +@@ -5241,7 +5253,7 @@ tar -czf split_$1.tar.gz split_$1 + if self.options['delphes_path']: + self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) + self.print_results_in_shell(self.results.current) +- ++ + + ################################################################################ + def do_remove(self, line): +@@ -5251,8 +5263,8 @@ tar -czf split_$1.tar.gz split_$1 + run, tag, mode = self.check_remove(args) + if 'banner' in mode: + mode.append('all') +- +- ++ ++ + if run == 'all': + # Check first if they are not a run with a name run. 
+ if os.path.exists(pjoin(self.me_dir, 'Events', 'all')): +@@ -5268,7 +5280,7 @@ tar -czf split_$1.tar.gz split_$1 + logger.info(error) + pass # run already clear + return +- ++ + # Check that run exists + if not os.path.exists(pjoin(self.me_dir, 'Events', run)): + raise self.InvalidCmd('No run \'%s\' detected' % run) +@@ -5282,7 +5294,7 @@ tar -czf split_$1.tar.gz split_$1 + + + # Found the file to delete +- ++ + to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) + to_delete += misc.glob('*', pjoin(self.me_dir, 'HTML', run)) + # forbid the banner to be removed +@@ -5302,7 +5314,7 @@ tar -czf split_$1.tar.gz split_$1 + if os.path.exists(pjoin(self.me_dir, 'Events', run, 'unweighted_events.lhe.gz')): + to_delete.append('unweighted_events.lhe.gz') + if os.path.exists(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')): +- to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) ++ to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) + if nb_rm != len(to_delete): + logger.warning('Be carefull that partonic information are on the point to be removed.') + if 'all' in mode: +@@ -5315,8 +5327,8 @@ tar -czf split_$1.tar.gz split_$1 + if 'delphes' not in mode: + to_delete = [f for f in to_delete if 'delphes' not in f] + if 'parton' not in mode: +- to_delete = [f for f in to_delete if 'delphes' in f +- or 'pgs' in f ++ to_delete = [f for f in to_delete if 'delphes' in f ++ or 'pgs' in f + or 'pythia' in f] + if not self.force and len(to_delete): + question = 'Do you want to delete the following files?\n %s' % \ +@@ -5324,7 +5336,7 @@ tar -czf split_$1.tar.gz split_$1 + ans = self.ask(question, 'y', choices=['y','n']) + else: + ans = 'y' +- ++ + if ans == 'y': + for file2rm in to_delete: + if os.path.exists(pjoin(self.me_dir, 'Events', run, file2rm)): +@@ -5362,7 +5374,7 @@ tar -czf split_$1.tar.gz split_$1 + if ans == 'y': + for file2rm in to_delete: + os.remove(file2rm) +- ++ + if 'banner' in mode: + to_delete = misc.glob('*', 
pjoin(self.me_dir, 'Events', run)) + if tag: +@@ -5377,8 +5389,8 @@ tar -czf split_$1.tar.gz split_$1 + return + elif any(['banner' not in os.path.basename(p) for p in to_delete]): + if to_delete: +- raise MadGraph5Error('''Some output still exists for this run. +- Please remove those output first. Do for example: ++ raise MadGraph5Error('''Some output still exists for this run. ++ Please remove those output first. Do for example: + remove %s all banner + ''' % run) + else: +@@ -5388,7 +5400,7 @@ tar -czf split_$1.tar.gz split_$1 + return + else: + logger.info('''The banner is not removed. In order to remove it run: +- remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) ++ remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) + + # update database. + self.results.clean(mode, run, tag) +@@ -5408,7 +5420,7 @@ tar -czf split_$1.tar.gz split_$1 + logger.info('plot for run %s' % self.run_name) + if not self.force: + self.ask_edit_cards(['plot_card.dat'], args, plot=True) +- ++ + if any([arg in ['all','parton'] for arg in args]): + filename = pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe') + if os.path.exists(filename+'.gz'): +@@ -5426,8 +5438,8 @@ tar -czf split_$1.tar.gz split_$1 + except Exception: + pass + else: +- logger.info('No valid files for partonic plot') +- ++ logger.info('No valid files for partonic plot') ++ + if any([arg in ['all','pythia'] for arg in args]): + filename = pjoin(self.me_dir, 'Events' ,self.run_name, + '%s_pythia_events.lhe' % self.run_tag) +@@ -5440,10 +5452,10 @@ tar -czf split_$1.tar.gz split_$1 + stdout= "%s.gz" % filename) + else: + logger.info('No valid files for pythia plot') +- +- ++ ++ + if any([arg in ['all','pgs'] for arg in args]): +- filename = pjoin(self.me_dir, 'Events', self.run_name, ++ filename = pjoin(self.me_dir, 'Events', self.run_name, + '%s_pgs_events.lhco' % self.run_tag) + if os.path.exists(filename+'.gz'): + misc.gunzip("%s.gz" % filename) +@@ -5452,15 +5464,15 @@ 
tar -czf split_$1.tar.gz split_$1 + misc.gzip(filename) + else: + logger.info('No valid files for pgs plot') +- ++ + if any([arg in ['all','delphes'] for arg in args]): +- filename = pjoin(self.me_dir, 'Events', self.run_name, ++ filename = pjoin(self.me_dir, 'Events', self.run_name, + '%s_delphes_events.lhco' % self.run_tag) + if os.path.exists(filename+'.gz'): + misc.gunzip("%s.gz" % filename) + if os.path.exists(filename): + self.create_plot('Delphes') +- misc.gzip(filename) ++ misc.gzip(filename) + else: + logger.info('No valid files for delphes plot') + +@@ -5476,9 +5488,9 @@ tar -czf split_$1.tar.gz split_$1 + if self.ninitial == 1: + logger.error('SysCalc can\'t be run for decay processes') + return +- ++ + logger.info('Calculating systematics for run %s' % self.run_name) +- ++ + self.ask_edit_cards(['run_card.dat'], args, plot=False) + self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) + if any([arg in ['all','parton'] for arg in args]): +@@ -5492,7 +5504,7 @@ tar -czf split_$1.tar.gz split_$1 + stdout="%s.gz" % filename) + else: + logger.info('No valid files for parton level systematics run.') +- ++ + if any([arg in ['all','pythia'] for arg in args]): + filename = pjoin(self.me_dir, 'Events' ,self.run_name, + '%s_pythia_syst.dat' % self.run_tag) +@@ -5513,17 +5525,17 @@ tar -czf split_$1.tar.gz split_$1 + else: + logger.info('No valid files for pythia level') + +- ++ + def store_result(self): +- """ tar the pythia results. This is done when we are quite sure that ++ """ tar the pythia results. 
This is done when we are quite sure that + the pythia output will not be use anymore """ + + if not self.run_name: + return +- ++ + if not self.to_store: +- return +- ++ return ++ + tag = self.run_card['run_tag'] + self.update_status('storing files of previous run', level=None,\ + error=True) +@@ -5534,14 +5546,14 @@ tar -czf split_$1.tar.gz split_$1 + misc.gzip(pjoin(self.me_dir,'Events',self.run_name,"unweighted_events.lhe")) + if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): + os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) +- ++ + if 'pythia' in self.to_store: + self.update_status('Storing Pythia files of previous run', level='pythia', error=True) + p = pjoin(self.me_dir,'Events') + n = self.run_name + t = tag + self.to_store.remove('pythia') +- misc.gzip(pjoin(p,'pythia_events.hep'), ++ misc.gzip(pjoin(p,'pythia_events.hep'), + stdout=pjoin(p, str(n),'%s_pythia_events.hep' % t),forceexternal=True) + + if 'pythia8' in self.to_store: +@@ -5569,26 +5581,26 @@ tar -czf split_$1.tar.gz split_$1 + os.system("mv " + file_path + hepmc_fileformat + " " + move_hepmc_path) + + self.update_status('Done', level='pythia',makehtml=False,error=True) +- self.results.save() +- ++ self.results.save() ++ + self.to_store = [] + +- def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, ++ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, + run_type='', mode=None, **opt): + """ """ + argument = [str(arg) for arg in argument] + if mode is None: + mode = self.cluster_mode +- ++ + # ensure that exe is executable + if os.path.exists(exe) and not os.access(exe, os.X_OK): + os.system('chmod +x %s ' % exe) + elif (cwd and os.path.exists(pjoin(cwd, exe))) and not \ + os.access(pjoin(cwd, exe), os.X_OK): + os.system('chmod +x %s ' % pjoin(cwd, exe)) +- ++ + if mode == 0: +- self.update_status((remaining, 1, ++ self.update_status((remaining, 1, + self.total_jobs - remaining -1, run_type), level=None, force=False) + start = 
time.time() + #os.system('cd %s; ./%s' % (cwd,exe)) +@@ -5601,24 +5613,24 @@ tar -czf split_$1.tar.gz split_$1 + elif mode in [1,2]: + exename = os.path.basename(exe) + # For condor cluster, create the input/output files +- if 'ajob' in exename: ++ if 'ajob' in exename: + input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat','dname.mg', + pjoin(self.me_dir, 'SubProcesses','randinit')] +- if os.path.exists(pjoin(self.me_dir,'SubProcesses', ++ if os.path.exists(pjoin(self.me_dir,'SubProcesses', + 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) +- ++ + output_files = [] + required_output = [] +- ++ + + #Find the correct PDF input file + input_files.append(self.get_pdf_input_filename()) +- ++ + #Find the correct ajob + Gre = re.compile("\s*j=(G[\d\.\w]+)") + origre = re.compile("grid_directory=(G[\d\.\w]+)") +- try : ++ try : + fsock = open(exe) + except Exception: + fsock = open(pjoin(cwd,exe)) +@@ -5636,21 +5648,21 @@ tar -czf split_$1.tar.gz split_$1 + if os.path.isdir(pjoin(cwd,G)): + input_files.append(G) + required_output.append('%s/results.dat' % G) +- ++ + if origre.search(text): + G_grid = origre.search(text).groups()[0] + input_files.append(pjoin(G_grid, 'ftn26')) +- ++ + #submitting +- self.cluster.submit2(exe, stdout=stdout, cwd=cwd, ++ self.cluster.submit2(exe, stdout=stdout, cwd=cwd, + input_files=input_files, output_files=output_files, + required_output=required_output) + elif 'survey' in exename: + input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat', 'dname.mg', +- pjoin(self.me_dir, 'SubProcesses','randinit')] +- if os.path.exists(pjoin(self.me_dir,'SubProcesses', ++ pjoin(self.me_dir, 'SubProcesses','randinit')] ++ if os.path.exists(pjoin(self.me_dir,'SubProcesses', + 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): +- input_files.append(pjoin(self.me_dir,'SubProcesses', ++ 
input_files.append(pjoin(self.me_dir,'SubProcesses', + 'MadLoop5_resources.tar.gz')) + + #Find the correct PDF input file +@@ -5659,7 +5671,7 @@ tar -czf split_$1.tar.gz split_$1 + + output_files = [] + required_output = [] +- ++ + #Find the correct ajob + suffix = "_%s" % int(float(argument[0])) + if suffix == '_0': +@@ -5673,12 +5685,12 @@ tar -czf split_$1.tar.gz split_$1 + if '.' in argument[0]: + offset = int(str(argument[0]).split('.')[1]) + else: +- offset = 0 +- ++ offset = 0 ++ + if offset ==0 or offset == int(float(argument[0])): + if os.path.exists(pjoin(cwd, G, 'input_app.txt')): + os.remove(pjoin(cwd, G, 'input_app.txt')) +- ++ + if os.path.exists(os.path.realpath(pjoin(cwd, G, 'ftn25'))): + if offset == 0 or offset == int(float(argument[0])): + os.remove(pjoin(cwd, G, 'ftn25')) +@@ -5694,16 +5706,16 @@ tar -czf split_$1.tar.gz split_$1 + pass + + #submitting +- self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, ++ self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + input_files=input_files, output_files=output_files, + required_output=required_output, **opt) + elif "refine_splitted.sh" in exename: + input_files = ['madevent','symfact.dat','iproc.dat', 'dname.mg', +- pjoin(self.me_dir, 'SubProcesses','randinit')] +- ++ pjoin(self.me_dir, 'SubProcesses','randinit')] ++ + if os.path.exists(pjoin(self.me_dir,'SubProcesses', + 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): +- input_files.append(pjoin(self.me_dir,'SubProcesses', ++ input_files.append(pjoin(self.me_dir,'SubProcesses', + 'MadLoop5_resources.tar.gz')) + + #Find the correct PDF input file +@@ -5713,25 +5725,25 @@ tar -czf split_$1.tar.gz split_$1 + output_files = [argument[0]] + required_output = [] + for G in output_files: +- required_output.append('%s/results.dat' % G) ++ required_output.append('%s/results.dat' % G) + input_files.append(pjoin(argument[1], "input_app.txt")) + input_files.append(pjoin(argument[1], 
"ftn26")) +- ++ + #submitting +- self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, ++ self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + input_files=input_files, output_files=output_files, +- required_output=required_output, **opt) ++ required_output=required_output, **opt) ++ ++ + +- +- + else: + self.cluster.submit(exe, argument=argument, stdout=stdout, cwd=cwd, **opt) +- ++ + + ############################################################################ + def find_madevent_mode(self): + """Find if Madevent is in Group mode or not""" +- ++ + # The strategy is too look in the files Source/run_configs.inc + # if we found: ChanPerJob=3 then it's a group mode. + file_path = pjoin(self.me_dir, 'Source', 'run_config.inc') +@@ -5740,11 +5752,11 @@ tar -czf split_$1.tar.gz split_$1 + return 'group' + else: + return 'v4' +- ++ + ############################################################################ + def monitor(self, run_type='monitor', mode=None, html=False): + """ monitor the progress of running job """ +- ++ + + starttime = time.time() + if mode is None: +@@ -5760,8 +5772,8 @@ tar -czf split_$1.tar.gz split_$1 + else: + update_status = lambda idle, run, finish: None + update_first = None +- try: +- self.cluster.wait(self.me_dir, update_status, update_first=update_first) ++ try: ++ self.cluster.wait(self.me_dir, update_status, update_first=update_first) + except Exception as error: + logger.info(error) + if not self.force: +@@ -5776,24 +5788,24 @@ tar -czf split_$1.tar.gz split_$1 + raise + except KeyboardInterrupt as error: + self.cluster.remove() +- raise +- +- ++ raise ++ + +- ############################################################################ ++ ++ ############################################################################ + def configure_directory(self, html_opening=True): +- """ All action require before any type of run """ ++ """ All action require before any type of run """ + + # Basic check + 
assert os.path.exists(pjoin(self.me_dir,'SubProcesses')) + + # environmental variables to be included in make_opts + self.make_opts_var = {} +- ++ + #see when the last file was modified + time_mod = max([os.path.getmtime(pjoin(self.me_dir,'Cards','run_card.dat')), + os.path.getmtime(pjoin(self.me_dir,'Cards','param_card.dat'))]) +- ++ + if self.configured >= time_mod and hasattr(self, 'random') and hasattr(self, 'run_card'): + #just ensure that cluster specific are correctly handled + if self.cluster: +@@ -5808,7 +5820,7 @@ tar -czf split_$1.tar.gz split_$1 + #open only once the web page + # Change current working directory + self.launching_dir = os.getcwd() +- ++ + # Check if we need the MSSM special treatment + model = self.find_model_name() + if model == 'mssm' or model.startswith('mssm-'): +@@ -5816,14 +5828,14 @@ tar -czf split_$1.tar.gz split_$1 + mg5_param = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') + check_param_card.convert_to_mg5card(param_card, mg5_param) + check_param_card.check_valid_param_card(mg5_param) +- ++ + # limit the number of event to 100k + self.check_nb_events() + + # this is in order to avoid conflicts between runs with and without + # lhapdf. 
not needed anymore the makefile handles it automaticallu + #misc.compile(['clean4pdf'], cwd = pjoin(self.me_dir, 'Source')) +- ++ + self.make_opts_var['pdlabel1'] = '' + self.make_opts_var['pdlabel2'] = '' + if self.run_card['pdlabel1'] in ['eva', 'iww']: +@@ -5854,7 +5866,7 @@ tar -czf split_$1.tar.gz split_$1 + self.copy_lep_densities(self.run_card['pdlabel'], pjoin(self.me_dir, 'Source')) + self.make_opts_var['pdlabel1'] = 'ee' + self.make_opts_var['pdlabel2'] = 'ee' +- ++ + # set random number + if self.run_card['iseed'] != 0: + self.random = int(self.run_card['iseed']) +@@ -5873,18 +5885,18 @@ tar -czf split_$1.tar.gz split_$1 + break + else: + self.random = random.randint(1, 30107) +- ++ + #set random seed for python part of the code + if self.run_card['python_seed'] == -2: #-2 means same as run_card + import random + if not hasattr(random, 'mg_seedset'): +- random.seed(self.run_card['python_seed']) +- random.mg_seedset = self.run_card['python_seed'] ++ random.seed(self.run_card['python_seed']) ++ random.mg_seedset = self.run_card['python_seed'] + elif self.run_card['python_seed'] >= 0: + import random + if not hasattr(random, 'mg_seedset'): +- random.seed(self.run_card['python_seed']) +- random.mg_seedset = self.run_card['python_seed'] ++ random.seed(self.run_card['python_seed']) ++ random.mg_seedset = self.run_card['python_seed'] + if self.run_card['ickkw'] == 2: + logger.info('Running with CKKW matching') + self.treat_ckkw_matching() +@@ -5893,12 +5905,12 @@ tar -czf split_$1.tar.gz split_$1 + self.update_make_opts(self.run_card) + # reset list of Gdirectory + self.Gdirs = None +- ++ + # create param_card.inc and run_card.inc + self.do_treatcards('') +- ++ + logger.info("compile Source Directory") +- ++ + # Compile + for name in [ 'all']:#, '../bin/internal/combine_events']: + self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) +@@ -5921,7 +5933,7 @@ tar -czf split_$1.tar.gz split_$1 + os.remove(pjoin(self.me_dir, 'lib','libbias.a')) + 
force_subproc_clean = True + +- ++ + # Finally compile the bias module as well + if self.run_card['bias_module'] not in ['dummy',None]: + logger.debug("Compiling the bias module '%s'"%bias_name) +@@ -5933,7 +5945,7 @@ tar -czf split_$1.tar.gz split_$1 + 'INVALID' in str(bias_module_valid).upper(): + raise InvalidCmd("The bias module '%s' cannot be used because of:\n%s"% + (bias_name,bias_module_valid)) +- ++ + self.compile(arg=[], cwd=os.path.join(self.me_dir, 'Source','BIAS',bias_name)) + self.proc_characteristics['bias_module']=bias_name + # Update the proc_characterstics file +@@ -5942,7 +5954,7 @@ tar -czf split_$1.tar.gz split_$1 + + if force_subproc_clean: + # Make sure that madevent will be recompiled +- subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', ++ subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + 'subproc.mg'))] + for nb_proc,subdir in enumerate(subproc): + Pdir = pjoin(self.me_dir, 'SubProcesses',subdir.strip()) +@@ -5959,20 +5971,20 @@ tar -czf split_$1.tar.gz split_$1 + ############################################################################ + @staticmethod + def check_dir(path, default=''): +- """check if the directory exists. if so return the path otherwise the ++ """check if the directory exists. 
if so return the path otherwise the + default""" +- ++ + if os.path.isdir(path): + return path + else: + return default + + +- ++ + ############################################################################ + def get_Gdir(self, Pdir=None, symfact=None): + """get the list of Gdirectory if not yet saved.""" +- ++ + if hasattr(self, "Gdirs") and self.Gdirs: + if self.me_dir in self.Gdirs[0]: + if Pdir is None: +@@ -5988,8 +6000,8 @@ tar -czf split_$1.tar.gz split_$1 + + + Pdirs = self.get_Pdir() +- Gdirs = {self.me_dir:[]} +- mfactors = {} ++ Gdirs = {self.me_dir:[]} ++ mfactors = {} + for P in Pdirs: + Gdirs[P] = [] + #for the next line do not use P, since in readonly mode it might not have symfact +@@ -6000,7 +6012,7 @@ tar -czf split_$1.tar.gz split_$1 + mfactors[pjoin(P, "G%s" % tag)] = mfactor + self.Gdirs = (Gdirs, mfactors) + return self.get_Gdir(Pdir, symfact=symfact) +- ++ + ############################################################################ + def set_run_name(self, name, tag=None, level='parton', reload_card=False, + allow_new_tag=True): +@@ -6018,8 +6030,8 @@ tar -czf split_$1.tar.gz split_$1 + tagRun = self.results[self.run_name][i] + if tagRun.pythia or tagRun.shower or tagRun.pythia8 : + return tagRun['tag'] +- +- ++ ++ + # when are we force to change the tag new_run:previous run requiring changes + upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','madanalysis5_hadron','madanalysis5_parton', 'rivet'], + 'pythia': ['pythia','pgs','delphes','madanalysis5_hadron'], +@@ -6032,7 +6044,7 @@ tar -czf split_$1.tar.gz split_$1 + 'syscalc':[], + 'rivet':['rivet']} + +- if name == self.run_name: ++ if name == self.run_name: + if reload_card: + run_card = pjoin(self.me_dir, 'Cards','run_card.dat') + self.run_card = banner_mod.RunCard(run_card) +@@ -6052,13 +6064,13 @@ tar -czf split_$1.tar.gz split_$1 + break + return get_last_tag(self, level) + +- ++ + # save/clean previous run + if self.run_name: + self.store_result() + # store new name + 
self.run_name = name +- ++ + new_tag = False + # First call for this run -> set the banner + self.banner = banner_mod.recover_banner(self.results, level, name) +@@ -6067,8 +6079,8 @@ tar -czf split_$1.tar.gz split_$1 + else: + # Read run_card + run_card = pjoin(self.me_dir, 'Cards','run_card.dat') +- self.run_card = banner_mod.RunCard(run_card) +- ++ self.run_card = banner_mod.RunCard(run_card) ++ + if tag: + self.run_card['run_tag'] = tag + new_tag = True +@@ -6081,7 +6093,7 @@ tar -czf split_$1.tar.gz split_$1 + self.results.update('add run %s' % name, 'all', makehtml=False) + else: + for tag in upgrade_tag[level]: +- ++ + if getattr(self.results[self.run_name][-1], tag): + # LEVEL is already define in the last tag -> need to switch tag + tag = self.get_available_tag() +@@ -6091,8 +6103,8 @@ tar -czf split_$1.tar.gz split_$1 + if not new_tag: + # We can add the results to the current run + tag = self.results[self.run_name][-1]['tag'] +- self.run_card['run_tag'] = tag # ensure that run_tag is correct +- ++ self.run_card['run_tag'] = tag # ensure that run_tag is correct ++ + if allow_new_tag and (name in self.results and not new_tag): + self.results.def_current(self.run_name) + else: +@@ -6101,15 +6113,15 @@ tar -czf split_$1.tar.gz split_$1 + self.run_tag = self.run_card['run_tag'] + + return get_last_tag(self, level) +- +- +- ++ ++ ++ + ############################################################################ + def check_nb_events(self): +- """Find the number of event in the run_card, and check that this is not ++ """Find the number of event in the run_card, and check that this is not + too large""" + +- ++ + nb_event = int(self.run_card['nevents']) + if nb_event > 1000000: + logger.warning("Attempting to generate more than 1M events") +@@ -6121,20 +6133,20 @@ tar -czf split_$1.tar.gz split_$1 + + return + +- +- ############################################################################ ++ ++ 
############################################################################ + def update_random(self): + """ change random number""" +- ++ + self.random += 3 + if self.random > 30081*30081: # can't use too big random number + raise MadGraph5Error('Random seed too large ' + str(self.random) + ' > 30081*30081') +- if self.run_card['python_seed'] == -2: ++ if self.run_card['python_seed'] == -2: + import random + if not hasattr(random, 'mg_seedset'): +- random.seed(self.random) ++ random.seed(self.random) + random.mg_seedset = self.random +- ++ + ############################################################################ + def save_random(self): + """save random number in appropirate file""" +@@ -6143,14 +6155,14 @@ tar -czf split_$1.tar.gz split_$1 + fsock.writelines('r=%s\n' % self.random) + + def do_quit(self, *args, **opts): +- ++ + return common_run.CommonRunCmd.do_quit(self, *args, **opts) + #return CmdExtended.do_quit(self, *args, **opts) +- ++ + ############################################################################ + def treat_CKKW_matching(self): + """check for ckkw""" +- ++ + lpp1 = self.run_card['lpp1'] + lpp2 = self.run_card['lpp2'] + e1 = self.run_card['ebeam1'] +@@ -6158,19 +6170,19 @@ tar -czf split_$1.tar.gz split_$1 + pd = self.run_card['pdlabel'] + lha = self.run_card['lhaid'] + xq = self.run_card['xqcut'] +- translation = {'e1': e1, 'e2':e2, 'pd':pd, ++ translation = {'e1': e1, 'e2':e2, 'pd':pd, + 'lha':lha, 'xq':xq} + + if lpp1 or lpp2: +- # Remove ':s from pd ++ # Remove ':s from pd + if pd.startswith("'"): + pd = pd[1:] + if pd.endswith("'"): +- pd = pd[:-1] ++ pd = pd[:-1] + + if xq >2 or xq ==2: + xq = 2 +- ++ + # find data file + if pd == "lhapdf": + issudfile = 'lib/issudgrid-%(e1)s-%(e2)s-%(pd)s-%(lha)s-%(xq)s.dat.gz' +@@ -6180,9 +6192,9 @@ tar -czf split_$1.tar.gz split_$1 + issudfile = pjoin(self.webbin, issudfile % translation) + else: + issudfile = pjoin(self.me_dir, issudfile % translation) +- ++ + logger.info('Sudakov grid file: 
%s' % issudfile) +- ++ + # check that filepath exists + if os.path.exists(issudfile): + path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') +@@ -6191,20 +6203,20 @@ tar -czf split_$1.tar.gz split_$1 + msg = 'No sudakov grid file for parameter choice. Start to generate it. This might take a while' + logger.info(msg) + self.update_status('GENERATE SUDAKOV GRID', level='parton') +- ++ + for i in range(-2,6): +- self.cluster.submit('%s/gensudgrid ' % self.dirbin, ++ self.cluster.submit('%s/gensudgrid ' % self.dirbin, + argument = ['%d'%i], +- cwd=self.me_dir, ++ cwd=self.me_dir, + stdout=open(pjoin(self.me_dir, 'gensudgrid%s.log' % i),'w')) + self.monitor() + for i in range(-2,6): + path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') + os.system('cat %s/gensudgrid%s.log >> %s' % (self.me_dir, path)) + misc.gzip(path, stdout=issudfile) +- ++ + ############################################################################ +- def create_root_file(self, input='unweighted_events.lhe', ++ def create_root_file(self, input='unweighted_events.lhe', + output='unweighted_events.root' ): + """create the LHE root file """ + self.update_status('Creating root files', level='parton') +@@ -6221,14 +6233,14 @@ tar -czf split_$1.tar.gz split_$1 + totar = False + torm = True + input = input[:-3] +- ++ + try: +- misc.call(['%s/ExRootLHEFConverter' % eradir, ++ misc.call(['%s/ExRootLHEFConverter' % eradir, + input, output], + cwd=pjoin(self.me_dir, 'Events')) + except Exception: + logger.warning('fail to produce Root output [problem with ExRootAnalysis]') +- ++ + if totar: + if os.path.exists('%s.gz' % input): + try: +@@ -6239,13 +6251,13 @@ tar -czf split_$1.tar.gz split_$1 + misc.gzip(input) + if torm: + os.remove(input) +- ++ + def run_syscalc(self, mode='parton', event_path=None, output=None): +- """create the syscalc output""" ++ """create the syscalc output""" + + if self.run_card['use_syst'] not in self.true: + return +- ++ + scdir = self.options['syscalc_path'] + if not scdir or not 
os.path.exists(scdir): + return +@@ -6253,12 +6265,12 @@ tar -czf split_$1.tar.gz split_$1 + if self.run_card['event_norm'] != 'sum': + logger.critical('SysCalc works only when event_norm is on \'sum\'.') + return +- logger.info('running SysCalc on mode %s' % mode) +- ++ logger.info('running SysCalc on mode %s' % mode) ++ + # Restore the old default for SysCalc+PY6 + if self.run_card['sys_matchscale']=='auto': + self.run_card['sys_matchscale'] = "30 50" +- ++ + # Check that all pdfset are correctly installed + lhaid = [self.run_card.get_lhapdf_id()] + if '&&' in self.run_card['sys_pdf']: +@@ -6273,20 +6285,20 @@ tar -czf split_$1.tar.gz split_$1 + logger.debug(str(error)) + logger.warning('Systematic computation requires lhapdf to run. Bypass SysCalc') + return +- ++ + # Copy all the relevant PDF sets + [self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid] +- ++ + to_syscalc={'sys_scalefact': self.run_card['sys_scalefact'], + 'sys_alpsfact': self.run_card['sys_alpsfact'], + 'sys_matchscale': self.run_card['sys_matchscale'], + 'sys_scalecorrelation': self.run_card['sys_scalecorrelation'], + 'sys_pdf': self.run_card['sys_pdf']} +- +- tag = self.run_card['run_tag'] ++ ++ tag = self.run_card['run_tag'] + card = pjoin(self.me_dir, 'bin','internal', 'syscalc_card.dat') + template = open(pjoin(self.me_dir, 'bin','internal', 'syscalc_template.dat')).read() +- ++ + if '&&' in to_syscalc['sys_pdf']: + to_syscalc['sys_pdf'] = to_syscalc['sys_pdf'].split('#',1)[0].replace('&&',' \n ') + else: +@@ -6299,8 +6311,8 @@ tar -czf split_$1.tar.gz split_$1 + new.append(d) + else: + new[-1] += ' %s' % d +- to_syscalc['sys_pdf'] = '\n'.join(new) +- ++ to_syscalc['sys_pdf'] = '\n'.join(new) ++ + if to_syscalc['sys_pdf'].lower() in ['', 'f', 'false', 'none', '.false.']: + to_syscalc['sys_pdf'] = '' + if to_syscalc['sys_alpsfact'].lower() in ['', 'f', 'false', 'none','.false.']: +@@ -6308,17 +6320,17 @@ tar -czf split_$1.tar.gz split_$1 + + + +- ++ + # check if the 
scalecorrelation parameter is define: + if not 'sys_scalecorrelation' in self.run_card: + self.run_card['sys_scalecorrelation'] = -1 + open(card,'w').write(template % self.run_card) +- ++ + if not os.path.exists(card): + return False + +- +- ++ ++ + event_dir = pjoin(self.me_dir, 'Events') + + if not event_path: +@@ -6341,19 +6353,19 @@ tar -czf split_$1.tar.gz split_$1 + raise SysCalcError('qcut value for sys_matchscale lower than qcut in pythia_card. Bypass syscalc') + if float(value) < xqcut: + raise SysCalcError('qcut value for sys_matchscale lower than xqcut in run_card. Bypass syscalc') +- +- ++ ++ + event_path = pjoin(event_dir,'syst.dat') + output = pjoin(event_dir, 'syscalc.dat') + else: + raise self.InvalidCmd('Invalid mode %s' % mode) +- ++ + if not os.path.exists(event_path): + if os.path.exists(event_path+'.gz'): + misc.gunzip(event_path+'.gz') + else: + raise SysCalcError('Events file %s does not exits' % event_path) +- ++ + self.update_status('Calculating systematics for %s level' % mode, level = mode.lower()) + try: + proc = misc.call([os.path.join(scdir, 'sys_calc'), +@@ -6362,7 +6374,7 @@ tar -czf split_$1.tar.gz split_$1 + stderr = subprocess.STDOUT, + cwd=event_dir) + # Wait 5 s to make sure file is finished writing +- time.sleep(5) ++ time.sleep(5) + except OSError as error: + logger.error('fail to run syscalc: %s. Please check that SysCalc is correctly installed.' % error) + else: +@@ -6370,11 +6382,11 @@ tar -czf split_$1.tar.gz split_$1 + logger.warning('SysCalc Failed. Please read the associate log to see the reason. 
Did you install the associate PDF set?') + elif mode == 'parton': + files.mv(output, event_path) +- ++ + self.update_status('End syscalc for %s level' % mode, level = mode.lower(), + makehtml=False) +- +- return True ++ ++ return True + + + action_switcher = AskRun +@@ -6387,23 +6399,23 @@ tar -czf split_$1.tar.gz split_$1 + passing_cmd.append('reweight=ON') + if '-M' in args or '--madspin' in args: + passing_cmd.append('madspin=ON') +- ++ + switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, + mode=mode, line_args=args, force=self.force, + first_cmd=passing_cmd, return_instance=True) + # +- self.switch = switch # store the value of the switch for plugin purpose ++ self.switch = switch # store the value of the switch for plugin purpose + if 'dynamical' in switch: + mode = 'auto' +- ++ + # Now that we know in which mode we are check that all the card + #exists (copy default if needed) +- ++ + cards = ['param_card.dat', 'run_card.dat'] + if switch['shower'] == 'Pythia6': + cards.append('pythia_card.dat') + if switch['shower'] == 'Pythia8': +- cards.append('pythia8_card.dat') ++ cards.append('pythia8_card.dat') + if switch['detector'] in ['PGS','DELPHES+PGS']: + cards.append('pgs_card.dat') + if switch['detector'] in ['Delphes', 'DELPHES+PGS']: +@@ -6426,29 +6438,29 @@ tar -czf split_$1.tar.gz split_$1 + cards.append('rivet_card.dat') + + self.keep_cards(cards) +- ++ + first_cmd = cmd_switch.get_cardcmd() +- ++ + if os.path.isfile(pjoin(self.me_dir,'Cards','MadLoopParams.dat')): + cards.append('MadLoopParams.dat') +- ++ + if self.force: + self.check_param_card(pjoin(self.me_dir,'Cards','param_card.dat' )) + return switch +- ++ + + if 'dynamical' in switch and switch['dynamical']: + self.ask_edit_cards(cards, plot=False, mode='auto', first_cmd=first_cmd) + else: + self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) + return switch +- ++ + ############################################################################ + def 
ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None): + """Ask the question when launching pythia""" +- ++ + pythia_suffix = '' if pythia_version==6 else '%d'%pythia_version +- ++ + available_mode = ['0', '1'] + if pythia_version==6: + available_mode.append('2') +@@ -6473,10 +6485,10 @@ tar -czf split_$1.tar.gz split_$1 + mode = self.ask(question, '0', options) + elif not mode: + mode = 'auto' +- ++ + if mode.isdigit(): + mode = name[mode] +- ++ + auto = False + if mode == 'auto': + auto = True +@@ -6485,7 +6497,7 @@ tar -czf split_$1.tar.gz split_$1 + mode = 'pgs' + elif os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')): + mode = 'delphes' +- else: ++ else: + mode = 'pythia%s'%pythia_suffix + logger.info('Will run in mode %s' % mode) + # Now that we know in which mode we are check that all the card +@@ -6501,15 +6513,15 @@ tar -czf split_$1.tar.gz split_$1 + cards.append('delphes_trigger.dat') + self.keep_cards(cards, ignore=['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat', + 'plot_card.dat']) +- ++ + if self.force: + return mode +- ++ + if not banner: + banner = self.banner +- ++ + if auto: +- self.ask_edit_cards(cards, from_banner=['param', 'run'], ++ self.ask_edit_cards(cards, from_banner=['param', 'run'], + mode='auto', plot=(pythia_version==6), banner=banner + ) + else: +@@ -6517,12 +6529,12 @@ tar -czf split_$1.tar.gz split_$1 + plot=(pythia_version==6), banner=banner) + + return mode +- ++ + #=============================================================================== + # MadEventCmd + #=============================================================================== + class MadEventCmdShell(MadEventCmd, cmd.CmdShell): +- """The command line processor of MadGraph""" ++ """The command line processor of MadGraph""" + + + +@@ -6536,11 +6548,11 @@ class SubProcesses(object): + @classmethod + def clean(cls): + cls.name_to_pdg = {} +- ++ + @staticmethod + def get_subP(me_dir): + """return the list of 
Subprocesses""" +- ++ + out = [] + for line in open(pjoin(me_dir,'SubProcesses', 'subproc.mg')): + if not line: +@@ -6548,9 +6560,9 @@ class SubProcesses(object): + name = line.strip() + if os.path.exists(pjoin(me_dir, 'SubProcesses', name)): + out.append(pjoin(me_dir, 'SubProcesses', name)) +- ++ + return out +- ++ + + + @staticmethod +@@ -6611,9 +6623,9 @@ class SubProcesses(object): + particles = re.search("/([\d,-]+)/", line) + all_ids.append([int(p) for p in particles.group(1).split(',')]) + return all_ids +- +- +-#=============================================================================== ++ ++ ++#=============================================================================== + class GridPackCmd(MadEventCmd): + """The command for the gridpack --Those are not suppose to be use interactively--""" + +@@ -6627,7 +6639,7 @@ class GridPackCmd(MadEventCmd): + self.random = seed + self.random_orig = self.random + self.granularity = gran +- ++ + self.options['automatic_html_opening'] = False + #write the grid_card.dat on disk + self.nb_event = int(nb_event) +@@ -6668,7 +6680,7 @@ class GridPackCmd(MadEventCmd): + + def write_gridcard(self, nb_event, seed, gran): + """write the grid_card.dat file at appropriate location""" +- ++ + # first try to write grid_card within the gridpack. 
+ print("WRITE GRIDCARD", self.me_dir) + if self.readonly: +@@ -6677,35 +6689,35 @@ class GridPackCmd(MadEventCmd): + fsock = open('grid_card.dat','w') + else: + fsock = open(pjoin(self.me_dir, 'Cards', 'grid_card.dat'),'w') +- ++ + gridpackcard = banner_mod.GridpackCard() + gridpackcard['GridRun'] = True + gridpackcard['gevents'] = nb_event + gridpackcard['gseed'] = seed + gridpackcard['ngran'] = gran +- ++ + gridpackcard.write(fsock) + + ############################################################################ + def get_Pdir(self): + """get the list of Pdirectory if not yet saved.""" +- ++ + if hasattr(self, "Pdirs"): + if self.me_dir in self.Pdirs[0]: + return self.Pdirs +- ++ + if not self.readonly: +- self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) ++ self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) + for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] + else: +- self.Pdirs = [l.strip() +- for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] +- ++ self.Pdirs = [l.strip() ++ for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] ++ + return self.Pdirs +- ++ + def prepare_local_dir(self): + """create the P directory structure in the local directory""" +- ++ + if not self.readonly: + os.chdir(self.me_dir) + else: +@@ -6714,7 +6726,7 @@ class GridPackCmd(MadEventCmd): + os.mkdir(p) + files.cp(pjoin(self.me_dir,'SubProcesses',p,'symfact.dat'), + pjoin(p, 'symfact.dat')) +- ++ + + def launch(self, nb_event, seed): + """ launch the generation for the grid """ +@@ -6730,13 +6742,13 @@ class GridPackCmd(MadEventCmd): + if self.run_card['python_seed'] == -2: + import random + if not hasattr(random, 'mg_seedset'): +- random.seed(seed) ++ random.seed(seed) + random.mg_seedset = seed + elif self.run_card['python_seed'] > 0: + import random + if not hasattr(random, 'mg_seedset'): +- random.seed(self.run_card['python_seed']) +- random.mg_seedset = self.run_card['python_seed'] ++ 
random.seed(self.run_card['python_seed']) ++ random.mg_seedset = self.run_card['python_seed'] + # 2) Run the refine for the grid + self.update_status('Generating Events', level=None) + #misc.call([pjoin(self.me_dir,'bin','refine4grid'), +@@ -6755,70 +6767,70 @@ class GridPackCmd(MadEventCmd): + self.exec_cmd('decay_events -from_cards', postcmd=False) + elif self.run_card['use_syst'] and self.run_card['systematics_program'] == 'systematics': + self.options['nb_core'] = 1 +- self.exec_cmd('systematics %s --from_card' % ++ self.exec_cmd('systematics %s --from_card' % + pjoin('Events', self.run_name, 'unweighted_events.lhe.gz'), + postcmd=False,printcmd=False) +- ++ + + def refine4grid(self, nb_event): + """Special refine for gridpack run.""" + self.nb_refine += 1 +- ++ + precision = nb_event + + self.opts = dict([(key,value[1]) for (key,value) in \ + self._survey_options.items()]) +- ++ + # initialize / remove lhapdf mode + # self.configure_directory() # All this has been done before + self.cluster_mode = 0 # force single machine + + # Store seed in randinit file, to be read by ranmar.f + self.save_random() +- ++ + self.update_status('Refine results to %s' % precision, level=None) + logger.info("Using random number seed offset = %s" % self.random) + + refine_opt = {'err_goal': nb_event, 'split_channels': False, +- 'ngran':self.granularity, 'readonly': self.readonly} ++ 'ngran':self.granularity, 'readonly': self.readonly} + x_improve = gen_ximprove.gen_ximprove_gridpack(self, refine_opt) + x_improve.launch() # create the ajob for the refinment and run those! +- self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack +- +- ++ self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack ++ ++ + #bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) + #print 'run combine!!!' 
+ #combine_runs.CombineRuns(self.me_dir) +- ++ + return + #update html output + Presults = sum_html.collect_result(self) + cross, error = Presults.xsec, Presults.xerru + self.results.add_detail('cross', cross) + self.results.add_detail('error', error) +- +- ++ ++ + #self.update_status('finish refine', 'parton', makehtml=False) + #devnull.close() +- +- +- ++ ++ ++ + return + self.total_jobs = 0 +- subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if ++ subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if + P.startswith('P') and os.path.isdir(pjoin(self.me_dir,'SubProcesses', P))] + devnull = open(os.devnull, 'w') + for nb_proc,subdir in enumerate(subproc): + subdir = subdir.strip() + Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) + bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) +- ++ + logger.info(' %s ' % subdir) + # clean previous run + for match in misc.glob('*ajob*', Pdir): + if os.path.basename(match)[:4] in ['ajob', 'wait', 'run.', 'done']: + os.remove(pjoin(Pdir, match)) +- ++ + + logfile = pjoin(Pdir, 'gen_ximprove.log') + misc.call([pjoin(bindir, 'gen_ximprove')], +@@ -6828,40 +6840,40 @@ class GridPackCmd(MadEventCmd): + + if os.path.exists(pjoin(Pdir, 'ajob1')): + alljobs = misc.glob('ajob*', Pdir) +- nb_tot = len(alljobs) ++ nb_tot = len(alljobs) + self.total_jobs += nb_tot + for i, job in enumerate(alljobs): + job = os.path.basename(job) +- self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), ++ self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + run_type='Refine number %s on %s (%s/%s)' % + (self.nb_refine, subdir, nb_proc+1, len(subproc))) + if os.path.exists(pjoin(self.me_dir,'error')): + self.monitor(html=True) + raise MadEventError('Error detected in dir %s: %s' % \ + (Pdir, open(pjoin(self.me_dir,'error')).read())) +- self.monitor(run_type='All job submitted for refine number %s' % ++ self.monitor(run_type='All job submitted for refine number %s' % + self.nb_refine) +- ++ + 
self.update_status("Combining runs", level='parton') + try: + os.remove(pjoin(Pdir, 'combine_runs.log')) + except Exception: + pass +- ++ + bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) + combine_runs.CombineRuns(self.me_dir) +- ++ + #update html output + cross, error = self.make_make_all_html_results() + self.results.add_detail('cross', cross) + self.results.add_detail('error', error) +- +- ++ ++ + self.update_status('finish refine', 'parton', makehtml=False) + devnull.close() + + def do_combine_events(self, line): +- """Advanced commands: Launch combine events""" ++ """Advanced commands: Launch combine events""" + + if self.readonly: + outdir = 'Events' +@@ -6883,17 +6895,17 @@ class GridPackCmd(MadEventCmd): + self.banner.add_generation_info(self.results.current['cross'], self.run_card['nevents']) + if not hasattr(self, 'random_orig'): self.random_orig = 0 + self.banner.change_seed(self.random_orig) +- +- ++ ++ + if not os.path.exists(pjoin(outdir, self.run_name)): + os.mkdir(pjoin(outdir, self.run_name)) +- self.banner.write(pjoin(outdir, self.run_name, ++ self.banner.write(pjoin(outdir, self.run_name, + '%s_%s_banner.txt' % (self.run_name, tag))) +- +- get_wgt = lambda event: event.wgt ++ ++ get_wgt = lambda event: event.wgt + AllEvent = lhe_parser.MultiEventFile() + AllEvent.banner = self.banner +- ++ + partials = 0 # if too many file make some partial unweighting + sum_xsec, sum_xerru, sum_axsec = 0,[],0 + Gdirs = self.get_Gdir() +@@ -6903,7 +6915,7 @@ class GridPackCmd(MadEventCmd): + if os.path.exists(pjoin(Gdir, 'events.lhe')): + result = sum_html.OneResult('') + result.read_results(pjoin(Gdir, 'results.dat')) +- AllEvent.add(pjoin(Gdir, 'events.lhe'), ++ AllEvent.add(pjoin(Gdir, 'events.lhe'), + result.get('xsec')*gscalefact[Gdir], + result.get('xerru')*gscalefact[Gdir], + result.get('axsec')*gscalefact[Gdir] +@@ -6912,7 +6924,7 @@ class GridPackCmd(MadEventCmd): + sum_xsec += result.get('xsec')*gscalefact[Gdir] + 
sum_xerru.append(result.get('xerru')*gscalefact[Gdir]) + sum_axsec += result.get('axsec')*gscalefact[Gdir] +- ++ + if len(AllEvent) >= 80: #perform a partial unweighting + AllEvent.unweight(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % partials), + get_wgt, log_level=5, trunc_error=1e-2, event_target=self.nb_event) +@@ -6921,26 +6933,26 @@ class GridPackCmd(MadEventCmd): + AllEvent.add(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % partials), + sum_xsec, + math.sqrt(sum(x**2 for x in sum_xerru)), +- sum_axsec) ++ sum_axsec) + partials +=1 +- ++ + if not hasattr(self,'proc_characteristic'): + self.proc_characteristic = self.get_characteristics() +- ++ + self.banner.add_generation_info(sum_xsec, self.nb_event) + nb_event = AllEvent.unweight(pjoin(outdir, self.run_name, "unweighted_events.lhe.gz"), + get_wgt, trunc_error=1e-2, event_target=self.nb_event, + log_level=logging.DEBUG, normalization=self.run_card['event_norm'], + proc_charac=self.proc_characteristic) +- +- ++ ++ + if partials: + for i in range(partials): + try: + os.remove(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % i)) + except Exception: + os.remove(pjoin(outdir, self.run_name, "partials%s.lhe" % i)) +- ++ + self.results.add_detail('nb_event', nb_event) + self.banner.add_generation_info(sum_xsec, nb_event) + if self.run_card['bias_module'].lower() not in ['dummy', 'none']: +@@ -6949,7 +6961,7 @@ class GridPackCmd(MadEventCmd): + + class MadLoopInitializer(object): + """ A container class for the various methods for initializing MadLoop. It is +- placed in MadEventInterface because it is used by Madevent for loop-induced ++ placed in MadEventInterface because it is used by Madevent for loop-induced + simulations. 
""" + + @staticmethod +@@ -6962,7 +6974,7 @@ class MadLoopInitializer(object): + if os.path.isfile(pjoin(dir_name,'check')): + os.remove(pjoin(dir_name,'check')) + os.remove(pjoin(dir_name,'check_sa.o')) +- os.remove(pjoin(dir_name,'loop_matrix.o')) ++ os.remove(pjoin(dir_name,'loop_matrix.o')) + # Now run make + devnull = open(os.devnull, 'w') + start=time.time() +@@ -6984,7 +6996,7 @@ class MadLoopInitializer(object): + stdout=devnull, stderr=devnull, close_fds=True) + try: + ptimer.execute() +- #poll as often as possible; otherwise the subprocess might ++ #poll as often as possible; otherwise the subprocess might + # "sneak" in some extra memory usage while you aren't looking + # Accuracy of .2 seconds is enough for the timing. + while ptimer.poll(): +@@ -7016,7 +7028,7 @@ class MadLoopInitializer(object): + If mu_r > 0.0, then the renormalization constant value will be hardcoded + directly in check_sa.f, if is is 0 it will be set to Sqrt(s) and if it + is < 0.0 the value in the param_card.dat is used. +- If the split_orders target (i.e. the target squared coupling orders for ++ If the split_orders target (i.e. the target squared coupling orders for + the computation) is != -1, it will be changed in check_sa.f via the + subroutine CALL SET_COUPLINGORDERS_TARGET(split_orders).""" + +@@ -7031,12 +7043,12 @@ class MadLoopInitializer(object): + file_path = pjoin(directories[0],'check_sa.f') + if not os.path.isfile(file_path): + raise MadGraph5Error('Could not find the location of check_sa.f'+\ +- ' from the specified path %s.'%str(file_path)) ++ ' from the specified path %s.'%str(file_path)) + + file = open(file_path, 'r') + check_sa = file.read() + file.close() +- ++ + file = open(file_path, 'w') + check_sa = re.sub(r"READPS = \S+\)","READPS = %s)"%('.TRUE.' 
if read_ps \ + else '.FALSE.'), check_sa) +@@ -7052,42 +7064,42 @@ class MadLoopInitializer(object): + (("%.17e"%mu_r).replace('e','d')),check_sa) + elif mu_r < 0.0: + check_sa = re.sub(r"MU_R=SQRTS","",check_sa) +- ++ + if split_orders > 0: + check_sa = re.sub(r"SET_COUPLINGORDERS_TARGET\(-?\d+\)", +- "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) +- ++ "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) ++ + file.write(check_sa) + file.close() + +- @staticmethod ++ @staticmethod + def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ + req_files = ['HelFilter.dat','LoopFilter.dat'], + attempts = [4,15]): +- """ Run the initialization of the process in 'run_dir' with success ++ """ Run the initialization of the process in 'run_dir' with success + characterized by the creation of the files req_files in this directory. + The directory containing the driving source code 'check_sa.f'. +- The list attempt gives the successive number of PS points the ++ The list attempt gives the successive number of PS points the + initialization should be tried with before calling it failed. + Returns the number of PS points which were necessary for the init. + Notice at least run_dir or SubProc_dir must be provided. + A negative attempt number given in input means that quadprec will be + forced for initialization.""" +- ++ + # If the user does not want detailed info, then set the dictionary + # to a dummy one. 
+ if infos is None: + infos={} +- ++ + if SubProc_dir is None and run_dir is None: + raise MadGraph5Error('At least one of [SubProc_dir,run_dir] must'+\ + ' be provided in run_initialization.') +- ++ + # If the user does not specify where is check_sa.f, then it is assumed + # to be one levels above run_dir + if SubProc_dir is None: + SubProc_dir = os.path.abspath(pjoin(run_dir,os.pardir)) +- ++ + if run_dir is None: + directories =[ dir for dir in misc.glob('P[0-9]*', SubProc_dir) + if os.path.isdir(dir) ] +@@ -7097,7 +7109,7 @@ class MadLoopInitializer(object): + raise MadGraph5Error('Could not find a valid running directory'+\ + ' in %s.'%str(SubProc_dir)) + +- # Use the presence of the file born_matrix.f to decide if it is a ++ # Use the presence of the file born_matrix.f to decide if it is a + # loop-induced process or not. It's not crucial, but just that because + # of the dynamic adjustment of the ref scale used for deciding what are + # the zero contributions, more points are neeeded for loop-induced. +@@ -7116,9 +7128,9 @@ class MadLoopInitializer(object): + raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ + %MLCardPath) + else: +- MLCard = banner_mod.MadLoopParam(MLCardPath) ++ MLCard = banner_mod.MadLoopParam(MLCardPath) + MLCard_orig = banner_mod.MadLoopParam(MLCard) +- ++ + # Make sure that LoopFilter really is needed. 
+ if not MLCard['UseLoopFilter']: + try: +@@ -7141,11 +7153,11 @@ class MadLoopInitializer(object): + proc_prefix+fname)) for fname in my_req_files]) or \ + not os.path.isfile(pjoin(run_dir,'check')) or \ + not os.access(pjoin(run_dir,'check'), os.X_OK) +- ++ + # Check if this is a process without born by checking the presence of the + # file born_matrix.f + is_loop_induced = os.path.exists(pjoin(run_dir,'born_matrix.f')) +- ++ + # For loop induced processes, always attempt quadruple precision if + # double precision attempts fail and the user didn't specify himself + # quadruple precision initializations attempts +@@ -7154,11 +7166,11 @@ class MadLoopInitializer(object): + use_quad_prec = 1 + curr_attempt = 1 + +- MLCard.set('WriteOutFilters',True) +- ++ MLCard.set('WriteOutFilters',True) ++ + while to_attempt!=[] and need_init(): + curr_attempt = to_attempt.pop() +- # if the attempt is a negative number it means we must force ++ # if the attempt is a negative number it means we must force + # quadruple precision at initialization time + if curr_attempt < 0: + use_quad_prec = -1 +@@ -7171,11 +7183,11 @@ class MadLoopInitializer(object): + MLCard.set('ZeroThres',1e-9) + # Plus one because the filter are written on the next PS point after + curr_attempt = abs(curr_attempt+1) +- MLCard.set('MaxAttempts',curr_attempt) ++ MLCard.set('MaxAttempts',curr_attempt) + MLCard.write(pjoin(SubProc_dir,'MadLoopParams.dat')) + + # initialization is performed. 
+- MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, ++ MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, + npoints = curr_attempt) + compile_time, run_time, ram_usage = \ + MadLoopInitializer.make_and_run(run_dir) +@@ -7188,7 +7200,7 @@ class MadLoopInitializer(object): + infos['Process_compilation']==None: + infos['Process_compilation'] = compile_time + infos['Initialization'] = run_time +- ++ + MLCard_orig.write(pjoin(SubProc_dir,'MadLoopParams.dat')) + if need_init(): + return None +@@ -7207,8 +7219,8 @@ class MadLoopInitializer(object): + MLCardPath = pjoin(proc_dir,'SubProcesses','MadLoopParams.dat') + if not os.path.isfile(MLCardPath): + raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ +- %MLCardPath) +- MLCard = banner_mod.MadLoopParam(MLCardPath) ++ %MLCardPath) ++ MLCard = banner_mod.MadLoopParam(MLCardPath) + + req_files = ['HelFilter.dat','LoopFilter.dat'] + # Make sure that LoopFilter really is needed. +@@ -7222,9 +7234,9 @@ class MadLoopInitializer(object): + req_files.remove('HelFilter.dat') + except ValueError: + pass +- ++ + for v_folder in glob.iglob(pjoin(proc_dir,'SubProcesses', +- '%s*'%subproc_prefix)): ++ '%s*'%subproc_prefix)): + # Make sure it is a valid MadLoop directory + if not os.path.isdir(v_folder) or not os.path.isfile(\ + pjoin(v_folder,'loop_matrix.f')): +@@ -7235,7 +7247,7 @@ class MadLoopInitializer(object): + if need_init(pjoin(proc_dir,'SubProcesses','MadLoop5_resources'), + proc_prefix, req_files): + return True +- ++ + return False + + @staticmethod +@@ -7253,7 +7265,7 @@ class MadLoopInitializer(object): + misc.compile(arg=['treatCardsLoopNoInit'], cwd=pjoin(proc_dir,'Source')) + else: + interface.do_treatcards('all --no_MadLoopInit') +- ++ + # First make sure that IREGI and CUTTOOLS are compiled if needed + if os.path.exists(pjoin(proc_dir,'Source','CutTools')): + misc.compile(arg=['libcuttools'],cwd=pjoin(proc_dir,'Source')) +@@ -7261,8 +7273,8 @@ class 
MadLoopInitializer(object): + misc.compile(arg=['libiregi'],cwd=pjoin(proc_dir,'Source')) + # Then make sure DHELAS and MODEL are compiled + misc.compile(arg=['libmodel'],cwd=pjoin(proc_dir,'Source')) +- misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) +- ++ misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) ++ + # Now initialize the MadLoop outputs + logger.info('Initializing MadLoop loop-induced matrix elements '+\ + '(this can take some time)...') +@@ -7271,7 +7283,7 @@ class MadLoopInitializer(object): + if MG_options: + if interface and hasattr(interface, 'cluster') and isinstance(interface.cluster, cluster.MultiCore): + mcore = interface.cluster +- else: ++ else: + mcore = cluster.MultiCore(**MG_options) + else: + mcore = cluster.onecore +@@ -7282,10 +7294,10 @@ class MadLoopInitializer(object): + run_dir=run_dir, infos=infos) + else: + n_PS = MadLoopInitializer.run_initialization( +- run_dir=run_dir, infos=infos, attempts=attempts) ++ run_dir=run_dir, infos=infos, attempts=attempts) + infos['nPS'] = n_PS + return 0 +- ++ + def wait_monitoring(Idle, Running, Done): + if Idle+Running+Done == 0: + return +@@ -7295,21 +7307,21 @@ class MadLoopInitializer(object): + init_info = {} + # List all virtual folders while making sure they are valid MadLoop folders + VirtualFolders = [f for f in glob.iglob(pjoin(proc_dir,'SubProcesses', +- '%s*'%subproc_prefix)) if (os.path.isdir(f) or ++ '%s*'%subproc_prefix)) if (os.path.isdir(f) or + os.path.isfile(pjoin(f,'loop_matrix.f')))] + logger.debug("Now Initializing MadLoop matrix element in %d folder%s:"%\ + (len(VirtualFolders),'s' if len(VirtualFolders)>1 else '')) +- logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in ++ logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in + VirtualFolders)) + for v_folder in VirtualFolders: + init_info[v_folder] = {} +- ++ + # We try all multiples of n_PS from 1 to max_mult, first in DP and then + # in QP before giving up, or 
use default values if n_PS is None. + max_mult = 3 + if n_PS is None: + # Then use the default list of number of PS points to try +- mcore.submit(run_initialization_wrapper, ++ mcore.submit(run_initialization_wrapper, + [pjoin(v_folder), init_info[v_folder], None]) + else: + # Use specific set of PS points +@@ -7336,8 +7348,8 @@ class MadLoopInitializer(object): + '%d PS points (%s), in %.3g(compil.) + %.3g(init.) secs.'%( + abs(init['nPS']),'DP' if init['nPS']>0 else 'QP', + init['Process_compilation'],init['Initialization'])) +- +- logger.info('MadLoop initialization finished.') ++ ++ logger.info('MadLoop initialization finished.') + + AskforEditCard = common_run.AskforEditCard + +@@ -7352,16 +7364,16 @@ if '__main__' == __name__: + + import os + import optparse +- # Get the directory of the script real path (bin) +- # and add it to the current PYTHONPATH ++ # Get the directory of the script real path (bin) ++ # and add it to the current PYTHONPATH + #root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))) + sys.path.insert(0, root_path) + +- class MyOptParser(optparse.OptionParser): ++ class MyOptParser(optparse.OptionParser): + class InvalidOption(Exception): pass + def error(self, msg=''): + raise MyOptParser.InvalidOption(msg) +- # Write out nice usage message if called with -h or --help ++ # Write out nice usage message if called with -h or --help + usage = "usage: %prog [options] [FILE] " + parser = MyOptParser(usage=usage) + parser.add_option("-l", "--logging", default='INFO', +@@ -7372,7 +7384,7 @@ if '__main__' == __name__: + help='force to launch debug mode') + parser_error = '' + done = False +- ++ + for i in range(len(sys.argv)-1): + try: + (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i]) +@@ -7382,7 +7394,7 @@ if '__main__' == __name__: + else: + args += sys.argv[len(sys.argv)-i:] + if not done: +- # raise correct error: ++ # raise correct error: + try: + (options, args) = parser.parse_args() + 
except MyOptParser.InvalidOption as error: +@@ -7395,8 +7407,8 @@ if '__main__' == __name__: + import subprocess + import logging + import logging.config +- # Set logging level according to the logging level given by options +- #logging.basicConfig(level=vars(logging)[options.logging]) ++ # Set logging level according to the logging level given by options ++ #logging.basicConfig(level=vars(logging)[options.logging]) + import internal + import internal.coloring_logging + # internal.file = XXX/bin/internal/__init__.py +@@ -7419,13 +7431,13 @@ if '__main__' == __name__: + raise + pass + +- # Call the cmd interface main loop ++ # Call the cmd interface main loop + try: + if args: + # a single command is provided + if '--web' in args: +- i = args.index('--web') +- args.pop(i) ++ i = args.index('--web') ++ args.pop(i) + cmd_line = MadEventCmd(me_dir, force_run=True) + else: + cmd_line = MadEventCmdShell(me_dir, force_run=True) +@@ -7445,13 +7457,13 @@ if '__main__' == __name__: + pass + + +- +- +- +- +- +- +- +- ++ ++ ++ ++ ++ ++ ++ ++ + + diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/Bridge.h b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/Bridge.h index bf8b5e024d..c263f39a62 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/Bridge.h +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/Bridge.h @@ -236,7 +236,7 @@ namespace mg5amcCpu #ifdef __CUDACC__ if( ( m_nevt < s_gputhreadsmin ) || ( m_nevt % s_gputhreadsmin != 0 ) ) throw std::runtime_error( "Bridge constructor: nevt should be a multiple of " + std::to_string( s_gputhreadsmin ) ); - while( m_nevt != m_gpublocks * m_gputhreads ) + while( m_nevt != static_cast( m_gpublocks * m_gputhreads ) ) { m_gputhreads /= 2; if( m_gputhreads < s_gputhreadsmin ) @@ -266,7 +266,7 @@ namespace mg5amcCpu template void Bridge::set_gpugrid( const int gpublocks, const 
int gputhreads ) { - if( m_nevt != gpublocks * gputhreads ) + if( m_nevt != static_cast( gpublocks * gputhreads ) ) throw std::runtime_error( "Bridge: gpublocks*gputhreads must equal m_nevt in set_gpugrid" ); m_gpublocks = gpublocks; m_gputhreads = gputhreads; diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/MadgraphTest.h b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/MadgraphTest.h index ef40624c88..b0f2250c25 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/MadgraphTest.h +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/MadgraphTest.h @@ -199,10 +199,6 @@ class MadgraphTest : public testing::TestWithParam } }; -// Since we link both the CPU-only and GPU tests into the same executable, we prevent -// a multiply defined symbol by only compiling this in the non-CUDA phase: -#ifndef __CUDACC__ - /// Compare momenta and matrix elements. /// This uses an implementation of TestDriverBase to run a madgraph workflow, /// and compares momenta and matrix elements with a reference file. 
@@ -307,6 +303,4 @@ TEST_P( MadgraphTest, CompareMomentaAndME ) } } -#endif // __CUDACC__ - #endif /* MADGRAPHTEST_H_ */ diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/MatrixElementKernels.cc b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/MatrixElementKernels.cc index 74b5239ebf..2d6f27cd5d 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/MatrixElementKernels.cc +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/MatrixElementKernels.cc @@ -196,6 +196,9 @@ namespace mg5amcGpu void MatrixElementKernelDevice::setGrid( const int gpublocks, const int gputhreads ) { + m_gpublocks = gpublocks; + m_gputhreads = gputhreads; + if( m_gpublocks == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gpublocks must be > 0 in setGrid" ); if( m_gputhreads == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gputhreads must be > 0 in setGrid" ); if( this->nevt() != m_gpublocks * m_gputhreads ) throw std::runtime_error( "MatrixElementKernelDevice: nevt mismatch in setGrid" ); diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/check_sa.cc b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/check_sa.cc +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk 
b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk index b399eb36b0..db26bf090e 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk @@ -1,56 +1,41 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: use ':=' to ensure that the value of CUDACPP_MAKEFILE is not modified further down after including make_opts -#=== NB: use 'override' to ensure that the value can not be modified from the outside -override CUDACPP_MAKEFILE := $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) -###$(info CUDACPP_MAKEFILE='$(CUDACPP_MAKEFILE)') - -#=== NB: different names (e.g. cudacpp.mk and cudacpp_src.mk) are used in the Subprocess and src directories -override CUDACPP_SRC_MAKEFILE = cudacpp_src.mk - -#------------------------------------------------------------------------------- - -#=== Use bash in the Makefile (https://www.gnu.org/software/make/manual/html_node/Choosing-the-Shell.html) - -SHELL := /bin/bash - -#------------------------------------------------------------------------------- - -#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) - -# Detect O/S kernel (Linux, Darwin...) -UNAME_S := $(shell uname -s) -###$(info UNAME_S='$(UNAME_S)') - -# Detect architecture (x86_64, ppc64le...) 
-UNAME_P := $(shell uname -p) -###$(info UNAME_P='$(UNAME_P)') - -#------------------------------------------------------------------------------- - -#=== Include the common MG5aMC Makefile options - -# OM: this is crucial for MG5aMC flag consistency/documentation -# AV: temporarely comment this out because it breaks cudacpp builds -ifneq ($(wildcard ../../Source/make_opts),) -include ../../Source/make_opts -endif +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. + +# This makefile extends the Fortran makefile called "makefile" + +CUDACPP_SRC_MAKEFILE = cudacpp_src.mk + +# Self-invocation with adapted flags: +cppnative: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=native AVXFLAGS="-march=native" cppbuild +cppnone: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=none AVXFLAGS= cppbuild +cppsse4: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=sse4 AVXFLAGS=-march=nehalem cppbuild +cppavx2: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=avx2 AVXFLAGS=-march=haswell cppbuild +cppavx512y: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512y AVXFLAGS="-march=skylake-avx512 -mprefer-vector-width=256" cppbuild +cppavx512z: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512z AVXFLAGS="-march=skylake-avx512 -DMGONGPU_PVW512" cppbuild +cuda: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=cuda cudabuild #------------------------------------------------------------------------------- #=== Configure common compiler flags for C++ and CUDA +# NB: The base flags are defined in the fortran "makefile" + +# Include directories +INCFLAGS = -I. -I../../src -INCFLAGS = -I. 
-OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here +MG_CXXFLAGS += $(INCFLAGS) +MG_NVCCFLAGS += $(INCFLAGS) # Dependency on src directory -MG5AMC_COMMONLIB = mg5amc_common -LIBFLAGS = -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -INCFLAGS += -I../../src +MG5AMC_COMMONLIB = mg5amc_common # Compiler-specific googletest build directory (#125 and #738) ifneq ($(shell $(CXX) --version | grep '^Intel(R) oneAPI DPC++/C++ Compiler'),) @@ -99,356 +84,42 @@ endif #------------------------------------------------------------------------------- -#=== Configure the C++ compiler - -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) -Wall -Wshadow -Wextra -ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS += -ffast-math # see issue #117 -endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY - -# Optionally add debug flags to display the full list of flags (eg on Darwin) -###CXXFLAGS+= -v - -# Note: AR, CXX and FC are implicitly defined if not set externally -# See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html - -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler - -# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) -# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below -ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside - $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") - override CUDA_HOME=disabled -endif - -# If CUDA_HOME is not set, try to set it from the location of nvcc -ifndef CUDA_HOME - CUDA_HOME = $(patsubst 
%%bin/nvcc,%%,$(shell which nvcc 2>/dev/null)) - $(warning CUDA_HOME was not set: using "$(CUDA_HOME)") -endif - -# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists -ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) - NVCC = $(CUDA_HOME)/bin/nvcc - USE_NVTX ?=-DUSE_NVTX - # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html - # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ - # Default: use compute capability 70 for V100 (CERN lxbatch, CERN itscrd, Juwels Cluster). - # Embed device code for 70, and PTX for 70+. - # Export MADGRAPH_CUDA_ARCHITECTURE (comma-separated list) to use another value or list of values (see #533). - # Examples: use 60 for P100 (Piz Daint), 80 for A100 (Juwels Booster, NVidia raplab/Curiosity). - MADGRAPH_CUDA_ARCHITECTURE ?= 70 - ###CUARCHFLAGS = -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=compute_$(MADGRAPH_CUDA_ARCHITECTURE) -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=sm_$(MADGRAPH_CUDA_ARCHITECTURE) # Older implementation (AV): go back to this one for multi-GPU support #533 - ###CUARCHFLAGS = --gpu-architecture=compute_$(MADGRAPH_CUDA_ARCHITECTURE) --gpu-code=sm_$(MADGRAPH_CUDA_ARCHITECTURE),compute_$(MADGRAPH_CUDA_ARCHITECTURE) # Newer implementation (SH): cannot use this as-is for multi-GPU support #533 - comma:=, - CUARCHFLAGS = $(foreach arch,$(subst $(comma), ,$(MADGRAPH_CUDA_ARCHITECTURE)),-gencode arch=compute_$(arch),code=compute_$(arch) -gencode arch=compute_$(arch),code=sm_$(arch)) - CUINC = -I$(CUDA_HOME)/include/ - ifeq ($(RNDGEN),hasNoCurand) - CURANDLIBFLAGS= - else - CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! 
- endif - CUOPTFLAGS = -lineinfo - CUFLAGS = $(foreach opt, $(OPTFLAGS), -Xcompiler $(opt)) $(CUOPTFLAGS) $(INCFLAGS) $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) -use_fast_math - ###CUFLAGS += -Xcompiler -Wall -Xcompiler -Wextra -Xcompiler -Wshadow - ###NVCC_VERSION = $(shell $(NVCC) --version | grep 'Cuda compilation tools' | cut -d' ' -f5 | cut -d, -f1) - CUFLAGS += -std=c++17 # need CUDA >= 11.2 (see #333): this is enforced in mgOnGpuConfig.h - # Without -maxrregcount: baseline throughput: 6.5E8 (16384 32 12) up to 7.3E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 160 # improves throughput: 6.9E8 (16384 32 12) up to 7.7E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 128 # improves throughput: 7.3E8 (16384 32 12) up to 7.6E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 96 # degrades throughput: 4.1E8 (16384 32 12) up to 4.5E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 64 # degrades throughput: 1.7E8 (16384 32 12) flat at 1.7E8 (65536 128 12) -else ifneq ($(origin REQUIRE_CUDA),undefined) - # If REQUIRE_CUDA is set but no cuda is found, stop here (e.g. for CI tests on GPU #443) - $(error No cuda installation found (set CUDA_HOME or make nvcc visible in PATH)) -else - # No cuda. Switch cuda compilation off and go to common random numbers in C++ - $(warning CUDA_HOME is not set or is invalid: export CUDA_HOME to compile with cuda) - override NVCC= - override USE_NVTX= - override CUINC= - override CURANDLIBFLAGS= -endif -export NVCC -export CUFLAGS - -# Set the host C++ compiler for nvcc via "-ccbin " -# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." 
is not supported) -CUFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) - -# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) -ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) -CUFLAGS += -allow-unsupported-compiler -endif - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ and CUDA builds - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif -ifneq ($(NVCC),) - ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) - override NVCC:=ccache $(NVCC) - endif -endif - -#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for C++ and CUDA - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3%% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1%% for none, loses ~1%% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # would increase to none=4.08-4.12E6, sse4=4.99-5.03E6! -else - ###CXXFLAGS+= -flto # also on Intel this would increase throughputs by a factor 2 to 4... - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -# PowerPC-specific CUDA compiler flags (to be reviewed!) 
-ifeq ($(UNAME_P),ppc64le) - CUFLAGS+= -Xcompiler -mno-float128 -endif - -#------------------------------------------------------------------------------- - #=== Configure defaults and check if user-defined choices exist for OMPFLAGS, AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the default OMPFLAGS choice -ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on Intel (was ok without nvcc but not ok with nvcc before #578) -else ifneq ($(shell $(CXX) --version | egrep '^(clang)'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on clang (was not ok without or with nvcc before #578) -###else ifneq ($(shell $(CXX) --version | egrep '^(Apple clang)'),) # AV for Mac (Apple clang compiler) -else ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) +OMPFLAGS ?= -fopenmp +ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) override OMPFLAGS = # AV disable OpenMP MT on Apple clang (builds fail in the CI #578) -###override OMPFLAGS = -fopenmp # OM reenable OpenMP MT on Apple clang? 
(AV Oct 2023: this still fails in the CI) -else -override OMPFLAGS = -fopenmp # enable OpenMP MT by default on all other platforms -###override OMPFLAGS = # disable OpenMP MT on all other platforms (default before #575) -endif - -# Set the default AVX (vectorization) choice -ifeq ($(AVX),) - ifeq ($(UNAME_P),ppc64le) - ###override AVX = none - override AVX = sse4 - else ifeq ($(UNAME_P),arm) - ###override AVX = none - override AVX = sse4 - else ifeq ($(wildcard /proc/cpuinfo),) - override AVX = none - $(warning Using AVX='$(AVX)' because host SIMD features cannot be read from /proc/cpuinfo) - else ifeq ($(shell grep -m1 -c avx512vl /proc/cpuinfo)$(shell $(CXX) --version | grep ^clang),1) - override AVX = 512y - ###$(info Using AVX='$(AVX)' as no user input exists) - else - override AVX = avx2 - ifneq ($(shell grep -m1 -c avx512vl /proc/cpuinfo),1) - $(warning Using AVX='$(AVX)' because host does not support avx512vl) - else - $(warning Using AVX='$(AVX)' because this is faster than avx512vl for clang) - endif - endif -else - ###$(info Using AVX='$(AVX)' according to user input) -endif - -# Set the default FPTYPE (floating point type) choice -ifeq ($(FPTYPE),) - override FPTYPE = d -endif - -# Set the default HELINL (inline helicities?) choice -ifeq ($(HELINL),) - override HELINL = 0 -endif - -# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice -ifeq ($(HRDCOD),) - override HRDCOD = 0 -endif - -# Set the default RNDGEN (random number generator) choice -ifeq ($(RNDGEN),) - ifeq ($(NVCC),) - override RNDGEN = hasNoCurand - else ifeq ($(RNDGEN),) - override RNDGEN = hasCurand - endif endif -# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so that it is not necessary to pass them to the src Makefile too -export AVX -export FPTYPE -export HELINL -export HRDCOD -export RNDGEN +# Export here, so sub makes don't fall back to the defaults: export OMPFLAGS -#------------------------------------------------------------------------------- - -#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN - -# Set the build flags appropriate to OMPFLAGS -$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override 
AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS - CUFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM - CUFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND -else ifeq ($(RNDGEN),hasCurand) - override CXXFLAGSCURAND = -else - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- #=== Configure build directories and build lockfiles === -# Build directory "short" tag (defines target and path to the optional build directory) -# (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) - -# Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) -# (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) - -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIR = ../../lib/$(BUILDDIR) - override LIBDIRRPATH = '$$ORIGIN/../$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is set = 1)) -else - override BUILDDIR = . - override LIBDIR = ../../lib - override LIBDIRRPATH = '$$ORIGIN/$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) +# Build directory "short" tag (defines target and path to the build directory) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +CUDACPP_BUILDDIR = build.$(DIRTAG) +CUDACPP_LIBDIR := ../../lib/$(CUDACPP_BUILDDIR) +LIBDIRRPATH := '$$ORIGIN:$$ORIGIN/../$(CUDACPP_LIBDIR)' +ifneq ($(AVX),) + $(info Building CUDACPP in CUDACPP_BUILDDIR=$(CUDACPP_BUILDDIR). 
Libs in $(CUDACPP_LIBDIR)) endif -###override INCDIR = ../../include -###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) -# On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH +# On Linux, set rpath to CUDACPP_LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables or shared libraries ($ORIGIN on Linux) -# On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary +# On Darwin, building libraries with absolute paths in CUDACPP_LIBDIR makes this unnecessary ifeq ($(UNAME_S),Darwin) override CXXLIBFLAGSRPATH = override CULIBFLAGSRPATH = - override CXXLIBFLAGSRPATH2 = - override CULIBFLAGSRPATH2 = else # RPATH to cuda/cpp libs when linking executables override CXXLIBFLAGSRPATH = -Wl,-rpath,$(LIBDIRRPATH) override CULIBFLAGSRPATH = -Xlinker -rpath,$(LIBDIRRPATH) - # RPATH to common lib when linking cuda/cpp libs - override CXXLIBFLAGSRPATH2 = -Wl,-rpath,'$$ORIGIN' - override CULIBFLAGSRPATH2 = -Xlinker -rpath,'$$ORIGIN' endif # Setting LD_LIBRARY_PATH or DYLD_LIBRARY_PATH in the RUNTIME is no longer necessary (neither on Linux nor on Mac) @@ -458,107 +129,68 @@ override RUNTIME = #=== Makefile TARGETS and build rules below #=============================================================================== -cxx_main=$(BUILDDIR)/check.exe -fcxx_main=$(BUILDDIR)/fcheck.exe +cxx_main=$(CUDACPP_BUILDDIR)/check.exe +fcxx_main=$(CUDACPP_BUILDDIR)/fcheck.exe -ifneq ($(NVCC),) -cu_main=$(BUILDDIR)/gcheck.exe -fcu_main=$(BUILDDIR)/fgcheck.exe -else -cu_main= -fcu_main= -endif - -testmain=$(BUILDDIR)/runTest.exe +cu_main=$(CUDACPP_BUILDDIR)/gcheck.exe +fcu_main=$(CUDACPP_BUILDDIR)/fgcheck.exe ifneq ($(GTESTLIBS),) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) $(testmain) -else -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) 
+testmain=$(CUDACPP_BUILDDIR)/runTest.exe +cutestmain=$(CUDACPP_BUILDDIR)/runTest_cuda.exe endif -# Target (and build options): debug -MAKEDEBUG= -debug: OPTFLAGS = -g -O0 -debug: CUOPTFLAGS = -G -debug: MAKEDEBUG := debug -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -$(BUILDDIR)/.build.$(TAG): - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @if [ "$(oldtagsb)" != "" ]; then echo "Cannot build for tag=$(TAG) as old builds exist for other tags:"; echo " $(oldtagsb)"; echo "Please run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @touch $(BUILDDIR)/.build.$(TAG) +cppbuild: $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(cxx_main) $(fcxx_main) $(testmain) +cudabuild: $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(cu_main) $(fcu_main) $(cutestmain) # Generic target and build rules: objects from CUDA compilation -ifneq ($(NVCC),) -$(BUILDDIR)/%%.o : %%.cu *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%%.o : %%.cu *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c $< -o $@ -$(BUILDDIR)/%%_cu.o : %%.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ -endif +$(CUDACPP_BUILDDIR)/%%_cu.o : %%.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ # Generic target and build rules: objects from C++ compilation # (NB do not include CUINC here! 
add it only for NVTX or curand #679) -$(BUILDDIR)/%%.o : %%.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%%.o : %%.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Apply special build flags only to CrossSectionKernel.cc and gCrossSectionKernel.cu (no fast math, see #117 and #516) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS := $(filter-out -ffast-math,$(CXXFLAGS)) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math +$(CUDACPP_BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math ifneq ($(NVCC),) -$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -fno-fast-math +$(CUDACPP_BUILDDIR)/gCrossSectionKernels.o: NVCCFLAGS += -Xcompiler -fno-fast-math endif endif # Apply special build flags only to check_sa.o and gcheck_sa.o (NVTX in timermap.h, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) -$(BUILDDIR)/gcheck_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) # Apply special build flags only to check_sa and CurandRandomNumberKernel (curand headers, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gcheck_sa.o: CUFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gCurandRandomNumberKernel.o: CUFLAGS += $(CXXFLAGSCURAND) -ifeq ($(RNDGEN),hasCurand) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CUINC) -endif +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) 
+$(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) + # Avoid "warning: builtin __has_trivial_... is deprecated; use __is_trivially_... instead" in nvcc with icx2023 (#592) ifneq ($(shell $(CXX) --version | egrep '^(Intel)'),) ifneq ($(NVCC),) -CUFLAGS += -Xcompiler -Wno-deprecated-builtins +MG_NVCCFLAGS += -Xcompiler -Wno-deprecated-builtins endif endif -# Avoid clang warning "overriding '-ffp-contract=fast' option with '-ffp-contract=on'" (#516) -# This patch does remove the warning, but I prefer to keep it disabled for the moment... -###ifneq ($(shell $(CXX) --version | egrep '^(clang|Apple clang|Intel)'),) -###$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -Wno-overriding-t-option -###ifneq ($(NVCC),) -###$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -Wno-overriding-t-option -###endif -###endif - #### Apply special build flags only to CPPProcess.cc (-flto) ###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += -flto -#### Apply special build flags only to CPPProcess.cc (AVXFLAGS) -###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += $(AVXFLAGS) - #------------------------------------------------------------------------------- -# Target (and build rules): common (src) library -commonlib : $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc $(BUILDDIR)/.build.$(TAG) - $(MAKE) -C ../../src $(MAKEDEBUG) -f $(CUDACPP_SRC_MAKEFILE) +$(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc + $(MAKE) AVX=$(AVX) AVXFLAGS="$(AVXFLAGS)" -C ../../src -f $(CUDACPP_SRC_MAKEFILE) #------------------------------------------------------------------------------- @@ -566,162 +198,123 @@ processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') ###$(info processid_short=$(processid_short)) MG5AMC_CXXLIB = mg5amc_$(processid_short)_cpp -cxx_objects_lib=$(BUILDDIR)/CPPProcess.o $(BUILDDIR)/MatrixElementKernels.o $(BUILDDIR)/BridgeKernels.o $(BUILDDIR)/CrossSectionKernels.o 
-cxx_objects_exe=$(BUILDDIR)/CommonRandomNumberKernel.o $(BUILDDIR)/RamboSamplingKernels.o +cxx_objects_lib=$(CUDACPP_BUILDDIR)/CPPProcess.o $(CUDACPP_BUILDDIR)/MatrixElementKernels.o $(CUDACPP_BUILDDIR)/BridgeKernels.o $(CUDACPP_BUILDDIR)/CrossSectionKernels.o +cxx_objects_exe=$(CUDACPP_BUILDDIR)/CommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/RamboSamplingKernels.o -ifneq ($(NVCC),) MG5AMC_CULIB = mg5amc_$(processid_short)_cuda -cu_objects_lib=$(BUILDDIR)/gCPPProcess.o $(BUILDDIR)/gMatrixElementKernels.o $(BUILDDIR)/gBridgeKernels.o $(BUILDDIR)/gCrossSectionKernels.o -cu_objects_exe=$(BUILDDIR)/gCommonRandomNumberKernel.o $(BUILDDIR)/gRamboSamplingKernels.o -endif +cu_objects_lib=$(CUDACPP_BUILDDIR)/gCPPProcess.o $(CUDACPP_BUILDDIR)/gMatrixElementKernels.o $(CUDACPP_BUILDDIR)/gBridgeKernels.o $(CUDACPP_BUILDDIR)/gCrossSectionKernels.o +cu_objects_exe=$(CUDACPP_BUILDDIR)/gCommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/gRamboSamplingKernels.o # Target (and build rules): C++ and CUDA shared libraries -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) - $(CXX) -shared -o $@ $(cxx_objects_lib) $(CXXLIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) - -ifneq ($(NVCC),) -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) - $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -endif +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) + $(CXX) -shared -o $@ 
$(cxx_objects_lib) $(CXXLIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) $(MG_LDFLAGS) $(LDFLAGS) -#------------------------------------------------------------------------------- - -# Target (and build rules): Fortran include files -###$(INCDIR)/%%.inc : ../%%.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### \cp $< $@ +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) + $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) #------------------------------------------------------------------------------- # Target (and build rules): C++ and CUDA standalone executables -$(cxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cxx_main): $(BUILDDIR)/check_sa.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o - $(CXX) -o $@ $(BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o $(CURANDLIBFLAGS) -ifneq ($(NVCC),) +$(cxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(cxx_main): $(CUDACPP_BUILDDIR)/check_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) + ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(cu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(cu_main): 
LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(cu_main): LIBFLAGS += -L$(patsubst %%bin/nvc++,%%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(cu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cu_main): $(BUILDDIR)/gcheck_sa.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o - $(NVCC) -o $@ $(BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o $(CURANDLIBFLAGS) +$(cu_main): MG_LDFLAGS += -L$(patsubst %%bin/nvc++,%%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif +$(cu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(cu_main): $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- - -# Generic target and build rules: objects from Fortran compilation -$(BUILDDIR)/%%.o : %%.f *.inc - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(FC) -I. -c $< -o $@ - -# Generic target and build rules: objects from Fortran compilation -###$(BUILDDIR)/%%.o : %%.f *.inc -### @if [ ! 
-d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi -### $(FC) -I. -I$(INCDIR) -c $< -o $@ - -# Target (and build rules): Fortran standalone executables -###$(BUILDDIR)/fcheck_sa.o : $(INCDIR)/fbridge.inc +# Check executables: ifeq ($(UNAME_S),Darwin) -$(fcxx_main): LIBFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 +$(fcxx_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif -$(fcxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcxx_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) - $(CXX) -o $@ $(BUILDDIR)/fcheck_sa.o $(OMPFLAGS) $(BUILDDIR)/fsampler.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) +$(fcxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(fcxx_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(cxx_objects_exe) $(OMPFLAGS) $(CUDACPP_BUILDDIR)/fsampler.o -lgfortran -L$(CUDACPP_LIBDIR) $(MG_LDFLAGS) $(LDFLAGS) -ifneq ($(NVCC),) ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(fcu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(fcu_main): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(fcu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(fcu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to 
`__svml_cos4_l9') endif ifeq ($(UNAME_S),Darwin) -$(fcu_main): LIBFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 -endif -$(fcu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcu_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) - $(NVCC) -o $@ $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) +$(fcu_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif +$(fcu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(fcu_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(cu_objects_exe) -lgfortran $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- # Target (and build rules): test objects and test executable -$(BUILDDIR)/testxxx.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx_cu.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx_cu.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -endif +$(testmain) $(cutestmain): $(GTESTLIBS) +$(testmain) $(cutestmain): INCFLAGS += $(GTESTINC) +$(testmain) $(cutestmain): MG_LDFLAGS += 
-L$(GTESTLIBDIR) -lgtest -lgtest_main -$(BUILDDIR)/testmisc.o: $(GTESTLIBS) -$(BUILDDIR)/testmisc.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(CUDACPP_BUILDDIR)/testxxx.o $(CUDACPP_BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) testxxx_cc_ref.txt +$(testmain): $(CUDACPP_BUILDDIR)/testxxx.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions +$(cutestmain): $(CUDACPP_BUILDDIR)/testxxx_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testmisc_cu.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests -endif -$(BUILDDIR)/runTest.o: $(GTESTLIBS) -$(BUILDDIR)/runTest.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/runTest.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/runTest.o +$(CUDACPP_BUILDDIR)/testmisc.o $(CUDACPP_BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/testmisc.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(cutestmain): $(CUDACPP_BUILDDIR)/testmisc_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests + + +$(CUDACPP_BUILDDIR)/runTest.o $(CUDACPP_BUILDDIR)/runTest_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/runTest.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/runTest.o +$(cutestmain): $(CUDACPP_BUILDDIR)/runTest_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/runTest_cu.o + -ifneq ($(NVCC),) -$(BUILDDIR)/runTest_cu.o: $(GTESTLIBS) -$(BUILDDIR)/runTest_cu.o: 
INCFLAGS += $(GTESTINC) ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(testmain): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(testmain): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cutestmain): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cutestmain): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(testmain): LIBFLAGS += -L$(patsubst %%bin/nvc++,%%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(testmain): $(BUILDDIR)/runTest_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/runTest_cu.o +$(cutestmain): MG_LDFLAGS += -L$(patsubst %%bin/nvc++,%%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif -$(testmain): $(GTESTLIBS) -$(testmain): INCFLAGS += $(GTESTINC) -$(testmain): LIBFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main ifneq ($(OMPFLAGS),) ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -$(testmain): LIBFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) +$(testmain): MG_LDFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -$(testmain): LIBFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 +$(testmain): MG_LDFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 ###else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) ###$(testmain): LIBFLAGS += ???? 
# OMP is not supported yet by cudacpp for Apple clang (see #578 and #604) else -$(testmain): LIBFLAGS += -lgomp +$(testmain): MG_LDFLAGS += -lgomp endif endif -ifeq ($(NVCC),) # link only runTest.o -$(testmain): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) - $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -ldl -pthread $(LIBFLAGS) -else # link both runTest.o and runTest_cu.o -$(testmain): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) - $(NVCC) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) -ldl $(LIBFLAGS) -lcuda -endif +$(testmain): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(testmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) + $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -pthread $(MG_LDFLAGS) $(LDFLAGS) + +$(cutestmain): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cutestmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) + $(NVCC) -o $@ $(cu_objects_lib) $(cu_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -lcuda $(MG_LDFLAGS) $(LDFLAGS) # Use target gtestlibs to build only googletest ifneq ($(GTESTLIBS),) @@ -731,72 +324,15 @@ endif # Use flock (Linux only, no Mac) to allow 'make -j' if googletest has not yet been downloaded https://stackoverflow.com/a/32666215 $(GTESTLIBS): ifneq ($(shell which flock 2>/dev/null),) - @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - flock $(BUILDDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) + flock $(TESTDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) else if [ -d $(TESTDIR) ]; then $(MAKE) -C $(TESTDIR); fi endif #------------------------------------------------------------------------------- -# Target: build all targets in all AVX modes (each AVX mode in a separate build directory) -# Split the avxall target into five separate targets to allow parallel 'make -j avxall' builds -# (Hack: add a fbridge.inc dependency to avxall, to ensure it is only copied once for all AVX modes) -avxnone: - @echo - $(MAKE) USEBUILDDIR=1 AVX=none -f $(CUDACPP_MAKEFILE) - -avxsse4: - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 -f $(CUDACPP_MAKEFILE) - -avxavx2: - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 -f $(CUDACPP_MAKEFILE) - -avx512y: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y -f $(CUDACPP_MAKEFILE) - -avx512z: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z -f $(CUDACPP_MAKEFILE) - -ifeq ($(UNAME_P),ppc64le) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else ifeq ($(UNAME_P),arm) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 avxavx2 avx512y avx512z -avxall: avxnone avxsse4 avxavx2 avx512y avx512z -endif - -#------------------------------------------------------------------------------- - -# Target: clean the builds -.PHONY: clean - -clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(BUILDDIR) -else - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe - rm -f $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(LIBDIR)/lib$(MG5AMC_CULIB).so -endif - $(MAKE) -C ../../src clean -f $(CUDACPP_SRC_MAKEFILE) -### rm -rf $(INCDIR) - -cleanall: - @echo - $(MAKE) USEBUILDDIR=0 clean -f $(CUDACPP_MAKEFILE) - @echo - $(MAKE) USEBUILDDIR=0 -C ../../src cleanall -f $(CUDACPP_SRC_MAKEFILE) - rm -rf build.* - # Target: clean the builds as well as the gtest installation(s) 
-distclean: cleanall +distclean: clean cleansrc ifneq ($(wildcard $(TESTDIRCOMMON)),) $(MAKE) -C $(TESTDIRCOMMON) clean endif @@ -848,50 +384,55 @@ endif #------------------------------------------------------------------------------- -# Target: check (run the C++ test executable) +# Target: check/gcheck (run the C++ test executable) # [NB THIS IS WHAT IS USED IN THE GITHUB CI!] -ifneq ($(NVCC),) -check: runTest cmpFcheck cmpFGcheck -else check: runTest cmpFcheck -endif +gcheck: + $(MAKE) AVX=cuda runTest cmpFGcheck # Target: runTest (run the C++ test executable runTest.exe) -runTest: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/runTest.exe +ifneq ($(AVX),cuda) +runTest: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest.exe +else +runTest: cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest_cuda.exe +endif + # Target: runCheck (run the C++ standalone executable check.exe, with a small number of events) -runCheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/check.exe -p 2 32 2 +runCheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe -p 2 32 2 # Target: runGcheck (run the CUDA standalone executable gcheck.exe, with a small number of events) -runGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/gcheck.exe -p 2 32 2 +runGcheck: AVX=cuda +runGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe -p 2 32 2 # Target: runFcheck (run the Fortran standalone executable - with C++ MEs - fcheck.exe, with a small number of events) -runFcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 +runFcheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 # Target: runFGcheck (run the Fortran standalone executable - with CUDA MEs - fgcheck.exe, with a small number of events) -runFGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 +runFGcheck: AVX=cuda +runFGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 # Target: cmpFcheck (compare ME results from the C++ and Fortran with C++ MEs standalone executables, with 
a small number of events) -cmpFcheck: all.$(TAG) +cmpFcheck: cppbuild @echo - @echo "$(BUILDDIR)/check.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%%s (relative difference %%s 2E-4)' %% ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%%s (relative difference %%s 2E-4)' %% ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi # Target: cmpFGcheck (compare ME results from the CUDA and Fortran with CUDA MEs standalone executables, with a small number of events) -cmpFGcheck: all.$(TAG) +cmpFGcheck: AVX=cuda +cmpFGcheck: + $(MAKE) AVX=cuda cudabuild @echo - @echo "$(BUILDDIR)/gcheck.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fgcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%%s (relative difference %%s 2E-4)' %% ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%%s (relative difference %%s 2E-4)' %% ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi -# Target: memcheck (run the CUDA standalone executable gcheck.exe with a small number of events through cuda-memcheck) -memcheck: all.$(TAG) - $(RUNTIME) $(CUDA_HOME)/bin/cuda-memcheck --check-api-memory-access yes --check-deprecated-instr yes --check-device-heap yes --demangle full --language c --leak-check full --racecheck-report all --report-api-errors all --show-backtrace yes --tool memcheck --track-unused-memory yes $(BUILDDIR)/gcheck.exe -p 2 32 2 - -#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp_src.mk b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp_src.mk index 25b6f8f7c8..ce8c2906c6 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp_src.mk +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp_src.mk @@ -1,12 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) is used in the Subprocess and src directories - -THISMK = $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. 
Valassi (2020-2023) for the MG5aMC CUDACPP plugin. #------------------------------------------------------------------------------- @@ -16,165 +11,24 @@ SHELL := /bin/bash #------------------------------------------------------------------------------- -#=== Configure common compiler flags for CUDA and C++ - -INCFLAGS = -I. -OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here - -#------------------------------------------------------------------------------- - #=== Configure the C++ compiler -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) $(USE_NVTX) -fPIC -Wall -Wshadow -Wextra +include ../Source/make_opts + +MG_CXXFLAGS += -fPIC -I. $(USE_NVTX) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS+= -ffast-math # see issue #117 +MG_CXXFLAGS += -ffast-math # see issue #117 endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY # Note: AR, CXX and FC are implicitly defined if not set externally # See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html ###RANLIB = ranlib -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -LDFLAGS = -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -LDFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler (note: NVCC is already exported including ccache) - -###$(info NVCC=$(NVCC)) - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ builds (note: NVCC is already exported including ccache) - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif - 
-#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for CUDA and C++ - -# Assuming uname is available, detect if architecture is PowerPC -UNAME_P := $(shell uname -p) - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3%% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1%% for none, loses ~1%% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # BUILD ERROR IF THIS ADDED IN SRC?! -else - ###AR=gcc-ar # needed by -flto - ###RANLIB=gcc-ranlib # needed by -flto - ###CXXFLAGS+= -flto # NB: build error from src/Makefile unless gcc-ar and gcc-ranlib are used - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -#------------------------------------------------------------------------------- - #=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the build flags appropriate to OMPFLAGS ###$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword 
registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -###$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -###$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -###$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -###$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - CXXFLAGS += -DMGONGPU_HAS_NO_CURAND -else ifneq ($(RNDGEN),hasCurand) - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- @@ -182,28 +36,18 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -###$(info Current directory is $(shell pwd)) -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIRREL = ../lib/$(BUILDDIR) - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR=1 is set)) -else - override BUILDDIR = . - override LIBDIRREL = ../lib - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) -endif -######$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) +# Build directory: +BUILDDIR := build.$(DIRTAG) +LIBDIRREL := ../lib/$(BUILDDIR) # Workaround for Mac #375 (I did not manage to fix rpath with @executable_path): use absolute paths for LIBDIR # (NB: this is quite ugly because it creates the directory if it does not exist - to avoid removing src by mistake) -UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Darwin) override LIBDIR = $(shell mkdir -p $(LIBDIRREL); cd $(LIBDIRREL); pwd) ifeq ($(wildcard $(LIBDIR)),) @@ -223,55 +67,35 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -# Target (and build options): debug -debug: OPTFLAGS = -g -O0 -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! 
-name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%%.o : %%.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%%.o : %%.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%%_cu.o : %%.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%%_cu.o : %%.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ #------------------------------------------------------------------------------- cxx_objects=$(addprefix $(BUILDDIR)/, Parameters_%(model)s.o read_slha.o) -ifneq ($(NVCC),) +ifeq ($(AVX),cuda) +COMPILER=$(NVCC) cu_objects=$(addprefix $(BUILDDIR)/, Parameters_%(model)s_cu.o) +else +COMPILER=$(CXX) +cu_objects= endif # Target (and build rules): common (src) library -ifneq ($(NVCC),) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) $(cu_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(NVCC) -shared -o $@ $(cxx_objects) $(cu_objects) $(LDFLAGS) -else -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(CXX) -shared -o $@ $(cxx_objects) $(LDFLAGS) -endif + mkdir -p $(LIBDIR) + $(COMPILER) -shared -o $@ $(cxx_objects) $(cu_objects) $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- @@ -279,19 +103,7 @@ endif .PHONY: clean clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) -else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe -endif - -cleanall: - @echo - $(MAKE) clean -f $(THISMK) - @echo - rm -rf $(LIBDIR)/build.* - rm -rf build.* + $(RM) -f ../lib/build.*/*.so + $(RM) -rf build.* #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/mgOnGpuCxtypes.h b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/mgOnGpuCxtypes.h index ca9a9f00c0..3290d314d6 100644 --- 
a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/mgOnGpuCxtypes.h +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/mgOnGpuCxtypes.h @@ -21,10 +21,14 @@ // Complex type in cuda: thrust or cucomplex or cxsmpl #ifdef __CUDACC__ #if defined MGONGPU_CUCXTYPE_THRUST +#ifdef __CLANG__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" // for icpx2021/clang13 (https://stackoverflow.com/a/15864661) +#endif #include +#ifdef __CLANG__ #pragma clang diagnostic pop +#endif #elif defined MGONGPU_CUCXTYPE_CUCOMPLEX #include #elif not defined MGONGPU_CUCXTYPE_CXSMPL diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/runTest.cc b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/runTest.cc index d4a760a71b..6c77775fb2 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/runTest.cc +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/runTest.cc @@ -243,18 +243,20 @@ struct CUDATest : public CUDA_CPU_TestBase // Use two levels of macros to force stringification at the right level // (see https://gcc.gnu.org/onlinedocs/gcc-3.0.1/cpp_3.html#SEC17 and https://stackoverflow.com/a/3419392) // Google macro is in https://github.com/google/googletest/blob/master/googletest/include/gtest/gtest-param-test.h +/* clang-format off */ #define TESTID_CPU( s ) s##_CPU #define XTESTID_CPU( s ) TESTID_CPU( s ) #define MG_INSTANTIATE_TEST_SUITE_CPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); #define TESTID_GPU( s ) s##_GPU #define XTESTID_GPU( s ) TESTID_GPU( s ) #define MG_INSTANTIATE_TEST_SUITE_GPU( 
prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); +/* clang-format on */ #ifdef __CUDACC__ MG_INSTANTIATE_TEST_SUITE_GPU( XTESTID_GPU( MG_EPOCH_PROCESS_ID ), MadgraphTest ); diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/testxxx.cc b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/testxxx.cc index 2d1578cb43..4f4f658fa3 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/testxxx.cc +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/testxxx.cc @@ -40,7 +40,7 @@ namespace mg5amcCpu { std::string FPEhandlerMessage = "unknown"; int FPEhandlerIevt = -1; - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU): '" << FPEhandlerMessage << "' ievt=" << FPEhandlerIevt << std::endl; @@ -71,11 +71,10 @@ TEST( XTESTID( MG_EPOCH_PROCESS_ID ), testxxx ) constexpr bool testEvents = !dumpEvents; // run the test? constexpr fptype toleranceXXXs = std::is_same::value ? 
1.E-15 : 1.E-5; // Constant parameters - constexpr int neppM = MemoryAccessMomenta::neppM; // AOSOA layout constexpr int np4 = CPPProcess::np4; - const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') - assert( nevt %% neppM == 0 ); // nevt must be a multiple of neppM - assert( nevt %% neppV == 0 ); // nevt must be a multiple of neppV + const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') + assert( nevt %% MemoryAccessMomenta::neppM == 0 ); // nevt must be a multiple of neppM + assert( nevt %% neppV == 0 ); // nevt must be a multiple of neppV // Fill in the input momenta #ifdef __CUDACC__ mg5amcGpu::PinnedHostBufferMomenta hstMomenta( nevt ); // AOSOA[npagM][npar=4][np4=4][neppM] diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/patchMad.sh b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/patchMad.sh index 7edafba599..e67cd6e1fb 100755 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/patchMad.sh +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/patchMad.sh @@ -47,14 +47,19 @@ if [ "${patchlevel}" == "0" ]; then exit $status; fi # (1) Process-independent patches touch ${dir}/Events/.keep # this file should already be present (mg5amcnlo copies it from Template/LO/Events/.keep) \cp -pr ${scrdir}/MG5aMC_patches/${dir_patches}/fbridge_common.inc ${dir}/SubProcesses # new file +cd ${dir} +echo "DEBUG: standardise ${PWD}/Source/make_opts (fix f2py3 and sort make_opts_variables) before applying patch.common" # AV moved back hare in patchMad.sh from generateAndCompare.sh (see PR #753) +sed -i 's/DEFAULT_F2PY_COMPILER=f2py.*/DEFAULT_F2PY_COMPILER=f2py3/' Source/make_opts +cat Source/make_opts | sed '/#end/q' | head --lines=-1 | sort > Source/make_opts.new +cat Source/make_opts | sed -n -e '/#end/,$p' >> Source/make_opts.new +\mv Source/make_opts.new Source/make_opts if [ "${patchlevel}" == "2" ]; then - cd ${dir} echo "DEBUG: cd ${PWD}; patch -p4 -i 
${scrdir}/MG5aMC_patches/${dir_patches}/patch.common" if ! patch -p4 -i ${scrdir}/MG5aMC_patches/${dir_patches}/patch.common; then status=1; fi \rm -f Source/*.orig \rm -f bin/internal/*.orig - cd - > /dev/null fi +cd - > /dev/null for p1dir in ${dir}/SubProcesses/P*; do cd $p1dir ln -sf ../fbridge_common.inc . # new file diff --git a/epochX/cudacpp/CODEGEN/allGenerateAndCompare.sh b/epochX/cudacpp/CODEGEN/allGenerateAndCompare.sh index ed73959a27..b8fd1d51ba 100755 --- a/epochX/cudacpp/CODEGEN/allGenerateAndCompare.sh +++ b/epochX/cudacpp/CODEGEN/allGenerateAndCompare.sh @@ -6,28 +6,34 @@ set -e # fail on error -cd $(dirname $0)/.. - -./CODEGEN/generateAndCompare.sh -q ee_mumu -./CODEGEN/generateAndCompare.sh -q ee_mumu --mad - -./CODEGEN/generateAndCompare.sh -q gg_tt -./CODEGEN/generateAndCompare.sh -q gg_tt --mad - -./CODEGEN/generateAndCompare.sh -q gg_ttg -./CODEGEN/generateAndCompare.sh -q gg_ttg --mad - -./CODEGEN/generateAndCompare.sh -q gg_ttgg -./CODEGEN/generateAndCompare.sh -q gg_ttgg --mad +sa=1 +mad=1 +if [ "$1" == "--sa" ]; then + mad=0; shift +elif [ "$1" == "--mad" ]; then + sa=0; shift +fi +if [ "$1" != "" ]; then echo "Usage: $0 [--sa|--mad]"; exit 1; fi -./CODEGEN/generateAndCompare.sh -q gg_ttggg -./CODEGEN/generateAndCompare.sh -q gg_ttggg --mad - -./CODEGEN/generateAndCompare.sh -q gq_ttq -./CODEGEN/generateAndCompare.sh -q gq_ttq --mad - -./CODEGEN/generateAndCompare.sh -q heft_gg_h - -./CODEGEN/generateAndCompare.sh -q gg_tt01g --mad +cd $(dirname $0)/.. 
-./CODEGEN/generateAndCompare.sh -q pp_tt012j --mad +if [ "$mad" == "1" ]; then + ./CODEGEN/generateAndCompare.sh -q ee_mumu --mad + ./CODEGEN/generateAndCompare.sh -q gg_tt --mad + ./CODEGEN/generateAndCompare.sh -q gg_ttg --mad + ./CODEGEN/generateAndCompare.sh -q gg_ttgg --mad + ./CODEGEN/generateAndCompare.sh -q gg_ttggg --mad + ./CODEGEN/generateAndCompare.sh -q gq_ttq --mad + ./CODEGEN/generateAndCompare.sh -q gg_tt01g --mad + ./CODEGEN/generateAndCompare.sh -q pp_tt012j --mad +fi + +if [ "$sa" == "1" ]; then + ./CODEGEN/generateAndCompare.sh -q ee_mumu + ./CODEGEN/generateAndCompare.sh -q gg_tt + ./CODEGEN/generateAndCompare.sh -q gg_ttg + ./CODEGEN/generateAndCompare.sh -q gg_ttgg + ./CODEGEN/generateAndCompare.sh -q gg_ttggg + ./CODEGEN/generateAndCompare.sh -q gq_ttq + ./CODEGEN/generateAndCompare.sh -q heft_gg_h +fi diff --git a/epochX/cudacpp/CODEGEN/generateAndCompare.sh b/epochX/cudacpp/CODEGEN/generateAndCompare.sh index f3f830205c..0d4a60cf5f 100755 --- a/epochX/cudacpp/CODEGEN/generateAndCompare.sh +++ b/epochX/cudacpp/CODEGEN/generateAndCompare.sh @@ -236,19 +236,6 @@ function codeGenAndDiff() \rm -rf ${outproc}/bin/internal/ufomodel/py3_model.pkl \rm -rf ${outproc}/bin/internal/ufomodel/__pycache__ touch ${outproc}/HTML/.keep # new file - if [ "${patchlevel}" != "0" ]; then - # Add global flag '-O3 -ffast-math -fbounds-check' as in previous gridpacks - # (FIXME? these flags are already set in the runcards, why are they not propagated to make_opts?) 
- echo "GLOBAL_FLAG=-O3 -ffast-math -fbounds-check" > ${outproc}/Source/make_opts.new - cat ${outproc}/Source/make_opts >> ${outproc}/Source/make_opts.new - \mv ${outproc}/Source/make_opts.new ${outproc}/Source/make_opts - fi - if [ "${patchlevel}" == "2" ]; then - sed -i 's/DEFAULT_F2PY_COMPILER=f2py.*/DEFAULT_F2PY_COMPILER=f2py3/' ${outproc}/Source/make_opts - cat ${outproc}/Source/make_opts | sed '/#end/q' | head --lines=-1 | sort > ${outproc}/Source/make_opts.new - cat ${outproc}/Source/make_opts | sed -n -e '/#end/,$p' >> ${outproc}/Source/make_opts.new - \mv ${outproc}/Source/make_opts.new ${outproc}/Source/make_opts - fi fi popd >& /dev/null # Choose which directory must be copied (for gridpack generation: untar and modify the gridpack) diff --git a/epochX/cudacpp/ee_mumu.mad/CODEGEN_mad_ee_mumu_log.txt b/epochX/cudacpp/ee_mumu.mad/CODEGEN_mad_ee_mumu_log.txt index e090137829..79ed67ff2a 100644 --- a/epochX/cudacpp/ee_mumu.mad/CODEGEN_mad_ee_mumu_log.txt +++ b/epochX/cudacpp/ee_mumu.mad/CODEGEN_mad_ee_mumu_log.txt @@ -52,7 +52,7 @@ Note that you can still compile and run aMC@NLO with the built-in PDFs Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt import /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_ee_mumu.mg The import format was not given, so we guess it as command set stdout_level DEBUG @@ -62,7 +62,7 @@ generate e+ e- > mu+ mu- No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005680561065673828  +DEBUG: model prefixing takes 0.005708932876586914  INFO: Restrict model sm with file models/sm/restrict_default.dat . 
DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -174,7 +174,7 @@ INFO: Generating Helas calls for process: e+ e- > mu+ mu- WEIGHTED<=4 @1 INFO: Processing color information for process: e+ e- > mu+ mu- @1 INFO: Creating files in directory P1_epem_mupmum DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -191,19 +191,19 @@ INFO: Created files CPPProcess.h and CPPProcess.cc in directory ./. INFO: Generating Feynman diagrams for Process: e+ e- > mu+ mu- WEIGHTED<=4 @1 INFO: Finding symmetric diagrams for subprocess group epem_mupmum Generated helas calls for 1 subprocesses (2 diagrams) in 0.004 s -Wrote files for 8 helas calls in 0.097 s +Wrote files for 8 helas calls in 0.098 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates FFV1 routines ALOHA: aloha creates FFV2 routines ALOHA: aloha creates FFV4 routines -ALOHA: aloha creates 3 routines in 0.199 s +ALOHA: aloha creates 3 routines in 0.197 s DEBUG: Entering PLUGIN_ProcessExporter.convert_model (create the model) [output.py at line 202]  ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates FFV1 routines ALOHA: aloha creates FFV2 routines ALOHA: aloha creates FFV4 routines ALOHA: aloha creates FFV2_4 routines -ALOHA: aloha creates 7 routines in 0.254 s +ALOHA: aloha creates 7 routines in 0.253 s FFV1 FFV1 FFV2 @@ -226,12 +226,14 @@ save configuration file to /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CO INFO: Use Fortran compiler gfortran INFO: Use c++ compiler g++ INFO: Generate web pages +DEBUG: standardise /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_ee_mumu/Source/make_opts (fix f2py3 and sort make_opts_variables) before applying patch.common DEBUG: cd 
/data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_ee_mumu; patch -p4 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common patching file Source/genps.inc +patching file Source/make_opts patching file Source/makefile patching file SubProcesses/makefile +patching file bin/internal/banner.py patching file bin/internal/gen_ximprove.py -Hunk #1 succeeded at 391 (offset 6 lines). patching file bin/internal/madevent_interface.py DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_ee_mumu/SubProcesses/P1_epem_mupmum; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f @@ -248,9 +250,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m1.848s -user 0m1.621s -sys 0m0.225s +real 0m1.877s +user 0m1.634s +sys 0m0.245s ************************************************************ * * * W E L C O M E to * @@ -276,7 +278,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_ee_mumu/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt treatcards run quit INFO: @@ -306,7 +308,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_ee_mumu/Cards/me5_configuration.txt Using default text editor "vi". 
Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt treatcards param quit INFO: diff --git a/epochX/cudacpp/ee_mumu.mad/Source/make_opts b/epochX/cudacpp/ee_mumu.mad/Source/make_opts index e4b87ee6ad..435bed0dc7 100644 --- a/epochX/cudacpp/ee_mumu.mad/Source/make_opts +++ b/epochX/cudacpp/ee_mumu.mad/Source/make_opts @@ -1,7 +1,7 @@ DEFAULT_CPP_COMPILER=g++ DEFAULT_F2PY_COMPILER=f2py3 DEFAULT_F_COMPILER=gfortran -GLOBAL_FLAG=-O3 -ffast-math -fbounds-check +GLOBAL_FLAG=-O3 -ffast-math MACFLAG= MG5AMC_VERSION=SpecifiedByMG5aMCAtRunTime PYTHIA8_PATH=NotInstalled @@ -13,31 +13,53 @@ BIASLIBDIR=../../../lib/ BIASLIBRARY=libbias.$(libext) # Rest of the makefile -ifeq ($(origin FFLAGS),undefined) -FFLAGS= -w -fPIC -#FFLAGS+= -g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none -endif -FFLAGS += $(GLOBAL_FLAG) +#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) + +# Detect O/S kernel (Linux, Darwin...) +UNAME_S := $(shell uname -s) + +# Detect architecture (x86_64, ppc64le...) +UNAME_P := $(shell uname -p) + +#------------------------------------------------------------------------------- # REMOVE MACFLAG IF NOT ON MAC OR FOR F2PY -UNAME := $(shell uname -s) ifdef f2pymode MACFLAG= else -ifneq ($(UNAME), Darwin) +ifneq ($(UNAME_S), Darwin) MACFLAG= endif endif +############################################################ +# Default compiler flags +# To change optimisation level, override these as follows: +# make CXXFLAGS="-O0 -g" +# or export them as environment variables +# For debugging Fortran, one could e.g. 
use: +# FCFLAGS="-g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none" +############################################################ +FCFLAGS ?= $(GLOBAL_FLAG) -fbounds-check +CXXFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG +NVCCFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG -use_fast_math -lineinfo +LDFLAGS ?= $(STDLIB) -ifeq ($(origin CXXFLAGS),undefined) -CXXFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +ifneq ($(FFLAGS),) +# Madgraph used to use FFLAGS, so the user probably tries to change the flags specifically for madgraph: +FCFLAGS = $(FFLAGS) endif -ifeq ($(origin CFLAGS),undefined) -CFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +# Madgraph-specific flags: +WARNFLAGS = -Wall -Wshadow -Wextra +ifeq (,$(findstring -std=,$(CXXFLAGS))) +CXXSTANDARD= -std=c++17 endif +MG_FCFLAGS += -fPIC -w +MG_CXXFLAGS += -fPIC $(CXXSTANDARD) $(WARNFLAGS) $(MACFLAG) +MG_NVCCFLAGS += -fPIC $(CXXSTANDARD) --forward-unknown-to-host-compiler $(WARNFLAGS) +MG_LDFLAGS += $(MACFLAG) # Set FC unless it's defined by an environment variable ifeq ($(origin FC),default) @@ -49,45 +71,40 @@ endif # Increase the number of allowed charcters in a Fortran line ifeq ($(FC), ftn) -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler else VERS="$(shell $(FC) --version | grep ifort -i)" ifeq ($(VERS), "") -FFLAGS+= -ffixed-line-length-132 +MG_FCFLAGS += -ffixed-line-length-132 else -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler endif endif -UNAME := $(shell uname -s) -ifeq ($(origin LDFLAGS), undefined) -LDFLAGS=$(STDLIB) $(MACFLAG) -endif - # Options: dynamic, lhapdf # Option dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) dylibext=dylib else dylibext=so endif ifdef dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) libext=dylib -FFLAGS+= -fno-common -LDFLAGS += -bundle +MG_FCFLAGS += -fno-common +MG_LDFLAGS += -bundle define CREATELIB $(FC) -dynamiclib 
-undefined dynamic_lookup -o $(1) $(2) endef else libext=so -FFLAGS+= -fPIC -LDFLAGS += -shared +MG_FCFLAGS += -fPIC +MG_LDFLAGS += -shared define CREATELIB -$(FC) $(FFLAGS) $(LDFLAGS) -o $(1) $(2) +$(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MG_LDFLAGS) $(LDFLAGS) -o $(1) $(2) endef endif else @@ -101,17 +118,9 @@ endif # Option lhapdf ifneq ($(lhapdf),) -CXXFLAGS += $(shell $(lhapdf) --cppflags) +MG_CXXFLAGS += $(shell $(lhapdf) --cppflags) alfas_functions=alfas_functions_lhapdf llhapdf+= $(shell $(lhapdf) --cflags --libs) -lLHAPDF -# check if we need to activate c++11 (for lhapdf6.2) -ifeq ($(origin CXX),default) -ifeq ($lhapdfversion$lhapdfsubversion,62) -CXX=$(DEFAULT_CPP_COMPILER) -std=c++11 -else -CXX=$(DEFAULT_CPP_COMPILER) -endif -endif else alfas_functions=alfas_functions llhapdf= @@ -120,4 +129,207 @@ endif # Helper function to check MG5 version define CHECK_MG5AMC_VERSION python -c 'import re; from distutils.version import StrictVersion; print StrictVersion("$(MG5AMC_VERSION)") >= StrictVersion("$(1)") if re.match("^[\d\.]+$$","$(MG5AMC_VERSION)") else True;' -endef \ No newline at end of file +endef + +#------------------------------------------------------------------------------- + +# Set special cases for non-gcc/clang builds +# AVX below gets overridden from outside in architecture-specific builds +AVX ?= none +# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] +# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] +$(info AVX=$(AVX)) +ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + endif +else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for 
clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif +endif + +# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? +MG_CXXFLAGS+= $(AVXFLAGS) + +#------------------------------------------------------------------------------- + +#=== Configure the CUDA compiler if available + +# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) +# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below +ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside + $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") + override CUDA_HOME=disabled +endif + +# If CUDA_HOME is not set, try to set it from the location of nvcc +ifndef CUDA_HOME + CUDA_HOME = $(patsubst %bin/nvcc,%,$(shell which nvcc 2>/dev/null)) + $(info CUDA_HOME="$(CUDA_HOME)") +endif + +# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists +ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) + NVCC = $(CUDA_HOME)/bin/nvcc + USE_NVTX ?=-DUSE_NVTX + # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html + # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ + # Default: use compute capability 70 (Volta architecture), and embed PTX to support later architectures, too. + # Set MADGRAPH_CUDA_ARCHITECTURE to the desired value to change the default. + # Build for multiple architectures using a space-separated list, e.g. 
MADGRAPH_CUDA_ARCHITECTURE="70 80" + MADGRAPH_CUDA_ARCHITECTURE ?= 70 + # Generate PTX for the first architecture: + CUARCHFLAGS := --generate-code arch=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)),code=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)) + # Generate device code for all architectures: + CUARCHFLAGS += $(foreach arch,$(MADGRAPH_CUDA_ARCHITECTURE), --generate-code arch=compute_$(arch),code=sm_$(arch)) + + CUINC = -I$(CUDA_HOME)/include/ + CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! + MG_LDFLAGS += $(CURANDLIBFLAGS) + MG_NVCCFLAGS += $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) + +else ifeq ($(AVX),cuda) + $(error nvcc is not visible in PATH. Either add it to PATH or export CUDA_HOME to compile with cuda) + ifeq ($(AVX),cuda) + $(error Cannot compile for cuda without NVCC) + endif +endif + +# Set the host C++ compiler for nvcc via "-ccbin " +# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." is not supported) +MG_NVCCFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) + +# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) +ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) +MG_NVCCFLAGS += -allow-unsupported-compiler +endif + +#------------------------------------------------------------------------------- + +#=== Configure ccache for C++ and CUDA builds + +# Enable ccache if USECCACHE=1 +ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) + override CXX:=ccache $(CXX) +endif + +ifneq ($(NVCC),) + ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) + override NVCC:=ccache $(NVCC) + endif +endif + +#------------------------------------------------------------------------------- + +#=== Configure PowerPC-specific compiler flags for C++ and CUDA + +# PowerPC-specific CXX / CUDA compiler flags (being reviewed) +ifeq ($(UNAME_P),ppc64le) + MG_CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none 
and sse4 + MG_NVCCFLAGS+= -Xcompiler -mno-float128 + + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + endif +endif + +#------------------------------------------------------------------------------- +#=== Apple-specific compiler/linker options + +# Add -std=c++17 explicitly to avoid build errors on macOS +# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" +ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) +MG_CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 +endif + +ifeq ($(UNAME_S),Darwin) +STDLIB = -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) +MG_LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" +else +MG_LDFLAGS += -Xlinker --no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +endif + +#------------------------------------------------------------------------------- + +#=== C++/CUDA-specific flags for floating-point types and random generators to use + +# Set the default FPTYPE (floating point type) choice +FPTYPE ?= m + +# Set the default HELINL (inline helicities?) choice +HELINL ?= 0 + +# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice +HRDCOD ?= 0 + +# Set the default RNDGEN (random number generator) choice +ifeq ($(NVCC),) + RNDGEN ?= hasNoCurand +else + RNDGEN ?= hasCurand +endif + +# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so sub-makes don't go back to the defaults +export AVX +export AVXFLAGS +export FPTYPE +export HELINL +export HRDCOD +export RNDGEN + +#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN + +# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") +# $(info FPTYPE=$(FPTYPE)) +ifeq ($(FPTYPE),d) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE +else ifeq ($(FPTYPE),f) + COMMONFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT +else ifeq ($(FPTYPE),m) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT +else + $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) +endif + +# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") +# $(info HELINL=$(HELINL)) +ifeq ($(HELINL),1) + COMMONFLAGS += -DMGONGPU_INLINE_HELAMPS +else ifneq ($(HELINL),0) + $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") +# $(info HRDCOD=$(HRDCOD)) +ifeq ($(HRDCOD),1) + COMMONFLAGS += -DMGONGPU_HARDCODE_PARAM +else ifneq ($(HRDCOD),0) + $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") +$(info RNDGEN=$(RNDGEN)) +ifeq ($(RNDGEN),hasNoCurand) + override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND + override CURANDLIBFLAGS = +else ifeq ($(RNDGEN),hasCurand) + CXXFLAGSCURAND = $(CUINC) +else + $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) +endif + +MG_CXXFLAGS += $(COMMONFLAGS) +MG_NVCCFLAGS += $(COMMONFLAGS) + 
+#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/ee_mumu.mad/Source/makefile b/epochX/cudacpp/ee_mumu.mad/Source/makefile index 00c73099a0..407b1b753e 100644 --- a/epochX/cudacpp/ee_mumu.mad/Source/makefile +++ b/epochX/cudacpp/ee_mumu.mad/Source/makefile @@ -10,8 +10,8 @@ include make_opts # Source files -PROCESS= hfill.o matrix.o myamp.o -DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o +PROCESS = hfill.o matrix.o myamp.o +DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o HBOOK = hfill.o hcurve.o hbook1.o hbook2.o GENERIC = $(alfas_functions).o transpole.o invarients.o hfill.o pawgraphs.o ran1.o \ rw_events.o rw_routines.o kin_functions.o open_file.o basecode.o setrun.o \ @@ -22,7 +22,7 @@ GENSUDGRID = gensudgrid.o is-sud.o setrun_gen.o rw_routines.o open_file.o # Locally compiled libraries -LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) +LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) # Binaries @@ -32,6 +32,9 @@ BINARIES = $(BINDIR)gen_ximprove $(BINDIR)gensudgrid $(BINDIR)combine_runs all: $(LIBRARIES) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(LIBDIR)libbias.$(libext) +%.o: %.f *.inc + $(FC) -I. $(MG_FCFLAGS) $(FCFLAGS) -c $< -o $@ + # Libraries $(LIBDIR)libdsample.$(libext): $(DSAMPLE) @@ -39,36 +42,35 @@ $(LIBDIR)libdsample.$(libext): $(DSAMPLE) $(LIBDIR)libgeneric.$(libext): $(GENERIC) $(call CREATELIB, $@, $^) $(LIBDIR)libdhelas.$(libext): DHELAS - cd DHELAS; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libpdf.$(libext): PDF make_opts - cd PDF; make; cd .. 
+ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" ifneq (,$(filter edff chff, $(pdlabel1) $(pdlabel2))) $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make ; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" else $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make -f makefile_dummy; cd ../../ -endif + $(MAKE) -C $< -f makefile_dummy FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" +endif $(LIBDIR)libcernlib.$(libext): CERNLIB - cd CERNLIB; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" # The bias library is here the dummy by default; compilation of other ones specified in the run_card will be done by MG5aMC directly. $(LIBDIR)libbias.$(libext): BIAS/dummy - cd BIAS/dummy; make; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libmodel.$(libext): MODEL param_card.inc - cd MODEL; make + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" param_card.inc: ../Cards/param_card.dat ../bin/madevent treatcards param + touch $@ # madevent doesn't update the time stamp if there's nothing to do -$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o - $(FC) $(LDFLAGS) -o $@ $^ -#$(BINDIR)combine_events: $(COMBINE) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) run_card.inc $(LIBDIR)libbias.$(libext) -# $(FC) -o $@ $(COMBINE) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC $(llhapdf) $(LDFLAGS) -lbias +$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o + $(FC) $(MG_LDFLAGS) $(LDFLAGS) -o $@ $^ $(BINDIR)gensudgrid: $(GENSUDGRID) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libcernlib.$(libext) - $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(LDFLAGS) + $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) 
-lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(MG_LDFLAGS) $(LDFLAGS) # Dependencies @@ -85,6 +87,7 @@ rw_events.o: rw_events.f run_config.inc run_card.inc: ../Cards/run_card.dat ../bin/madevent treatcards run + touch $@ # madevent doesn't update the time stamp if there's nothing to do clean4pdf: rm -f ../lib/libpdf.$(libext) @@ -120,7 +123,7 @@ $(LIBDIR)libiregi.a: $(IREGIDIR) cd $(IREGIDIR); make ln -sf ../Source/$(IREGIDIR)libiregi.a $(LIBDIR)libiregi.a -cleanSource: +clean: $(RM) *.o $(LIBRARIES) $(BINARIES) cd PDF; make clean; cd .. cd PDF/gammaUPC; make clean; cd ../../ @@ -132,11 +135,3 @@ cleanSource: cd BIAS/ptj_bias; make clean; cd ../.. if [ -d $(CUTTOOLSDIR) ]; then cd $(CUTTOOLSDIR); make clean; cd ..; fi if [ -d $(IREGIDIR) ]; then cd $(IREGIDIR); make clean; cd ..; fi - -clean: cleanSource - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make clean; cd -; done; - -cleanavx: - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; -cleanall: cleanSource # THIS IS THE ONE - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; diff --git a/epochX/cudacpp/ee_mumu.mad/SubProcesses/Bridge.h b/epochX/cudacpp/ee_mumu.mad/SubProcesses/Bridge.h index bf8b5e024d..c263f39a62 100644 --- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/Bridge.h +++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/Bridge.h @@ -236,7 +236,7 @@ namespace mg5amcCpu #ifdef __CUDACC__ if( ( m_nevt < s_gputhreadsmin ) || ( m_nevt % s_gputhreadsmin != 0 ) ) throw std::runtime_error( "Bridge constructor: nevt should be a multiple of " + std::to_string( s_gputhreadsmin ) ); - while( m_nevt != m_gpublocks * m_gputhreads ) + while( m_nevt != static_cast( m_gpublocks * m_gputhreads ) ) { m_gputhreads /= 2; if( m_gputhreads < s_gputhreadsmin ) @@ -266,7 +266,7 @@ namespace mg5amcCpu template void Bridge::set_gpugrid( const int gpublocks, const int gputhreads ) { - if( m_nevt != gpublocks * gputhreads ) + if( m_nevt != static_cast( gpublocks * gputhreads ) ) throw 
std::runtime_error( "Bridge: gpublocks*gputhreads must equal m_nevt in set_gpugrid" ); m_gpublocks = gpublocks; m_gputhreads = gputhreads; diff --git a/epochX/cudacpp/ee_mumu.mad/SubProcesses/MadgraphTest.h b/epochX/cudacpp/ee_mumu.mad/SubProcesses/MadgraphTest.h index ef40624c88..b0f2250c25 100644 --- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/MadgraphTest.h +++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/MadgraphTest.h @@ -199,10 +199,6 @@ class MadgraphTest : public testing::TestWithParam } }; -// Since we link both the CPU-only and GPU tests into the same executable, we prevent -// a multiply defined symbol by only compiling this in the non-CUDA phase: -#ifndef __CUDACC__ - /// Compare momenta and matrix elements. /// This uses an implementation of TestDriverBase to run a madgraph workflow, /// and compares momenta and matrix elements with a reference file. @@ -307,6 +303,4 @@ TEST_P( MadgraphTest, CompareMomentaAndME ) } } -#endif // __CUDACC__ - #endif /* MADGRAPHTEST_H_ */ diff --git a/epochX/cudacpp/ee_mumu.mad/SubProcesses/MatrixElementKernels.cc b/epochX/cudacpp/ee_mumu.mad/SubProcesses/MatrixElementKernels.cc index 74b5239ebf..2d6f27cd5d 100644 --- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/MatrixElementKernels.cc +++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/MatrixElementKernels.cc @@ -196,6 +196,9 @@ namespace mg5amcGpu void MatrixElementKernelDevice::setGrid( const int gpublocks, const int gputhreads ) { + m_gpublocks = gpublocks; + m_gputhreads = gputhreads; + if( m_gpublocks == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gpublocks must be > 0 in setGrid" ); if( m_gputhreads == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gputhreads must be > 0 in setGrid" ); if( this->nevt() != m_gpublocks * m_gputhreads ) throw std::runtime_error( "MatrixElementKernelDevice: nevt mismatch in setGrid" ); diff --git a/epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum/check_sa.cc 
b/epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum/check_sa.cc +++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum/counters.cc b/epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum/counters.cc +++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk index 509307506b..a522ddb335 100644 --- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk @@ -1,56 +1,41 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
- -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: use ':=' to ensure that the value of CUDACPP_MAKEFILE is not modified further down after including make_opts -#=== NB: use 'override' to ensure that the value can not be modified from the outside -override CUDACPP_MAKEFILE := $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) -###$(info CUDACPP_MAKEFILE='$(CUDACPP_MAKEFILE)') - -#=== NB: different names (e.g. cudacpp.mk and cudacpp_src.mk) are used in the Subprocess and src directories -override CUDACPP_SRC_MAKEFILE = cudacpp_src.mk - -#------------------------------------------------------------------------------- - -#=== Use bash in the Makefile (https://www.gnu.org/software/make/manual/html_node/Choosing-the-Shell.html) - -SHELL := /bin/bash - -#------------------------------------------------------------------------------- - -#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) - -# Detect O/S kernel (Linux, Darwin...) -UNAME_S := $(shell uname -s) -###$(info UNAME_S='$(UNAME_S)') - -# Detect architecture (x86_64, ppc64le...) -UNAME_P := $(shell uname -p) -###$(info UNAME_P='$(UNAME_P)') - -#------------------------------------------------------------------------------- - -#=== Include the common MG5aMC Makefile options - -# OM: this is crucial for MG5aMC flag consistency/documentation -# AV: temporarely comment this out because it breaks cudacpp builds -ifneq ($(wildcard ../../Source/make_opts),) -include ../../Source/make_opts -endif +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
+ +# This makefile extends the Fortran makefile called "makefile" + +CUDACPP_SRC_MAKEFILE = cudacpp_src.mk + +# Self-invocation with adapted flags: +cppnative: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=native AVXFLAGS="-march=native" cppbuild +cppnone: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=none AVXFLAGS= cppbuild +cppsse4: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=sse4 AVXFLAGS=-march=nehalem cppbuild +cppavx2: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=avx2 AVXFLAGS=-march=haswell cppbuild +cppavx512y: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512y AVXFLAGS="-march=skylake-avx512 -mprefer-vector-width=256" cppbuild +cppavx512z: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512z AVXFLAGS="-march=skylake-avx512 -DMGONGPU_PVW512" cppbuild +cuda: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=cuda cudabuild #------------------------------------------------------------------------------- #=== Configure common compiler flags for C++ and CUDA +# NB: The base flags are defined in the fortran "makefile" + +# Include directories +INCFLAGS = -I. -I../../src -INCFLAGS = -I. 
-OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here +MG_CXXFLAGS += $(INCFLAGS) +MG_NVCCFLAGS += $(INCFLAGS) # Dependency on src directory -MG5AMC_COMMONLIB = mg5amc_common -LIBFLAGS = -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -INCFLAGS += -I../../src +MG5AMC_COMMONLIB = mg5amc_common # Compiler-specific googletest build directory (#125 and #738) ifneq ($(shell $(CXX) --version | grep '^Intel(R) oneAPI DPC++/C++ Compiler'),) @@ -99,356 +84,42 @@ endif #------------------------------------------------------------------------------- -#=== Configure the C++ compiler - -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) -Wall -Wshadow -Wextra -ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS += -ffast-math # see issue #117 -endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY - -# Optionally add debug flags to display the full list of flags (eg on Darwin) -###CXXFLAGS+= -v - -# Note: AR, CXX and FC are implicitly defined if not set externally -# See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html - -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler - -# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) -# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below -ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside - $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") - override CUDA_HOME=disabled -endif - -# If CUDA_HOME is not set, try to set it from the location of nvcc -ifndef CUDA_HOME - CUDA_HOME = $(patsubst 
%bin/nvcc,%,$(shell which nvcc 2>/dev/null)) - $(warning CUDA_HOME was not set: using "$(CUDA_HOME)") -endif - -# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists -ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) - NVCC = $(CUDA_HOME)/bin/nvcc - USE_NVTX ?=-DUSE_NVTX - # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html - # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ - # Default: use compute capability 70 for V100 (CERN lxbatch, CERN itscrd, Juwels Cluster). - # Embed device code for 70, and PTX for 70+. - # Export MADGRAPH_CUDA_ARCHITECTURE (comma-separated list) to use another value or list of values (see #533). - # Examples: use 60 for P100 (Piz Daint), 80 for A100 (Juwels Booster, NVidia raplab/Curiosity). - MADGRAPH_CUDA_ARCHITECTURE ?= 70 - ###CUARCHFLAGS = -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=compute_$(MADGRAPH_CUDA_ARCHITECTURE) -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=sm_$(MADGRAPH_CUDA_ARCHITECTURE) # Older implementation (AV): go back to this one for multi-GPU support #533 - ###CUARCHFLAGS = --gpu-architecture=compute_$(MADGRAPH_CUDA_ARCHITECTURE) --gpu-code=sm_$(MADGRAPH_CUDA_ARCHITECTURE),compute_$(MADGRAPH_CUDA_ARCHITECTURE) # Newer implementation (SH): cannot use this as-is for multi-GPU support #533 - comma:=, - CUARCHFLAGS = $(foreach arch,$(subst $(comma), ,$(MADGRAPH_CUDA_ARCHITECTURE)),-gencode arch=compute_$(arch),code=compute_$(arch) -gencode arch=compute_$(arch),code=sm_$(arch)) - CUINC = -I$(CUDA_HOME)/include/ - ifeq ($(RNDGEN),hasNoCurand) - CURANDLIBFLAGS= - else - CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! 
- endif - CUOPTFLAGS = -lineinfo - CUFLAGS = $(foreach opt, $(OPTFLAGS), -Xcompiler $(opt)) $(CUOPTFLAGS) $(INCFLAGS) $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) -use_fast_math - ###CUFLAGS += -Xcompiler -Wall -Xcompiler -Wextra -Xcompiler -Wshadow - ###NVCC_VERSION = $(shell $(NVCC) --version | grep 'Cuda compilation tools' | cut -d' ' -f5 | cut -d, -f1) - CUFLAGS += -std=c++17 # need CUDA >= 11.2 (see #333): this is enforced in mgOnGpuConfig.h - # Without -maxrregcount: baseline throughput: 6.5E8 (16384 32 12) up to 7.3E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 160 # improves throughput: 6.9E8 (16384 32 12) up to 7.7E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 128 # improves throughput: 7.3E8 (16384 32 12) up to 7.6E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 96 # degrades throughput: 4.1E8 (16384 32 12) up to 4.5E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 64 # degrades throughput: 1.7E8 (16384 32 12) flat at 1.7E8 (65536 128 12) -else ifneq ($(origin REQUIRE_CUDA),undefined) - # If REQUIRE_CUDA is set but no cuda is found, stop here (e.g. for CI tests on GPU #443) - $(error No cuda installation found (set CUDA_HOME or make nvcc visible in PATH)) -else - # No cuda. Switch cuda compilation off and go to common random numbers in C++ - $(warning CUDA_HOME is not set or is invalid: export CUDA_HOME to compile with cuda) - override NVCC= - override USE_NVTX= - override CUINC= - override CURANDLIBFLAGS= -endif -export NVCC -export CUFLAGS - -# Set the host C++ compiler for nvcc via "-ccbin " -# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." 
is not supported) -CUFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) - -# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) -ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) -CUFLAGS += -allow-unsupported-compiler -endif - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ and CUDA builds - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif -ifneq ($(NVCC),) - ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) - override NVCC:=ccache $(NVCC) - endif -endif - -#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for C++ and CUDA - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # would increase to none=4.08-4.12E6, sse4=4.99-5.03E6! -else - ###CXXFLAGS+= -flto # also on Intel this would increase throughputs by a factor 2 to 4... - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -# PowerPC-specific CUDA compiler flags (to be reviewed!) 
-ifeq ($(UNAME_P),ppc64le) - CUFLAGS+= -Xcompiler -mno-float128 -endif - -#------------------------------------------------------------------------------- - #=== Configure defaults and check if user-defined choices exist for OMPFLAGS, AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the default OMPFLAGS choice -ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on Intel (was ok without nvcc but not ok with nvcc before #578) -else ifneq ($(shell $(CXX) --version | egrep '^(clang)'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on clang (was not ok without or with nvcc before #578) -###else ifneq ($(shell $(CXX) --version | egrep '^(Apple clang)'),) # AV for Mac (Apple clang compiler) -else ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) +OMPFLAGS ?= -fopenmp +ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) override OMPFLAGS = # AV disable OpenMP MT on Apple clang (builds fail in the CI #578) -###override OMPFLAGS = -fopenmp # OM reenable OpenMP MT on Apple clang? 
(AV Oct 2023: this still fails in the CI) -else -override OMPFLAGS = -fopenmp # enable OpenMP MT by default on all other platforms -###override OMPFLAGS = # disable OpenMP MT on all other platforms (default before #575) -endif - -# Set the default AVX (vectorization) choice -ifeq ($(AVX),) - ifeq ($(UNAME_P),ppc64le) - ###override AVX = none - override AVX = sse4 - else ifeq ($(UNAME_P),arm) - ###override AVX = none - override AVX = sse4 - else ifeq ($(wildcard /proc/cpuinfo),) - override AVX = none - $(warning Using AVX='$(AVX)' because host SIMD features cannot be read from /proc/cpuinfo) - else ifeq ($(shell grep -m1 -c avx512vl /proc/cpuinfo)$(shell $(CXX) --version | grep ^clang),1) - override AVX = 512y - ###$(info Using AVX='$(AVX)' as no user input exists) - else - override AVX = avx2 - ifneq ($(shell grep -m1 -c avx512vl /proc/cpuinfo),1) - $(warning Using AVX='$(AVX)' because host does not support avx512vl) - else - $(warning Using AVX='$(AVX)' because this is faster than avx512vl for clang) - endif - endif -else - ###$(info Using AVX='$(AVX)' according to user input) -endif - -# Set the default FPTYPE (floating point type) choice -ifeq ($(FPTYPE),) - override FPTYPE = d -endif - -# Set the default HELINL (inline helicities?) choice -ifeq ($(HELINL),) - override HELINL = 0 -endif - -# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice -ifeq ($(HRDCOD),) - override HRDCOD = 0 -endif - -# Set the default RNDGEN (random number generator) choice -ifeq ($(RNDGEN),) - ifeq ($(NVCC),) - override RNDGEN = hasNoCurand - else ifeq ($(RNDGEN),) - override RNDGEN = hasCurand - endif endif -# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so that it is not necessary to pass them to the src Makefile too -export AVX -export FPTYPE -export HELINL -export HRDCOD -export RNDGEN +# Export here, so sub makes don't fall back to the defaults: export OMPFLAGS -#------------------------------------------------------------------------------- - -#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN - -# Set the build flags appropriate to OMPFLAGS -$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override 
AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS - CUFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM - CUFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND -else ifeq ($(RNDGEN),hasCurand) - override CXXFLAGSCURAND = -else - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- #=== Configure build directories and build lockfiles === -# Build directory "short" tag (defines target and path to the optional build directory) -# (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) - -# Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) -# (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) - -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIR = ../../lib/$(BUILDDIR) - override LIBDIRRPATH = '$$ORIGIN/../$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is set = 1)) -else - override BUILDDIR = . - override LIBDIR = ../../lib - override LIBDIRRPATH = '$$ORIGIN/$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) +# Build directory "short" tag (defines target and path to the build directory) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +CUDACPP_BUILDDIR = build.$(DIRTAG) +CUDACPP_LIBDIR := ../../lib/$(CUDACPP_BUILDDIR) +LIBDIRRPATH := '$$ORIGIN:$$ORIGIN/../$(CUDACPP_LIBDIR)' +ifneq ($(AVX),) + $(info Building CUDACPP in CUDACPP_BUILDDIR=$(CUDACPP_BUILDDIR). 
Libs in $(CUDACPP_LIBDIR)) endif -###override INCDIR = ../../include -###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) -# On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH +# On Linux, set rpath to CUDACPP_LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables or shared libraries ($ORIGIN on Linux) -# On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary +# On Darwin, building libraries with absolute paths in CUDACPP_LIBDIR makes this unnecessary ifeq ($(UNAME_S),Darwin) override CXXLIBFLAGSRPATH = override CULIBFLAGSRPATH = - override CXXLIBFLAGSRPATH2 = - override CULIBFLAGSRPATH2 = else # RPATH to cuda/cpp libs when linking executables override CXXLIBFLAGSRPATH = -Wl,-rpath,$(LIBDIRRPATH) override CULIBFLAGSRPATH = -Xlinker -rpath,$(LIBDIRRPATH) - # RPATH to common lib when linking cuda/cpp libs - override CXXLIBFLAGSRPATH2 = -Wl,-rpath,'$$ORIGIN' - override CULIBFLAGSRPATH2 = -Xlinker -rpath,'$$ORIGIN' endif # Setting LD_LIBRARY_PATH or DYLD_LIBRARY_PATH in the RUNTIME is no longer necessary (neither on Linux nor on Mac) @@ -458,107 +129,68 @@ override RUNTIME = #=== Makefile TARGETS and build rules below #=============================================================================== -cxx_main=$(BUILDDIR)/check.exe -fcxx_main=$(BUILDDIR)/fcheck.exe +cxx_main=$(CUDACPP_BUILDDIR)/check.exe +fcxx_main=$(CUDACPP_BUILDDIR)/fcheck.exe -ifneq ($(NVCC),) -cu_main=$(BUILDDIR)/gcheck.exe -fcu_main=$(BUILDDIR)/fgcheck.exe -else -cu_main= -fcu_main= -endif - -testmain=$(BUILDDIR)/runTest.exe +cu_main=$(CUDACPP_BUILDDIR)/gcheck.exe +fcu_main=$(CUDACPP_BUILDDIR)/fgcheck.exe ifneq ($(GTESTLIBS),) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) $(testmain) -else -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) 
+testmain=$(CUDACPP_BUILDDIR)/runTest.exe +cutestmain=$(CUDACPP_BUILDDIR)/runTest_cuda.exe endif -# Target (and build options): debug -MAKEDEBUG= -debug: OPTFLAGS = -g -O0 -debug: CUOPTFLAGS = -G -debug: MAKEDEBUG := debug -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -$(BUILDDIR)/.build.$(TAG): - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @if [ "$(oldtagsb)" != "" ]; then echo "Cannot build for tag=$(TAG) as old builds exist for other tags:"; echo " $(oldtagsb)"; echo "Please run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @touch $(BUILDDIR)/.build.$(TAG) +cppbuild: $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(cxx_main) $(fcxx_main) $(testmain) +cudabuild: $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(cu_main) $(fcu_main) $(cutestmain) # Generic target and build rules: objects from CUDA compilation -ifneq ($(NVCC),) -$(BUILDDIR)/%.o : %.cu *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cu *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c $< -o $@ -$(BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ -endif +$(CUDACPP_BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ # Generic target and build rules: objects from C++ compilation # (NB do not include CUINC here! 
add it only for NVTX or curand #679) -$(BUILDDIR)/%.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Apply special build flags only to CrossSectionKernel.cc and gCrossSectionKernel.cu (no fast math, see #117 and #516) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS := $(filter-out -ffast-math,$(CXXFLAGS)) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math +$(CUDACPP_BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math ifneq ($(NVCC),) -$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -fno-fast-math +$(CUDACPP_BUILDDIR)/gCrossSectionKernels.o: NVCCFLAGS += -Xcompiler -fno-fast-math endif endif # Apply special build flags only to check_sa.o and gcheck_sa.o (NVTX in timermap.h, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) -$(BUILDDIR)/gcheck_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) # Apply special build flags only to check_sa and CurandRandomNumberKernel (curand headers, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gcheck_sa.o: CUFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gCurandRandomNumberKernel.o: CUFLAGS += $(CXXFLAGSCURAND) -ifeq ($(RNDGEN),hasCurand) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CUINC) -endif +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) 
+$(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) + # Avoid "warning: builtin __has_trivial_... is deprecated; use __is_trivially_... instead" in nvcc with icx2023 (#592) ifneq ($(shell $(CXX) --version | egrep '^(Intel)'),) ifneq ($(NVCC),) -CUFLAGS += -Xcompiler -Wno-deprecated-builtins +MG_NVCCFLAGS += -Xcompiler -Wno-deprecated-builtins endif endif -# Avoid clang warning "overriding '-ffp-contract=fast' option with '-ffp-contract=on'" (#516) -# This patch does remove the warning, but I prefer to keep it disabled for the moment... -###ifneq ($(shell $(CXX) --version | egrep '^(clang|Apple clang|Intel)'),) -###$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -Wno-overriding-t-option -###ifneq ($(NVCC),) -###$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -Wno-overriding-t-option -###endif -###endif - #### Apply special build flags only to CPPProcess.cc (-flto) ###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += -flto -#### Apply special build flags only to CPPProcess.cc (AVXFLAGS) -###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += $(AVXFLAGS) - #------------------------------------------------------------------------------- -# Target (and build rules): common (src) library -commonlib : $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc $(BUILDDIR)/.build.$(TAG) - $(MAKE) -C ../../src $(MAKEDEBUG) -f $(CUDACPP_SRC_MAKEFILE) +$(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc + $(MAKE) AVX=$(AVX) AVXFLAGS="$(AVXFLAGS)" -C ../../src -f $(CUDACPP_SRC_MAKEFILE) #------------------------------------------------------------------------------- @@ -566,162 +198,123 @@ processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') ###$(info processid_short=$(processid_short)) MG5AMC_CXXLIB = mg5amc_$(processid_short)_cpp -cxx_objects_lib=$(BUILDDIR)/CPPProcess.o $(BUILDDIR)/MatrixElementKernels.o $(BUILDDIR)/BridgeKernels.o $(BUILDDIR)/CrossSectionKernels.o 
-cxx_objects_exe=$(BUILDDIR)/CommonRandomNumberKernel.o $(BUILDDIR)/RamboSamplingKernels.o +cxx_objects_lib=$(CUDACPP_BUILDDIR)/CPPProcess.o $(CUDACPP_BUILDDIR)/MatrixElementKernels.o $(CUDACPP_BUILDDIR)/BridgeKernels.o $(CUDACPP_BUILDDIR)/CrossSectionKernels.o +cxx_objects_exe=$(CUDACPP_BUILDDIR)/CommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/RamboSamplingKernels.o -ifneq ($(NVCC),) MG5AMC_CULIB = mg5amc_$(processid_short)_cuda -cu_objects_lib=$(BUILDDIR)/gCPPProcess.o $(BUILDDIR)/gMatrixElementKernels.o $(BUILDDIR)/gBridgeKernels.o $(BUILDDIR)/gCrossSectionKernels.o -cu_objects_exe=$(BUILDDIR)/gCommonRandomNumberKernel.o $(BUILDDIR)/gRamboSamplingKernels.o -endif +cu_objects_lib=$(CUDACPP_BUILDDIR)/gCPPProcess.o $(CUDACPP_BUILDDIR)/gMatrixElementKernels.o $(CUDACPP_BUILDDIR)/gBridgeKernels.o $(CUDACPP_BUILDDIR)/gCrossSectionKernels.o +cu_objects_exe=$(CUDACPP_BUILDDIR)/gCommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/gRamboSamplingKernels.o # Target (and build rules): C++ and CUDA shared libraries -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) - $(CXX) -shared -o $@ $(cxx_objects_lib) $(CXXLIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) - -ifneq ($(NVCC),) -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) - $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -endif +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) + $(CXX) -shared -o $@ 
$(cxx_objects_lib) $(CXXLIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) $(MG_LDFLAGS) $(LDFLAGS) -#------------------------------------------------------------------------------- - -# Target (and build rules): Fortran include files -###$(INCDIR)/%.inc : ../%.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### \cp $< $@ +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) + $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) #------------------------------------------------------------------------------- # Target (and build rules): C++ and CUDA standalone executables -$(cxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cxx_main): $(BUILDDIR)/check_sa.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o - $(CXX) -o $@ $(BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o $(CURANDLIBFLAGS) -ifneq ($(NVCC),) +$(cxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(cxx_main): $(CUDACPP_BUILDDIR)/check_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) + ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(cu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(cu_main): LIBFLAGS 
+= -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(cu_main): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(cu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cu_main): $(BUILDDIR)/gcheck_sa.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o - $(NVCC) -o $@ $(BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o $(CURANDLIBFLAGS) +$(cu_main): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif +$(cu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(cu_main): $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- - -# Generic target and build rules: objects from Fortran compilation -$(BUILDDIR)/%.o : %.f *.inc - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(FC) -I. -c $< -o $@ - -# Generic target and build rules: objects from Fortran compilation -###$(BUILDDIR)/%.o : %.f *.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi -### $(FC) -I. -I$(INCDIR) -c $< -o $@ - -# Target (and build rules): Fortran standalone executables -###$(BUILDDIR)/fcheck_sa.o : $(INCDIR)/fbridge.inc +# Check executables: ifeq ($(UNAME_S),Darwin) -$(fcxx_main): LIBFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 +$(fcxx_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif -$(fcxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcxx_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) - $(CXX) -o $@ $(BUILDDIR)/fcheck_sa.o $(OMPFLAGS) $(BUILDDIR)/fsampler.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) +$(fcxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(fcxx_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(cxx_objects_exe) $(OMPFLAGS) $(CUDACPP_BUILDDIR)/fsampler.o -lgfortran -L$(CUDACPP_LIBDIR) $(MG_LDFLAGS) $(LDFLAGS) -ifneq ($(NVCC),) ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(fcu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(fcu_main): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(fcu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(fcu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') endif ifeq ($(UNAME_S),Darwin) -$(fcu_main): LIBFLAGS += -L$(shell dirname 
$(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 -endif -$(fcu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcu_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) - $(NVCC) -o $@ $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) +$(fcu_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif +$(fcu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(fcu_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(cu_objects_exe) -lgfortran $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- # Target (and build rules): test objects and test executable -$(BUILDDIR)/testxxx.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx_cu.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx_cu.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -endif +$(testmain) $(cutestmain): $(GTESTLIBS) +$(testmain) $(cutestmain): INCFLAGS += $(GTESTINC) +$(testmain) $(cutestmain): MG_LDFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main -$(BUILDDIR)/testmisc.o: $(GTESTLIBS) 
-$(BUILDDIR)/testmisc.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(CUDACPP_BUILDDIR)/testxxx.o $(CUDACPP_BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) testxxx_cc_ref.txt +$(testmain): $(CUDACPP_BUILDDIR)/testxxx.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions +$(cutestmain): $(CUDACPP_BUILDDIR)/testxxx_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testmisc_cu.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests -endif -$(BUILDDIR)/runTest.o: $(GTESTLIBS) -$(BUILDDIR)/runTest.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/runTest.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/runTest.o +$(CUDACPP_BUILDDIR)/testmisc.o $(CUDACPP_BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/testmisc.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(cutestmain): $(CUDACPP_BUILDDIR)/testmisc_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests + + +$(CUDACPP_BUILDDIR)/runTest.o $(CUDACPP_BUILDDIR)/runTest_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/runTest.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/runTest.o +$(cutestmain): $(CUDACPP_BUILDDIR)/runTest_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/runTest_cu.o + -ifneq ($(NVCC),) -$(BUILDDIR)/runTest_cu.o: $(GTESTLIBS) -$(BUILDDIR)/runTest_cu.o: INCFLAGS += $(GTESTINC) ifneq ($(shell $(CXX) --version | grep ^Intel),) 
-$(testmain): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(testmain): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cutestmain): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cutestmain): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(testmain): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(testmain): $(BUILDDIR)/runTest_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/runTest_cu.o +$(cutestmain): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif -$(testmain): $(GTESTLIBS) -$(testmain): INCFLAGS += $(GTESTINC) -$(testmain): LIBFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main ifneq ($(OMPFLAGS),) ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -$(testmain): LIBFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) +$(testmain): MG_LDFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -$(testmain): LIBFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 +$(testmain): MG_LDFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 ###else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) ###$(testmain): LIBFLAGS += ???? 
# OMP is not supported yet by cudacpp for Apple clang (see #578 and #604) else -$(testmain): LIBFLAGS += -lgomp +$(testmain): MG_LDFLAGS += -lgomp endif endif -ifeq ($(NVCC),) # link only runTest.o -$(testmain): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) - $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -ldl -pthread $(LIBFLAGS) -else # link both runTest.o and runTest_cu.o -$(testmain): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) - $(NVCC) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) -ldl $(LIBFLAGS) -lcuda -endif +$(testmain): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(testmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) + $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -pthread $(MG_LDFLAGS) $(LDFLAGS) + +$(cutestmain): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cutestmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) + $(NVCC) -o $@ $(cu_objects_lib) $(cu_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -lcuda $(MG_LDFLAGS) $(LDFLAGS) # Use target gtestlibs to build only googletest ifneq ($(GTESTLIBS),) @@ -731,72 +324,15 @@ endif # Use flock (Linux only, no Mac) to allow 'make -j' if googletest has not yet been downloaded https://stackoverflow.com/a/32666215 $(GTESTLIBS): ifneq ($(shell which flock 2>/dev/null),) - @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - flock $(BUILDDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) + flock $(TESTDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) else if [ -d $(TESTDIR) ]; then $(MAKE) -C $(TESTDIR); fi endif #------------------------------------------------------------------------------- -# Target: build all targets in all AVX modes (each AVX mode in a separate build directory) -# Split the avxall target into five separate targets to allow parallel 'make -j avxall' builds -# (Hack: add a fbridge.inc dependency to avxall, to ensure it is only copied once for all AVX modes) -avxnone: - @echo - $(MAKE) USEBUILDDIR=1 AVX=none -f $(CUDACPP_MAKEFILE) - -avxsse4: - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 -f $(CUDACPP_MAKEFILE) - -avxavx2: - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 -f $(CUDACPP_MAKEFILE) - -avx512y: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y -f $(CUDACPP_MAKEFILE) - -avx512z: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z -f $(CUDACPP_MAKEFILE) - -ifeq ($(UNAME_P),ppc64le) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else ifeq ($(UNAME_P),arm) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 avxavx2 avx512y avx512z -avxall: avxnone avxsse4 avxavx2 avx512y avx512z -endif - -#------------------------------------------------------------------------------- - -# Target: clean the builds -.PHONY: clean - -clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(BUILDDIR) -else - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe - rm -f $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(LIBDIR)/lib$(MG5AMC_CULIB).so -endif - $(MAKE) -C ../../src clean -f $(CUDACPP_SRC_MAKEFILE) -### rm -rf $(INCDIR) - -cleanall: - @echo - $(MAKE) USEBUILDDIR=0 clean -f $(CUDACPP_MAKEFILE) - @echo - $(MAKE) USEBUILDDIR=0 -C ../../src cleanall -f $(CUDACPP_SRC_MAKEFILE) - rm -rf build.* - # Target: clean the builds as well as the gtest installation(s) 
-distclean: cleanall +distclean: clean cleansrc ifneq ($(wildcard $(TESTDIRCOMMON)),) $(MAKE) -C $(TESTDIRCOMMON) clean endif @@ -848,50 +384,55 @@ endif #------------------------------------------------------------------------------- -# Target: check (run the C++ test executable) +# Target: check/gcheck (run the C++ test executable) # [NB THIS IS WHAT IS USED IN THE GITHUB CI!] -ifneq ($(NVCC),) -check: runTest cmpFcheck cmpFGcheck -else check: runTest cmpFcheck -endif +gcheck: + $(MAKE) AVX=cuda runTest cmpFGcheck # Target: runTest (run the C++ test executable runTest.exe) -runTest: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/runTest.exe +ifneq ($(AVX),cuda) +runTest: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest.exe +else +runTest: cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest_cuda.exe +endif + # Target: runCheck (run the C++ standalone executable check.exe, with a small number of events) -runCheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/check.exe -p 2 32 2 +runCheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe -p 2 32 2 # Target: runGcheck (run the CUDA standalone executable gcheck.exe, with a small number of events) -runGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/gcheck.exe -p 2 32 2 +runGcheck: AVX=cuda +runGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe -p 2 32 2 # Target: runFcheck (run the Fortran standalone executable - with C++ MEs - fcheck.exe, with a small number of events) -runFcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 +runFcheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 # Target: runFGcheck (run the Fortran standalone executable - with CUDA MEs - fgcheck.exe, with a small number of events) -runFGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 +runFGcheck: AVX=cuda +runFGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 # Target: cmpFcheck (compare ME results from the C++ and Fortran with C++ MEs standalone executables, with 
a small number of events) -cmpFcheck: all.$(TAG) +cmpFcheck: cppbuild @echo - @echo "$(BUILDDIR)/check.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi # Target: cmpFGcheck (compare ME results from the CUDA and Fortran with CUDA MEs standalone executables, with a small number of events) -cmpFGcheck: all.$(TAG) +cmpFGcheck: AVX=cuda +cmpFGcheck: + $(MAKE) AVX=cuda cudabuild @echo - @echo "$(BUILDDIR)/gcheck.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fgcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi -# Target: memcheck (run the CUDA standalone executable gcheck.exe with a small number of events through cuda-memcheck) -memcheck: all.$(TAG) - $(RUNTIME) $(CUDA_HOME)/bin/cuda-memcheck --check-api-memory-access yes --check-deprecated-instr yes --check-device-heap yes --demangle full --language c --leak-check full --racecheck-report all --report-api-errors all --show-backtrace yes --tool memcheck --track-unused-memory yes $(BUILDDIR)/gcheck.exe -p 2 32 2 - -#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/ee_mumu.mad/SubProcesses/makefile b/epochX/cudacpp/ee_mumu.mad/SubProcesses/makefile index d572486c2e..b69917ee1f 100644 --- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/makefile +++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/makefile @@ -1,27 +1,30 @@ SHELL := /bin/bash -include ../../Source/make_opts -FFLAGS+= -w +# Include general setup +OPTIONS_MAKEFILE := ../../Source/make_opts +include $(OPTIONS_MAKEFILE) # Enable the C preprocessor https://gcc.gnu.org/onlinedocs/gfortran/Preprocessing-Options.html -FFLAGS+= -cpp +MG_FCFLAGS += -cpp +MG_CXXFLAGS += -I. -# Compile counters with -O3 as in the cudacpp makefile (avoid being "unfair" to Fortran #740) -CXXFLAGS = -O3 -Wall -Wshadow -Wextra +all: help cppnative + +# Target if user does not specify target +help: + $(info No target specified.) 
+ $(info Viable targets are 'cppnative' (default), 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z' and 'cuda') + $(info Or 'cppall' for all C++ targets) + $(info Or 'ALL' for all C++ and cuda targets) -# Add -std=c++17 explicitly to avoid build errors on macOS -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 -endif -# Enable ccache if USECCACHE=1 +# Enable ccache for C++ if USECCACHE=1 (do not enable it for Fortran since it is not supported for Fortran) ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) override CXX:=ccache $(CXX) endif -ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) - override FC:=ccache $(FC) -endif +###ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) +### override FC:=ccache $(FC) +###endif # Load additional dependencies of the bias module, if present ifeq (,$(wildcard ../bias_dependencies)) @@ -46,34 +49,25 @@ else MADLOOP_LIB = endif -LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias - -processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') -CUDACPP_MAKEFILE=cudacpp.mk -# NB1 Using ":=" below instead of "=" is much faster (it only runs the subprocess once instead of many times) -# NB2 Use '|&' in CUDACPP_BUILDDIR to avoid confusing errors about googletest #507 -# NB3 Do not add a comment inlined "CUDACPP_BUILDDIR=$(shell ...) # comment" as otherwise a trailing space is included... -# NB4 The variables relevant to the cudacpp Makefile must be explicitly passed to $(shell...) 
-CUDACPP_MAKEENV:=$(shell echo '$(.VARIABLES)' | tr " " "\n" | egrep "(USEBUILDDIR|AVX|FPTYPE|HELINL|HRDCOD)") -###$(info CUDACPP_MAKEENV=$(CUDACPP_MAKEENV)) -###$(info $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))")) -CUDACPP_BUILDDIR:=$(shell $(MAKE) $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))") -f $(CUDACPP_MAKEFILE) -pn 2>&1 | awk '/Building/{print $$3}' | sed s/BUILDDIR=//) -ifeq ($(CUDACPP_BUILDDIR),) -$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) -else -$(info CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)') -endif -CUDACPP_COMMONLIB=mg5amc_common -CUDACPP_CXXLIB=mg5amc_$(processid_short)_cpp -CUDACPP_CULIB=mg5amc_$(processid_short)_cuda - +LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias LIBS = $(LIBDIR)libbias.$(libext) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(MADLOOP_LIB) $(LOOP_LIBS) ifneq ("$(wildcard ../../Source/RUNNING)","") LINKLIBS += -lrunning - LIBS += $(LIBDIR)librunning.$(libext) + LIBS += $(LIBDIR)librunning.$(libext) endif +SOURCEDIR_GUARD:=../../Source/.timestamp_guard +# We use $(SOURCEDIR_GUARD) to figure out if Source is out of date. The Source makefile doesn't correctly +# update all files, so we need a proxy that is updated every time we run "$(MAKE) -C ../../Source". 
+$(SOURCEDIR_GUARD) ../../Source/discretesampler.mod &: ../../Source/*.f ../../Cards/param_card.dat ../../Cards/run_card.dat +ifneq ($(shell which flock 2>/dev/null),) + flock ../../Source/.lock -c "$(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD)" +else + $(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD) +endif + +$(LIBS): $(SOURCEDIR_GUARD) # Source files @@ -91,82 +85,83 @@ PROCESS= myamp.o genps.o unwgt.o setcuts.o get_color.o \ DSIG=driver.o $(patsubst %.f, %.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) DSIG_cudacpp=driver_cudacpp.o $(patsubst %.f, %_cudacpp.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) -SYMMETRY = symmetry.o idenparts.o +SYMMETRY = symmetry.o idenparts.o -# Binaries +# cudacpp targets: +CUDACPP_MAKEFILE := cudacpp.mk +ifneq (,$(wildcard $(CUDACPP_MAKEFILE))) +include $(CUDACPP_MAKEFILE) +endif -ifeq ($(UNAME),Darwin) -LDFLAGS += -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) -LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" -else -LDFLAGS += -Wl,--no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +ifeq ($(CUDACPP_BUILDDIR),) +$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) 
endif +CUDACPP_COMMONLIB=mg5amc_common +CUDACPP_CXXLIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so +CUDACPP_CULIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so -all: $(PROG)_fortran $(CUDACPP_BUILDDIR)/$(PROG)_cpp # also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (#503) +# Set up OpenMP if supported +OMPFLAGS ?= -fopenmp ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp LINKLIBS += -liomp5 # see #578 LINKLIBS += -lintlc # undefined reference to `_intel_fast_memcpy' else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -override OMPFLAGS = -fopenmp $(CUDACPP_BUILDDIR)/$(PROG)_cpp: LINKLIBS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -override OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang -else -override OMPFLAGS = -fopenmp +OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang endif -$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o - $(FC) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) - -$(LIBS): .libs -.libs: ../../Cards/param_card.dat ../../Cards/run_card.dat - cd ../../Source; make - touch $@ +# Binaries -$(CUDACPP_BUILDDIR)/.cudacpplibs: - $(MAKE) -f $(CUDACPP_MAKEFILE) - touch $@ +$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) # On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables ($ORIGIN on Linux) # On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary -ifeq ($(UNAME_S),Darwin) - override LIBFLAGSRPATH = -else ifeq ($(USEBUILDDIR),1) 
- override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' -else - override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/$(LIBDIR)' +ifneq ($(UNAME_S),Darwin) + LIBFLAGSRPATH := -Wl,-rpath,'$$ORIGIN:$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' endif -.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link +.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link madevent_cppnone_link madevent_cppsse4_link madevent_cppavx2_link madevent_cpp512y_link madevent_cpp512z_link clean cleanall cleansrc madevent_fortran_link: $(PROG)_fortran rm -f $(PROG) ln -s $(PROG)_fortran $(PROG) -madevent_cpp_link: $(CUDACPP_BUILDDIR)/$(PROG)_cpp - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) +madevent_cppnone_link: AVX=none +madevent_cppnone_link: cppnone + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -madevent_cuda_link: $(CUDACPP_BUILDDIR)/$(PROG)_cuda - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) +madevent_cppavx2_link: AVX=avx2 +madevent_cppavx2_link: cppavx2 + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512y_link: AVX=512y +madevent_cpp512y_link: cppavx512y + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512z_link: AVX=512z +madevent_cpp512z_link: cppavx512z + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -# Building $(PROG)_cpp also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (improved patch for cpp-only builds #503) -$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o $(CUDACPP_BUILDDIR)/.cudacpplibs - $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CXXLIB) $(LIBFLAGSRPATH) $(LDFLAGS) - if [ -f $(LIBDIR)/$(CUDACPP_BUILDDIR)/lib$(CUDACPP_CULIB).* ]; then $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROCESS) $(DSIG_cudacpp) 
auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CULIB) $(LIBFLAGSRPATH) $(LDFLAGS); fi +madevent_cuda_link: AVX=cuda +madevent_cuda_link: cuda + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) -$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(CUDACPP_BUILDDIR)/$(PROG)_cpp +$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(LIBS) $(CUDACPP_CXXLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) + +$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(LIBS) $(CUDACPP_CULIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) counters.o: counters.cc timer.h - $(CXX) $(CXXFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ ompnumthreads.o: ompnumthreads.cc ompnumthreads.h - $(CXX) -I. $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) $(OMPFLAGS) @@ -174,27 +169,14 @@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) gensym: $(SYMMETRY) configs.inc $(LIBS) $(FC) -o gensym $(SYMMETRY) -L$(LIBDIR) $(LINKLIBS) $(LDFLAGS) -###ifeq (,$(wildcard fbridge.inc)) # Pointless: fbridge.inc always exists as this is the cudacpp-modified makefile! 
-###$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat -### cd ../../Source/MODEL; make -### -###$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat -### cd ../../Source; make -### -###$(LIBDIR)libpdf.$(libext): -### cd ../../Source/PDF; make -### -###$(LIBDIR)libgammaUPC.$(libext): -### cd ../../Source/PDF/gammaUPC; make -###endif # Add source so that the compiler finds the DiscreteSampler module. $(MATRIX): %.o: %.f - $(FC) $(FFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC -%.o: %.f - $(FC) $(FFLAGS) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC + $(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC +%.o $(CUDACPP_BUILDDIR)/%.o: %.f + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -I../../Source/ -I../../Source/PDF/gammaUPC -c $< -o $@ %_cudacpp.o: %.f - $(FC) $(FFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ # Dependencies @@ -215,60 +197,42 @@ unwgt.o: genps.inc nexternal.inc symswap.inc cluster.inc run.inc message.inc \ initcluster.o: message.inc # Extra dependencies on discretesampler.mod +../../Source/discretesampler.mod: ../../Source/DiscreteSampler.f -auto_dsig.o: .libs -driver.o: .libs -driver_cudacpp.o: .libs -$(MATRIX): .libs -genps.o: .libs +auto_dsig.o: ../../Source/discretesampler.mod +driver.o: ../../Source/discretesampler.mod +driver_cudacpp.o: ../../Source/discretesampler.mod +$(MATRIX): ../../Source/discretesampler.mod +genps.o: ../../Source/discretesampler.mod # Cudacpp avxall targets -UNAME_P := $(shell uname -p) ifeq ($(UNAME_P),ppc64le) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else ifeq ($(UNAME_P),arm) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else -avxall: avxnone avxsse4 avxavx2 avx512y avx512z +cppall: cppnative cppnone cppsse4 cppavx2 cppavx512y cppavx512z endif -avxnone: $(PROG)_fortran $(DSIG_cudacpp) - @echo 
- $(MAKE) USEBUILDDIR=1 AVX=none - -avxsse4: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 - -avxavx2: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 - -avx512y: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y - -avx512z: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z - -###endif - -# Clean (NB: 'make clean' in Source calls 'make clean' in all P*) +ALL: cppall cuda -clean: # Clean builds: fortran in this Pn; cudacpp executables for one AVX in this Pn - $(RM) *.o gensym $(PROG) $(PROG)_fortran $(PROG)_forhel $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(CUDACPP_BUILDDIR)/$(PROG)_cuda +# Clean all architecture-specific builds: +clean: + $(RM) *.o gensym $(PROG) $(PROG)_* + $(RM) -rf build.*/*{.o,.so,.exe,.dylib,madevent_*} + @for dir in build.*; do if [ -z "$$(ls -A $${dir})" ]; then rm -r $${dir}; else echo "Not cleaning $${dir}; not empty"; fi; done -cleanavxs: clean # Clean builds: fortran in this Pn; cudacpp for all AVX in this Pn and in src - $(MAKE) -f $(CUDACPP_MAKEFILE) cleanall - rm -f $(CUDACPP_BUILDDIR)/.cudacpplibs - rm -f .libs +cleanall: cleansrc + for PROCESS in ../P[0-9]*; do $(MAKE) -C $${PROCESS} clean; done -cleanall: # Clean builds: fortran in all P* and in Source; cudacpp for all AVX in all P* and in src - make -C ../../Source cleanall - rm -rf $(LIBDIR)libbias.$(libext) - rm -f ../../Source/*.mod ../../Source/*/*.mod +# Clean one architecture-specific build +clean%: + $(RM) -r build.$*_* -distclean: cleanall # Clean all fortran and cudacpp builds as well as the googletest installation - $(MAKE) -f $(CUDACPP_MAKEFILE) distclean +# Clean common source directories (interferes with other P*) +cleansrc: + make -C ../../Source clean + $(RM) -f $(SOURCEDIR_GUARD) ../../Source/{*.mod,.lock} ../../Source/*/*.mod + $(RM) -r $(LIBDIR)libbias.$(libext) + if [ -d ../../src ]; then $(MAKE) -C ../../src -f cudacpp_src.mk clean; fi diff --git 
a/epochX/cudacpp/ee_mumu.mad/SubProcesses/runTest.cc b/epochX/cudacpp/ee_mumu.mad/SubProcesses/runTest.cc index d4a760a71b..6c77775fb2 100644 --- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/runTest.cc +++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/runTest.cc @@ -243,18 +243,20 @@ struct CUDATest : public CUDA_CPU_TestBase // Use two levels of macros to force stringification at the right level // (see https://gcc.gnu.org/onlinedocs/gcc-3.0.1/cpp_3.html#SEC17 and https://stackoverflow.com/a/3419392) // Google macro is in https://github.com/google/googletest/blob/master/googletest/include/gtest/gtest-param-test.h +/* clang-format off */ #define TESTID_CPU( s ) s##_CPU #define XTESTID_CPU( s ) TESTID_CPU( s ) #define MG_INSTANTIATE_TEST_SUITE_CPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); #define TESTID_GPU( s ) s##_GPU #define XTESTID_GPU( s ) TESTID_GPU( s ) #define MG_INSTANTIATE_TEST_SUITE_GPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); +/* clang-format on */ #ifdef __CUDACC__ MG_INSTANTIATE_TEST_SUITE_GPU( XTESTID_GPU( MG_EPOCH_PROCESS_ID ), MadgraphTest ); diff --git a/epochX/cudacpp/ee_mumu.mad/SubProcesses/testxxx.cc b/epochX/cudacpp/ee_mumu.mad/SubProcesses/testxxx.cc index 3361fe5aa9..1d315f6d75 100644 --- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/testxxx.cc +++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/testxxx.cc @@ -40,7 +40,7 @@ namespace mg5amcCpu { std::string FPEhandlerMessage = "unknown"; int FPEhandlerIevt = -1; - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { 
#ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU): '" << FPEhandlerMessage << "' ievt=" << FPEhandlerIevt << std::endl; @@ -71,11 +71,10 @@ TEST( XTESTID( MG_EPOCH_PROCESS_ID ), testxxx ) constexpr bool testEvents = !dumpEvents; // run the test? constexpr fptype toleranceXXXs = std::is_same::value ? 1.E-15 : 1.E-5; // Constant parameters - constexpr int neppM = MemoryAccessMomenta::neppM; // AOSOA layout constexpr int np4 = CPPProcess::np4; - const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') - assert( nevt % neppM == 0 ); // nevt must be a multiple of neppM - assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV + const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') + assert( nevt % MemoryAccessMomenta::neppM == 0 ); // nevt must be a multiple of neppM + assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV // Fill in the input momenta #ifdef __CUDACC__ mg5amcGpu::PinnedHostBufferMomenta hstMomenta( nevt ); // AOSOA[npagM][npar=4][np4=4][neppM] diff --git a/epochX/cudacpp/ee_mumu.mad/bin/internal/banner.py b/epochX/cudacpp/ee_mumu.mad/bin/internal/banner.py index bd1517985f..b408679c2f 100755 --- a/epochX/cudacpp/ee_mumu.mad/bin/internal/banner.py +++ b/epochX/cudacpp/ee_mumu.mad/bin/internal/banner.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. 
# # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,7 +53,7 @@ MADEVENT = False import madgraph.various.misc as misc import madgraph.iolibs.file_writers as file_writers - import madgraph.iolibs.files as files + import madgraph.iolibs.files as files import models.check_param_card as param_card_reader from madgraph import MG5DIR, MadGraph5Error, InvalidCmd @@ -80,36 +80,36 @@ class Banner(dict): 'mgproccard': 'MGProcCard', 'mgruncard': 'MGRunCard', 'ma5card_parton' : 'MA5Card_parton', - 'ma5card_hadron' : 'MA5Card_hadron', + 'ma5card_hadron' : 'MA5Card_hadron', 'mggenerationinfo': 'MGGenerationInfo', 'mgpythiacard': 'MGPythiaCard', 'mgpgscard': 'MGPGSCard', 'mgdelphescard': 'MGDelphesCard', 'mgdelphestrigger': 'MGDelphesTrigger', 'mgshowercard': 'MGShowerCard' } - + forbid_cdata = ['initrwgt'] - + def __init__(self, banner_path=None): """ """ if isinstance(banner_path, Banner): dict.__init__(self, banner_path) self.lhe_version = banner_path.lhe_version - return + return else: dict.__init__(self) - + #Look at the version if MADEVENT: self['mgversion'] = '#%s\n' % open(pjoin(MEDIR, 'MGMEVersion.txt')).read() else: info = misc.get_pkg_info() self['mgversion'] = info['version']+'\n' - + self.lhe_version = None - + if banner_path: self.read_banner(banner_path) @@ -123,7 +123,7 @@ def __init__(self, banner_path=None): 'mgruncard':'run_card.dat', 'mgpythiacard':'pythia_card.dat', 'mgpgscard' : 'pgs_card.dat', - 'mgdelphescard':'delphes_card.dat', + 'mgdelphescard':'delphes_card.dat', 'mgdelphestrigger':'delphes_trigger.dat', 'mg5proccard':'proc_card_mg5.dat', 'mgproccard': 'proc_card.dat', @@ -137,10 +137,10 @@ def __init__(self, banner_path=None): 'mgshowercard':'shower_card.dat', 'pythia8':'pythia8_card.dat', 'ma5card_parton':'madanalysis5_parton_card.dat', - 'ma5card_hadron':'madanalysis5_hadron_card.dat', + 'ma5card_hadron':'madanalysis5_hadron_card.dat', 'run_settings':'' } - + def read_banner(self, input_path): """read a banner""" 
@@ -151,7 +151,7 @@ def read_banner(self, input_path): def split_iter(string): return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL)) input_path = split_iter(input_path) - + text = '' store = False for line in input_path: @@ -170,13 +170,13 @@ def split_iter(string): text += line else: text += '%s%s' % (line, '\n') - - #reaching end of the banner in a event file avoid to read full file + + #reaching end of the banner in a event file avoid to read full file if "
" in line: break elif "" in line: break - + def __getattribute__(self, attr): """allow auto-build for the run_card/param_card/... """ try: @@ -187,23 +187,23 @@ def __getattribute__(self, attr): return self.charge_card(attr) - + def change_lhe_version(self, version): """change the lhe version associate to the banner""" - + version = float(version) if version < 3: version = 1 elif version > 3: raise Exception("Not Supported version") self.lhe_version = version - + def get_cross(self, witherror=False): """return the cross-section of the file""" if "init" not in self: raise Exception - + text = self["init"].split('\n') cross = 0 error = 0 @@ -217,13 +217,13 @@ def get_cross(self, witherror=False): return cross else: return cross, math.sqrt(error) - + def scale_init_cross(self, ratio): """modify the init information with the associate scale""" assert "init" in self - + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -231,29 +231,29 @@ def scale_init_cross(self, ratio): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break pid = int(pid) - + line = " %+13.7e %+13.7e %+13.7e %i" % \ (ratio*float(xsec), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + def get_pdg_beam(self): """return the pdg of each beam""" - + assert "init" in self - + all_lines = self["init"].split('\n') pdg1,pdg2,_ = all_lines[0].split(None, 2) return int(pdg1), int(pdg2) - + def load_basic(self, medir): """ Load the proc_card /param_card and run_card """ - + self.add(pjoin(medir,'Cards', 'param_card.dat')) self.add(pjoin(medir,'Cards', 'run_card.dat')) if os.path.exists(pjoin(medir, 'SubProcesses', 'procdef_mg5.dat')): @@ -261,29 +261,29 @@ def load_basic(self, medir): self.add(pjoin(medir,'Cards', 'proc_card_mg5.dat')) else: self.add(pjoin(medir,'Cards', 'proc_card.dat')) - + def change_seed(self, 
seed): """Change the seed value in the banner""" # 0 = iseed p = re.compile(r'''^\s*\d+\s*=\s*iseed''', re.M) new_seed_str = " %s = iseed" % seed self['mgruncard'] = p.sub(new_seed_str, self['mgruncard']) - + def add_generation_info(self, cross, nb_event): """add info on MGGeneration""" - + text = """ # Number of Events : %s # Integrated weight (pb) : %s """ % (nb_event, cross) self['MGGenerationInfo'] = text - + ############################################################################ # SPLIT BANNER ############################################################################ def split(self, me_dir, proc_card=True): """write the banner in the Cards directory. - proc_card argument is present to avoid the overwrite of proc_card + proc_card argument is present to avoid the overwrite of proc_card information""" for tag, text in self.items(): @@ -305,37 +305,37 @@ def check_pid(self, pid2label): """special routine removing width/mass of particles not present in the model This is usefull in case of loop model card, when we want to use the non loop model.""" - + if not hasattr(self, 'param_card'): self.charge_card('slha') - + for tag in ['mass', 'decay']: block = self.param_card.get(tag) for data in block: pid = data.lhacode[0] - if pid not in list(pid2label.keys()): + if pid not in list(pid2label.keys()): block.remove((pid,)) def get_lha_strategy(self): """get the lha_strategy: how the weight have to be handle by the shower""" - + if not self["init"]: raise Exception("No init block define") - + data = self["init"].split('\n')[0].split() if len(data) != 10: misc.sprint(len(data), self['init']) raise Exception("init block has a wrong format") return int(float(data[-2])) - + def set_lha_strategy(self, value): """set the lha_strategy: how the weight have to be handle by the shower""" - + if not (-4 <= int(value) <= 4): six.reraise(Exception, "wrong value for lha_strategy", value) if not self["init"]: raise Exception("No init block define") - + all_lines = 
self["init"].split('\n') data = all_lines[0].split() if len(data) != 10: @@ -351,13 +351,13 @@ def modify_init_cross(self, cross, allow_zero=False): assert isinstance(cross, dict) # assert "all" in cross assert "init" in self - + cross = dict(cross) for key in cross.keys(): if isinstance(key, str) and key.isdigit() and int(key) not in cross: cross[int(key)] = cross[key] - - + + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -365,7 +365,7 @@ def modify_init_cross(self, cross, allow_zero=False): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break @@ -383,23 +383,23 @@ def modify_init_cross(self, cross, allow_zero=False): (float(cross[pid]), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + ############################################################################ # WRITE BANNER ############################################################################ def write(self, output_path, close_tag=True, exclude=[]): """write the banner""" - + if isinstance(output_path, str): ff = open(output_path, 'w') else: ff = output_path - + if MADEVENT: header = open(pjoin(MEDIR, 'Source', 'banner_header.txt')).read() else: header = open(pjoin(MG5DIR,'Template', 'LO', 'Source', 'banner_header.txt')).read() - + if not self.lhe_version: self.lhe_version = self.get('run_card', 'lhe_version', default=1.0) if float(self.lhe_version) < 3: @@ -412,7 +412,7 @@ def write(self, output_path, close_tag=True, exclude=[]): for tag in [t for t in self.ordered_items if t in list(self.keys())]+ \ [t for t in self.keys() if t not in self.ordered_items]: - if tag in ['init'] or tag in exclude: + if tag in ['init'] or tag in exclude: continue capitalized_tag = self.capitalized_items[tag] if tag in self.capitalized_items else tag start_data, stop_data = '', '' @@ -422,19 +422,19 @@ def write(self, 
output_path, close_tag=True, exclude=[]): stop_data = ']]>\n' out = '<%(tag)s>%(start_data)s\n%(text)s\n%(stop_data)s\n' % \ {'tag':capitalized_tag, 'text':self[tag].strip(), - 'start_data': start_data, 'stop_data':stop_data} + 'start_data': start_data, 'stop_data':stop_data} try: ff.write(out) except: ff.write(out.encode('utf-8')) - - + + if not '/header' in exclude: out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) if 'init' in self and not 'init' in exclude: text = self['init'] @@ -444,22 +444,22 @@ def write(self, output_path, close_tag=True, exclude=[]): ff.write(out) except: ff.write(out.encode('utf-8')) - + if close_tag: - out = '\n' + out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) return ff - - + + ############################################################################ # BANNER ############################################################################ def add(self, path, tag=None): """Add the content of the file to the banner""" - + if not tag: card_name = os.path.basename(path) if 'param_card' in card_name: @@ -505,33 +505,33 @@ def add_text(self, tag, text): if tag == 'param_card': tag = 'slha' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' - + self[tag.lower()] = text - - + + def charge_card(self, tag): """Build the python object associated to the card""" - + if tag in ['param_card', 'param']: tag = 'slha' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag - + if tag == 
'slha': param_card = self[tag].split('\n') self.param_card = param_card_reader.ParamCard(param_card) @@ -544,56 +544,56 @@ def charge_card(self, tag): self.proc_card = ProcCard(proc_card) return self.proc_card elif tag =='mgshowercard': - shower_content = self[tag] + shower_content = self[tag] if MADEVENT: import internal.shower_card as shower_card else: import madgraph.various.shower_card as shower_card self.shower_card = shower_card.ShowerCard(shower_content, True) - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.shower_card.testing = False return self.shower_card elif tag =='foanalyse': - analyse_content = self[tag] + analyse_content = self[tag] if MADEVENT: import internal.FO_analyse_card as FO_analyse_card else: import madgraph.various.FO_analyse_card as FO_analyse_card - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True) self.FOanalyse_card.testing = False return self.FOanalyse_card - + def get_detail(self, tag, *arg, **opt): """return a specific """ - + if tag in ['param_card', 'param']: tag = 'slha' attr_tag = 'param_card' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], '%s not recognized' % tag - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) + 
self.charge_card(attr_tag) card = getattr(self, attr_tag) if len(arg) == 0: @@ -613,7 +613,7 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 2 and tag == 'slha': try: return card[arg[0]].get(arg[1:]) @@ -621,15 +621,15 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 0: return card else: raise Exception("Unknow command") - + #convenient alias get = get_detail - + def set(self, tag, *args): """modify one of the cards""" @@ -637,27 +637,27 @@ def set(self, tag, *args): tag = 'slha' attr_tag = 'param_card' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized' - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) - + self.charge_card(attr_tag) + card = getattr(self, attr_tag) if len(args) ==2: if tag == 'mg5proccard': @@ -666,20 +666,20 @@ def set(self, tag, *args): card[args[0]] = args[1] else: card[args[:-1]] = args[-1] - - + + @misc.multiple_try() def add_to_file(self, path, seed=None, out=None): """Add the banner to a file and change the associate seed in the banner""" if seed is not None: self.set("run_card", "iseed", seed) - + if not out: path_out = "%s.tmp" % path else: path_out = out - + ff = self.write(path_out, close_tag=False, exclude=['MGGenerationInfo', '/header', 'init']) ff.write("## END BANNER##\n") @@ -698,44 +698,44 @@ def add_to_file(self, path, seed=None, out=None): files.mv(path_out, path) - + def 
split_banner(banner_path, me_dir, proc_card=True): """a simple way to split a banner""" - + banner = Banner(banner_path) banner.split(me_dir, proc_card) - + def recover_banner(results_object, level, run=None, tag=None): """as input we receive a gen_crossxhtml.AllResults object. This define the current banner and load it """ - + if not run: - try: - _run = results_object.current['run_name'] - _tag = results_object.current['tag'] + try: + _run = results_object.current['run_name'] + _tag = results_object.current['tag'] except Exception: return Banner() else: _run = run if not tag: - try: - _tag = results_object[run].tags[-1] + try: + _tag = results_object[run].tags[-1] except Exception as error: if os.path.exists( pjoin(results_object.path,'Events','%s_banner.txt' % (run))): tag = None else: - return Banner() + return Banner() else: _tag = tag - - path = results_object.path - if tag: + + path = results_object.path + if tag: banner_path = pjoin(path,'Events',run,'%s_%s_banner.txt' % (run, tag)) else: banner_path = pjoin(results_object.path,'Events','%s_banner.txt' % (run)) - + if not os.path.exists(banner_path): if level != "parton" and tag != _tag: return recover_banner(results_object, level, _run, results_object[_run].tags[0]) @@ -754,12 +754,12 @@ def recover_banner(results_object, level, run=None, tag=None): return Banner(lhe.banner) # security if the banner was remove (or program canceled before created it) - return Banner() - + return Banner() + banner = Banner(banner_path) - - - + + + if level == 'pythia': if 'mgpythiacard' in banner: del banner['mgpythiacard'] @@ -768,13 +768,13 @@ def recover_banner(results_object, level, run=None, tag=None): if tag in banner: del banner[tag] return banner - + class InvalidRunCard(InvalidCmd): pass class ProcCard(list): """Basic Proccard object""" - + history_header = \ '#************************************************************\n' + \ '#* MadGraph5_aMC@NLO *\n' + \ @@ -798,10 +798,10 @@ class ProcCard(list): '#* run as 
./bin/mg5_aMC filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - - - - + + + + def __init__(self, init=None): """ initialize a basic proc_card""" self.info = {'model': 'sm', 'generate':None, @@ -810,13 +810,13 @@ def __init__(self, init=None): if init: self.read(init) - + def read(self, init): """read the proc_card and save the information""" - + if isinstance(init, str): #path to file init = open(init, 'r') - + store_line = '' for line in init: line = line.rstrip() @@ -828,28 +828,28 @@ def read(self, init): store_line = "" if store_line: raise Exception("WRONG CARD FORMAT") - - + + def move_to_last(self, cmd): """move an element to the last history.""" for line in self[:]: if line.startswith(cmd): self.remove(line) list.append(self, line) - + def append(self, line): """"add a line in the proc_card perform automatically cleaning""" - + line = line.strip() cmds = line.split() if len(cmds) == 0: return - + list.append(self, line) - + # command type: cmd = cmds[0] - + if cmd == 'output': # Remove previous outputs from history self.clean(allow_for_removal = ['output'], keep_switch=True, @@ -875,7 +875,7 @@ def append(self, line): elif cmds[1] == 'proc_v4': #full cleaning self[:] = [] - + def clean(self, to_keep=['set','add','load'], remove_bef_last=None, @@ -884,13 +884,13 @@ def clean(self, to_keep=['set','add','load'], keep_switch=False): """Remove command in arguments from history. All command before the last occurrence of 'remove_bef_last' - (including it) will be removed (but if another options tells the opposite). + (including it) will be removed (but if another options tells the opposite). 'to_keep' is a set of line to always keep. - 'to_remove' is a set of line to always remove (don't care about remove_bef_ + 'to_remove' is a set of line to always remove (don't care about remove_bef_ status but keep_switch acts.). 
- if 'allow_for_removal' is define only the command in that list can be + if 'allow_for_removal' is define only the command in that list can be remove of the history for older command that remove_bef_lb1. all parameter - present in to_remove are always remove even if they are not part of this + present in to_remove are always remove even if they are not part of this list. keep_switch force to keep the statement remove_bef_??? which changes starts the removal mode. @@ -900,8 +900,8 @@ def clean(self, to_keep=['set','add','load'], if __debug__ and allow_for_removal: for arg in to_keep: assert arg not in allow_for_removal - - + + nline = -1 removal = False #looping backward @@ -912,7 +912,7 @@ def clean(self, to_keep=['set','add','load'], if not removal and remove_bef_last: if self[nline].startswith(remove_bef_last): removal = True - switch = True + switch = True # if this is the switch and is protected pass to the next element if switch and keep_switch: @@ -923,12 +923,12 @@ def clean(self, to_keep=['set','add','load'], if any([self[nline].startswith(arg) for arg in to_remove]): self.pop(nline) continue - + # Only if removal mode is active! 
if removal: if allow_for_removal: # Only a subset of command can be removed - if any([self[nline].startswith(arg) + if any([self[nline].startswith(arg) for arg in allow_for_removal]): self.pop(nline) continue @@ -936,10 +936,10 @@ def clean(self, to_keep=['set','add','load'], # All command have to be remove but protected self.pop(nline) continue - + # update the counter to pass to the next element nline -= 1 - + def get(self, tag, default=None): if isinstance(tag, int): list.__getattr__(self, tag) @@ -954,32 +954,32 @@ def get(self, tag, default=None): except ValueError: name, content = line[7:].split(None,1) out.append((name, content)) - return out + return out else: return self.info[tag] - + def write(self, path): """write the proc_card to a given path""" - + fsock = open(path, 'w') fsock.write(self.history_header) for line in self: while len(line) > 70: - sub, line = line[:70]+"\\" , line[70:] + sub, line = line[:70]+"\\" , line[70:] fsock.write(sub+"\n") else: fsock.write(line+"\n") - -class InvalidCardEdition(InvalidCmd): pass - + +class InvalidCardEdition(InvalidCmd): pass + class ConfigFile(dict): """ a class for storing/dealing with input file. - """ + """ def __init__(self, finput=None, **opt): """initialize a new instance. input can be an instance of MadLoopParam, - a file, a path to a file, or simply Nothing""" - + a file, a path to a file, or simply Nothing""" + if isinstance(finput, self.__class__): dict.__init__(self) for key in finput.__dict__: @@ -989,7 +989,7 @@ def __init__(self, finput=None, **opt): return else: dict.__init__(self) - + # Initialize it with all the default value self.user_set = set() self.auto_set = set() @@ -1000,15 +1000,15 @@ def __init__(self, finput=None, **opt): self.comments = {} # comment associated to parameters. can be display via help message # store the valid options for a given parameter. 
self.allowed_value = {} - + self.default_setup() self.plugin_input(finput) - + # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, **opt) - + @@ -1028,7 +1028,7 @@ def __add__(self, other): base = self.__class__(self) #base = copy.copy(self) base.update((key.lower(),value) for key, value in other.items()) - + return base def __radd__(self, other): @@ -1036,26 +1036,26 @@ def __radd__(self, other): new = copy.copy(other) new.update((key, value) for key, value in self.items()) return new - + def __contains__(self, key): return dict.__contains__(self, key.lower()) def __iter__(self): - + for name in super(ConfigFile, self).__iter__(): yield self.lower_to_case[name.lower()] - - + + #iter = super(ConfigFile, self).__iter__() #misc.sprint(iter) #return (self.lower_to_case[name] for name in iter) - + def keys(self): return [name for name in self] - + def items(self): return [(name,self[name]) for name in self] - + @staticmethod def warn(text, level, raiseerror=False): """convenient proxy to raiseerror/print warning""" @@ -1071,11 +1071,11 @@ def warn(text, level, raiseerror=False): log = lambda t: logger.log(level, t) elif level: log = level - + return log(text) def post_set(self, name, value, change_userdefine, raiseerror): - + if value is None: value = self[name] @@ -1087,25 +1087,25 @@ def post_set(self, name, value, change_userdefine, raiseerror): return getattr(self, 'post_set_%s' % name)(value, change_userdefine, raiseerror) else: raise - + def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): """set the attribute and set correctly the type if the value is a string. change_userdefine on True if we have to add the parameter in user_set """ - + if not len(self): #Should never happen but when deepcopy/pickle self.__init__() - + name = name.strip() - lower_name = name.lower() - + lower_name = name.lower() + # 0. 
check if this parameter is a system only one if change_userdefine and lower_name in self.system_only: text='%s is a private entry which can not be modify by the user. Keep value at %s' % (name,self[name]) self.warn(text, 'critical', raiseerror) return - + #1. check if the parameter is set to auto -> pass it to special if lower_name in self: targettype = type(dict.__getitem__(self, lower_name)) @@ -1115,22 +1115,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): self.user_set.remove(lower_name) #keep old value. self.post_set(lower_name, 'auto', change_userdefine, raiseerror) - return + return elif lower_name in self.auto_set: self.auto_set.remove(lower_name) - + # 2. Find the type of the attribute that we want if lower_name in self.list_parameter: targettype = self.list_parameter[lower_name] - - - + + + if isinstance(value, str): # split for each comma/space value = value.strip() if value.startswith('[') and value.endswith(']'): value = value[1:-1] - #do not perform split within a " or ' block + #do not perform split within a " or ' block data = re.split(r"((? bad input dropped.append(val) - + if not new_values: text= "value '%s' for entry '%s' is not valid. Preserving previous value: '%s'.\n" \ % (value, name, self[lower_name]) text += "allowed values are any list composed of the following entries: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) - return self.warn(text, 'warning', raiseerror) - elif dropped: + return self.warn(text, 'warning', raiseerror) + elif dropped: text = "some value for entry '%s' are not valid. 
Invalid items are: '%s'.\n" \ % (name, dropped) text += "value will be set to %s" % new_values - text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) + text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) self.warn(text, 'warning') values = new_values # make the assignment - dict.__setitem__(self, lower_name, values) + dict.__setitem__(self, lower_name, values) if change_userdefine: self.user_set.add(lower_name) #check for specific action - return self.post_set(lower_name, None, change_userdefine, raiseerror) + return self.post_set(lower_name, None, change_userdefine, raiseerror) elif lower_name in self.dict_parameter: - targettype = self.dict_parameter[lower_name] + targettype = self.dict_parameter[lower_name] full_reset = True #check if we just update the current dict or not - + if isinstance(value, str): value = value.strip() # allowed entry: @@ -1209,7 +1209,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): # name , value => just add the entry # name value => just add the entry # {name1:value1, name2:value2} => full reset - + # split for each comma/space if value.startswith('{') and value.endswith('}'): new_value = {} @@ -1219,23 +1219,23 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): x, y = pair.split(':') x, y = x.strip(), y.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] new_value[x] = y value = new_value elif ',' in value: x,y = value.split(',') value = {x.strip():y.strip()} full_reset = False - + elif ':' in value: x,y = value.split(':') value = {x.strip():y.strip()} - full_reset = False + full_reset = False else: x,y = value.split() value = {x:y} - full_reset = False - + full_reset = False + if isinstance(value, dict): for key in value: value[key] = self.format_variable(value[key], targettype, name=name) @@ -1248,7 +1248,7 @@ def __setitem__(self, 
name, value, change_userdefine=False,raiseerror=False): if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - elif name in self: + elif name in self: targettype = type(self[name]) else: logger.debug('Trying to add argument %s in %s. ' % (name, self.__class__.__name__) +\ @@ -1256,22 +1256,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): suggestions = [k for k in self.keys() if k.startswith(name[0].lower())] if len(suggestions)>0: logger.debug("Did you mean one of the following: %s"%suggestions) - self.add_param(lower_name, self.format_variable(UnknownType(value), + self.add_param(lower_name, self.format_variable(UnknownType(value), UnknownType, name)) self.lower_to_case[lower_name] = name if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - + value = self.format_variable(value, targettype, name=name) #check that the value is allowed: if lower_name in self.allowed_value and '*' not in self.allowed_value[lower_name]: valid = False allowed = self.allowed_value[lower_name] - + # check if the current value is allowed or not (set valid to True) if value in allowed: - valid=True + valid=True elif isinstance(value, str): value = value.lower().strip() allowed = [str(v).lower() for v in allowed] @@ -1279,7 +1279,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): i = allowed.index(value) value = self.allowed_value[lower_name][i] valid=True - + if not valid: # act if not valid: text = "value '%s' for entry '%s' is not valid. 
Preserving previous value: '%s'.\n" \ @@ -1303,7 +1303,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, if __debug__: if lower_name in self: raise Exception("Duplicate case for %s in %s" % (name,self.__class__)) - + dict.__setitem__(self, lower_name, value) self.lower_to_case[lower_name] = name if isinstance(value, list): @@ -1318,12 +1318,12 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, elif isinstance(value, dict): allvalues = list(value.values()) if any([type(allvalues[0]) != type(v) for v in allvalues]): - raise Exception("All entry should have the same type") - self.dict_parameter[lower_name] = type(allvalues[0]) + raise Exception("All entry should have the same type") + self.dict_parameter[lower_name] = type(allvalues[0]) if '__type__' in value: del value['__type__'] dict.__setitem__(self, lower_name, value) - + if allowed and allowed != ['*']: self.allowed_value[lower_name] = allowed if lower_name in self.list_parameter: @@ -1333,8 +1333,8 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, assert value in allowed or '*' in allowed #elif isinstance(value, bool) and allowed != ['*']: # self.allowed_value[name] = [True, False] - - + + if system: self.system_only.add(lower_name) if comment: @@ -1342,7 +1342,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, def do_help(self, name): """return a minimal help for the parameter""" - + out = "## Information on parameter %s from class %s\n" % (name, self.__class__.__name__) if name.lower() in self: out += "## current value: %s (parameter should be of type %s)\n" % (self[name], type(self[name])) @@ -1351,7 +1351,7 @@ def do_help(self, name): else: out += "## Unknown for this class\n" if name.lower() in self.user_set: - out += "## This value is considered as being set by the user\n" + out += "## This value is considered as being set by the user\n" else: out += "## This value is considered as 
being set by the system\n" if name.lower() in self.allowed_value: @@ -1359,17 +1359,17 @@ def do_help(self, name): out += "Allowed value are: %s\n" % ','.join([str(p) for p in self.allowed_value[name.lower()]]) else: out += "Suggested value are : %s\n " % ','.join([str(p) for p in self.allowed_value[name.lower()] if p!='*']) - + logger.info(out) return out @staticmethod def guess_type_from_value(value): "try to guess the type of the string --do not use eval as it might not be safe" - + if not isinstance(value, str): return str(value.__class__.__name__) - + #use ast.literal_eval to be safe since value is untrusted # add a timeout to mitigate infinite loop, memory stack attack with misc.stdchannel_redirected(sys.stdout, os.devnull): @@ -1388,7 +1388,7 @@ def guess_type_from_value(value): @staticmethod def format_variable(value, targettype, name="unknown"): """assign the value to the attribute for the given format""" - + if isinstance(targettype, str): if targettype in ['str', 'int', 'float', 'bool']: targettype = eval(targettype) @@ -1412,7 +1412,7 @@ def format_variable(value, targettype, name="unknown"): (name, type(value), targettype, value)) else: raise InvalidCmd("Wrong input type for %s found %s and expecting %s for value %s" %\ - (name, type(value), targettype, value)) + (name, type(value), targettype, value)) else: if targettype != UnknownType: value = value.strip() @@ -1441,8 +1441,8 @@ def format_variable(value, targettype, name="unknown"): value = int(value) elif value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} - value =int(value[:-1]) * convert[value[-1]] - elif '/' in value or '*' in value: + value =int(value[:-1]) * convert[value[-1]] + elif '/' in value or '*' in value: try: split = re.split('(\*|/)',value) v = float(split[0]) @@ -1461,7 +1461,7 @@ def format_variable(value, targettype, name="unknown"): try: value = float(value.replace('d','e')) except ValueError: - raise InvalidCmd("%s can not be mapped to an 
integer" % value) + raise InvalidCmd("%s can not be mapped to an integer" % value) try: new_value = int(value) except ValueError: @@ -1471,7 +1471,7 @@ def format_variable(value, targettype, name="unknown"): value = new_value else: raise InvalidCmd("incorect input: %s need an integer for %s" % (value,name)) - + elif targettype == float: if value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} @@ -1496,33 +1496,33 @@ def format_variable(value, targettype, name="unknown"): value = v else: raise InvalidCmd("type %s is not handle by the card" % targettype) - + return value - - + + def __getitem__(self, name): - + lower_name = name.lower() if __debug__: if lower_name not in self: if lower_name in [key.lower() for key in self] : raise Exception("Some key are not lower case %s. Invalid use of the class!"\ % [key for key in self if key.lower() != key]) - + if lower_name in self.auto_set: return 'auto' - + return dict.__getitem__(self, name.lower()) - + get = __getitem__ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): """convenient way to change attribute. changeifuserset=False means that the value is NOT change is the value is not on default. - user=True, means that the value will be marked as modified by the user - (potentially preventing future change to the value) + user=True, means that the value will be marked as modified by the user + (potentially preventing future change to the value) """ # changeifuserset=False -> we need to check if the user force a value. 
@@ -1530,8 +1530,8 @@ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): if name.lower() in self.user_set: #value modified by the user -> do nothing return - self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) - + self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) + class RivetCard(ConfigFile): @@ -1706,7 +1706,7 @@ def setRelevantParamCard(self, f_params, f_relparams): yexec_dict = {} yexec_line = exec_line + "yaxis_relvar = " + self['yaxis_relvar'] exec(yexec_line, locals(), yexec_dict) - if self['yaxis_label'] == "": + if self['yaxis_label'] == "": self['yaxis_label'] = "yaxis_relvar" f_relparams.write("{0} = {1}\n".format(self['yaxis_label'], yexec_dict['yaxis_relvar'])) else: @@ -1715,11 +1715,11 @@ def setRelevantParamCard(self, f_params, f_relparams): class ProcCharacteristic(ConfigFile): """A class to handle information which are passed from MadGraph to the madevent - interface.""" - + interface.""" + def default_setup(self): """initialize the directory to the default value""" - + self.add_param('loop_induced', False) self.add_param('has_isr', False) self.add_param('has_fsr', False) @@ -1735,16 +1735,16 @@ def default_setup(self): self.add_param('pdg_initial1', [0]) self.add_param('pdg_initial2', [0]) self.add_param('splitting_types',[], typelist=str) - self.add_param('perturbation_order', [], typelist=str) - self.add_param('limitations', [], typelist=str) - self.add_param('hel_recycling', False) + self.add_param('perturbation_order', [], typelist=str) + self.add_param('limitations', [], typelist=str) + self.add_param('hel_recycling', False) self.add_param('single_color', True) - self.add_param('nlo_mixed_expansion', True) + self.add_param('nlo_mixed_expansion', True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if 
isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1752,49 +1752,49 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: if '#' in line: line = line.split('#',1)[0] if not line: continue - + if '=' in line: key, value = line.split('=',1) self[key.strip()] = value - + def write(self, outputpath): """write the file""" template ="# Information about the process #\n" template +="#########################################\n" - + fsock = open(outputpath, 'w') fsock.write(template) - + for key, value in self.items(): fsock.write(" %s = %s \n" % (key, value)) - - fsock.close() - + + fsock.close() + class GridpackCard(ConfigFile): """an object for the GridpackCard""" - + def default_setup(self): """default value for the GridpackCard""" - + self.add_param("GridRun", True) self.add_param("gevents", 2500) self.add_param("gseed", 1) - self.add_param("ngran", -1) - + self.add_param("ngran", -1) + def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1802,7 +1802,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -1812,19 +1812,19 @@ def read(self, finput): self[line[1].strip()] = line[0].replace('\'','').strip() def write(self, output_file, template=None): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'grid_card_default.dat') else: template = pjoin(MEDIR, 'Cards', 'grid_card_default.dat') - + text = 
"" - for line in open(template,'r'): + for line in open(template,'r'): nline = line.split('#')[0] nline = nline.split('!')[0] comment = line[len(nline):] @@ -1832,19 +1832,19 @@ def write(self, output_file, template=None): if len(nline) != 2: text += line elif nline[1].strip() in self: - text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) + text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) else: logger.info('Adding missing parameter %s to current run_card (with default value)' % nline[1].strip()) - text += line - + text += line + if isinstance(output_file, str): fsock = open(output_file,'w') else: fsock = output_file - + fsock.write(text) fsock.close() - + class PY8Card(ConfigFile): """ Implements the Pythia8 card.""" @@ -1868,7 +1868,7 @@ def add_default_subruns(self, type): def default_setup(self): """ Sets up the list of available PY8 parameters.""" - + # Visible parameters # ================== self.add_param("Main:numberOfEvents", -1) @@ -1877,11 +1877,11 @@ def default_setup(self): self.add_param("JetMatching:qCut", -1.0, always_write_to_card=False) self.add_param("JetMatching:doShowerKt",False,always_write_to_card=False) # -1 means that it is automatically set. - self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) + self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) # for CKKWL merging self.add_param("Merging:TMS", -1.0, always_write_to_card=False) self.add_param("Merging:Process", '', always_write_to_card=False) - # -1 means that it is automatically set. + # -1 means that it is automatically set. self.add_param("Merging:nJetMax", -1, always_write_to_card=False) # for both merging, chose whether to also consider different merging # scale values for the extra weights related to scale and PDF variations. 
@@ -1918,10 +1918,10 @@ def default_setup(self): comment='This allows to turn on/off hadronization alltogether.') self.add_param("partonlevel:mpi", True, hidden=True, always_write_to_card=False, comment='This allows to turn on/off MPI alltogether.') - self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, + self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, always_write_to_card=False, comment='This parameter is automatically set to True by MG5aMC when doing MLM merging with PY8.') - + # for MLM merging self.add_param("JetMatching:merge", False, hidden=True, always_write_to_card=False, comment='Specifiy if we are merging sample of different multiplicity.') @@ -1931,9 +1931,9 @@ def default_setup(self): comment='Value of the merging scale below which one does not even write the HepMC event.') self.add_param("JetMatching:doVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. in SysCalc).') - self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) + self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) self.add_param("JetMatching:setMad", False, hidden=True, always_write_to_card=False, - comment='Specify one must read inputs from the MadGraph banner.') + comment='Specify one must read inputs from the MadGraph banner.') self.add_param("JetMatching:coneRadius", 1.0, hidden=True, always_write_to_card=False) self.add_param("JetMatching:nQmatch",4,hidden=True, always_write_to_card=False) # for CKKWL merging (common with UMEPS, UNLOPS) @@ -1946,7 +1946,7 @@ def default_setup(self): self.add_param("Merging:applyVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. 
in SysCalc).') self.add_param("Merging:includeWeightInXsection", True, hidden=True, always_write_to_card=False, - comment='If turned off, then the option belows forces PY8 to keep the original weight.') + comment='If turned off, then the option belows forces PY8 to keep the original weight.') self.add_param("Merging:muRen", 91.188, hidden=True, always_write_to_card=False, comment='Set renormalization scales of the 2->2 process.') self.add_param("Merging:muFacInME", 91.188, hidden=True, always_write_to_card=False, @@ -1958,7 +1958,7 @@ def default_setup(self): # To be added in subruns for CKKWL self.add_param("Merging:mayRemoveDecayProducts", False, hidden=True, always_write_to_card=False) self.add_param("Merging:doKTMerging", False, hidden=True, always_write_to_card=False) - self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) + self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) self.add_param("Merging:doPTLundMerging", False, hidden=True, always_write_to_card=False) # Special Pythia8 paremeters useful to simplify the shower. @@ -1975,33 +1975,33 @@ def default_setup(self): # Add parameters controlling the subruns execution flow. # These parameters should not be part of PY8SubRun daughter. self.add_default_subruns('parameters') - + def __init__(self, *args, **opts): - # Parameters which are not printed in the card unless they are - # 'user_set' or 'system_set' or part of the + # Parameters which are not printed in the card unless they are + # 'user_set' or 'system_set' or part of the # self.hidden_params_to_always_print set. self.hidden_param = [] self.hidden_params_to_always_write = set() self.visible_params_to_always_write = set() # List of parameters that should never be written out given the current context. self.params_to_never_write = set() - + # Parameters which have been set by the system (i.e. 
MG5 itself during # the regular course of the shower interface) self.system_set = set() - + # Add attributes controlling the subruns execution flow. # These attributes should not be part of PY8SubRun daughter. self.add_default_subruns('attributes') - - # Parameters which have been set by the + + # Parameters which have been set by the super(PY8Card, self).__init__(*args, **opts) - def add_param(self, name, value, hidden=False, always_write_to_card=True, + def add_param(self, name, value, hidden=False, always_write_to_card=True, comment=None): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. The option 'hidden' decides whether the parameter should be visible to the user. The option 'always_write_to_card' decides whether it should @@ -2017,7 +2017,7 @@ def add_param(self, name, value, hidden=False, always_write_to_card=True, self.hidden_params_to_always_write.add(name) else: if always_write_to_card: - self.visible_params_to_always_write.add(name) + self.visible_params_to_always_write.add(name) if not comment is None: if not isinstance(comment, str): raise MadGraph5Error("Option 'comment' must be a string, not"+\ @@ -2036,7 +2036,7 @@ def add_subrun(self, py8_subrun): self.subruns[py8_subrun['Main:subrun']] = py8_subrun if not 'LHEFInputs:nSubruns' in self.user_set: self['LHEFInputs:nSubruns'] = max(self.subruns.keys()) - + def userSet(self, name, value, **opts): """Set an attribute of this card, following a user_request""" self.__setitem__(name, value, change_userdefine=True, **opts) @@ -2044,10 +2044,10 @@ def userSet(self, name, value, **opts): self.system_set.remove(name.lower()) def vetoParamWriteOut(self, name): - """ Forbid the writeout of a specific parameter of this card when the + """ Forbid the writeout of a specific parameter of this card when the "write" function will be invoked.""" 
self.params_to_never_write.add(name.lower()) - + def systemSet(self, name, value, **opts): """Set an attribute of this card, independently of a specific user request and only if not already user_set.""" @@ -2058,7 +2058,7 @@ def systemSet(self, name, value, **opts): if force or name.lower() not in self.user_set: self.__setitem__(name, value, change_userdefine=False, **opts) self.system_set.add(name.lower()) - + def MadGraphSet(self, name, value, **opts): """ Sets a card attribute, but only if it is absent or not already user_set.""" @@ -2068,18 +2068,18 @@ def MadGraphSet(self, name, value, **opts): force = False if name.lower() not in self or (force or name.lower() not in self.user_set): self.__setitem__(name, value, change_userdefine=False, **opts) - self.system_set.add(name.lower()) - + self.system_set.add(name.lower()) + def defaultSet(self, name, value, **opts): self.__setitem__(name, value, change_userdefine=False, **opts) - + @staticmethod def pythia8_formatting(value, formatv=None): """format the variable into pythia8 card convention. 
The type is detected by default""" if not formatv: if isinstance(value,UnknownType): - formatv = 'unknown' + formatv = 'unknown' elif isinstance(value, bool): formatv = 'bool' elif isinstance(value, int): @@ -2095,7 +2095,7 @@ def pythia8_formatting(value, formatv=None): formatv = 'str' else: assert formatv - + if formatv == 'unknown': # No formatting then return str(value) @@ -2116,7 +2116,7 @@ def pythia8_formatting(value, formatv=None): elif formatv == 'float': return '%.10e' % float(value) elif formatv == 'shortfloat': - return '%.3f' % float(value) + return '%.3f' % float(value) elif formatv == 'str': return "%s" % value elif formatv == 'list': @@ -2124,9 +2124,9 @@ def pythia8_formatting(value, formatv=None): return ','.join([PY8Card.pythia8_formatting(arg, 'shortfloat') for arg in value]) else: return ','.join([PY8Card.pythia8_formatting(arg) for arg in value]) - - def write(self, output_file, template, read_subrun=False, + + def write(self, output_file, template, read_subrun=False, print_only_visible=False, direct_pythia_input=False, add_missing=True): """ Write the card to output_file using a specific template. 
> 'print_only_visible' specifies whether or not the hidden parameters @@ -2143,28 +2143,28 @@ def write(self, output_file, template, read_subrun=False, or p.lower() in self.user_set] # Filter against list of parameters vetoed for write-out visible_param = [p for p in visible_param if p.lower() not in self.params_to_never_write] - + # Now the hidden param which must be written out if print_only_visible: hidden_output_param = [] else: hidden_output_param = [p for p in self if p.lower() in self.hidden_param and not p.lower() in self.user_set and - (p.lower() in self.hidden_params_to_always_write or + (p.lower() in self.hidden_params_to_always_write or p.lower() in self.system_set)] # Filter against list of parameters vetoed for write-out hidden_output_param = [p for p in hidden_output_param if p not in self.params_to_never_write] - + if print_only_visible: subruns = [] else: if not read_subrun: subruns = sorted(self.subruns.keys()) - + # Store the subruns to write in a dictionary, with its ID in key # and the corresponding stringstream in value subruns_to_write = {} - + # Sort these parameters nicely so as to put together parameters # belonging to the same group (i.e. prefix before the ':' in their name). def group_params(params): @@ -2191,7 +2191,7 @@ def group_params(params): # First dump in a temporary_output (might need to have a second pass # at the very end to update 'LHEFInputs:nSubruns') output = StringIO.StringIO() - + # Setup template from which to read if isinstance(template, str): if os.path.isfile(template): @@ -2199,7 +2199,7 @@ def group_params(params): elif '\n' in template: tmpl = StringIO.StringIO(template) else: - raise Exception("File input '%s' not found." % file_input) + raise Exception("File input '%s' not found." 
% file_input) elif template is None: # Then use a dummy empty StringIO, hence skipping the reading tmpl = StringIO.StringIO() @@ -2257,8 +2257,8 @@ def group_params(params): # Remove all of its variables (so that nothing is overwritten) DummySubrun.clear() DummySubrun.write(subruns_to_write[int(value)], - tmpl, read_subrun=True, - print_only_visible=print_only_visible, + tmpl, read_subrun=True, + print_only_visible=print_only_visible, direct_pythia_input=direct_pythia_input) logger.info('Adding new unknown subrun with ID %d.'% @@ -2267,7 +2267,7 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - + # Change parameters which must be output if param in visible_param: new_value = PY8Card.pythia8_formatting(self[param]) @@ -2286,10 +2286,10 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - - # Substitute the value. + + # Substitute the value. # If it is directly the pytia input, then don't write the param if it - # is not in the list of visible_params_to_always_write and was + # is not in the list of visible_params_to_always_write and was # not user_set or system_set if ((not direct_pythia_input) or (param.lower() in self.visible_params_to_always_write) or @@ -2304,16 +2304,16 @@ def group_params(params): output.write(template%(param_entry, value_entry.replace(value,new_value))) - + # Proceed to next line last_pos = tmpl.tell() line = tmpl.readline() - + # If add_missing is False, make sure to empty the list of remaining parameters if not add_missing: visible_param = [] hidden_output_param = [] - + # Now output the missing parameters. Warn about visible ones. if len(visible_param)>0 and not template is None: output.write( @@ -2343,12 +2343,12 @@ def group_params(params): """%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else '')) for param in hidden_output_param: if param.lower() in self.comments: - comment = '\n'.join('! %s'%c for c in + comment = '\n'.join('! 
%s'%c for c in self.comments[param.lower()].split('\n')) output.write(comment+'\n') output.write('%s=%s\n'%(param,PY8Card.pythia8_formatting(self[param]))) - - # Don't close the file if we were reading a subrun, but simply write + + # Don't close the file if we were reading a subrun, but simply write # output and return now if read_subrun: output_file.write(output.getvalue()) @@ -2382,12 +2382,12 @@ def group_params(params): out.close() else: output_file.write(output.getvalue()) - + def read(self, file_input, read_subrun=False, setter='default'): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file. - The setter option choses the authority that sets potential - modified/new parameters. It can be either: + The setter option choses the authority that sets potential + modified/new parameters. It can be either: 'default' or 'user' or 'system'""" if isinstance(file_input, str): if "\n" in file_input: @@ -2423,8 +2423,8 @@ def read(self, file_input, read_subrun=False, setter='default'): raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%\ line) if '!' in value: - value,_ = value.split('!',1) - + value,_ = value.split('!',1) + # Read a subrun if detected: if param=='Main:subrun': if read_subrun: @@ -2451,7 +2451,7 @@ def read(self, file_input, read_subrun=False, setter='default'): last_pos = finput.tell() line = finput.readline() continue - + # Read parameter. The case of a parameter not defined in the card is # handled directly in ConfigFile. @@ -2478,7 +2478,7 @@ def add_default_subruns(self, type): def __init__(self, *args, **opts): """ Initialize a subrun """ - + # Force user to set it manually. 
subrunID = -1 if 'subrun_id' in opts: @@ -2489,7 +2489,7 @@ def __init__(self, *args, **opts): def default_setup(self): """Sets up the list of available PY8SubRun parameters.""" - + # Add all default PY8Card parameters super(PY8SubRun, self).default_setup() # Make sure they are all hidden @@ -2501,33 +2501,33 @@ def default_setup(self): self.add_param("Main:subrun", -1) self.add_param("Beams:LHEF", "events.lhe.gz") - + class RunBlock(object): """ Class for a series of parameter in the run_card that can be either visible or hidden. - name: allow to set in the default run_card $name to set where that + name: allow to set in the default run_card $name to set where that block need to be inserted template_on: information to include is block is active template_off: information to include is block is not active on_fields/off_fields: paramater associated to the block - can be specify but are otherwise automatically but + can be specify but are otherwise automatically but otherwise determined from the template. - + function: status(self,run_card) -> return which template need to be used check_validity(self, runcard) -> sanity check - create_default_for_process(self, run_card, proc_characteristic, - history, proc_def) + create_default_for_process(self, run_card, proc_characteristic, + history, proc_def) post_set_XXXX(card, value, change_userdefine, raiseerror) -> fct called when XXXXX is set post_set(card, value, change_userdefine, raiseerror, **opt) -> fct called when a parameter is changed - -> no access to parameter name + -> no access to parameter name -> not called if post_set_XXXX is defined """ - - + + def __init__(self, name, template_on, template_off, on_fields=False, off_fields=False): self.name = name @@ -2550,7 +2550,7 @@ def fields(self): def find_fields_from_template(template): """ return the list of fields from a template. 
checking line like %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ - + return re.findall(r"^\s*%\((.*)\)s\s*=\s*\1", template, re.M) def get_template(self, card): @@ -2565,7 +2565,7 @@ def get_unused_template(self, card): if self.status(card): return self.template_off else: - return self.template_on + return self.template_on def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -2594,20 +2594,20 @@ def manage_parameters(self, card, written, to_write): written.add(name) if name in to_write: to_write.remove(name) - + def check_validity(self, runcard): """run self consistency check here --avoid to use runcard[''] = xxx here since it can trigger post_set function""" return def create_default_for_process(self, run_card, proc_characteristic, history, proc_def): - return + return # @staticmethod # def post_set(card, value, change_userdefine, raiseerror, **opt): # """default action to run when a parameter of the block is defined. # Here we do not know which parameter is modified. if this is needed. 
# then one need to define post_set_XXXXX(card, value, change_userdefine, raiseerror) -# and then only that function is used +# and then only that function is used # """ # # if 'pdlabel' in card.user_set: @@ -2621,7 +2621,7 @@ class RunCard(ConfigFile): blocks = [] parameter_in_block = {} - allowed_lep_densities = {} + allowed_lep_densities = {} default_include_file = 'run_card.inc' default_autodef_file = 'run.inc' donewarning = [] @@ -2637,7 +2637,7 @@ def plugin_input(self, finput): curr_dir = os.path.dirname(os.path.dirname(finput.name)) elif isinstance(finput, str): curr_dir = os.path.dirname(os.path.dirname(finput)) - + if curr_dir: if os.path.exists(pjoin(curr_dir, 'bin', 'internal', 'plugin_run_card')): # expected format {} passing everything as optional argument @@ -2646,7 +2646,7 @@ def plugin_input(self, finput): continue opts = dict(eval(line)) self.add_param(**opts) - + @classmethod def fill_post_set_from_blocks(cls): """set the post_set function for any parameter defined in a run_block""" @@ -2659,8 +2659,8 @@ def fill_post_set_from_blocks(cls): elif hasattr(block, 'post_set'): setattr(cls, 'post_set_%s' % parameter, block.post_set) cls.parameter_in_block[parameter] = block - - + + def __new__(cls, finput=None, **opt): cls.fill_post_set_from_blocks() @@ -2718,9 +2718,9 @@ def __new__(cls, finput=None, **opt): return super(RunCard, cls).__new__(cls, finput, **opt) def __init__(self, *args, **opts): - + # The following parameter are updated in the defaultsetup stage. - + #parameter for which no warning should be raised if not define self.hidden_param = [] # in which include file the parameer should be written @@ -2739,11 +2739,11 @@ def __init__(self, *args, **opts): self.cuts_parameter = {} # parameter added where legacy requires an older value. 
self.system_default = {} - + self.display_block = [] # set some block to be displayed self.fct_mod = {} # {param: (fct_pointer, *argument, **opts)} - self.cut_class = {} + self.cut_class = {} self.warned=False @@ -2776,11 +2776,11 @@ def get_lepton_densities(cls): else: cls.allowed_lep_densities[identity].append(name) - def add_param(self, name, value, fortran_name=None, include=True, + def add_param(self, name, value, fortran_name=None, include=True, hidden=False, legacy=False, cut=False, system=False, sys_default=None, autodef=False, fct_mod=None, **opts): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. fortran_name: defines what is the associate name in the f77 code include: defines if we have to put the value in the include file @@ -2795,7 +2795,7 @@ def add_param(self, name, value, fortran_name=None, include=True, fct_mod: defines a function to run if the parameter is modify in the include file options of **opts: - allowed: list of valid options. '*' means anything else should be allowed. - empty list means anything possible as well. + empty list means anything possible as well. 
- comment: add comment for writing/help - typelist: type of the list if default is empty """ @@ -2823,9 +2823,9 @@ def add_param(self, name, value, fortran_name=None, include=True, self.fct_mod[name] = fct_mod def read(self, finput, consistency=True, unknown_warning=True, **opt): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -2836,7 +2836,7 @@ def read(self, finput, consistency=True, unknown_warning=True, **opt): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -2864,8 +2864,8 @@ def add_unknown_entry(self, name, value, unknow_warning): This is based on the guess_entry_fromname for the various syntax providing input. This then call add_param accordingly. - This function does not returns anything. - """ + This function does not returns anything. + """ if name == "dsqrt_q2fact1" and not self.LO: raise InvalidRunCard("Looks like you passed a LO run_card for a NLO run. Please correct") @@ -2903,7 +2903,7 @@ def add_unknown_entry(self, name, value, unknow_warning): " The type was assigned to %s. 
\n"+\ " The definition of that variable will %sbe automatically added to fortran file %s\n"+\ " The value of that variable will %sbe passed to the fortran code via fortran file %s",\ - name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, + name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, "" if opts.get('autodef', False) else "not", "" if opts.get('autodef', False) in [True,False] else opts.get('autodef'), "" if opts.get('include', True) else "not", "" if opts.get('include', True) in [True,False] else opts.get('include')) RunCard.donewarning.append(name) @@ -2923,19 +2923,19 @@ def valid_line(self, line, tmp): return False elif line.strip().startswith('%'): parameter = line[line.find('(')+1:line.find(')')] - + try: cond = self.cuts_parameter[parameter] except KeyError: return True - - + + if template_options.get(cond, default) or cond is True: return True else: - return False + return False else: - return True + return True def reset_simd(self, old_value, new_value, name, *args, **opts): @@ -2946,28 +2946,28 @@ def make_clean(self,old_value, new_value, name, dir): raise Exception('pass make clean for ', dir) def make_Ptouch(self,old_value, new_value, name, reset): - raise Exception('pass Ptouch for ', reset) - + raise Exception('pass Ptouch for ', reset) + def write(self, output_file, template=None, python_template=False, write_hidden=False, template_options=None, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" - to_write = set(self.user_set) + to_write = set(self.user_set) written = set() if not template: raise Exception if not template_options: template_options = collections.defaultdict(str) - + if python_template: text = open(template,'r').read() - text = text.split('\n') + text = text.split('\n') # remove if templating - text = [l if not l.startswith('#IF') else 
l[l.find(')# ')+2:] + text = [l if not l.startswith('#IF') else l[l.find(')# ')+2:] for l in text if self.valid_line(l, template_options)] text ='\n'.join(text) - + if python_template and not to_write: import string if self.blocks: @@ -2981,14 +2981,14 @@ def write(self, output_file, template=None, python_template=False, if not self.list_parameter: text = text % self else: - data = dict((key.lower(),value) for key, value in self.items()) + data = dict((key.lower(),value) for key, value in self.items()) for name in self.list_parameter: if self.list_parameter[name] != str: data[name] = ', '.join(str(v) for v in data[name]) else: data[name] = "['%s']" % "', '".join(str(v) for v in data[name]) text = text % data - else: + else: text = "" for line in open(template,'r'): nline = line.split('#')[0] @@ -3005,11 +3005,11 @@ def write(self, output_file, template=None, python_template=False, this_group = this_group[0] text += this_group.get_template(self) % self this_group.manage_parameters(self, written, to_write) - + elif len(nline) != 2: text += line elif nline[1].strip() in self: - + name = nline[1].strip().lower() value = self[name] if name in self.list_parameter: @@ -3026,15 +3026,15 @@ def write(self, output_file, template=None, python_template=False, else: endline = '' text += ' %s\t= %s %s%s' % (value, name, comment, endline) - written.add(name) + written.add(name) if name in to_write: to_write.remove(name) else: logger.info('Adding missing parameter %s to current %s (with default value)', (name, self.filename)) - written.add(name) - text += line + written.add(name) + text += line for b in self.blocks: if b.status(self): @@ -3057,7 +3057,7 @@ def write(self, output_file, template=None, python_template=False, else: #partial writting -> add only what is needed to_add = [] - for line in b.get_template(self).split('\n'): + for line in b.get_template(self).split('\n'): nline = line.split('#')[0] nline = nline.split('!')[0] nline = nline.split('=') @@ -3072,8 +3072,8 @@ 
def write(self, output_file, template=None, python_template=False, continue #already include before else: to_add.append(line % {nline[1].strip():value, name:value}) - written.add(name) - + written.add(name) + if name in to_write: to_write.remove(name) else: @@ -3095,13 +3095,13 @@ def write(self, output_file, template=None, python_template=False, text += '\n'.join(to_add) if to_write or write_hidden: - text+="""#********************************************************************* + text+="""#********************************************************************* # Additional hidden parameters #********************************************************************* -""" +""" if write_hidden: # - # do not write hidden parameter not hidden for this template + # do not write hidden parameter not hidden for this template # if python_template: written = written.union(set(re.findall('\%\((\w*)\)s', open(template,'r').read(), re.M))) @@ -3129,7 +3129,7 @@ def get_last_value_include(self, output_dir): if inc file does not exist we will return the current value (i.e. set has no change) """ - #remember that + #remember that # default_include_file is a class variable # self.includepath is on the form include_path : [list of param ] out = {} @@ -3165,7 +3165,7 @@ def get_value_from_include(self, path, list_of_params, output_dir): with open(pjoin(output_dir,path), 'r') as fsock: text = fsock.read() - + for name in list_of_params: misc.sprint(name, name in self.fortran_name) misc.sprint(self.fortran_name[name] if name in self.fortran_name[name] else name) @@ -3191,11 +3191,11 @@ def get_value_from_include(self, path, list_of_params, output_dir): misc.sprint(self.fortran_name) misc.sprint(text) raise Exception - return out + return out def get_default(self, name, default=None, log_level=None): - """return self[name] if exist otherwise default. log control if we + """return self[name] if exist otherwise default. 
log control if we put a warning or not if we use the default value""" lower_name = name.lower() @@ -3216,13 +3216,13 @@ def get_default(self, name, default=None, log_level=None): log_level = 20 if not default: default = dict.__getitem__(self, name.lower()) - + logger.log(log_level, '%s missed argument %s. Takes default: %s' % (self.filename, name, default)) self[name] = default return default else: - return self[name] + return self[name] def mod_inc_pdlabel(self, value): """flag pdlabel has 'dressed' if one of the special lepton PDF with beamstralung. @@ -3237,16 +3237,16 @@ def edit_dummy_fct_from_file(self, filelist, outdir): filelist is a list of input files (given by the user) containing a series of function to be placed in replacement of standard (typically dummy) functions of the code. - This use LO/NLO class attribute that defines which function name need to - be placed in which file. + This use LO/NLO class attribute that defines which function name need to + be placed in which file. First time this is used, a backup of the original file is done in order to - recover if the user remove some of those files. + recover if the user remove some of those files. The function present in the file are determined automatically via regular expression. and only that function is replaced in the associated file. 
- function in the filelist starting with user_ will also be include within the + function in the filelist starting with user_ will also be include within the dummy_fct.f file """ @@ -3269,7 +3269,7 @@ def edit_dummy_fct_from_file(self, filelist, outdir): fsock = file_writers.FortranWriter(tmp,'w') function_text = fsock.remove_routine(text, fct) fsock.close() - test = open(tmp,'r').read() + test = open(tmp,'r').read() if fct not in self.dummy_fct_file: if fct.startswith('user_'): self.dummy_fct_file[fct] = self.dummy_fct_file['user_'] @@ -3315,22 +3315,22 @@ def guess_entry_fromname(self, name, value): - vartype: type of the variable - name: name of the variable (stripped from metadata) - options: additional options for the add_param - rules: - - if name starts with str_, int_, float_, bool_, list_, dict_ then + rules: + - if name starts with str_, int_, float_, bool_, list_, dict_ then - vartype is set accordingly - name is strip accordingly - otherwise guessed from value (which is string) - if name contains min/max - vartype is set to float - options has an added {'cut':True} - - suffixes like + - suffixes like - will be removed from named - will be added in options (for add_param) as {'cut':True} see add_param documentation for the list of supported options - if include is on False set autodef to False (i.e. 
enforce it False for future change) """ - # local function + # local function def update_typelist(value, name, opts): """convert a string to a list and update opts to keep track of the type """ value = value.strip() @@ -3358,7 +3358,7 @@ def update_typelist(value, name, opts): opts[key] = val name = name.replace("<%s=%s>" %(key,val), '') - # get vartype + # get vartype # first check that name does not force it supported_type = ["str", "float", "int", "bool", "list", "dict"] if "_" in name and name.split("_")[0].lower() in supported_type: @@ -3406,13 +3406,13 @@ def f77_formatting(value, formatv=None): value = str(value).lower() else: assert formatv - + if formatv == 'bool': if str(value) in ['1','T','.true.','True']: return '.true.' else: return '.false.' - + elif formatv == 'int': try: return str(int(value)) @@ -3422,12 +3422,12 @@ def f77_formatting(value, formatv=None): return str(int(fl)) else: raise - + elif formatv == 'float': if isinstance(value, str): value = value.replace('d','e') return ('%.10e' % float(value)).replace('e','d') - + elif formatv == 'str': # Check if it is a list if value.strip().startswith('[') and value.strip().endswith(']'): @@ -3437,20 +3437,20 @@ def f77_formatting(value, formatv=None): enumerate(elements)] else: return "'%s'" % value - - + + def check_validity(self, log_level=30): """check that parameter missing in the card are set to the expected value""" for name, value in self.system_default.items(): self.set(name, value, changeifuserset=False) - + for name in self.includepath[False]: to_bypass = self.hidden_param + list(self.legacy_parameter.keys()) if name not in to_bypass: - self.get_default(name, log_level=log_level) + self.get_default(name, log_level=log_level) for name in self.legacy_parameter: if self[name] != self.legacy_parameter[name]: @@ -3458,28 +3458,28 @@ def check_validity(self, log_level=30): for block in self.blocks: block.check_validity(self) - + def update_system_parameter_for_include(self): - """update hidden 
system only parameter for the correct writtin in the + """update hidden system only parameter for the correct writtin in the include""" return - + def write_include_file(self, output_dir, output_file=None): """Write the various include file in output_dir. The entry True of self.includepath will be written in run_card.inc The entry False will not be written anywhere output_file allows testing by providing stream. - This also call the function to add variable definition for the - variable with autodef=True (handle by write_autodef function) + This also call the function to add variable definition for the + variable with autodef=True (handle by write_autodef function) """ - + # ensure that all parameter are coherent and fix those if needed self.check_validity() - + #ensusre that system only parameter are correctly set self.update_system_parameter_for_include() @@ -3490,10 +3490,10 @@ def write_include_file(self, output_dir, output_file=None): self.write_autodef(output_dir, output_file=None) # check/fix status of customised functions self.edit_dummy_fct_from_file(self["custom_fcts"], os.path.dirname(output_dir)) - + for incname in self.includepath: self.write_one_include_file(output_dir, incname, output_file) - + for name,value in value_in_old_include.items(): if value != self[name]: self.fct_mod[name][0](value, self[name], name, *self.fct_mod[name][1],**self.fct_mod[name][2]) @@ -3515,13 +3515,13 @@ def write_one_include_file(self, output_dir, incname, output_file=None): fsock = file_writers.FortranWriter(pjoin(output_dir,pathinc+'.tmp')) - for key in self.includepath[incname]: + for key in self.includepath[incname]: #define the fortran name if key in self.fortran_name: fortran_name = self.fortran_name[key] else: fortran_name = key - + if incname in self.include_as_parameter: fsock.writelines('INTEGER %s\n' % fortran_name) #get the value with warning if the user didn't set it @@ -3534,7 +3534,7 @@ def write_one_include_file(self, output_dir, incname, 
output_file=None): # in case of a list, add the length of the list as 0th # element in fortran. Only in case of integer or float # list (not for bool nor string) - targettype = self.list_parameter[key] + targettype = self.list_parameter[key] if targettype is bool: pass elif targettype is int: @@ -3550,7 +3550,7 @@ def write_one_include_file(self, output_dir, incname, output_file=None): elif isinstance(value, dict): for fortran_name, onevalue in value.items(): line = '%s = %s \n' % (fortran_name, self.f77_formatting(onevalue)) - fsock.writelines(line) + fsock.writelines(line) elif isinstance(incname,str) and 'compile' in incname: if incname in self.include_as_parameter: line = 'PARAMETER (%s=%s)' %( fortran_name, value) @@ -3585,7 +3585,7 @@ def write_autodef(self, output_dir, output_file=None): filetocheck = dict(self.definition_path) if True not in self.definition_path: filetocheck[True] = [] - + for incname in filetocheck: if incname is True: @@ -3598,7 +3598,7 @@ def write_autodef(self, output_dir, output_file=None): if output_file: fsock = output_file input = fsock.getvalue() - + else: input = open(pjoin(output_dir,pathinc),'r').read() # do not define fsock here since we might not need to overwrite it @@ -3608,7 +3608,7 @@ def write_autodef(self, output_dir, output_file=None): previous = re.findall(re_pat, input, re.M) # now check which one needed to be added (and remove those identicaly defined) to_add = [] - for key in filetocheck[incname]: + for key in filetocheck[incname]: curr_type = self[key].__class__.__name__ length = "" if curr_type in [list, "list"]: @@ -3640,10 +3640,10 @@ def write_autodef(self, output_dir, output_file=None): fsock.truncate(0) fsock.seek(0) - # remove outdated lines + # remove outdated lines lines = input.split('\n') if previous: - out = [line for line in lines if not re.search(re_pat, line, re.M) or + out = [line for line in lines if not re.search(re_pat, line, re.M) or re.search(re_pat, line, re.M).groups() not in previous] else: 
out = lines @@ -3662,7 +3662,7 @@ def write_autodef(self, output_dir, output_file=None): stop = out.index('C STOP USER COMMON BLOCK') out = out[:start]+ out[stop+1:] #add new common-block - if self.definition_path[incname]: + if self.definition_path[incname]: out.append("C START USER COMMON BLOCK") if isinstance(pathinc , str): filename = os.path.basename(pathinc).split('.',1)[0] @@ -3675,10 +3675,10 @@ def write_autodef(self, output_dir, output_file=None): filename = filename.upper() out.append(" COMMON/USER_CUSTOM_%s/%s" %(filename,','.join( self.definition_path[incname]))) out.append('C STOP USER COMMON BLOCK') - + if not output_file: fsock.writelines(out) - fsock.close() + fsock.close() else: # for iotest out = ["%s\n" %l for l in out] @@ -3702,7 +3702,7 @@ def get_idbmup(lpp): def get_banner_init_information(self): """return a dictionary with the information needed to write the first line of the block of the lhe file.""" - + output = {} output["idbmup1"] = self.get_idbmup(self['lpp1']) output["idbmup2"] = self.get_idbmup(self['lpp2']) @@ -3713,7 +3713,7 @@ def get_banner_init_information(self): output["pdfsup1"] = self.get_pdf_id(self["pdlabel"]) output["pdfsup2"] = self.get_pdf_id(self["pdlabel"]) return output - + def get_pdf_id(self, pdf): if pdf == "lhapdf": lhaid = self["lhaid"] @@ -3721,19 +3721,19 @@ def get_pdf_id(self, pdf): return lhaid[0] else: return lhaid - else: + else: try: return {'none': 0, 'iww': 0, 'eva':0, 'edff':0, 'chff':0, 'cteq6_m':10000,'cteq6_l':10041,'cteq6l1':10042, 'nn23lo':246800,'nn23lo1':247000,'nn23nlo':244800 - }[pdf] + }[pdf] except: - return 0 - + return 0 + def get_lhapdf_id(self): return self.get_pdf_id(self['pdlabel']) - def remove_all_cut(self): + def remove_all_cut(self): """remove all the cut""" for name in self.cuts_parameter: @@ -3749,7 +3749,7 @@ def remove_all_cut(self): elif 'eta' in name: self[name] = -1 else: - self[name] = 0 + self[name] = 0 
################################################################################################ ### Define various template subpart for the LO Run_card @@ -3767,11 +3767,11 @@ def remove_all_cut(self): %(nb_proton1)s = nb_proton1 # number of proton for the first beam %(nb_neutron1)s = nb_neutron1 # number of neutron for the first beam %(mass_ion1)s = mass_ion1 # mass of the heavy ion (first beam) -# Note that seting differently the two beams only work if you use +# Note that seting differently the two beams only work if you use # group_subprocess=False when generating your matrix-element %(nb_proton2)s = nb_proton2 # number of proton for the second beam %(nb_neutron2)s = nb_neutron2 # number of neutron for the second beam - %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) + %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ template_off = "# To see heavy ion options: type \"update ion_pdf\"" @@ -3834,11 +3834,11 @@ def remove_all_cut(self): # Frame for polarization ------------------------------------------------------------------------------------ template_on = \ """#********************************************************************* -# Frame where to evaluate the matrix-element (not the cut!) for polarization +# Frame where to evaluate the matrix-element (not the cut!) for polarization #********************************************************************* %(me_frame)s = me_frame ! list of particles to sum-up to define the rest-frame ! in which to evaluate the matrix-element - ! [1,2] means the partonic center of mass + ! 
[1,2] means the partonic center of mass """ template_off = "" frame_block = RunBlock('frame', template_on=template_on, template_off=template_off) @@ -3891,7 +3891,7 @@ def remove_all_cut(self): # CONTROL The extra running scale (not QCD) * # Such running is NOT include in systematics computation * #*********************************************************************** - %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale + %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale %(mue_ref_fixed)s = mue_ref_fixed ! scale to use if fixed scale mode %(mue_over_ref)s = mue_over_ref ! ratio to mur if dynamical scale """ @@ -3908,10 +3908,10 @@ def remove_all_cut(self): %(tmin_for_channel)s = tmin_for_channel ! limit the non-singular reach of --some-- channel of integration related to T-channel diagram (value between -1 and 0), -1 is no impact %(survey_splitting)s = survey_splitting ! for loop-induced control how many core are used at survey for the computation of a single iteration. %(survey_nchannel_per_job)s = survey_nchannel_per_job ! control how many Channel are integrated inside a single job on cluster/multicore - %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) + %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) +#********************************************************************* +# Compilation flag. #********************************************************************* -# Compilation flag. -#********************************************************************* %(global_flag)s = global_flag ! fortran optimization flag use for the all code. %(aloha_flag)s = aloha_flag ! fortran optimization flag for aloha function. Suggestions: '-ffast-math' %(matrix_flag)s = matrix_flag ! fortran optimization flag for matrix.f function. 
Suggestions: '-O3' @@ -3948,7 +3948,7 @@ def check_validity(self, card): if card['pdlabel'] != card['pdlabel1']: dict.__setitem__(card, 'pdlabel', card['pdlabel1']) elif card['pdlabel1'] in sum(card.allowed_lep_densities.values(),[]): - raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") + raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel2'] in sum(card.allowed_lep_densities.values(),[]): raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel1'] == 'none': @@ -3962,7 +3962,7 @@ def check_validity(self, card): dict.__setitem__(card, 'pdlabel2', card['pdlabel']) if abs(card['lpp1']) == 1 == abs(card['lpp2']) and card['pdlabel1'] != card['pdlabel2']: - raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") + raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -4028,7 +4028,7 @@ def post_set(card, value, change_userdefine, raiseerror, name='unknown', **opt): if name == 'fixed_fac_scale2' and 'fixed_fac_scale1' not in card.user_set: dict.__setitem__(card, 'fixed_fac_scale1', card['fixed_fac_scale']) if name == 'fixed_fac_scale1' and 'fixed_fac_scale2' not in card.user_set: - dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) + dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) def status(self, card): @@ -4061,32 +4061,32 @@ def status(self, card): class RunCardLO(RunCard): """an object to handle in a nice way the run_card information""" - + blocks = [heavy_ion_block, beam_pol_block, syscalc_block, ecut_block, frame_block, eva_scale_block, mlm_block, ckkw_block, psoptim_block, pdlabel_block, fixedfacscale, running_block] dummy_fct_file = {"dummy_cuts": 
pjoin("SubProcesses","dummy_fct.f"), "get_dummy_x1": pjoin("SubProcesses","dummy_fct.f"), - "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), + "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), "dummy_boostframe": pjoin("SubProcesses","dummy_fct.f"), "user_dynamical_scale": pjoin("SubProcesses","dummy_fct.f"), "bias_wgt_custom": pjoin("SubProcesses","dummy_fct.f"), "user_": pjoin("SubProcesses","dummy_fct.f") # all function starting by user will be added to that file } - + include_as_parameter = ['vector.inc'] if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_lo.dat") - + def default_setup(self): """default value for the run_card.dat""" - + self.add_param("run_tag", "tag_1", include=False) self.add_param("gridpack", False) self.add_param("time_of_flight", -1.0, include=False) - self.add_param("nevents", 10000) + self.add_param("nevents", 10000) self.add_param("iseed", 0) self.add_param("python_seed", -2, include=False, hidden=True, comment="controlling python seed [handling in particular the final unweighting].\n -1 means use default from random module.\n -2 means set to same value as iseed") self.add_param("lpp1", 1, fortran_name="lpp(1)", allowed=[-1,1,0,2,3,9,-2,-3,4,-4], @@ -4106,7 +4106,7 @@ def default_setup(self): self.add_param('nb_neutron1', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(1)", comment='For heavy ion physics nb of neutron in the ion (for both beam but if group_subprocess was False)') self.add_param('nb_neutron2', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(2)", - comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') + comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') self.add_param('mass_ion1', -1.0, hidden=True, fortran_name="mass_ion(1)", allowed=[-1,0, 0.938, 207.9766521*0.938, 0.000511, 0.105, '*'], comment='For heavy ion physics mass in GeV of the ion (of beam 1)') 
@@ -4133,11 +4133,11 @@ def default_setup(self): self.add_param("mue_over_ref", 1.0, hidden=True, comment='ratio mu_other/mu for dynamical scale') self.add_param("ievo_eva",0,hidden=True, allowed=[0,1],fortran_name="ievo_eva", comment='eva: 0 for EW pdf muf evolution by q^2; 1 for evo by pT^2') - + # Bias module options self.add_param("bias_module", 'None', include=False, hidden=True) self.add_param('bias_parameters', {'__type__':1.0}, include='BIAS/bias.inc', hidden=True) - + #matching self.add_param("scalefact", 1.0) self.add_param("ickkw", 0, allowed=[0,1], hidden=True, comment="\'0\' for standard fixed order computation.\n\'1\' for MLM merging activates alphas and pdf re-weighting according to a kt clustering of the QCD radiation.") @@ -4221,7 +4221,7 @@ def default_setup(self): self.add_param("mmaa", 0.0, cut='aa') self.add_param("mmll", 0.0, cut='ll') self.add_param("mmjjmax", -1.0, cut='jj') - self.add_param("mmbbmax", -1.0, cut='bb') + self.add_param("mmbbmax", -1.0, cut='bb') self.add_param("mmaamax", -1.0, cut='aa') self.add_param("mmllmax", -1.0, cut='ll') self.add_param("mmnl", 0.0, cut='LL') @@ -4231,9 +4231,9 @@ def default_setup(self): self.add_param("ptllmax", -1.0, cut='ll') self.add_param("xptj", 0.0, cut='jj') self.add_param("xptb", 0.0, cut='bb') - self.add_param("xpta", 0.0, cut='aa') + self.add_param("xpta", 0.0, cut='aa') self.add_param("xptl", 0.0, cut='ll') - # ordered pt jet + # ordered pt jet self.add_param("ptj1min", 0.0, cut='jj') self.add_param("ptj1max", -1.0, cut='jj') self.add_param("ptj2min", 0.0, cut='jj') @@ -4241,7 +4241,7 @@ def default_setup(self): self.add_param("ptj3min", 0.0, cut='jjj') self.add_param("ptj3max", -1.0, cut='jjj') self.add_param("ptj4min", 0.0, cut='j'*4) - self.add_param("ptj4max", -1.0, cut='j'*4) + self.add_param("ptj4max", -1.0, cut='j'*4) self.add_param("cutuse", 0, cut='jj') # ordered pt lepton self.add_param("ptl1min", 0.0, cut='l'*2) @@ -4249,7 +4249,7 @@ def default_setup(self): 
self.add_param("ptl2min", 0.0, cut='l'*2) self.add_param("ptl2max", -1.0, cut='l'*2) self.add_param("ptl3min", 0.0, cut='l'*3) - self.add_param("ptl3max", -1.0, cut='l'*3) + self.add_param("ptl3max", -1.0, cut='l'*3) self.add_param("ptl4min", 0.0, cut='l'*4) self.add_param("ptl4max", -1.0, cut='l'*4) # Ht sum of jets @@ -4257,7 +4257,7 @@ def default_setup(self): self.add_param("htjmax", -1.0, cut='j'*2) self.add_param("ihtmin", 0.0, cut='J'*2) self.add_param("ihtmax", -1.0, cut='J'*2) - self.add_param("ht2min", 0.0, cut='J'*3) + self.add_param("ht2min", 0.0, cut='J'*3) self.add_param("ht3min", 0.0, cut='J'*3) self.add_param("ht4min", 0.0, cut='J'*4) self.add_param("ht2max", -1.0, cut='J'*3) @@ -4267,7 +4267,7 @@ def default_setup(self): self.add_param("ptgmin", 0.0, cut='aj') self.add_param("r0gamma", 0.4, hidden=True) self.add_param("xn", 1.0, hidden=True) - self.add_param("epsgamma", 1.0, hidden=True) + self.add_param("epsgamma", 1.0, hidden=True) self.add_param("isoem", True, hidden=True) self.add_param("xetamin", 0.0, cut='jj') self.add_param("deltaeta", 0.0, cut='j'*2) @@ -4280,7 +4280,7 @@ def default_setup(self): self.add_param("use_syst", True) self.add_param('systematics_program', 'systematics', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics, syscalc') self.add_param('systematics_arguments', ['--mur=0.5,1,2', '--muf=0.5,1,2', '--pdf=errorset'], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. 
Look at the help of the systematics function for more details.') - + self.add_param("sys_scalefact", "0.5 1 2", include=False, hidden=True) self.add_param("sys_alpsfact", "None", include=False, hidden=True) self.add_param("sys_matchscale", "auto", include=False, hidden=True) @@ -4315,8 +4315,8 @@ def default_setup(self): self.add_param('aloha_flag', '', include=False, hidden=True, comment='global fortran compilation flag, suggestion: -ffast-math', fct_mod=(self.make_clean, ('Source/DHELAS'),{})) self.add_param('matrix_flag', '', include=False, hidden=True, comment='fortran compilation flag for the matrix-element files, suggestion -O3', - fct_mod=(self.make_Ptouch, ('matrix'),{})) - self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', + fct_mod=(self.make_Ptouch, ('matrix'),{})) + self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', fortran_name='VECSIZE_MEMMAX', fct_mod=(self.reset_simd,(),{})) # parameter allowing to define simple cut via the pdg @@ -4329,24 +4329,24 @@ def default_setup(self): self.add_param('eta_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False) - + self.add_param('pdg_cut',[0], system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], system=True) # store pt min self.add_param('ptmax4pdg',[-1.], system=True) self.add_param('Emin4pdg',[0.], system=True) # store pt min - self.add_param('Emax4pdg',[-1.], system=True) + self.add_param('Emax4pdg',[-1.], system=True) self.add_param('etamin4pdg',[0.], system=True) # store pt min - self.add_param('etamax4pdg',[-1.], system=True) + self.add_param('etamax4pdg',[-1.], system=True) self.add_param('mxxmin4pdg',[-1.], system=True) self.add_param('mxxpart_antipart', [False], system=True) - - - + + + def check_validity(self): """ """ 
- + super(RunCardLO, self).check_validity() - + #Make sure that nhel is only either 0 (i.e. no MC over hel) or #1 (MC over hel with importance sampling). In particular, it can #no longer be > 1. @@ -4357,12 +4357,12 @@ def check_validity(self): "not %s." % self['nhel']) if int(self['maxjetflavor']) > 6: raise InvalidRunCard('maxjetflavor should be lower than 5! (6 is partly supported)') - + if len(self['pdgs_for_merging_cut']) > 1000: raise InvalidRunCard("The number of elements in "+\ "'pdgs_for_merging_cut' should not exceed 1000.") - + # some cut need to be deactivated in presence of isolation if self['ptgmin'] > 0: if self['pta'] > 0: @@ -4370,18 +4370,18 @@ def check_validity(self): self['pta'] = 0.0 if self['draj'] > 0: logger.warning('draj cut discarded since photon isolation is used') - self['draj'] = 0.0 - - # special treatment for gridpack use the gseed instead of the iseed + self['draj'] = 0.0 + + # special treatment for gridpack use the gseed instead of the iseed if self['gridrun']: self['iseed'] = self['gseed'] - + #Some parameter need to be fixed when using syscalc #if self['use_syst']: # if self['scalefact'] != 1.0: # logger.warning('Since use_syst=T, changing the value of \'scalefact\' to 1') # self['scalefact'] = 1.0 - + # CKKW Treatment if self['ickkw'] > 0: if self['ickkw'] != 1: @@ -4399,7 +4399,7 @@ def check_validity(self): raise InvalidRunCard('maxjetflavor at 6 is NOT supported for matching!') if self['ickkw'] == 2: # add warning if ckkw selected but the associate parameter are empty - self.get_default('highestmult', log_level=20) + self.get_default('highestmult', log_level=20) self.get_default('issgridfile', 'issudgrid.dat', log_level=20) if self['xqcut'] > 0: if self['ickkw'] == 0: @@ -4412,13 +4412,13 @@ def check_validity(self): if self['drjl'] != 0: if 'drjl' in self.user_set: logger.warning('Since icckw>0, changing the value of \'drjl\' to 0') - self['drjl'] = 0 - if not self['auto_ptj_mjj']: + self['drjl'] = 0 + if not 
self['auto_ptj_mjj']: if self['mmjj'] > self['xqcut']: logger.warning('mmjj > xqcut (and auto_ptj_mjj = F). MMJJ set to 0') - self['mmjj'] = 0.0 - - # check validity of the pdf set + self['mmjj'] = 0.0 + + # check validity of the pdf set # note that pdlabel is automatically set to lhapdf if pdlabel1 or pdlabel2 is set to lhapdf if self['pdlabel'] == 'lhapdf': #add warning if lhaid not define @@ -4426,7 +4426,7 @@ def check_validity(self): mod = False for i in [1,2]: - lpp = 'lpp%i' %i + lpp = 'lpp%i' %i pdlabelX = 'pdlabel%i' % i if self[lpp] == 0: # nopdf if self[pdlabelX] != 'none': @@ -4459,12 +4459,12 @@ def check_validity(self): raise InvalidRunCard( "Heavy ion mode is only supported for lpp1=1/2") if self['lpp2'] not in [1,2]: if self['nb_proton2'] !=1 or self['nb_neutron2'] !=0: - raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") + raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") # check that fixed_fac_scale(1/2) is setting as expected # if lpp=2/3/4 -> default is that beam in fixed scale - # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are + # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are # check that both fixed_fac_scale1/2 are defined together # ensure that fixed_fac_scale1 and fixed_fac_scale2 are setup as needed if 'fixed_fac_scale1' in self.user_set: @@ -4475,13 +4475,13 @@ def check_validity(self): elif 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale1 are defined but not fixed_fac_scale2. The value of fixed_fac_scale2 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale2'] = self['fixed_fac_scale'] - elif self['lpp2'] !=0: + elif self['lpp2'] !=0: raise Exception('fixed_fac_scale2 not defined while fixed_fac_scale1 is. Please fix your run_card.') elif 'fixed_fac_scale2' in self.user_set: if 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale2 are defined but not fixed_fac_scale1. 
The value of fixed_fac_scale1 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale1'] = self['fixed_fac_scale'] - elif self['lpp1'] !=0: + elif self['lpp1'] !=0: raise Exception('fixed_fac_scale1 not defined while fixed_fac_scale2 is. Please fix your run_card.') else: if 'fixed_fac_scale' in self.user_set: @@ -4500,12 +4500,12 @@ def check_validity(self): logger.warning('fixed_fac_scale1 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale1']) logger.warning('fixed_fac_scale2 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale2']) - # check if lpp = + # check if lpp = if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]): for i in [1,2]: if abs(self['lpp%s' % i ]) in [3,4] and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: logger.warning("Vector boson from lepton PDF is using fixed scale value of muf [dsqrt_q2fact%s]. Looks like you kept the default value (Mz). Is this really the cut-off that you want to use?" % i) - + if abs(self['lpp%s' % i ]) == 2 and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: if self['pdlabel'] in ['edff','chff']: logger.warning("Since 3.5.0 exclusive photon-photon processes in ultraperipheral proton and nuclear collisions from gamma-UPC (arXiv:2207.03012) will ignore the factorisation scale.") @@ -4515,10 +4515,10 @@ def check_validity(self): if six.PY2 and self['hel_recycling']: self['hel_recycling'] = False - logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. + logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. In general this optimization speeds up the computation by a factor of two.""") - + # check that ebeam is bigger than the associated mass. 
for i in [1,2]: if self['lpp%s' % i ] not in [1,2]: @@ -4529,13 +4529,13 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") - elif self['ebeam%i' % i] < self['mass_ion%i' % i]: + raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + elif self['ebeam%i' % i] < self['mass_ion%i' % i]: if self['ebeam%i' %i] == 0: logger.warning("At rest ion mode set: Energy beam set to %s" % self['mass_ion%i' % i]) self.set('ebeam%i' %i, self['mass_ion%i' % i]) - - + + # check the tmin_for_channel is negative if self['tmin_for_channel'] == 0: raise InvalidRunCard('tmin_for_channel can not be set to 0.') @@ -4543,15 +4543,15 @@ def check_validity(self): logger.warning('tmin_for_channel should be negative. Will be using -%f instead' % self['tmin_for_channel']) self.set('tmin_for_channel', -self['tmin_for_channel']) - + def update_system_parameter_for_include(self): """system parameter need to be setupe""" - + # polarization self['frame_id'] = sum(2**(n) for n in self['me_frame']) - + # set the pdg_for_cut fortran parameter - pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + + pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + list(self['e_min_pdg'].keys()) +list(self['e_max_pdg'].keys()) + list(self['eta_min_pdg'].keys()) +list(self['eta_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys()) + list(self['mxx_only_part_antipart'].keys())) @@ -4559,15 +4559,15 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different pdgs are allowed for pdg specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. 
Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative pdg code') - - + + if any(pdg in pdg_to_cut for pdg in [1,2,3,4,5,21,22,11,13,15]): raise Exception("Can not use PDG related cut for light quark/b quark/lepton/gluon/photon") - + if pdg_to_cut: self['pdg_cut'] = list(pdg_to_cut) self['ptmin4pdg'] = [] @@ -4595,7 +4595,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -4605,11 +4605,11 @@ def update_system_parameter_for_include(self): self['ptmax4pdg'] = [-1.] self['Emax4pdg'] = [-1.] self['etamax4pdg'] =[-1.] - self['mxxmin4pdg'] =[0.] + self['mxxmin4pdg'] =[0.] self['mxxpart_antipart'] = [False] - - - + + + def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules process 1->N all cut set on off. 
@@ -4626,7 +4626,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if proc_characteristic['loop_induced']: self['nhel'] = 1 self['pdgs_for_merging_cut'] = proc_characteristic['colored_pdgs'] - + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() @@ -4636,7 +4636,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): # check for beam_id beam_id = set() beam_id_split = [set(), set()] - for proc in proc_def: + for proc in proc_def: for oneproc in proc: for i,leg in enumerate(oneproc['legs']): if not leg['state']: @@ -4654,20 +4654,20 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): maxjetflavor = max([4]+[abs(i) for i in beam_id if -7< i < 7]) self['maxjetflavor'] = maxjetflavor self['asrwgtflavor'] = maxjetflavor - + if any(i in beam_id for i in [1,-1,2,-2,3,-3,4,-4,5,-5,21,22]): # check for e p collision if any(id in beam_id for id in [11,-11,13,-13]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [11,-11,13,-13]): - self['lpp1'] = 0 - self['lpp2'] = 1 - self['ebeam1'] = '1k' - self['ebeam2'] = '6500' + self['lpp1'] = 0 + self['lpp2'] = 1 + self['ebeam1'] = '1k' + self['ebeam2'] = '6500' else: - self['lpp1'] = 1 - self['lpp2'] = 0 - self['ebeam1'] = '6500' + self['lpp1'] = 1 + self['lpp2'] = 0 + self['ebeam1'] = '6500' self['ebeam2'] = '1k' # UPC for p p collision @@ -4677,7 +4677,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam1'] = '6500' self['ebeam2'] = '6500' self['pdlabel'] = 'edff' - + elif any(id in beam_id for id in [11,-11,13,-13]): self['lpp1'] = 0 self['lpp2'] = 0 @@ -4688,7 +4688,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('ecut') self.display_block.append('beam_pol') - + # check for possibility of eva eva_in_b1 = any(i in beam_id_split[0] for i in [23,24,-24]) #,12,-12,14,-14]) @@ -4701,10 
+4701,10 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['nhel'] = 1 self['pdlabel'] = 'eva' self['fixed_fac_scale'] = True - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') elif eva_in_b1: - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') self['pdlabel1'] = 'eva' self['fixed_fac_scale1'] = True self['nhel'] = 1 @@ -4724,7 +4724,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['pdlabel2'] = 'eva' self['fixed_fac_scale2'] = True self['nhel'] = 1 - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') for i in beam_id_split[0]: if abs(i) == 11: self['lpp1'] = math.copysign(3,i) @@ -4740,34 +4740,34 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if any(i in beam_id for i in [22,23,24,-24,12,-12,14,-14]): self.display_block.append('eva_scale') - # automatic polarisation of the beam if neutrino beam + # automatic polarisation of the beam if neutrino beam if any(id in beam_id for id in [12,-12,14,-14,16,-16]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [12,14,16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = -100 if not all(id in [12,14,16] for id in beam_id_split[0]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1]. %s') elif any(id in beam_id_split[0] for id in [-12,-14,-16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[0]): - logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1].') + logger.warning('Issue with default beam setup of neutrino in the run_card. 
Please check it up [polbeam1].') if any(id in beam_id_split[1] for id in [12,14,16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = -100 if not all(id in [12,14,16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') elif any(id in beam_id_split[1] for id in [-12,-14,-16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') - + # Check if need matching min_particle = 99 max_particle = 0 @@ -4798,12 +4798,12 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - + break + if matching: self['ickkw'] = 1 self['xqcut'] = 30 - #self['use_syst'] = False + #self['use_syst'] = False self['drjj'] = 0 self['drjl'] = 0 self['sys_alpsfact'] = "0.5 1 2" @@ -4811,8 +4811,8 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('mlm') self.display_block.append('ckkw') self['dynamical_scale_choice'] = -1 - - + + # For interference module, the systematics are wrong. 
# automatically set use_syst=F and set systematics_program=none no_systematics = False @@ -4826,14 +4826,14 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): continue break - + if interference or no_systematics: self['use_syst'] = False self['systematics_program'] = 'none' if interference: self['dynamical_scale_choice'] = 3 self['sde_strategy'] = 2 - + # set default integration strategy # interference case is already handle above # here pick strategy 2 if only one QCD color flow @@ -4852,7 +4852,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if pure_lepton and proton_initial: self['sde_strategy'] = 1 else: - # check if multi-jet j + # check if multi-jet j is_multijet = True for proc in proc_def: if any(abs(j.get('id')) not in jet_id for j in proc[0]['legs']): @@ -4860,7 +4860,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): break if is_multijet: self['sde_strategy'] = 2 - + # if polarization is used, set the choice of the frame in the run_card # But only if polarization is used for massive particles for plist in proc_def: @@ -4870,7 +4870,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): model = proc.get('model') particle = model.get_particle(l.get('id')) if particle.get('mass').lower() != 'zero': - self.display_block.append('frame') + self.display_block.append('frame') break else: continue @@ -4894,15 +4894,15 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): proc = proc_list[0] if proc['forbidden_onsh_s_channels']: self['sde_strategy'] = 1 - + if 'fix_scale' in proc_characteristic['limitations']: self['fixed_ren_scale'] = 1 self['fixed_fac_scale'] = 1 if self['ickkw'] == 1: logger.critical("MLM matching/merging not compatible with the model! 
You need to use another method to remove the double counting!") self['ickkw'] = 0 - - # define class of particles present to hide all the cuts associated to + + # define class of particles present to hide all the cuts associated to # not present class cut_class = collections.defaultdict(int) for proc in proc_def: @@ -4925,41 +4925,41 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): one_proc_cut['L'] += 1 elif abs(pdg) in [12,14,16]: one_proc_cut['n'] += 1 - one_proc_cut['L'] += 1 + one_proc_cut['L'] += 1 elif str(oneproc.get('model').get_particle(pdg)['mass']) != 'ZERO': one_proc_cut['H'] += 1 - + for key, nb in one_proc_cut.items(): cut_class[key] = max(cut_class[key], nb) self.cut_class = dict(cut_class) self.cut_class[''] = True #avoid empty - + # If model has running functionality add the additional parameter model = proc_def[0][0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Read file input/default_run_card_lo.dat # This has to be LAST !! 
if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'run_card.dat') python_template = True else: template = pjoin(MEDIR, 'Cards', 'run_card_default.dat') python_template = False - + hid_lines = {'default':True}#collections.defaultdict(itertools.repeat(True).next) if isinstance(output_file, str): @@ -4975,9 +4975,9 @@ def write(self, output_file, template=None, python_template=False, hid_lines[k1+k2] = True super(RunCardLO, self).write(output_file, template=template, - python_template=python_template, + python_template=python_template, template_options=hid_lines, - **opt) + **opt) class InvalidMadAnalysis5Card(InvalidCmd): @@ -4986,19 +4986,19 @@ class InvalidMadAnalysis5Card(InvalidCmd): class MadAnalysis5Card(dict): """ A class to store a MadAnalysis5 card. 
Very basic since it is basically free format.""" - + _MG5aMC_escape_tag = '@MG5aMC' - + _default_hadron_inputs = ['*.hepmc', '*.hep', '*.stdhep', '*.lhco','*.root'] _default_parton_inputs = ['*.lhe'] _skip_analysis = False - + @classmethod def events_can_be_reconstructed(cls, file_path): """ Checks from the type of an event file whether it can be reconstructed or not.""" return not (file_path.endswith('.lhco') or file_path.endswith('.lhco.gz') or \ file_path.endswith('.root') or file_path.endswith('.root.gz')) - + @classmethod def empty_analysis(cls): """ A method returning the structure of an empty analysis """ @@ -5012,7 +5012,7 @@ def empty_reconstruction(cls): 'reco_output':'lhe'} def default_setup(self): - """define the default value""" + """define the default value""" self['mode'] = 'parton' self['inputs'] = [] # None is the default stdout level, it will be set automatically by MG5aMC @@ -5025,8 +5025,8 @@ def default_setup(self): # of this class and some other property could be added to this dictionary # in the future. self['analyses'] = {} - # The recasting structure contains on set of commands and one set of - # card lines. + # The recasting structure contains on set of commands and one set of + # card lines. 
self['recasting'] = {'commands':[],'card':[]} # Add the default trivial reconstruction to use an lhco input # This is just for the structure @@ -5035,7 +5035,7 @@ def default_setup(self): 'root_input': MadAnalysis5Card.empty_reconstruction()} self['reconstruction']['lhco_input']['reco_output']='lhco' - self['reconstruction']['root_input']['reco_output']='root' + self['reconstruction']['root_input']['reco_output']='root' # Specify in which order the analysis/recasting were specified self['order'] = [] @@ -5049,7 +5049,7 @@ def __init__(self, finput=None,mode=None): return else: dict.__init__(self) - + # Initialize it with all the default value self.default_setup() if not mode is None: @@ -5058,15 +5058,15 @@ def __init__(self, finput=None,mode=None): # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, mode=mode) - + def read(self, input, mode=None): """ Read an MA5 card""" - + if mode not in [None,'parton','hadron']: raise MadGraph5Error('A MadAnalysis5Card can be read online the modes'+ "'parton' or 'hadron'") card_mode = mode - + if isinstance(input, (file, StringIO.StringIO)): input_stream = input elif isinstance(input, str): @@ -5099,10 +5099,10 @@ def read(self, input, mode=None): except ValueError: option = line[len(self._MG5aMC_escape_tag):] option = option.strip() - + if option=='inputs': self['inputs'].extend([v.strip() for v in value.split(',')]) - + elif option == 'skip_analysis': self._skip_analysis = True @@ -5118,7 +5118,7 @@ def read(self, input, mode=None): except: raise InvalidMadAnalysis5Card( "MA5 output level specification '%s' is incorrect."%str(value)) - + elif option=='analysis_name': current_type = 'analyses' current_name = value @@ -5127,7 +5127,7 @@ def read(self, input, mode=None): "Analysis '%s' already defined in MadAnalysis5 card"%current_name) else: self[current_type][current_name] = MadAnalysis5Card.empty_analysis() - + elif option=='set_reconstructions': try: reconstructions = 
eval(value) @@ -5142,7 +5142,7 @@ def read(self, input, mode=None): "analysis in a MadAnalysis5 card.") self[current_type][current_name]['reconstructions']=reconstructions continue - + elif option=='reconstruction_name': current_type = 'reconstruction' current_name = value @@ -5161,7 +5161,7 @@ def read(self, input, mode=None): raise InvalidMadAnalysis5Card( "Option '%s' can only take the values 'lhe' or 'root'"%option) self['reconstruction'][current_name]['reco_output'] = value.lower() - + elif option.startswith('recasting'): current_type = 'recasting' try: @@ -5171,11 +5171,11 @@ def read(self, input, mode=None): if len(self['recasting'][current_name])>0: raise InvalidMadAnalysis5Card( "Only one recasting can be defined in MadAnalysis5 hadron card") - + else: raise InvalidMadAnalysis5Card( "Unreckognized MG5aMC instruction in MadAnalysis5 card: '%s'"%option) - + if option in ['analysis_name','reconstruction_name'] or \ option.startswith('recasting'): self['order'].append((current_type,current_name)) @@ -5209,7 +5209,7 @@ def read(self, input, mode=None): self['inputs'] = self._default_hadron_inputs else: self['inputs'] = self._default_parton_inputs - + # Make sure at least one reconstruction is specified for each hadron # level analysis and that it exists. if self['mode']=='hadron': @@ -5221,7 +5221,7 @@ def read(self, input, mode=None): analysis['reconstructions']): raise InvalidMadAnalysis5Card('A reconstructions specified in'+\ " analysis '%s' is not defined."%analysis_name) - + def write(self, output): """ Write an MA5 card.""" @@ -5232,7 +5232,7 @@ def write(self, output): else: raise MadGraph5Error('Incorrect input for the write function of'+\ ' the MadAnalysis5Card card. 
Received argument type is: %s'%str(type(output))) - + output_lines = [] if self._skip_analysis: output_lines.append('%s skip_analysis'%self._MG5aMC_escape_tag) @@ -5240,11 +5240,11 @@ def write(self, output): if not self['stdout_lvl'] is None: output_lines.append('%s stdout_lvl=%s'%(self._MG5aMC_escape_tag,self['stdout_lvl'])) for definition_type, name in self['order']: - + if definition_type=='analyses': output_lines.append('%s analysis_name = %s'%(self._MG5aMC_escape_tag,name)) output_lines.append('%s set_reconstructions = %s'%(self._MG5aMC_escape_tag, - str(self['analyses'][name]['reconstructions']))) + str(self['analyses'][name]['reconstructions']))) elif definition_type=='reconstruction': output_lines.append('%s reconstruction_name = %s'%(self._MG5aMC_escape_tag,name)) elif definition_type=='recasting': @@ -5254,23 +5254,23 @@ def write(self, output): output_lines.extend(self[definition_type][name]) elif definition_type in ['reconstruction']: output_lines.append('%s reco_output = %s'%(self._MG5aMC_escape_tag, - self[definition_type][name]['reco_output'])) + self[definition_type][name]['reco_output'])) output_lines.extend(self[definition_type][name]['commands']) elif definition_type in ['analyses']: - output_lines.extend(self[definition_type][name]['commands']) - + output_lines.extend(self[definition_type][name]['commands']) + output_stream.write('\n'.join(output_lines)) - + return - - def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, + + def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, UFO_model_path=None, run_tag=''): - """ Returns a list of tuples ('AnalysisTag',['commands']) specifying - the commands of the MadAnalysis runs required from this card. - At parton-level, the number of such commands is the number of analysis + """ Returns a list of tuples ('AnalysisTag',['commands']) specifying + the commands of the MadAnalysis runs required from this card. 
+ At parton-level, the number of such commands is the number of analysis asked for. In the future, the idea is that the entire card can be processed in one go from MA5 directly.""" - + if isinstance(inputs_arg, list): inputs = inputs_arg elif isinstance(inputs_arg, str): @@ -5278,21 +5278,21 @@ def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, else: raise MadGraph5Error("The function 'get_MA5_cmds' can only take "+\ " a string or a list for the argument 'inputs_arg'") - + if len(inputs)==0: raise MadGraph5Error("The function 'get_MA5_cmds' must have "+\ " at least one input specified'") - + if run_dir_path is None: run_dir_path = os.path.dirname(inputs_arg) - + cmds_list = [] - + UFO_load = [] # first import the UFO if provided if UFO_model_path: UFO_load.append('import %s'%UFO_model_path) - + def get_import(input, type=None): """ Generates the MA5 import commands for that event file. """ dataset_name = os.path.basename(input).split('.')[0] @@ -5304,7 +5304,7 @@ def get_import(input, type=None): if not type is None: res.append('set %s.type = %s'%(dataset_name, type)) return res - + fifo_status = {'warned_fifo':False,'fifo_used_up':False} def warn_fifo(input): if not input.endswith('.fifo'): @@ -5317,7 +5317,7 @@ def warn_fifo(input): logger.warning('Only the first MA5 analysis/reconstructions can be run on a fifo. Subsequent runs will skip fifo inputs.') fifo_status['warned_fifo'] = True return True - + # Then the event file(s) input(s) inputs_load = [] for input in inputs: @@ -5325,16 +5325,16 @@ def warn_fifo(input): if len(inputs) > 1: inputs_load.append('set main.stacking_method = superimpose') - + submit_command = 'submit %s'%submit_folder+'_%s' - + # Keep track of the reconstruction outpus in the MA5 workflow # Keys are reconstruction names and values are .lhe.gz reco file paths. 
# We put by default already the lhco/root ones present reconstruction_outputs = { - 'lhco_input':[f for f in inputs if + 'lhco_input':[f for f in inputs if f.endswith('.lhco') or f.endswith('.lhco.gz')], - 'root_input':[f for f in inputs if + 'root_input':[f for f in inputs if f.endswith('.root') or f.endswith('.root.gz')]} # If a recasting card has to be written out, chose here its path @@ -5343,7 +5343,7 @@ def warn_fifo(input): # Make sure to only run over one analysis over each fifo. for definition_type, name in self['order']: - if definition_type == 'reconstruction': + if definition_type == 'reconstruction': analysis_cmds = list(self['reconstruction'][name]['commands']) reco_outputs = [] for i_input, input in enumerate(inputs): @@ -5365,8 +5365,8 @@ def warn_fifo(input): analysis_cmds.append( submit_command%('reco_%s_%d'%(name,i_input+1))) analysis_cmds.append('remove reco_events') - - reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) + + reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) for rec_out in reco_outputs] if len(reco_outputs)>0: cmds_list.append(('_reco_%s'%name,analysis_cmds)) @@ -5386,7 +5386,7 @@ def warn_fifo(input): analysis_cmds = ['set main.mode = parton'] else: analysis_cmds = [] - analysis_cmds.extend(sum([get_import(rec_out) for + analysis_cmds.extend(sum([get_import(rec_out) for rec_out in reconstruction_outputs[reco]],[])) analysis_cmds.extend(self['analyses'][name]['commands']) analysis_cmds.append(submit_command%('%s_%s'%(name,reco))) @@ -5427,12 +5427,12 @@ def warn_fifo(input): %(mue_ref_fixed)s = mue_ref_fixed ! 
scale to use if fixed scale mode """ running_block_nlo = RunBlock('RUNNING', template_on=template_on, template_off="") - + class RunCardNLO(RunCard): """A class object for the run_card for a (aMC@)NLO pocess""" - + LO = False - + blocks = [running_block_nlo] dummy_fct_file = {"dummy_cuts": pjoin("SubProcesses","dummy_fct.f"), @@ -5443,11 +5443,11 @@ class RunCardNLO(RunCard): if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_nlo.dat") - - + + def default_setup(self): """define the default value""" - + self.add_param('run_tag', 'tag_1', include=False) self.add_param('nevents', 10000) self.add_param('req_acc', -1.0, include=False) @@ -5455,27 +5455,27 @@ def default_setup(self): self.add_param("time_of_flight", -1.0, include=False) self.add_param('event_norm', 'average') #FO parameter - self.add_param('req_acc_fo', 0.01, include=False) + self.add_param('req_acc_fo', 0.01, include=False) self.add_param('npoints_fo_grid', 5000, include=False) self.add_param('niters_fo_grid', 4, include=False) - self.add_param('npoints_fo', 10000, include=False) + self.add_param('npoints_fo', 10000, include=False) self.add_param('niters_fo', 6, include=False) #seed and collider self.add_param('iseed', 0) - self.add_param('lpp1', 1, fortran_name='lpp(1)') - self.add_param('lpp2', 1, fortran_name='lpp(2)') + self.add_param('lpp1', 1, fortran_name='lpp(1)') + self.add_param('lpp2', 1, fortran_name='lpp(2)') self.add_param('ebeam1', 6500.0, fortran_name='ebeam(1)') - self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') + self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') self.add_param('pdlabel', 'nn23nlo', allowed=['lhapdf', 'emela', 'cteq6_m','cteq6_d','cteq6_l','cteq6l1', 'nn23lo','nn23lo1','nn23nlo','ct14q00','ct14q07','ct14q14','ct14q21'] +\ - sum(self.allowed_lep_densities.values(),[]) ) + sum(self.allowed_lep_densities.values(),[]) ) self.add_param('lhaid', [244600],fortran_name='lhaPDFid') self.add_param('pdfscheme', 0) # whether to include or not 
photon-initiated processes in lepton collisions self.add_param('photons_from_lepton', True) self.add_param('lhapdfsetname', ['internal_use_only'], system=True) - # stuff for lepton collisions - # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set - # whether the current PDF set has or not beamstrahlung + # stuff for lepton collisions + # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set + # whether the current PDF set has or not beamstrahlung self.add_param('has_bstrahl', False, system=True) # renormalisation scheme of alpha self.add_param('alphascheme', 0, system=True) @@ -5486,31 +5486,31 @@ def default_setup(self): # w contribution included or not in the running of alpha self.add_param('w_run', 1, system=True) #shower and scale - self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') + self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') self.add_param('shower_scale_factor',1.0) self.add_param('mcatnlo_delta', False) self.add_param('fixed_ren_scale', False) self.add_param('fixed_fac_scale', False) self.add_param('fixed_extra_scale', True, hidden=True, system=True) # set system since running from Ellis-Sexton scale not implemented - self.add_param('mur_ref_fixed', 91.118) + self.add_param('mur_ref_fixed', 91.118) self.add_param('muf1_ref_fixed', -1.0, hidden=True) - self.add_param('muf_ref_fixed', 91.118) + self.add_param('muf_ref_fixed', 91.118) self.add_param('muf2_ref_fixed', -1.0, hidden=True) - self.add_param('mue_ref_fixed', 91.118, hidden=True) - self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', + self.add_param('mue_ref_fixed', 91.118, hidden=True) + self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', allowed = [-2,-1,0,1,2,3,10], comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n 
'3' is HT/2, '0' allows to use the user_hook definition (need to be defined via custom_fct entry) ") self.add_param('fixed_qes_scale', False, hidden=True) self.add_param('qes_ref_fixed', -1.0, hidden=True) self.add_param('mur_over_ref', 1.0) - self.add_param('muf_over_ref', 1.0) - self.add_param('muf1_over_ref', -1.0, hidden=True) + self.add_param('muf_over_ref', 1.0) + self.add_param('muf1_over_ref', -1.0, hidden=True) self.add_param('muf2_over_ref', -1.0, hidden=True) self.add_param('mue_over_ref', 1.0, hidden=True, system=True) # forbid the user to modigy due to incorrect handling of the Ellis-Sexton scale self.add_param('qes_over_ref', -1.0, hidden=True) self.add_param('reweight_scale', [True], fortran_name='lscalevar') - self.add_param('rw_rscale_down', -1.0, hidden=True) + self.add_param('rw_rscale_down', -1.0, hidden=True) self.add_param('rw_rscale_up', -1.0, hidden=True) - self.add_param('rw_fscale_down', -1.0, hidden=True) + self.add_param('rw_fscale_down', -1.0, hidden=True) self.add_param('rw_fscale_up', -1.0, hidden=True) self.add_param('rw_rscale', [1.0,2.0,0.5], fortran_name='scalevarR') self.add_param('rw_fscale', [1.0,2.0,0.5], fortran_name='scalevarF') @@ -5523,60 +5523,60 @@ def default_setup(self): #technical self.add_param('folding', [1,1,1], include=False) - + #merging self.add_param('ickkw', 0, allowed=[-1,0,3,4], comment=" - 0: No merging\n - 3: FxFx Merging : http://amcatnlo.cern.ch/FxFx_merging.htm\n - 4: UNLOPS merging (No interface within MG5aMC)\n - -1: NNLL+NLO jet-veto computation. 
See arxiv:1412.8408 [hep-ph]") self.add_param('bwcutoff', 15.0) - #cuts + #cuts self.add_param('jetalgo', 1.0) - self.add_param('jetradius', 0.7) + self.add_param('jetradius', 0.7) self.add_param('ptj', 10.0 , cut=True) - self.add_param('etaj', -1.0, cut=True) - self.add_param('gamma_is_j', True) + self.add_param('etaj', -1.0, cut=True) + self.add_param('gamma_is_j', True) self.add_param('ptl', 0.0, cut=True) - self.add_param('etal', -1.0, cut=True) + self.add_param('etal', -1.0, cut=True) self.add_param('drll', 0.0, cut=True) - self.add_param('drll_sf', 0.0, cut=True) + self.add_param('drll_sf', 0.0, cut=True) self.add_param('mll', 0.0, cut=True) - self.add_param('mll_sf', 30.0, cut=True) - self.add_param('rphreco', 0.1) - self.add_param('etaphreco', -1.0) - self.add_param('lepphreco', True) - self.add_param('quarkphreco', True) + self.add_param('mll_sf', 30.0, cut=True) + self.add_param('rphreco', 0.1) + self.add_param('etaphreco', -1.0) + self.add_param('lepphreco', True) + self.add_param('quarkphreco', True) self.add_param('ptgmin', 20.0, cut=True) - self.add_param('etagamma', -1.0) + self.add_param('etagamma', -1.0) self.add_param('r0gamma', 0.4) - self.add_param('xn', 1.0) + self.add_param('xn', 1.0) self.add_param('epsgamma', 1.0) - self.add_param('isoem', True) + self.add_param('isoem', True) self.add_param('maxjetflavor', 4, hidden=True) - self.add_param('pineappl', False) + self.add_param('pineappl', False) self.add_param('lhe_version', 3, hidden=True, include=False) - + # customization self.add_param("custom_fcts",[],typelist="str", include=False, comment="list of files containing function that overwritte dummy function of the code (like adding cuts/...)") #internal variable related to FO_analyse_card self.add_param('FO_LHE_weight_ratio',1e-3, hidden=True, system=True) - self.add_param('FO_LHE_postprocessing',['grouping','random'], + self.add_param('FO_LHE_postprocessing',['grouping','random'], hidden=True, system=True, include=False) - + # parameter 
allowing to define simple cut via the pdg self.add_param('pt_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('pt_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True) - + #hidden parameter that are transfer to the fortran code self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min self.add_param('ptmax4pdg',[-1.], hidden=True, system=True) self.add_param('mxxmin4pdg',[0.], hidden=True, system=True) self.add_param('mxxpart_antipart', [False], hidden=True, system=True) - + def check_validity(self): """check the validity of the various input""" - + super(RunCardNLO, self).check_validity() # for lepton-lepton collisions, ignore 'pdlabel' and 'lhaid' @@ -5588,12 +5588,12 @@ def check_validity(self): # for dressed lepton collisions, check that the lhaid is a valid one if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]) + ['emela']: raise InvalidRunCard('pdlabel %s not allowed for dressed-lepton collisions' % self['pdlabel']) - + elif self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' self['reweight_pdf']=[False] logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') - + if self['lpp1'] == 0 == self['lpp2']: if self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' @@ -5601,8 +5601,8 @@ def check_validity(self): logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') # For FxFx merging, make sure that the following parameters are set correctly: - if self['ickkw'] == 3: - # 1. Renormalization and factorization (and ellis-sexton scales) are not fixed + if self['ickkw'] == 3: + # 1. 
Renormalization and factorization (and ellis-sexton scales) are not fixed scales=['fixed_ren_scale','fixed_fac_scale','fixed_QES_scale'] for scale in scales: if self[scale]: @@ -5615,7 +5615,7 @@ def check_validity(self): self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency in FxFx merging, dynamical_scale_choice has been set to -1 (default)''' ,'$MG:BOLD') - + # 2. Use kT algorithm for jets with pseudo-code size R=1.0 jetparams=['jetradius','jetalgo'] for jetparam in jetparams: @@ -5628,8 +5628,8 @@ def check_validity(self): self["dynamical_scale_choice"] = [-1] self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency with the jet veto, the scale which will be used is ptj. dynamical_scale_choice will be set at -1.''' - ,'$MG:BOLD') - + ,'$MG:BOLD') + # For interface to PINEAPPL, need to use LHAPDF and reweighting to get scale uncertainties if self['pineappl'] and self['pdlabel'].lower() != 'lhapdf': raise InvalidRunCard('PineAPPL generation only possible with the use of LHAPDF') @@ -5661,7 +5661,7 @@ def check_validity(self): if (self['rw_fscale_down'] != -1.0 and ['rw_fscale_down'] not in self['rw_fscale']) or\ (self['rw_fscale_up'] != -1.0 and ['rw_fscale_up'] not in self['rw_fscale']): self['rw_fscale']=[1.0,self['rw_fscale_up'],self['rw_fscale_down']] - + # PDF reweighting check if any(self['reweight_pdf']): # check that we use lhapdf if reweighting is ON @@ -5672,7 +5672,7 @@ def check_validity(self): if self['pdlabel'] != "lhapdf": self['reweight_pdf']=[self['reweight_pdf'][0]] self['lhaid']=[self['lhaid'][0]] - + # make sure set have reweight_scale and dyn_scale_choice of length 1 when fixed scales: if self['fixed_ren_scale'] and self['fixed_fac_scale']: self['reweight_scale']=[self['reweight_scale'][0]] @@ -5685,7 +5685,7 @@ def check_validity(self): self['reweight_pdf']=self['reweight_pdf']*len(self['lhaid']) logger.warning("Setting 'reweight_pdf' for all 'lhaid' to %s" % self['reweight_pdf'][0]) 
if len(self['reweight_scale']) == 1 and len(self['dynamical_scale_choice']) != 1: - self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) + self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) logger.warning("Setting 'reweight_scale' for all 'dynamical_scale_choice' to %s" % self['reweight_pdf'][0]) # Check that there are no identical elements in lhaid or dynamical_scale_choice @@ -5693,7 +5693,7 @@ def check_validity(self): raise InvalidRunCard("'lhaid' has two or more identical entries. They have to be all different for the code to work correctly.") if len(self['dynamical_scale_choice']) != len(set(self['dynamical_scale_choice'])): raise InvalidRunCard("'dynamical_scale_choice' has two or more identical entries. They have to be all different for the code to work correctly.") - + # Check that lenght of lists are consistent if len(self['reweight_pdf']) != len(self['lhaid']): raise InvalidRunCard("'reweight_pdf' and 'lhaid' lists should have the same length") @@ -5730,7 +5730,7 @@ def check_validity(self): if len(self['folding']) != 3: raise InvalidRunCard("'folding' should contain exactly three integers") for ifold in self['folding']: - if ifold not in [1,2,4,8]: + if ifold not in [1,2,4,8]: raise InvalidRunCard("The three 'folding' parameters should be equal to 1, 2, 4, or 8.") # Check MC@NLO-Delta if self['mcatnlo_delta'] and not self['parton_shower'].lower() == 'pythia8': @@ -5746,11 +5746,11 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938 GeV") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + raise InvalidRunCard("Energy for beam %i lower than proton mass. 
Please fix this") def update_system_parameter_for_include(self): - + # set the pdg_for_cut fortran parameter pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys())+ list(self['mxx_only_part_antipart'].keys())) @@ -5758,12 +5758,12 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different PDGs are allowed for PDG specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative PDG codes') - - + + if any(pdg in pdg_to_cut for pdg in [21,22,11,13,15]+ list(range(self['maxjetflavor']+1))): # Note that this will double check in the fortran code raise Exception("Can not use PDG related cuts for massless SM particles/leptons") @@ -5790,7 +5790,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -5800,12 +5800,12 @@ def update_system_parameter_for_include(self): self['mxxpart_antipart'] = [False] def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', 'run_card.dat') python_template = True else: @@ -5818,7 +5818,7 @@ def 
write(self, output_file, template=None, python_template=False, **opt): def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules - e+ e- beam -> lpp:0 ebeam:500 + e+ e- beam -> lpp:0 ebeam:500 p p beam -> set maxjetflavor automatically process with tagged photons -> gamma_is_j = false process without QED splittings -> gamma_is_j = false, recombination = false @@ -5844,19 +5844,19 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam2'] = 500 else: self['lpp1'] = 0 - self['lpp2'] = 0 - + self['lpp2'] = 0 + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() # check for tagged photons tagged_particles = set() - + # If model has running functionality add the additional parameter model = proc_def[0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Check if need matching min_particle = 99 @@ -5885,7 +5885,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: idsmin = [l['id'] for l in procmin['legs']] break - + for procmax in proc_def: if len(procmax['legs']) != max_particle: continue @@ -5901,9 +5901,9 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - - if matching: + break + + if matching: self['ickkw'] = 3 self['fixed_ren_scale'] = False self["fixed_fac_scale"] = False @@ -5911,17 +5911,17 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self["jetalgo"] = 1 self["jetradius"] = 1 self["parton_shower"] = "PYTHIA8" - + # Read file input/default_run_card_nlo.dat # This has to be LAST !! if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + class MadLoopParam(ConfigFile): """ a class for storing/dealing with the file MadLoopParam.dat contains a parser to read it, facilities to write a new file,... 
""" - + _ID_reduction_tool_map = {1:'CutTools', 2:'PJFry++', 3:'IREGI', @@ -5929,10 +5929,10 @@ class MadLoopParam(ConfigFile): 5:'Samurai', 6:'Ninja', 7:'COLLIER'} - + def default_setup(self): """initialize the directory to the default value""" - + self.add_param("MLReductionLib", "6|7|1") self.add_param("IREGIMODE", 2) self.add_param("IREGIRECY", True) @@ -5954,7 +5954,7 @@ def default_setup(self): self.add_param("HelicityFilterLevel", 2) self.add_param("LoopInitStartOver", False) self.add_param("HelInitStartOver", False) - self.add_param("UseQPIntegrandForNinja", True) + self.add_param("UseQPIntegrandForNinja", True) self.add_param("UseQPIntegrandForCutTools", True) self.add_param("COLLIERMode", 1) self.add_param("COLLIERComputeUVpoles", True) @@ -5966,9 +5966,9 @@ def default_setup(self): self.add_param("COLLIERUseInternalStabilityTest",True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -5976,7 +5976,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % input) - + previous_line= '' for line in finput: if previous_line.startswith('#'): @@ -5985,20 +5985,20 @@ def read(self, finput): if len(value) and value[0] not in ['#', '!']: self.__setitem__(name, value, change_userdefine=True) previous_line = line - - + + def write(self, outputpath, template=None,commentdefault=False): - + if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', + template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', 'Cards', 'MadLoopParams.dat') else: template = pjoin(MEDIR, 'Cards', 'MadLoopParams_default.dat') fsock = open(template, 'r') template = fsock.readlines() fsock.close() - + if isinstance(outputpath, str): output = open(outputpath, 'w') else: @@ -6019,7 
+6019,7 @@ def f77format(value): return value else: raise Exception("Can not format input %s" % type(value)) - + name = '' done = set() for line in template: @@ -6034,12 +6034,12 @@ def f77format(value): elif line.startswith('#'): name = line[1:].split()[0] output.write(line) - - - - - -class eMELA_info(ConfigFile): + + + + + +class eMELA_info(ConfigFile): """ a class for eMELA (LHAPDF-like) info files """ path = '' @@ -6053,7 +6053,7 @@ def __init__(self, finput, me_dir): def read(self, finput): - if isinstance(finput, file): + if isinstance(finput, file): lines = finput.open().read().split('\n') self.path = finput.name else: @@ -6066,7 +6066,7 @@ def read(self, finput): k, v = l.split(':', 1) # ignore further occurrences of : try: self[k.strip()] = eval(v) - except (NameError, SyntaxError): + except (NameError, SyntaxError): self[k.strip()] = v def default_setup(self): @@ -6091,7 +6091,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): +"powers of alpha should be reweighted a posteriori") - logger.info('Updating variables according to %s' % self.path) + logger.info('Updating variables according to %s' % self.path) # Flavours in the running of alpha nd, nu, nl = self['eMELA_ActiveFlavoursAlpha'] self.log_and_update(banner, 'run_card', 'ndnq_run', nd) @@ -6130,8 +6130,8 @@ def update_epdf_emela_variables(self, banner, uvscheme): logger.warning('Cannot treat the following renormalisation schemes for ME and PDFs: %d, %d' \ % (uvscheme, uvscheme_pdf)) - # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref - # also check that the com energy is equal to qref, otherwise print a + # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref + # also check that the com energy is equal to qref, otherwise print a # warning if uvscheme_pdf == 1: qref = self['eMELA_AlphaQref'] @@ -6144,23 +6144,23 @@ def update_epdf_emela_variables(self, banner, uvscheme): # LL / NLL PDF (0/1) pdforder = self['eMELA_PerturbativeOrder'] - # pdfscheme = 
0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) + # pdfscheme = 0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) # 4->mixed (leptonic); 5-> nobeta (leptonic); 6->delta (leptonic) # if LL, use nobeta scheme unless LEGACYLLPDF > 0 if pdforder == 0: if 'eMELA_LEGACYLLPDF' not in self.keys() or self['eMELA_LEGACYLLPDF'] in [-1, 0]: self.log_and_update(banner, 'run_card', 'pdfscheme', 5) - elif self['eMELA_LEGACYLLPDF'] == 1: + elif self['eMELA_LEGACYLLPDF'] == 1: # mixed self.log_and_update(banner, 'run_card', 'pdfscheme', 4) - elif self['eMELA_LEGACYLLPDF'] == 2: + elif self['eMELA_LEGACYLLPDF'] == 2: # eta self.log_and_update(banner, 'run_card', 'pdfscheme', 2) - elif self['eMELA_LEGACYLLPDF'] == 3: + elif self['eMELA_LEGACYLLPDF'] == 3: # beta self.log_and_update(banner, 'run_card', 'pdfscheme', 3) elif pdforder == 1: - # for NLL, use eMELA_FactorisationSchemeInt = 0/1 + # for NLL, use eMELA_FactorisationSchemeInt = 0/1 # for delta/MSbar if self['eMELA_FactorisationSchemeInt'] == 0: # MSbar @@ -6177,7 +6177,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): - + def log_and_update(self, banner, card, par, v): """update the card parameter par to value v diff --git a/epochX/cudacpp/ee_mumu.mad/bin/internal/gen_ximprove.py b/epochX/cudacpp/ee_mumu.mad/bin/internal/gen_ximprove.py index 5fd170d18d..cc842aa50f 100755 --- a/epochX/cudacpp/ee_mumu.mad/bin/internal/gen_ximprove.py +++ b/epochX/cudacpp/ee_mumu.mad/bin/internal/gen_ximprove.py @@ -2,18 +2,18 @@ # # Copyright (c) 2014 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. 
# -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch # ################################################################################ """ A python file to replace the fortran script gen_ximprove. - This script analyses the result of the survey/ previous refine and + This script analyses the result of the survey/ previous refine and creates the jobs for the following script. """ from __future__ import division @@ -66,77 +66,77 @@ class gensym(object): """a class to call the fortran gensym executable and handle it's output in order to create the various job that are needed for the survey""" - + #convenient shortcut for the formatting of variable @ staticmethod def format_variable(*args): return bannermod.ConfigFile.format_variable(*args) - + combining_job = 2 # number of channel by ajob - splitted_grid = False + splitted_grid = False min_iterations = 3 mode= "survey" - + def __init__(self, cmd, opt=None): - + try: super(gensym, self).__init__(cmd, opt) except TypeError: pass - - # Run statistics, a dictionary of RunStatistics(), with + + # Run statistics, a dictionary of RunStatistics(), with self.run_statistics = {} - + self.cmd = cmd self.run_card = cmd.run_card self.me_dir = cmd.me_dir - - + + # dictionary to keep track of the precision when combining iteration self.cross = collections.defaultdict(int) self.abscross = collections.defaultdict(int) self.sigma = collections.defaultdict(int) self.chi2 = collections.defaultdict(int) - + self.splitted_grid = False if self.cmd.proc_characteristics['loop_induced']: nexternal = self.cmd.proc_characteristics['nexternal'] self.splitted_grid = max(2, (nexternal-2)**2) if hasattr(self.cmd, "opts") and self.cmd.opts['accuracy'] == 0.1: self.cmd.opts['accuracy'] = 0.02 - + if isinstance(cmd.cluster, cluster.MultiCore) and 
self.splitted_grid > 1: self.splitted_grid = int(cmd.cluster.nb_core**0.5) if self.splitted_grid == 1 and cmd.cluster.nb_core >1: self.splitted_grid = 2 - + #if the user defines it in the run_card: if self.run_card['survey_splitting'] != -1: self.splitted_grid = self.run_card['survey_splitting'] if self.run_card['survey_nchannel_per_job'] != 1 and 'survey_nchannel_per_job' in self.run_card.user_set: - self.combining_job = self.run_card['survey_nchannel_per_job'] + self.combining_job = self.run_card['survey_nchannel_per_job'] elif self.run_card['hard_survey'] > 1: self.combining_job = 1 - - + + self.splitted_Pdir = {} self.splitted_for_dir = lambda x,y: self.splitted_grid self.combining_job_for_Pdir = lambda x: self.combining_job self.lastoffset = {} - + done_warning_zero_coupling = False def get_helicity(self, to_submit=True, clean=True): """launch a single call to madevent to get the list of non zero helicity""" - - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc P_zero_result = [] nb_tot_proc = len(subproc) - job_list = {} - - + job_list = {} + + for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.' 
% \ (nb_proc+1,nb_tot_proc), level=None) @@ -154,7 +154,7 @@ def get_helicity(self, to_submit=True, clean=True): p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts - + (stdout, _) = p.communicate(''.encode()) stdout = stdout.decode('ascii',errors='ignore') if stdout: @@ -166,11 +166,11 @@ def get_helicity(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir, 'error')): os.remove(pjoin(self.me_dir, 'error')) continue # bypass bad process - + self.cmd.compile(['madevent_forhel'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'madevent_forhel')): - raise Exception('Error make madevent_forhel not successful') - + raise Exception('Error make madevent_forhel not successful') + if not os.path.exists(pjoin(Pdir, 'Hel')): os.mkdir(pjoin(Pdir, 'Hel')) ff = open(pjoin(Pdir, 'Hel', 'input_app.txt'),'w') @@ -180,15 +180,15 @@ def get_helicity(self, to_submit=True, clean=True): try: os.remove(pjoin(Pdir, 'Hel','results.dat')) except Exception: - pass + pass # Launch gensym - p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, + p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=pjoin(Pdir,'Hel'), shell=True) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(" ".encode()) stdout = stdout.decode('ascii',errors='ignore') if os.path.exists(pjoin(self.me_dir, 'error')): - raise Exception(pjoin(self.me_dir,'error')) + raise Exception(pjoin(self.me_dir,'error')) # note a continue is not enough here, we have in top to link # the matrixX_optim.f to matrixX_orig.f to let the code to work # after this error. 
@@ -203,7 +203,7 @@ def get_helicity(self, to_submit=True, clean=True): zero_gc = list() all_zampperhel = set() all_bad_amps_perhel = set() - + for line in stdout.splitlines(): if "=" not in line and ":" not in line: continue @@ -229,22 +229,22 @@ def get_helicity(self, to_submit=True, clean=True): "%s\n" % (' '.join(zero_gc)) +\ "This will slow down the computation. Please consider using restricted model:\n" +\ "https://answers.launchpad.net/mg5amcnlo/+faq/2312") - - + + all_good_hels = collections.defaultdict(list) for me_index, hel in all_hel: - all_good_hels[me_index].append(int(hel)) - + all_good_hels[me_index].append(int(hel)) + #print(all_hel) if self.run_card['hel_zeroamp']: all_bad_amps = collections.defaultdict(list) for me_index, amp in all_zamp: all_bad_amps[me_index].append(int(amp)) - + all_bad_amps_perhel = collections.defaultdict(list) for me_index, hel, amp in all_zampperhel: - all_bad_amps_perhel[me_index].append((int(hel),int(amp))) - + all_bad_amps_perhel[me_index].append((int(hel),int(amp))) + elif all_zamp: nb_zero = sum(int(a[1]) for a in all_zamp) if zero_gc: @@ -254,7 +254,7 @@ def get_helicity(self, to_submit=True, clean=True): else: logger.warning("The optimization detected that you have %i zero matrix-element for this SubProcess: %s.\n" % nb_zero +\ "This part can optimize if you set the flag hel_zeroamp to True in the run_card.") - + #check if we need to do something and write associate information" data = [all_hel, all_zamp, all_bad_amps_perhel] if not self.run_card['hel_zeroamp']: @@ -266,14 +266,14 @@ def get_helicity(self, to_submit=True, clean=True): old_data = open(pjoin(Pdir,'Hel','selection')).read() if old_data == data: continue - - + + with open(pjoin(Pdir,'Hel','selection'),'w') as fsock: - fsock.write(data) - - + fsock.write(data) + + for matrix_file in misc.glob('matrix*orig.f', Pdir): - + split_file = matrix_file.split('/') me_index = split_file[-1][len('matrix'):-len('_orig.f')] @@ -289,11 +289,11 @@ def 
get_helicity(self, to_submit=True, clean=True): #good_hels = sorted(list(good_hels)) good_hels = [str(x) for x in sorted(all_good_hels[me_index])] if self.run_card['hel_zeroamp']: - + bad_amps = [str(x) for x in sorted(all_bad_amps[me_index])] bad_amps_perhel = [x for x in sorted(all_bad_amps_perhel[me_index])] else: - bad_amps = [] + bad_amps = [] bad_amps_perhel = [] if __debug__: mtext = open(matrix_file).read() @@ -310,7 +310,7 @@ def get_helicity(self, to_submit=True, clean=True): recycler.set_input(matrix_file) recycler.set_output(out_file) - recycler.set_template(templ_file) + recycler.set_template(templ_file) recycler.generate_output_file() del recycler @@ -321,19 +321,19 @@ def get_helicity(self, to_submit=True, clean=True): return {}, P_zero_result - + def launch(self, to_submit=True, clean=True): """ """ if not hasattr(self, 'subproc'): - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc - + P_zero_result = [] # check the number of times where they are no phase-space - + nb_tot_proc = len(subproc) - job_list = {} + job_list = {} for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.
(previous processes already running)' % \ (nb_proc+1,nb_tot_proc), level=None) @@ -341,7 +341,7 @@ def launch(self, to_submit=True, clean=True): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) logger.info(' %s ' % subdir) - + # clean previous run if clean: for match in misc.glob('*ajob*', Pdir): @@ -349,17 +349,17 @@ def launch(self, to_submit=True, clean=True): os.remove(match) for match in misc.glob('G*', Pdir): if os.path.exists(pjoin(match,'results.dat')): - os.remove(pjoin(match, 'results.dat')) + os.remove(pjoin(match, 'results.dat')) if os.path.exists(pjoin(match, 'ftn25')): - os.remove(pjoin(match, 'ftn25')) - + os.remove(pjoin(match, 'ftn25')) + #compile gensym self.cmd.compile(['gensym'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'gensym')): - raise Exception('Error make gensym not successful') - + raise Exception('Error make gensym not successful') + # Launch gensym - p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, + p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(''.encode()) @@ -367,8 +367,8 @@ def launch(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir,'error')): files.mv(pjoin(self.me_dir,'error'), pjoin(Pdir,'ajob.no_ps.log')) P_zero_result.append(subdir) - continue - + continue + jobs = stdout.split() job_list[Pdir] = jobs try: @@ -386,8 +386,8 @@ def launch(self, to_submit=True, clean=True): continue else: if done: - raise Exception('Parsing error in gensym: %s' % stdout) - job_list[Pdir] = l.split() + raise Exception('Parsing error in gensym: %s' % stdout) + job_list[Pdir] = l.split() done = True if not done: raise Exception('Parsing error in gensym: %s' % stdout) @@ -408,16 +408,16 @@ def launch(self, to_submit=True, clean=True): if to_submit: self.submit_to_cluster(job_list) job_list = {} - + return job_list, P_zero_result - + def resubmit(self, 
min_precision=1.0, resubmit_zero=False): """collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - + job_list, P_zero_result = self.launch(to_submit=False, clean=False) - + for P , jobs in dict(job_list).items(): misc.sprint(jobs) to_resub = [] @@ -434,7 +434,7 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): elif max(one_result.xerru, one_result.xerrc)/one_result.xsec > min_precision: to_resub.append(job) else: - to_resub.append(job) + to_resub.append(job) if to_resub: for G in to_resub: try: @@ -442,19 +442,19 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): except Exception as error: misc.sprint(error) pass - misc.sprint(to_resub) + misc.sprint(to_resub) self.submit_to_cluster({P: to_resub}) - - - - - - - - - - - + + + + + + + + + + + def submit_to_cluster(self, job_list): """ """ @@ -467,7 +467,7 @@ def submit_to_cluster(self, job_list): nexternal = self.cmd.proc_characteristics['nexternal'] current = open(pjoin(path, "nexternal.inc")).read() ext = re.search(r"PARAMETER \(NEXTERNAL=(\d+)\)", current).group(1) - + if self.run_card['job_strategy'] == 2: self.splitted_grid = 2 if nexternal == int(ext): @@ -498,18 +498,18 @@ def submit_to_cluster(self, job_list): return self.submit_to_cluster_no_splitting(job_list) else: return self.submit_to_cluster_splitted(job_list) - - + + def submit_to_cluster_no_splitting(self, job_list): """submit the survey without the parralelization. 
This is the old mode which is still usefull in single core""" - - # write the template file for the parameter file + + # write the template file for the parameter file self.write_parameter(parralelization=False, Pdirs=list(job_list.keys())) - - + + # launch the job with the appropriate grouping - for Pdir, jobs in job_list.items(): + for Pdir, jobs in job_list.items(): jobs = list(jobs) i=0 while jobs: @@ -518,16 +518,16 @@ def submit_to_cluster_no_splitting(self, job_list): for _ in range(self.combining_job_for_Pdir(Pdir)): if jobs: to_submit.append(jobs.pop(0)) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=to_submit, cwd=pjoin(self.me_dir,'SubProcesses' , Pdir)) - + def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): """prepare the input_file for submitting the channel""" - + if 'SubProcesses' not in Pdir: Pdir = pjoin(self.me_dir, 'SubProcesses', Pdir) @@ -535,8 +535,8 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): self.splitted_Pdir[(Pdir, G)] = int(nb_job) - # 1. write the new input_app.txt - run_card = self.cmd.run_card + # 1. write the new input_app.txt + run_card = self.cmd.run_card options = {'event' : submit_ps, 'maxiter': 1, 'miniter': 1, @@ -545,29 +545,29 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): else run_card['nhel'], 'gridmode': -2, 'channel' : G - } - + } + Gdir = pjoin(Pdir, 'G%s' % G) - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + # 2. check that ftn25 exists. - assert os.path.exists(pjoin(Gdir, "ftn25")) - - + assert os.path.exists(pjoin(Gdir, "ftn25")) + + # 3. 
Submit the new jobs #call back function - packet = cluster.Packet((Pdir, G, step+1), + packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, (Pdir, G, step+1)) - + if step ==0: - self.lastoffset[(Pdir, G)] = 0 - - # resubmit the new jobs + self.lastoffset[(Pdir, G)] = 0 + + # resubmit the new jobs for i in range(int(nb_job)): name = "G%s_%s" % (G,i+1) self.lastoffset[(Pdir, G)] += 1 - offset = self.lastoffset[(Pdir, G)] + offset = self.lastoffset[(Pdir, G)] self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'refine_splitted.sh'), argument=[name, 'G%s'%G, offset], cwd= Pdir, @@ -575,9 +575,9 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): def submit_to_cluster_splitted(self, job_list): - """ submit the version of the survey with splitted grid creation - """ - + """ submit the version of the survey with splitted grid creation + """ + #if self.splitted_grid <= 1: # return self.submit_to_cluster_no_splitting(job_list) @@ -592,7 +592,7 @@ def submit_to_cluster_splitted(self, job_list): for job in jobs: packet = cluster.Packet((Pdir, job, 1), self.combine_iteration, (Pdir, job, 1)) - for i in range(self.splitted_for_dir(Pdir, job)): + for i in range(self.splitted_for_dir(Pdir, job)): self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[i+1, job], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), @@ -601,15 +601,15 @@ def submit_to_cluster_splitted(self, job_list): def combine_iteration(self, Pdir, G, step): grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - - # Compute the number of events used for this run. + + # Compute the number of events used for this run. nb_events = grid_calculator.target_evt Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) - + # 4. 
make the submission of the next iteration # Three cases - less than 3 iteration -> continue # - more than 3 and less than 5 -> check error @@ -627,15 +627,15 @@ def combine_iteration(self, Pdir, G, step): need_submit = False else: need_submit = True - + elif step >= self.cmd.opts['iterations']: need_submit = False elif self.cmd.opts['accuracy'] < 0: #check for luminosity raise Exception("Not Implemented") elif self.abscross[(Pdir,G)] == 0: - need_submit = False - else: + need_submit = False + else: across = self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) tot_across = self.get_current_axsec() if across == 0: @@ -646,20 +646,20 @@ def combine_iteration(self, Pdir, G, step): need_submit = True else: need_submit = False - - + + if cross: grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_events,mode=self.mode, conservative_factor=5.0) - - xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) - if float(cross)!=0.0 and float(error)!=0.0 else 8) + + xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) + if float(cross)!=0.0 and float(error)!=0.0 else 8) if need_submit: message = "%%s/G%%s is at %%%s +- %%.3g pb. 
Now submitting iteration #%s."%(xsec_format, step+1) logger.info(message%\ - (os.path.basename(Pdir), G, float(cross), + (os.path.basename(Pdir), G, float(cross), float(error)*float(cross))) self.resubmit_survey(Pdir,G, Gdirs, step) elif cross: @@ -670,26 +670,26 @@ def combine_iteration(self, Pdir, G, step): newGpath = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(newGpath): os.mkdir(newGpath) - + # copy the new grid: - files.cp(pjoin(Gdirs[0], 'ftn25'), + files.cp(pjoin(Gdirs[0], 'ftn25'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, 'ftn26')) - + # copy the events fsock = open(pjoin(newGpath, 'events.lhe'), 'w') for Gdir in Gdirs: - fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) - + fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) + # copy one log - files.cp(pjoin(Gdirs[0], 'log.txt'), + files.cp(pjoin(Gdirs[0], 'log.txt'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G)) - - + + # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) else: logger.info("Survey finished for %s/G%s [0 cross]", os.path.basename(Pdir),G) - + Gdir = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(Gdir): os.mkdir(Gdir) @@ -697,21 +697,21 @@ def combine_iteration(self, Pdir, G, step): files.cp(pjoin(Gdirs[0], 'log.txt'), Gdir) # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) - + return 0 def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): """ exclude_sub_jobs is to remove some of the subjobs if a numerical issue is detected in one of them. Warning is issue when this occurs. """ - + # 1. 
create an object to combine the grid information and fill it grid_calculator = combine_grid.grid_information(self.run_card['nhel']) - + for i in range(self.splitted_for_dir(Pdir, G)): if i in exclude_sub_jobs: continue - path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) fsock = misc.mult_try_open(pjoin(path, 'results.dat')) one_result = grid_calculator.add_results_information(fsock) fsock.close() @@ -723,9 +723,9 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): fsock.close() os.remove(pjoin(path, 'results.dat')) #os.remove(pjoin(path, 'grid_information')) - - - + + + #2. combine the information about the total crossection / error # start by keep the interation in memory cross, across, sigma = grid_calculator.get_cross_section() @@ -736,12 +736,12 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): if maxwgt: nunwgt = grid_calculator.get_nunwgt(maxwgt) # Make sure not to apply the security below during the first step of the - # survey. Also, disregard channels with a contribution relative to the + # survey. Also, disregard channels with a contribution relative to the # total cross-section smaller than 1e-8 since in this case it is unlikely # that this channel will need more than 1 event anyway. 
apply_instability_security = False rel_contrib = 0.0 - if (self.__class__ != gensym or step > 1): + if (self.__class__ != gensym or step > 1): Pdir_across = 0.0 Gdir_across = 0.0 for (mPdir,mG) in self.abscross.keys(): @@ -750,7 +750,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): (self.sigma[(mPdir,mG)]+1e-99)) if mG == G: Gdir_across += (self.abscross[(mPdir,mG)]/ - (self.sigma[(mPdir,mG)]+1e-99)) + (self.sigma[(mPdir,mG)]+1e-99)) rel_contrib = abs(Gdir_across/(Pdir_across+1e-99)) if rel_contrib > (1.0e-8) and \ nunwgt < 2 and len(grid_calculator.results) > 1: @@ -770,14 +770,14 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): exclude_sub_jobs = list(exclude_sub_jobs) exclude_sub_jobs.append(th_maxwgt[-1][1]) grid_calculator.results.run_statistics['skipped_subchannel'] += 1 - + # Add some monitoring of the problematic events - gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) + gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) if os.path.isfile(pjoin(gPath,'events.lhe')): lhe_file = lhe_parser.EventFile(pjoin(gPath,'events.lhe')) discardedPath = pjoin(Pdir,'DiscardedUnstableEvents') if not os.path.exists(discardedPath): - os.mkdir(discardedPath) + os.mkdir(discardedPath) if os.path.isdir(discardedPath): # Keep only the event with a maximum weight, as it surely # is the problematic one. 
@@ -790,10 +790,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): lhe_file.close() evtRecord.write(pjoin(gPath,'events.lhe').read()) evtRecord.close() - + return self.combine_grid(Pdir, G, step, exclude_sub_jobs) - + if across !=0: if sigma != 0: self.cross[(Pdir,G)] += cross**3/sigma**2 @@ -814,10 +814,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): self.chi2[(Pdir,G)] = 0 cross = self.cross[(Pdir,G)] error = 0 - + else: error = 0 - + grid_calculator.results.compute_values(update_statistics=True) if (str(os.path.basename(Pdir)), G) in self.run_statistics: self.run_statistics[(str(os.path.basename(Pdir)), G)]\ @@ -825,8 +825,8 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): else: self.run_statistics[(str(os.path.basename(Pdir)), G)] = \ grid_calculator.results.run_statistics - - self.warnings_from_statistics(G, grid_calculator.results.run_statistics) + + self.warnings_from_statistics(G, grid_calculator.results.run_statistics) stats_msg = grid_calculator.results.run_statistics.nice_output( '/'.join([os.path.basename(Pdir),'G%s'%G])) @@ -836,7 +836,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): # Clean up grid_information to avoid border effects in case of a crash for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) - try: + try: os.remove(pjoin(path, 'grid_information')) except OSError as oneerror: if oneerror.errno != 2: @@ -850,7 +850,7 @@ def warnings_from_statistics(self,G,stats): return EPS_fraction = float(stats['exceptional_points'])/stats['n_madloop_calls'] - + msg = "Channel %s has encountered a fraction of %.3g\n"+ \ "of numerically unstable loop matrix element computations\n"+\ "(which could not be rescued using quadruple precision).\n"+\ @@ -861,16 +861,16 @@ def warnings_from_statistics(self,G,stats): elif EPS_fraction > 0.01: logger.critical((msg%(G,EPS_fraction)).replace('might', 'can')) raise Exception((msg%(G,EPS_fraction)).replace('might', 'can')) 
- + def get_current_axsec(self): - + across = 0 for (Pdir,G) in self.abscross: across += self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) return across - + def write_results(self, grid_calculator, cross, error, Pdir, G, step): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -888,7 +888,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step): maxwgt = grid_calculator.get_max_wgt() nunwgt = grid_calculator.get_nunwgt() luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -897,20 +897,20 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s %s 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross), fstr(maxwgt)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - + def resubmit_survey(self, Pdir, G, Gdirs, step): """submit the next iteration of the survey""" # 1. write the new input_app.txt to double the number of points - run_card = self.cmd.run_card + run_card = self.cmd.run_card options = {'event' : 2**(step) * self.cmd.opts['points'] / self.splitted_grid, 'maxiter': 1, 'miniter': 1, @@ -919,18 +919,18 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): else run_card['nhel'], 'gridmode': -2, 'channel' : '' - } - + } + if int(options['helicity']) == 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + for Gdir in Gdirs: - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + + #2. 
resubmit the new jobs packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, \ - (Pdir, G, step+1)) + (Pdir, G, step+1)) nb_step = len(Gdirs) * (step+1) for i,subdir in enumerate(Gdirs): subdir = subdir.rsplit('_',1)[1] @@ -938,34 +938,34 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): offset = nb_step+i+1 offset=str(offset) tag = "%s.%s" % (subdir, offset) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[tag, G], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), packet_member=packet) - + def write_parameter_file(self, path, options): """ """ - + template =""" %(event)s %(maxiter)s %(miniter)s !Number of events and max and min iterations %(accuracy)s !Accuracy %(gridmode)s !Grid Adjustment 0=none, 2=adjust 1 !Suppress Amplitude 1=yes %(helicity)s !Helicity Sum/event 0=exact - %(channel)s """ + %(channel)s """ options['event'] = int(options['event']) open(path, 'w').write(template % options) - - + + def write_parameter(self, parralelization, Pdirs=None): """Write the parameter of the survey run""" run_card = self.cmd.run_card - + options = {'event' : self.cmd.opts['points'], 'maxiter': self.cmd.opts['iterations'], 'miniter': self.min_iterations, @@ -975,36 +975,36 @@ def write_parameter(self, parralelization, Pdirs=None): 'gridmode': 2, 'channel': '' } - + if int(options['helicity'])== 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + if parralelization: options['gridmode'] = -2 options['maxiter'] = 1 #this is automatic in dsample anyway options['miniter'] = 1 #this is automatic in dsample anyway options['event'] /= self.splitted_grid - + if not Pdirs: Pdirs = self.subproc - + for Pdir in Pdirs: - path =pjoin(Pdir, 'input_app.txt') + path =pjoin(Pdir, 'input_app.txt') self.write_parameter_file(path, options) - - -class gen_ximprove(object): - - + + +class gen_ximprove(object): + + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the 
number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job @@ -1022,7 +1022,7 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_gridpack) elif cls.force_class == 'loop_induced': return super(gen_ximprove, cls).__new__(gen_ximprove_share) - + if cmd.proc_characteristics['loop_induced']: return super(gen_ximprove, cls).__new__(gen_ximprove_share) elif gen_ximprove.format_variable(cmd.run_card['gridpack'], bool): @@ -1031,31 +1031,31 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_share) else: return super(gen_ximprove, cls).__new__(gen_ximprove_v4) - - + + def __init__(self, cmd, opt=None): - + try: super(gen_ximprove, self).__init__(cmd, opt) except TypeError: pass - + self.run_statistics = {} self.cmd = cmd self.run_card = cmd.run_card run_card = self.run_card self.me_dir = cmd.me_dir - + #extract from the run_card the information that we need. 
self.gridpack = run_card['gridpack'] self.nhel = run_card['nhel'] if "nhel_refine" in run_card: self.nhel = run_card["nhel_refine"] - + if self.run_card['refine_evt_by_job'] != -1: self.max_request_event = run_card['refine_evt_by_job'] - - + + # Default option for the run self.gen_events = True self.parralel = False @@ -1066,7 +1066,7 @@ def __init__(self, cmd, opt=None): # parameter for the gridpack run self.nreq = 2000 self.iseed = 4321 - + # placeholder for information self.results = 0 #updated in launch/update_html @@ -1074,16 +1074,16 @@ def __init__(self, cmd, opt=None): self.configure(opt) elif isinstance(opt, bannermod.GridpackCard): self.configure_gridpack(opt) - + def __call__(self): return self.launch() - + def launch(self): - """running """ - + """running """ + #start the run self.handle_seed() - self.results = sum_html.collect_result(self.cmd, + self.results = sum_html.collect_result(self.cmd, main_dir=pjoin(self.cmd.me_dir,'SubProcesses')) #main_dir is for gridpack readonly mode if self.gen_events: # We run to provide a given number of events @@ -1095,15 +1095,15 @@ def launch(self): def configure(self, opt): """Defines some parameter of the run""" - + for key, value in opt.items(): if key in self.__dict__: targettype = type(getattr(self, key)) setattr(self, key, self.format_variable(value, targettype, key)) else: raise Exception('%s not define' % key) - - + + # special treatment always do outside the loop to avoid side effect if 'err_goal' in opt: if self.err_goal < 1: @@ -1113,24 +1113,24 @@ def configure(self, opt): logger.info("Generating %s unweighted events." 
% self.err_goal) self.gen_events = True self.err_goal = self.err_goal * self.gen_events_security # security - + def handle_seed(self): """not needed but for gridpack --which is not handle here for the moment""" return - - + + def find_job_for_event(self): """return the list of channel that need to be improved""" - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) - - goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 + + goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) - all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) - + all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) + to_refine = [] for C in all_channels: if C.get('axsec') == 0: @@ -1141,61 +1141,61 @@ def find_job_for_event(self): elif C.get('xerr') > max(C.get('axsec'), (1/(100*math.sqrt(self.err_goal)))*all_channels[-1].get('axsec')): to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 
'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru + - class gen_ximprove_v4(gen_ximprove): - + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + super(gen_ximprove_v4, self).__init__(cmd, opt) - + if cmd.opts['accuracy'] < cmd._survey_options['accuracy'][1]: self.increase_precision(cmd._survey_options['accuracy'][1]/cmd.opts['accuracy']) @@ -1203,7 +1203,7 @@ def reset_multijob(self): for path in misc.glob(pjoin('*', '*','multijob.dat'), pjoin(self.me_dir, 'SubProcesses')): open(path,'w').write('0\n') - + def write_multijob(self, Channel, nb_split): """ """ if nb_split <=1: @@ -1211,7 +1211,7 @@ def write_multijob(self, Channel, nb_split): f = open(pjoin(self.me_dir, 'SubProcesses', Channel.get('name'), 'multijob.dat'), 'w') f.write('%i\n' % nb_split) f.close() - + def increase_precision(self, rate=3): #misc.sprint(rate) if rate < 3: @@ -1222,25 +1222,25 @@ def increase_precision(self, rate=3): rate = rate -2 self.max_event_in_iter = int((rate+1) * 10000) self.min_events = int(rate+2) * 2500 - self.gen_events_security = 1 + 0.1 * (rate+2) - + 
self.gen_events_security = 1 + 0.1 * (rate+2) + if int(self.nhel) == 1: self.min_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//3) self.max_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//2) - - + + alphabet = "abcdefghijklmnopqrstuvwxyz" def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() #reset the potential multijob of previous run self.reset_multijob() - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. @@ -1257,17 +1257,17 @@ def get_job_for_event(self): else: for i in range(len(to_refine) //3): new_order.append(to_refine[i]) - new_order.append(to_refine[-2*i-1]) + new_order.append(to_refine[-2*i-1]) new_order.append(to_refine[-2*i-2]) if len(to_refine) % 3 == 1: - new_order.append(to_refine[i+1]) + new_order.append(to_refine[i+1]) elif len(to_refine) % 3 == 2: - new_order.append(to_refine[i+2]) + new_order.append(to_refine[i+2]) #ensure that the reordering is done nicely assert set([id(C) for C in to_refine]) == set([id(C) for C in new_order]) - to_refine = new_order - - + to_refine = new_order + + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target @@ -1279,7 +1279,7 @@ def get_job_for_event(self): nb_split = self.max_splitting nb_split=max(1, nb_split) - + #2. estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1296,21 +1296,21 @@ def get_job_for_event(self): nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. 
Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + # write the multi-job information self.write_multijob(C, nb_split) - + packet = cluster.Packet((C.parent_name, C.name), combine_runs.CombineRuns, (pjoin(self.me_dir, 'SubProcesses', C.parent_name)), {"subproc": C.name, "nb_split":nb_split}) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1321,7 +1321,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': packet, + 'packet': packet, } if nb_split == 1: @@ -1334,19 +1334,19 @@ def get_job_for_event(self): if self.keep_grid_for_refine: new_info['base_directory'] = info['directory'] jobs.append(new_info) - - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def create_ajob(self, template, jobs, write_dir=None): """create the ajob""" - + if not jobs: return if not write_dir: write_dir = pjoin(self.me_dir, 'SubProcesses') - + #filter the job according to their SubProcess directory # no mix submition P2job= collections.defaultdict(list) for j in jobs: @@ -1355,11 +1355,11 @@ def create_ajob(self, template, jobs, write_dir=None): for P in P2job.values(): self.create_ajob(template, P, write_dir) return - - + + #Here we can assume that all job are for the same directory. 
path = pjoin(write_dir, jobs[0]['P_dir']) - + template_text = open(template, 'r').read() # special treatment if needed to combine the script # computes how many submition miss one job @@ -1384,8 +1384,8 @@ def create_ajob(self, template, jobs, write_dir=None): skip1=0 combining_job =1 nb_sub = len(jobs) - - + + nb_use = 0 for i in range(nb_sub): script_number = i+1 @@ -1404,14 +1404,14 @@ def create_ajob(self, template, jobs, write_dir=None): info["base_directory"] = "./" fsock.write(template_text % info) nb_use += nb_job - + fsock.close() return script_number def get_job_for_precision(self): """create the ajob to achieve a give precision on the total cross-section""" - + assert self.err_goal <=1 xtot = abs(self.results.xsec) logger.info("Working on precision: %s %%" %(100*self.err_goal)) @@ -1428,46 +1428,46 @@ def get_job_for_precision(self): rerr *=rerr if not len(to_refine): return - - # change limit since most don't contribute + + # change limit since most don't contribute limit = math.sqrt((self.err_goal * xtot)**2 - rerr/math.sqrt(len(to_refine))) for C in to_refine[:]: cerr = C.mfactor*(C.xerru + len(to_refine)*C.xerrc) if cerr < limit: to_refine.remove(C) - + # all the channel are now selected. create the channel information logger.info('need to improve %s channels' % len(to_refine)) - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. # loop over the channel to refine for C in to_refine: - + #1. 
Determine how many events we need in each iteration yerr = C.mfactor*(C.xerru+len(to_refine)*C.xerrc) nevents = 0.2*C.nevents*(yerr/limit)**2 - + nb_split = int((nevents*(C.nunwgt/C.nevents)/self.max_request_event/ (2**self.min_iter-1))**(2/3)) nb_split = max(nb_split, 1) - # **(2/3) to slow down the increase in number of jobs + # **(2/3) to slow down the increase in number of jobs if nb_split > self.max_splitting: nb_split = self.max_splitting - + if nb_split >1: nevents = nevents / nb_split self.write_multijob(C, nb_split) # forbid too low/too large value nevents = min(self.min_event_in_iter, max(self.max_event_in_iter, nevents)) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1487,38 +1487,38 @@ def get_job_for_precision(self): new_info['offset'] = i+1 new_info['directory'] += self.alphabet[i % 26] + str((i+1)//26) jobs.append(new_info) - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 
'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru @@ -1528,27 +1528,27 @@ class gen_ximprove_v4_nogridupdate(gen_ximprove_v4): # some hardcoded value which impact the generation gen_events_security = 1.1 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 400 # split jobs if a channel if it needs more than that + max_request_event = 400 # split jobs if a channel if it needs more than that max_event_in_iter = 500 min_event_in_iter = 250 - max_splitting = 260 # maximum duplication of a given channel - min_iter = 2 + max_splitting = 260 # maximum duplication of a given channel + min_iter = 2 max_iter = 6 keep_grid_for_refine = True - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + gen_ximprove.__init__(cmd, opt) - + if cmd.proc_characteristics['loopinduced'] and \ cmd.proc_characteristics['nexternal'] > 2: self.increase_parralelization(cmd.proc_characteristics['nexternal']) - + def increase_parralelization(self, nexternal): - self.max_splitting = 1000 - + self.max_splitting = 1000 + if self.run_card['refine_evt_by_job'] != -1: pass elif nexternal == 3: @@ -1563,27 +1563,27 @@ def increase_parralelization(self, nexternal): class gen_ximprove_share(gen_ximprove, gensym): """Doing the refine in multicore. Each core handle a couple of PS point.""" - nb_ps_by_job = 2000 + nb_ps_by_job = 2000 mode = "refine" gen_events_security = 1.15 # Note the real security is lower since we stop the jobs if they are at 96% # of this target. 
def __init__(self, *args, **opts): - + super(gen_ximprove_share, self).__init__(*args, **opts) self.generated_events = {} self.splitted_for_dir = lambda x,y : self.splitted_Pdir[(x,y)] - + def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - + goal_lum, to_refine = self.find_job_for_event() self.goal_lum = goal_lum - + # loop over the channel to refine to find the number of PS point to launch total_ps_points = 0 channel_to_ps_point = [] @@ -1593,7 +1593,7 @@ def get_job_for_event(self): os.remove(pjoin(self.me_dir, "SubProcesses",C.parent_name, C.name, "events.lhe")) except: pass - + #1. Compute the number of points are needed to reach target needed_event = goal_lum*C.get('axsec') if needed_event == 0: @@ -1609,18 +1609,18 @@ def get_job_for_event(self): nb_split = 1 if nb_split > self.max_splitting: nb_split = self.max_splitting - nevents = self.max_event_in_iter * self.max_splitting + nevents = self.max_event_in_iter * self.max_splitting else: nevents = self.max_event_in_iter * nb_split if nevents > self.max_splitting*self.max_event_in_iter: logger.warning("Channel %s/%s has a very low efficiency of unweighting. 
Might not be possible to reach target" % \ (C.name, C.parent_name)) - nevents = self.max_event_in_iter * self.max_splitting - - total_ps_points += nevents - channel_to_ps_point.append((C, nevents)) - + nevents = self.max_event_in_iter * self.max_splitting + + total_ps_points += nevents + channel_to_ps_point.append((C, nevents)) + if self.cmd.options["run_mode"] == 1: if self.cmd.options["cluster_size"]: nb_ps_by_job = total_ps_points /int(self.cmd.options["cluster_size"]) @@ -1634,7 +1634,7 @@ def get_job_for_event(self): nb_ps_by_job = total_ps_points / self.cmd.options["nb_core"] else: nb_ps_by_job = self.nb_ps_by_job - + nb_ps_by_job = int(max(nb_ps_by_job, 500)) for C, nevents in channel_to_ps_point: @@ -1648,20 +1648,20 @@ def get_job_for_event(self): self.create_resubmit_one_iter(C.parent_name, C.name[1:], submit_ps, nb_job, step=0) needed_event = goal_lum*C.get('xsec') logger.debug("%s/%s : need %s event. Need %s split job of %s points", C.parent_name, C.name, needed_event, nb_job, submit_ps) - - + + def combine_iteration(self, Pdir, G, step): - + grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - + # collect all the generated_event Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) assert len(grid_calculator.results) == len(Gdirs) == self.splitted_for_dir(Pdir, G) - - + + # Check how many events are going to be kept after un-weighting. needed_event = cross * self.goal_lum if needed_event == 0: @@ -1671,19 +1671,19 @@ def combine_iteration(self, Pdir, G, step): if self.err_goal >=1: if needed_event > self.gen_events_security * self.err_goal: needed_event = int(self.gen_events_security * self.err_goal) - + if (Pdir, G) in self.generated_events: old_nunwgt, old_maxwgt = self.generated_events[(Pdir, G)] else: old_nunwgt, old_maxwgt = 0, 0 - + if old_nunwgt == 0 and os.path.exists(pjoin(Pdir,"G%s" % G, "events.lhe")): # possible for second refine. 
lhe = lhe_parser.EventFile(pjoin(Pdir,"G%s" % G, "events.lhe")) old_nunwgt = lhe.unweight(None, trunc_error=0.005, log_level=0) old_maxwgt = lhe.max_wgt - - + + maxwgt = max(grid_calculator.get_max_wgt(), old_maxwgt) new_evt = grid_calculator.get_nunwgt(maxwgt) @@ -1695,35 +1695,35 @@ def combine_iteration(self, Pdir, G, step): one_iter_nb_event = max(grid_calculator.get_nunwgt(),1) drop_previous_iteration = False # compare the number of events to generate if we discard the previous iteration - n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) n_target_combined = (needed_event-nunwgt) / efficiency if n_target_one_iter < n_target_combined: # the last iteration alone has more event that the combine iteration. - # it is therefore interesting to drop previous iteration. + # it is therefore interesting to drop previous iteration. drop_previous_iteration = True nunwgt = one_iter_nb_event maxwgt = grid_calculator.get_max_wgt() new_evt = nunwgt - efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) - + efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + try: if drop_previous_iteration: raise IOError output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'a') except IOError: output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'w') - + misc.call(["cat"] + [pjoin(d, "events.lhe") for d in Gdirs], stdout=output_file) output_file.close() # For large number of iteration. check the number of event by doing the # real unweighting. 
- if nunwgt < 0.6 * needed_event and step > self.min_iter: + if nunwgt < 0.6 * needed_event and step > self.min_iter: lhe = lhe_parser.EventFile(output_file.name) old_nunwgt =nunwgt nunwgt = lhe.unweight(None, trunc_error=0.01, log_level=0) - - + + self.generated_events[(Pdir, G)] = (nunwgt, maxwgt) # misc.sprint("Adding %s event to %s. Currently at %s" % (new_evt, G, nunwgt)) @@ -1742,21 +1742,21 @@ def combine_iteration(self, Pdir, G, step): nevents = grid_calculator.results[0].nevents if nevents == 0: # possible if some integral returns 0 nevents = max(g.nevents for g in grid_calculator.results) - + need_ps_point = (needed_event - nunwgt)/(efficiency+1e-99) - need_job = need_ps_point // nevents + 1 - + need_job = need_ps_point // nevents + 1 + if step < self.min_iter: # This is normal but check if we are on the good track - job_at_first_iter = nb_split_before/2**(step-1) + job_at_first_iter = nb_split_before/2**(step-1) expected_total_job = job_at_first_iter * (2**self.min_iter-1) done_job = job_at_first_iter * (2**step-1) expected_remaining_job = expected_total_job - done_job - logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) + logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) # increase if needed but not too much need_job = min(need_job, expected_remaining_job*1.25) - + nb_job = (need_job-0.5)//(2**(self.min_iter-step)-1) + 1 nb_job = max(1, nb_job) grid_calculator.write_grid_for_submission(Pdir,G, @@ -1768,7 +1768,7 @@ def combine_iteration(self, Pdir, G, step): nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) #self.create_job(Pdir, G, nb_job, nevents, step) - + elif step < self.max_iter: if step + 1 == self.max_iter: need_job = 1.20 * need_job # avoid to have just too few event. 
@@ -1777,21 +1777,21 @@ def combine_iteration(self, Pdir, G, step): grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_job*nevents ,mode=self.mode, conservative_factor=self.max_iter) - - + + logger.info("%s/G%s is at %i/%i ('%.2g%%') event. Resubmit %i job at iteration %i." \ % (os.path.basename(Pdir), G, int(nunwgt),int(needed_event)+1, (float(nunwgt)/needed_event)*100.0 if needed_event>0.0 else 0.0, nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) - - + + return 0 - - + + def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -1807,7 +1807,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency nevents = nunwgt # make the unweighting to compute the number of events: luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -1816,23 +1816,23 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s 0.0 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - - - + + + class gen_ximprove_gridpack(gen_ximprove_v4): - - min_iter = 1 + + min_iter = 1 max_iter = 13 - max_request_event = 1e12 # split jobs if a channel if it needs more than that + max_request_event = 1e12 # split jobs if a channel if it needs more than that max_event_in_iter = 4000 min_event_in_iter = 500 combining_job = sys.maxsize @@ -1844,7 +1844,7 @@ def __new__(cls, *args, **opts): return super(gen_ximprove_gridpack, cls).__new__(cls, *args, **opts) def __init__(self, *args, **opts): - + self.ngran = -1 self.gscalefact = {} self.readonly = False 
@@ -1855,23 +1855,23 @@ def __init__(self, *args, **opts): self.readonly = opts['readonly'] super(gen_ximprove_gridpack,self).__init__(*args, **opts) if self.ngran == -1: - self.ngran = 1 - + self.ngran = 1 + def find_job_for_event(self): """return the list of channel that need to be improved""" import random - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) self.gscalefact = {} - + xtot = self.results.axsec - goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 + goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 # logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) all_channels.sort(key=lambda x : x.get('luminosity'), reverse=True) - + to_refine = [] for C in all_channels: tag = C.get('name') @@ -1885,27 +1885,27 @@ def find_job_for_event(self): #need to generate events logger.debug('request events for ', C.get('name'), 'cross=', C.get('axsec'), 'needed events = ', goal_lum * C.get('axsec')) - to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + to_refine.append(C) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. - + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target needed_event = max(goal_lum*C.get('axsec'), self.ngran) nb_split = 1 - + #2. 
estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1920,13 +1920,13 @@ def get_job_for_event(self): # forbid too low/too large value nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': os.path.basename(C.parent_name), + 'P_dir': os.path.basename(C.parent_name), 'offset': 1, # need to be change for splitted job 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'nevents': nevents, #int(nevents*self.gen_events_security)+1, @@ -1938,7 +1938,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': None, + 'packet': None, } if self.readonly: @@ -1946,11 +1946,11 @@ def get_job_for_event(self): info['base_directory'] = basedir jobs.append(info) - - write_dir = '.' if self.readonly else None - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) - + + write_dir = '.' if self.readonly else None + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) + done = [] for j in jobs: if j['P_dir'] in done: @@ -1967,22 +1967,22 @@ def get_job_for_event(self): write_dir = '.' 
if self.readonly else pjoin(self.me_dir, 'SubProcesses') self.check_events(goal_lum, to_refine, jobs, write_dir) - + def check_events(self, goal_lum, to_refine, jobs, Sdir): """check that we get the number of requested events if not resubmit.""" - + new_jobs = [] - + for C, job_info in zip(to_refine, jobs): - P = job_info['P_dir'] + P = job_info['P_dir'] G = job_info['channel'] axsec = C.get('axsec') - requested_events= job_info['requested_event'] - + requested_events= job_info['requested_event'] + new_results = sum_html.OneResult((P,G)) new_results.read_results(pjoin(Sdir,P, 'G%s'%G, 'results.dat')) - + # need to resubmit? if new_results.get('nunwgt') < requested_events: pwd = pjoin(os.getcwd(),job_info['P_dir'],'G%s'%G) if self.readonly else \ @@ -1992,10 +1992,10 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): job_info['offset'] += 1 new_jobs.append(job_info) files.mv(pjoin(pwd, 'events.lhe'), pjoin(pwd, 'events.lhe.previous')) - + if new_jobs: - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) + done = [] for j in new_jobs: if j['P_dir'] in done: @@ -2015,9 +2015,9 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): files.put_at_end(pjoin(pwd, 'events.lhe'),pjoin(pwd, 'events.lhe.previous')) return self.check_events(goal_lum, to_refine, new_jobs, Sdir) - - - - + + + + diff --git a/epochX/cudacpp/ee_mumu.mad/bin/internal/madevent_interface.py b/epochX/cudacpp/ee_mumu.mad/bin/internal/madevent_interface.py index cb6bf4ca57..8abba3f33f 100755 --- a/epochX/cudacpp/ee_mumu.mad/bin/internal/madevent_interface.py +++ b/epochX/cudacpp/ee_mumu.mad/bin/internal/madevent_interface.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # 
automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,10 +53,10 @@ # Special logger for the Cmd Interface logger = logging.getLogger('madevent.stdout') # -> stdout logger_stderr = logging.getLogger('madevent.stderr') # ->stderr - + try: import madgraph -except ImportError as error: +except ImportError as error: # import from madevent directory MADEVENT = True import internal.extended_cmd as cmd @@ -92,7 +92,7 @@ import madgraph.various.lhe_parser as lhe_parser # import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code import models.check_param_card as check_param_card - from madgraph.iolibs.files import ln + from madgraph.iolibs.files import ln from madgraph import InvalidCmd, MadGraph5Error, MG5DIR, ReadWrite @@ -113,10 +113,10 @@ class CmdExtended(common_run.CommonRunCmd): next_possibility = { 'start': [], } - + debug_output = 'ME5_debug' error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n' - error_debug += 'More information is found in \'%(debug)s\'.\n' + error_debug += 'More information is found in \'%(debug)s\'.\n' error_debug += 'Please attach this file to your report.' 
config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n' @@ -124,18 +124,18 @@ class CmdExtended(common_run.CommonRunCmd): keyboard_stop_msg = """stopping all operation in order to quit MadGraph5_aMC@NLO please enter exit""" - + # Define the Error InvalidCmd = InvalidCmd ConfigurationError = MadGraph5Error def __init__(self, me_dir, options, *arg, **opt): """Init history and line continuation""" - + # Tag allowing/forbiding question self.force = False - - # If possible, build an info line with current version number + + # If possible, build an info line with current version number # and date, from the VERSION text file info = misc.get_pkg_info() info_line = "" @@ -150,7 +150,7 @@ def __init__(self, me_dir, options, *arg, **opt): else: version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip() info_line = "#* VERSION %s %s *\n" % \ - (version, (24 - len(version)) * ' ') + (version, (24 - len(version)) * ' ') # Create a header for the history file. # Remember to fill in time at writeout time! 
@@ -177,7 +177,7 @@ def __init__(self, me_dir, options, *arg, **opt): '#* run as ./bin/madevent.py filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - + if info_line: info_line = info_line[1:] @@ -203,11 +203,11 @@ def __init__(self, me_dir, options, *arg, **opt): "* *\n" + \ "************************************************************") super(CmdExtended, self).__init__(me_dir, options, *arg, **opt) - + def get_history_header(self): - """return the history header""" + """return the history header""" return self.history_header % misc.get_time_info() - + def stop_on_keyboard_stop(self): """action to perform to close nicely on a keyboard interupt""" try: @@ -219,20 +219,20 @@ def stop_on_keyboard_stop(self): self.add_error_log_in_html(KeyboardInterrupt) except: pass - + def postcmd(self, stop, line): """ Update the status of the run for finishing interactive command """ - - stop = super(CmdExtended, self).postcmd(stop, line) + + stop = super(CmdExtended, self).postcmd(stop, line) # relaxing the tag forbidding question self.force = False - + if not self.use_rawinput: return stop - + if self.results and not self.results.current: return stop - + arg = line.split() if len(arg) == 0: return stop @@ -240,41 +240,41 @@ def postcmd(self, stop, line): return stop if isinstance(self.results.status, str) and self.results.status == 'Stop by the user': self.update_status('%s Stop by the user' % arg[0], level=None, error=True) - return stop + return stop elif not self.results.status: return stop elif str(arg[0]) in ['exit','quit','EOF']: return stop - + try: - self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], + self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], level=None, error=True) except Exception: misc.sprint('update_status fails') pass - - + + def nice_user_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() - return cmd.Cmd.nice_user_error(self, error, line) - + return cmd.Cmd.nice_user_error(self, error, line) + def nice_config_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() stop = cmd.Cmd.nice_config_error(self, error, line) - - + + try: debug_file = open(self.debug_output, 'a') debug_file.write(open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat'))) debug_file.close() except: - pass + pass return stop - + def nice_error_handling(self, error, line): """If a ME run is currently running add a link in the html output""" @@ -294,7 +294,7 @@ def nice_error_handling(self, error, line): proc_card = pjoin(self.me_dir,'Cards','proc_card_mg5.dat') if os.path.exists(proc_card): self.banner.add(proc_card) - + out_dir = pjoin(self.me_dir, 'Events', self.run_name) if not os.path.isdir(out_dir): os.mkdir(out_dir) @@ -307,7 +307,7 @@ def nice_error_handling(self, error, line): else: pass else: - self.add_error_log_in_html() + self.add_error_log_in_html() stop = cmd.Cmd.nice_error_handling(self, error, line) try: debug_file = open(self.debug_output, 'a') @@ -316,14 +316,14 @@ def nice_error_handling(self, error, line): except: pass return stop - - + + #=============================================================================== # HelpToCmd #=============================================================================== class HelpToCmd(object): """ The Series of help routine for the MadEventCmd""" - + def help_pythia(self): logger.info("syntax: pythia [RUN] [--run_options]") logger.info("-- run pythia on RUN (current one by default)") @@ -352,29 +352,29 @@ def help_banner_run(self): logger.info(" Path should be the path of a valid banner.") 
logger.info(" RUN should be the name of a run of the current directory") self.run_options_help([('-f','answer all question by default'), - ('--name=X', 'Define the name associated with the new run')]) - + ('--name=X', 'Define the name associated with the new run')]) + def help_open(self): logger.info("syntax: open FILE ") logger.info("-- open a file with the appropriate editor.") logger.info(' If FILE belongs to index.html, param_card.dat, run_card.dat') logger.info(' the path to the last created/used directory is used') logger.info(' The program used to open those files can be chosen in the') - logger.info(' configuration file ./input/mg5_configuration.txt') - - + logger.info(' configuration file ./input/mg5_configuration.txt') + + def run_options_help(self, data): if data: logger.info('-- local options:') for name, info in data: logger.info(' %s : %s' % (name, info)) - + logger.info("-- session options:") - logger.info(" Note that those options will be kept for the current session") + logger.info(" Note that those options will be kept for the current session") logger.info(" --cluster : Submit to the cluster. 
Current cluster: %s" % self.options['cluster_type']) logger.info(" --multicore : Run in multi-core configuration") logger.info(" --nb_core=X : limit the number of core to use to X.") - + def help_generate_events(self): logger.info("syntax: generate_events [run_name] [options]",) @@ -398,16 +398,16 @@ def help_initMadLoop(self): logger.info(" -f : Bypass the edition of MadLoopParams.dat.",'$MG:color:BLUE') logger.info(" -r : Refresh of the existing filters (erasing them if already present).",'$MG:color:BLUE') logger.info(" --nPS= : Specify how many phase-space points should be tried to set up the filters.",'$MG:color:BLUE') - + def help_calculate_decay_widths(self): - + if self.ninitial != 1: logger.warning("This command is only valid for processes of type A > B C.") logger.warning("This command can not be run in current context.") logger.warning("") - + logger.info("syntax: calculate_decay_widths [run_name] [options])") logger.info("-- Calculate decay widths and enter widths and BRs in param_card") logger.info(" for a series of processes of type A > B C ...") @@ -428,8 +428,8 @@ def help_survey(self): logger.info("-- evaluate the different channel associate to the process") self.run_options_help([("--" + key,value[-1]) for (key,value) in \ self._survey_options.items()]) - - + + def help_restart_gridpack(self): logger.info("syntax: restart_gridpack --precision= --restart_zero") @@ -439,14 +439,14 @@ def help_launch(self): logger.info("syntax: launch [run_name] [options])") logger.info(" --alias for either generate_events/calculate_decay_widths") logger.info(" depending of the number of particles in the initial state.") - + if self.ninitial == 1: logger.info("For this directory this is equivalent to calculate_decay_widths") self.help_calculate_decay_widths() else: logger.info("For this directory this is equivalent to $generate_events") self.help_generate_events() - + def help_refine(self): logger.info("syntax: refine require_precision [max_channel] [--run_options]") 
logger.info("-- refine the LAST run to achieve a given precision.") @@ -454,14 +454,14 @@ def help_refine(self): logger.info(' or the required relative error') logger.info(' max_channel:[5] maximal number of channel per job') self.run_options_help([]) - + def help_combine_events(self): """ """ logger.info("syntax: combine_events [run_name] [--tag=tag_name] [--run_options]") logger.info("-- Combine the last run in order to write the number of events") logger.info(" asked in the run_card.") self.run_options_help([]) - + def help_store_events(self): """ """ logger.info("syntax: store_events [--run_options]") @@ -481,7 +481,7 @@ def help_import(self): logger.info("syntax: import command PATH") logger.info("-- Execute the command present in the file") self.run_options_help([]) - + def help_syscalc(self): logger.info("syntax: syscalc [RUN] [%s] [-f | --tag=]" % '|'.join(self._plot_mode)) logger.info("-- calculate systematics information for the RUN (current run by default)") @@ -506,18 +506,18 @@ class AskRun(cmd.ControlSwitch): ('madspin', 'Decay onshell particles'), ('reweight', 'Add weights to events for new hypp.') ] - + def __init__(self, question, line_args=[], mode=None, force=False, *args, **opt): - + self.check_available_module(opt['mother_interface'].options) self.me_dir = opt['mother_interface'].me_dir super(AskRun,self).__init__(self.to_control, opt['mother_interface'], *args, **opt) - - + + def check_available_module(self, options): - + self.available_module = set() if options['pythia-pgs_path']: self.available_module.add('PY6') @@ -540,32 +540,32 @@ def check_available_module(self, options): self.available_module.add('Rivet') else: logger.warning("Rivet program installed but no parton shower with hepmc output detected.\n Please install pythia8") - + if not MADEVENT or ('mg5_path' in options and options['mg5_path']): self.available_module.add('MadSpin') if misc.has_f2py() or options['f2py_compiler']: self.available_module.add('reweight') -# old mode to 
activate the shower +# old mode to activate the shower def ans_parton(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if value is None: self.set_all_off() else: logger.warning('Invalid command: parton=%s' % value) - - + + # -# HANDLING SHOWER +# HANDLING SHOWER # def get_allowed_shower(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_shower'): return self.allowed_shower - + self.allowed_shower = [] if 'PY6' in self.available_module: self.allowed_shower.append('Pythia6') @@ -574,9 +574,9 @@ def get_allowed_shower(self): if self.allowed_shower: self.allowed_shower.append('OFF') return self.allowed_shower - + def set_default_shower(self): - + if 'PY6' in self.available_module and\ os.path.exists(pjoin(self.me_dir,'Cards','pythia_card.dat')): self.switch['shower'] = 'Pythia6' @@ -590,10 +590,10 @@ def set_default_shower(self): def check_value_shower(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_shower(): return True - + value =value.lower() if value in ['py6','p6','pythia_6'] and 'PY6' in self.available_module: return 'Pythia6' @@ -601,13 +601,13 @@ def check_value_shower(self, value): return 'Pythia8' else: return False - - -# old mode to activate the shower + + +# old mode to activate the shower def ans_pythia(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if 'PY6' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return @@ -621,13 +621,13 @@ def ans_pythia(self, value=None): self.set_switch('shower', 'OFF') else: logger.warning('Invalid command: pythia=%s' % value) - - + + def consistency_shower_detector(self, vshower, vdetector): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower == 'OFF': @@ -635,35 +635,35 @@ def consistency_shower_detector(self, vshower, vdetector): return 'OFF' if vshower == 'Pythia8' and vdetector == 'PGS': return 'OFF' - + return None - + # # HANDLING DETECTOR # def get_allowed_detector(self): """return valid entry for the switch""" - + if hasattr(self, 'allowed_detector'): - return self.allowed_detector - + return self.allowed_detector + self.allowed_detector = [] if 'PGS' in self.available_module: self.allowed_detector.append('PGS') if 'Delphes' in self.available_module: self.allowed_detector.append('Delphes') - + if self.allowed_detector: self.allowed_detector.append('OFF') - return self.allowed_detector + return self.allowed_detector def set_default_detector(self): - + self.set_default_shower() #ensure that this one is called first! - + if 'PGS' in self.available_module and self.switch['shower'] == 'Pythia6'\ and os.path.exists(pjoin(self.me_dir,'Cards','pgs_card.dat')): self.switch['detector'] = 'PGS' @@ -674,16 +674,16 @@ def set_default_detector(self): self.switch['detector'] = 'OFF' else: self.switch['detector'] = 'Not Avail.' - -# old mode to activate pgs + +# old mode to activate pgs def ans_pgs(self, value=None): """None: means that the user type 'pgs' - value: means that the user type pgs=value""" - + value: means that the user type pgs=value""" + if 'PGS' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return - + if value is None: self.set_all_off() self.switch['shower'] = 'Pythia6' @@ -696,16 +696,16 @@ def ans_pgs(self, value=None): else: logger.warning('Invalid command: pgs=%s' % value) - + # old mode to activate Delphes def ans_delphes(self, value=None): """None: means that the user type 'delphes' - value: means that the user type delphes=value""" - + value: means that the user type delphes=value""" + if 'Delphes' not in self.available_module: logger.warning('Delphes not available. Ignore commmand') return - + if value is None: self.set_all_off() if 'PY6' in self.available_module: @@ -718,15 +718,15 @@ def ans_delphes(self, value=None): elif value == 'off': self.set_switch('detector', 'OFF') else: - logger.warning('Invalid command: pgs=%s' % value) + logger.warning('Invalid command: pgs=%s' % value) def consistency_detector_shower(self,vdetector, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ - + if vdetector == 'PGS' and vshower != 'Pythia6': return 'Pythia6' if vdetector == 'Delphes' and vshower not in ['Pythia6', 'Pythia8']: @@ -744,28 +744,28 @@ def consistency_detector_shower(self,vdetector, vshower): # def get_allowed_analysis(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_analysis'): return self.allowed_analysis - + self.allowed_analysis = [] if 'ExRoot' in self.available_module: self.allowed_analysis.append('ExRoot') if 'MA4' in self.available_module: self.allowed_analysis.append('MadAnalysis4') if 'MA5' in self.available_module: - self.allowed_analysis.append('MadAnalysis5') + self.allowed_analysis.append('MadAnalysis5') if 'Rivet' in self.available_module: - self.allowed_analysis.append('Rivet') - + self.allowed_analysis.append('Rivet') + if self.allowed_analysis: self.allowed_analysis.append('OFF') - + return 
self.allowed_analysis - + def check_analysis(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_analysis(): return True if value.lower() in ['ma4', 'madanalysis4', 'madanalysis_4','4']: @@ -786,30 +786,30 @@ def consistency_shower_analysis(self, vshower, vanalysis): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'OFF' #new value for analysis - + return None - + def consistency_analysis_shower(self, vanalysis, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'Pythia8' #new value for analysis - + return None def set_default_analysis(self): """initialise the switch for analysis""" - + if 'MA4' in self.available_module and \ os.path.exists(pjoin(self.me_dir,'Cards','plot_card.dat')): self.switch['analysis'] = 'MadAnalysis4' @@ -818,46 +818,46 @@ def set_default_analysis(self): or os.path.exists(pjoin(self.me_dir,'Cards', 'madanalysis5_hadron_card.dat'))): self.switch['analysis'] = 'MadAnalysis5' elif 'ExRoot' in self.available_module: - self.switch['analysis'] = 'ExRoot' - elif self.get_allowed_analysis(): + self.switch['analysis'] = 'ExRoot' + elif self.get_allowed_analysis(): self.switch['analysis'] = 'OFF' else: self.switch['analysis'] = 'Not Avail.' 
- + # # MADSPIN handling # def get_allowed_madspin(self): """ ON|OFF|onshell """ - + if hasattr(self, 'allowed_madspin'): return self.allowed_madspin - + self.allowed_madspin = [] if 'MadSpin' in self.available_module: self.allowed_madspin = ['OFF',"ON",'onshell',"full"] return self.allowed_madspin - + def check_value_madspin(self, value): """handle alias and valid option not present in get_allowed_madspin""" - + if value.upper() in self.get_allowed_madspin(): return True elif value.lower() in self.get_allowed_madspin(): return True - + if 'MadSpin' not in self.available_module: return False - + if value.lower() in ['madspin', 'full']: return 'full' elif value.lower() in ['none']: return 'none' - - + + def set_default_madspin(self): """initialise the switch for madspin""" - + if 'MadSpin' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): self.switch['madspin'] = 'ON' @@ -865,10 +865,10 @@ def set_default_madspin(self): self.switch['madspin'] = 'OFF' else: self.switch['madspin'] = 'Not Avail.' 
- + def get_cardcmd_for_madspin(self, value): """set some command to run before allowing the user to modify the cards.""" - + if value == 'onshell': return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"] elif value in ['full', 'madspin']: @@ -877,36 +877,36 @@ def get_cardcmd_for_madspin(self, value): return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"] else: return [] - + # # ReWeight handling # def get_allowed_reweight(self): """ return the list of valid option for reweight=XXX """ - + if hasattr(self, 'allowed_reweight'): return getattr(self, 'allowed_reweight') - + if 'reweight' not in self.available_module: self.allowed_reweight = [] return self.allowed_reweight = ['OFF', 'ON'] - + # check for plugin mode plugin_path = self.mother_interface.plugin_path opts = misc.from_plugin_import(plugin_path, 'new_reweight', warning=False) self.allowed_reweight += opts - + def set_default_reweight(self): """initialise the switch for reweight""" - + if 'reweight' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')): self.switch['reweight'] = 'ON' else: self.switch['reweight'] = 'OFF' else: - self.switch['reweight'] = 'Not Avail.' + self.switch['reweight'] = 'Not Avail.' 
#=============================================================================== # CheckValidForCmd @@ -916,14 +916,14 @@ class CheckValidForCmd(object): def check_banner_run(self, args): """check the validity of line""" - + if len(args) == 0: self.help_banner_run() raise self.InvalidCmd('banner_run requires at least one argument.') - + tag = [a[6:] for a in args if a.startswith('--tag=')] - - + + if os.path.exists(args[0]): type ='banner' format = self.detect_card_type(args[0]) @@ -931,7 +931,7 @@ def check_banner_run(self, args): raise self.InvalidCmd('The file is not a valid banner.') elif tag: args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) + (args[0], tag)) if not os.path.exists(args[0]): raise self.InvalidCmd('No banner associates to this name and tag.') else: @@ -939,7 +939,7 @@ def check_banner_run(self, args): type = 'run' banners = misc.glob('*_banner.txt', pjoin(self.me_dir,'Events', args[0])) if not banners: - raise self.InvalidCmd('No banner associates to this name.') + raise self.InvalidCmd('No banner associates to this name.') elif len(banners) == 1: args[0] = banners[0] else: @@ -947,8 +947,8 @@ def check_banner_run(self, args): tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners] tag = self.ask('which tag do you want to use?', tags[0], tags) args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) - + (args[0], tag)) + run_name = [arg[7:] for arg in args if arg.startswith('--name=')] if run_name: try: @@ -970,14 +970,14 @@ def check_banner_run(self, args): except Exception: pass self.set_run_name(name) - + def check_history(self, args): """check the validity of line""" - + if len(args) > 1: self.help_history() raise self.InvalidCmd('\"history\" command takes at most one argument') - + if not len(args): return elif args[0] != 'clean': @@ -985,16 +985,16 @@ def check_history(self, args): if dirpath and not os.path.exists(dirpath) or \ os.path.isdir(args[0]): raise 
self.InvalidCmd("invalid path %s " % dirpath) - + def check_save(self, args): """ check the validity of the line""" - + if len(args) == 0: args.append('options') if args[0] not in self._save_opts: raise self.InvalidCmd('wrong \"save\" format') - + if args[0] != 'options' and len(args) != 2: self.help_save() raise self.InvalidCmd('wrong \"save\" format') @@ -1003,7 +1003,7 @@ def check_save(self, args): if not os.path.exists(basename): raise self.InvalidCmd('%s is not a valid path, please retry' % \ args[1]) - + if args[0] == 'options': has_path = None for arg in args[1:]: @@ -1024,9 +1024,9 @@ def check_save(self, args): has_path = True if not has_path: if '--auto' in arg and self.options['mg5_path']: - args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) + args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) else: - args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) + args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) def check_set(self, args): """ check the validity of the line""" @@ -1039,20 +1039,20 @@ def check_set(self, args): self.help_set() raise self.InvalidCmd('Possible options for set are %s' % \ self._set_options) - + if args[0] in ['stdout_level']: if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \ and not args[1].isdigit(): raise self.InvalidCmd('output_level needs ' + \ - 'a valid level') - + 'a valid level') + if args[0] in ['timeout']: if not args[1].isdigit(): - raise self.InvalidCmd('timeout values should be a integer') - + raise self.InvalidCmd('timeout values should be a integer') + def check_open(self, args): """ check the validity of the line """ - + if len(args) != 1: self.help_open() raise self.InvalidCmd('OPEN command requires exactly one argument') @@ -1069,7 +1069,7 @@ def check_open(self, args): raise self.InvalidCmd('No MadEvent path defined. 
Unable to associate this name to a file') else: return True - + path = self.me_dir if os.path.isfile(os.path.join(path,args[0])): args[0] = os.path.join(path,args[0]) @@ -1078,7 +1078,7 @@ def check_open(self, args): elif os.path.isfile(os.path.join(path,'HTML',args[0])): args[0] = os.path.join(path,'HTML',args[0]) # special for card with _default define: copy the default and open it - elif '_card.dat' in args[0]: + elif '_card.dat' in args[0]: name = args[0].replace('_card.dat','_card_default.dat') if os.path.isfile(os.path.join(path,'Cards', name)): files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0])) @@ -1086,13 +1086,13 @@ def check_open(self, args): else: raise self.InvalidCmd('No default path for this file') elif not os.path.isfile(args[0]): - raise self.InvalidCmd('No default path for this file') - + raise self.InvalidCmd('No default path for this file') + def check_initMadLoop(self, args): """ check initMadLoop command arguments are valid.""" - + opt = {'refresh': False, 'nPS': None, 'force': False} - + for arg in args: if arg in ['-r','--refresh']: opt['refresh'] = True @@ -1105,14 +1105,14 @@ def check_initMadLoop(self, args): except ValueError: raise InvalidCmd("The number of attempts specified "+ "'%s' is not a valid integer."%n_attempts) - + return opt - + def check_treatcards(self, args): """check that treatcards arguments are valid [param|run|all] [--output_dir=] [--param_card=] [--run_card=] """ - + opt = {'output_dir':pjoin(self.me_dir,'Source'), 'param_card':pjoin(self.me_dir,'Cards','param_card.dat'), 'run_card':pjoin(self.me_dir,'Cards','run_card.dat'), @@ -1129,14 +1129,14 @@ def check_treatcards(self, args): if os.path.isfile(value): card_name = self.detect_card_type(value) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' % (card_name, key)) opt[key] = value elif 
os.path.isfile(pjoin(self.me_dir,value)): card_name = self.detect_card_type(pjoin(self.me_dir,value)) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' - % (card_name, key)) + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + % (card_name, key)) opt[key] = value else: raise self.InvalidCmd('No such file: %s ' % value) @@ -1154,14 +1154,14 @@ def check_treatcards(self, args): else: self.help_treatcards() raise self.InvalidCmd('Unvalid argument %s' % arg) - - return mode, opt - - + + return mode, opt + + def check_survey(self, args, cmd='survey'): """check that the argument for survey are valid""" - - + + self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) @@ -1183,41 +1183,41 @@ def check_survey(self, args, cmd='survey'): self.help_survey() raise self.InvalidCmd('Too many argument for %s command' % cmd) elif not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], None,'parton', True) args.pop(0) - + return True def check_generate_events(self, args): """check that the argument for generate_events are valid""" - + run = None if args and args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['auto','parton', 'pythia', 'pgs', 'delphes']: self.help_generate_events() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. 
Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + #if len(args) > 1: # self.help_generate_events() # raise self.InvalidCmd('Too many argument for generate_events command: %s' % cmd) - + return run def check_calculate_decay_widths(self, args): """check that the argument for calculate_decay_widths are valid""" - + if self.ninitial != 1: raise self.InvalidCmd('Can only calculate decay widths for decay processes A > B C ...') @@ -1232,7 +1232,7 @@ def check_calculate_decay_widths(self, args): if len(args) > 1: self.help_calculate_decay_widths() raise self.InvalidCmd('Too many argument for calculate_decay_widths command: %s' % cmd) - + return accuracy @@ -1241,25 +1241,25 @@ def check_multi_run(self, args): """check that the argument for survey are valid""" run = None - + if not len(args): self.help_multi_run() raise self.InvalidCmd("""multi_run command requires at least one argument for the number of times that it call generate_events command""") - + if args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['parton', 'pythia', 'pgs', 'delphes']: self.help_multi_run() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. 
To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + elif not args[0].isdigit(): self.help_multi_run() @@ -1267,7 +1267,7 @@ def check_multi_run(self, args): #pass nb run to an integer nb_run = args.pop(0) args.insert(0, int(nb_run)) - + return run @@ -1284,7 +1284,7 @@ def check_refine(self, args): self.help_refine() raise self.InvalidCmd('require_precision argument is require for refine cmd') - + if not self.run_name: if self.results.lastrun: self.set_run_name(self.results.lastrun) @@ -1296,17 +1296,17 @@ def check_refine(self, args): else: try: [float(arg) for arg in args] - except ValueError: - self.help_refine() + except ValueError: + self.help_refine() raise self.InvalidCmd('refine arguments are suppose to be number') - + return True - + def check_combine_events(self, arg): """ Check the argument for the combine events command """ - + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] elif not self.run_tag: @@ -1314,53 +1314,53 @@ def check_combine_events(self, arg): else: tag = self.run_tag self.run_tag = tag - + if len(arg) > 1: self.help_combine_events() raise self.InvalidCmd('Too many argument for combine_events command') - + if len(arg) == 1: self.set_run_name(arg[0], self.run_tag, 'parton', True) - + if not self.run_name: if not self.results.lastrun: raise self.InvalidCmd('No run_name currently define. 
Unable to run combine') else: self.set_run_name(self.results.lastrun) - + return True - + def check_pythia(self, args): """Check the argument for pythia command - syntax: pythia [NAME] + syntax: pythia [NAME] Note that other option are already removed at this point """ - + mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia', 'pgs', 'delphes']: self.help_pythia() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') - + if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' raise self.InvalidCmd(error_msg) - - - + + + tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1368,8 +1368,8 @@ def check_pythia(self, args): if self.results.lastrun: args.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): @@ -1388,21 +1388,21 @@ def check_pythia(self, args): files.ln(input_file, os.path.dirname(output_file)) else: misc.gunzip(input_file, keep=True, stdout=output_file) - + args.append(mode) - + def check_pythia8(self, args): """Check the argument for pythia command - syntax: pythia8 [NAME] + syntax: pythia8 [NAME] Note that other option are already removed at this point - """ + """ mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia','pythia8','delphes']: self.help_pythia8() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') @@ -1410,7 +1410,7 @@ def check_pythia8(self, args): if not self.options['pythia8_path']: logger.info('Retry reading configuration file to find pythia8 path') self.set_configuration() - + if not self.options['pythia8_path'] or not \ os.path.exists(pjoin(self.options['pythia8_path'],'bin','pythia8-config')): error_msg = 'No valid pythia8 path set.\n' @@ -1421,7 +1421,7 @@ def check_pythia8(self, args): raise self.InvalidCmd(error_msg) tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1430,11 +1430,11 @@ def check_pythia8(self, args): args.insert(0, self.results.lastrun) else: raise self.InvalidCmd('No run name currently define. '+ - 'Please add this information.') - + 'Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ - not os.path.exists(pjoin(self.me_dir,'Events',args[0], + not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): raise self.InvalidCmd('No events file corresponding to %s run. 
' % args[0]) @@ -1451,9 +1451,9 @@ def check_pythia8(self, args): else: raise self.InvalidCmd('No event file corresponding to %s run. ' % self.run_name) - + args.append(mode) - + def check_remove(self, args): """Check that the remove command is valid""" @@ -1484,33 +1484,33 @@ def check_plot(self, args): madir = self.options['madanalysis_path'] td = self.options['td_path'] - + if not madir or not td: logger.info('Retry to read configuration file to find madanalysis/td') self.set_configuration() madir = self.options['madanalysis_path'] - td = self.options['td_path'] - + td = self.options['td_path'] + if not madir: error_msg = 'No valid MadAnalysis path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) + raise self.InvalidCmd(error_msg) if not td: error_msg = 'No valid td path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') + raise self.InvalidCmd('No run name currently define. Please add this information.') args.append('all') return - + if args[0] not in self._plot_mode: self.set_run_name(args[0], level='plot') del args[0] @@ -1518,45 +1518,45 @@ def check_plot(self, args): args.append('all') elif not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + for arg in args: if arg not in self._plot_mode and arg != self.run_name: self.help_plot() - raise self.InvalidCmd('unknown options %s' % arg) - + raise self.InvalidCmd('unknown options %s' % arg) + def check_syscalc(self, args): """Check the argument for the syscalc command syscalc run_name modes""" scdir = self.options['syscalc_path'] - + if not scdir: logger.info('Retry to read configuration file to find SysCalc') self.set_configuration() scdir = self.options['syscalc_path'] - + if not scdir: error_msg = 'No valid SysCalc path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' error_msg += 'Please note that you need to compile SysCalc first.' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. Please add this information.') args.append('all') return #deal options tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] - + if args[0] not in self._syscalc_mode: self.set_run_name(args[0], tag=tag, level='syscalc') del args[0] @@ -1564,61 +1564,61 @@ def check_syscalc(self, args): args.append('all') elif not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. 
Please add this information.') elif tag and tag != self.run_tag: self.set_run_name(self.run_name, tag=tag, level='syscalc') - + for arg in args: if arg not in self._syscalc_mode and arg != self.run_name: self.help_syscalc() - raise self.InvalidCmd('unknown options %s' % arg) + raise self.InvalidCmd('unknown options %s' % arg) if self.run_card['use_syst'] not in self.true: raise self.InvalidCmd('Run %s does not include ' % self.run_name + \ 'systematics information needed for syscalc.') - - + + def check_pgs(self, arg, no_default=False): """Check the argument for pythia command - syntax is "pgs [NAME]" + syntax is "pgs [NAME]" Note that other option are already remove at this point """ - + # If not pythia-pgs path if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] - - + + if len(arg) == 0 and not self.run_name: if self.results.lastrun: arg.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(arg) == 1 and self.run_name == arg[0]: arg.pop(0) - + if not len(arg) and \ not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): if not no_default: self.help_pgs() raise self.InvalidCmd('''No file file pythia_events.hep currently available Please specify a valid run_name''') - - lock = None + + lock = None if len(arg) == 1: prev_tag = self.set_run_name(arg[0], tag, 'pgs') if not os.path.exists(pjoin(self.me_dir,'Events',self.run_name,'%s_pythia_events.hep.gz' % prev_tag)): @@ -1626,25 +1626,25 @@ def check_pgs(self, arg, no_default=False): else: input_file = pjoin(self.me_dir,'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag) output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep') - lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), + lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), argument=['-c', input_file]) else: - if tag: + if tag: self.run_card['run_tag'] = tag self.set_run_name(self.run_name, tag, 'pgs') - - return lock + + return lock def check_display(self, args): """check the validity of line syntax is "display XXXXX" """ - + if len(args) < 1 or args[0] not in self._display_opts: self.help_display() raise self.InvalidCmd - + if args[0] == 'variable' and len(args) !=2: raise self.InvalidCmd('variable need a variable name') @@ -1654,39 +1654,39 @@ def check_display(self, args): def check_import(self, args): """check the validity of line""" - + if not args: self.help_import() raise self.InvalidCmd('wrong \"import\" format') - + if args[0] != 'command': args.insert(0,'command') - - + + if not len(args) == 2 or not os.path.exists(args[1]): raise self.InvalidCmd('PATH is mandatory for import command\n') - + #=============================================================================== # CompleteForCmd #=============================================================================== class CompleteForCmd(CheckValidForCmd): """ The Series of help 
routine for the MadGraphCmd""" - - + + def complete_banner_run(self, text, line, begidx, endidx, formatting=True): "Complete the banner run command" try: - - + + args = self.split_arg(line[0:begidx], error=False) - + if args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args \ - if a.endswith(os.path.sep)])) - - + if a.endswith(os.path.sep)])) + + if len(args) > 1: # only options are possible tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events' , args[1])) @@ -1697,9 +1697,9 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): else: return self.list_completion(text, tags) return self.list_completion(text, tags +['--name=','-f'], line) - + # First argument - possibilites = {} + possibilites = {} comp = self.path_completion(text, os.path.join('.',*[a for a in args \ if a.endswith(os.path.sep)])) @@ -1711,10 +1711,10 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): run_list = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) run_list = [n.rsplit('/',2)[1] for n in run_list] possibilites['RUN Name'] = self.list_completion(text, run_list) - + return self.deal_multiple_categories(possibilites, formatting) - - + + except Exception as error: print(error) @@ -1732,12 +1732,12 @@ def complete_history(self, text, line, begidx, endidx): if len(args) == 1: return self.path_completion(text) - - def complete_open(self, text, line, begidx, endidx): + + def complete_open(self, text, line, begidx, endidx): """ complete the open command """ args = self.split_arg(line[0:begidx]) - + # Directory continuation if os.path.sep in args[-1] + text: return self.path_completion(text, @@ -1751,10 +1751,10 @@ def complete_open(self, text, line, begidx, endidx): if os.path.isfile(os.path.join(path,'README')): possibility.append('README') if os.path.isdir(os.path.join(path,'Cards')): - possibility += [f for f in os.listdir(os.path.join(path,'Cards')) + 
possibility += [f for f in os.listdir(os.path.join(path,'Cards')) if f.endswith('.dat')] if os.path.isdir(os.path.join(path,'HTML')): - possibility += [f for f in os.listdir(os.path.join(path,'HTML')) + possibility += [f for f in os.listdir(os.path.join(path,'HTML')) if f.endswith('.html') and 'default' not in f] else: possibility.extend(['./','../']) @@ -1763,7 +1763,7 @@ def complete_open(self, text, line, begidx, endidx): if os.path.exists('MG5_debug'): possibility.append('MG5_debug') return self.list_completion(text, possibility) - + def complete_set(self, text, line, begidx, endidx): "Complete the set command" @@ -1784,27 +1784,27 @@ def complete_set(self, text, line, begidx, endidx): elif len(args) >2 and args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args if a.endswith(os.path.sep)]), - only_dirs = True) - + only_dirs = True) + def complete_survey(self, text, line, begidx, endidx): """ Complete the survey command """ - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + return self.list_completion(text, self._run_options, line) - + complete_refine = complete_survey complete_combine_events = complete_survey complite_store = complete_survey complete_generate_events = complete_survey complete_create_gridpack = complete_survey - + def complete_generate_events(self, text, line, begidx, endidx): """ Complete the generate events""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() @@ -1813,17 +1813,17 @@ def complete_generate_events(self, text, line, begidx, endidx): return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) def 
complete_initMadLoop(self, text, line, begidx, endidx): "Complete the initMadLoop command" - + numbers = [str(i) for i in range(10)] opts = ['-f','-r','--nPS='] - + args = self.split_arg(line[0:begidx], error=False) if len(line) >=6 and line[begidx-6:begidx]=='--nPS=': return self.list_completion(text, numbers, line) @@ -1840,18 +1840,18 @@ def complete_launch(self, *args, **opts): def complete_calculate_decay_widths(self, text, line, begidx, endidx): """ Complete the calculate_decay_widths command""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + opts = self._run_options + self._calculate_decay_options return self.list_completion(text, opts, line) - + def complete_display(self, text, line, begidx, endidx): - """ Complete the display command""" - + """ Complete the display command""" + args = self.split_arg(line[0:begidx], error=False) if len(args) >= 2 and args[1] =='results': start = line.find('results') @@ -1860,44 +1860,44 @@ def complete_display(self, text, line, begidx, endidx): def complete_multi_run(self, text, line, begidx, endidx): """complete multi run command""" - + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: data = [str(i) for i in range(0,20)] return self.list_completion(text, data, line) - + if line.endswith('run=') and not text: return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - - - + + + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - + def complete_plot(self, text, line, begidx, endidx): """ Complete 
the plot command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1: return self.list_completion(text, self._plot_mode) else: return self.list_completion(text, self._plot_mode + list(self.results.keys())) - + def complete_syscalc(self, text, line, begidx, endidx, formatting=True): """ Complete the syscalc command """ - + output = {} args = self.split_arg(line[0:begidx], error=False) - + if len(args) <=1: output['RUN_NAME'] = self.list_completion(list(self.results.keys())) output['MODE'] = self.list_completion(text, self._syscalc_mode) @@ -1907,12 +1907,12 @@ def complete_syscalc(self, text, line, begidx, endidx, formatting=True): if run in self.results: tags = ['--tag=%s' % tag['tag'] for tag in self.results[run]] output['options'] += tags - + return self.deal_multiple_categories(output, formatting) - + def complete_remove(self, text, line, begidx, endidx): """Complete the remove command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1 and (text.startswith('--t')): run = args[1] @@ -1932,8 +1932,8 @@ def complete_remove(self, text, line, begidx, endidx): data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1] for n in data] return self.list_completion(text, ['all'] + data) - - + + def complete_shower(self,text, line, begidx, endidx): "Complete the shower command" args = self.split_arg(line[0:begidx], error=False) @@ -1941,7 +1941,7 @@ def complete_shower(self,text, line, begidx, endidx): return self.list_completion(text, self._interfaced_showers) elif len(args)>1 and args[1] in self._interfaced_showers: return getattr(self, 'complete_%s' % text)\ - (text, args[1],line.replace(args[0]+' ',''), + (text, args[1],line.replace(args[0]+' ',''), begidx-len(args[0])-1, endidx-len(args[0])-1) def complete_pythia8(self,text, line, begidx, endidx): @@ -1955,11 +1955,11 @@ def complete_pythia8(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = 
self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_madanalysis5_parton(self,text, line, begidx, endidx): @@ -1978,19 +1978,19 @@ def complete_madanalysis5_parton(self,text, line, begidx, endidx): else: tmp2 = self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) - return tmp1 + tmp2 + return tmp1 + tmp2 elif '--MA5_stdout_lvl=' in line and not any(arg.startswith( '--MA5_stdout_lvl=') for arg in args): - return self.list_completion(text, - ['--MA5_stdout_lvl=%s'%opt for opt in + return self.list_completion(text, + ['--MA5_stdout_lvl=%s'%opt for opt in ['logging.INFO','logging.DEBUG','logging.WARNING', 'logging.CRITICAL','90']], line) else: - return self.list_completion(text, ['-f', + return self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) def complete_pythia(self,text, line, begidx, endidx): - "Complete the pythia command" + "Complete the pythia command" args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: @@ -2001,16 +2001,16 @@ def complete_pythia(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_pgs(self,text, line, begidx, endidx): "Complete the pythia command" - args = self.split_arg(line[0:begidx], error=False) + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: 
#return valid run_name data = misc.glob(pjoin('*', '*_pythia_events.hep.gz'), pjoin(self.me_dir, 'Events')) @@ -2019,23 +2019,23 @@ def complete_pgs(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--tag=' ,'--no_default'], line) - return tmp1 + tmp2 + return tmp1 + tmp2 else: - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--tag=','--no_default'], line) - complete_delphes = complete_pgs - complete_rivet = complete_pgs + complete_delphes = complete_pgs + complete_rivet = complete_pgs #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCmd): - """The command line processor of Mad Graph""" - + """The command line processor of Mad Graph""" + LO = True # Truth values @@ -2063,7 +2063,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm cluster_mode = 0 queue = 'madgraph' nb_core = None - + next_possibility = { 'start': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]', 'calculate_decay_widths [OPTIONS]', @@ -2080,9 +2080,9 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm 'pgs': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'], 'delphes' : ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'] } - + asking_for_run = AskRun - + ############################################################################ def __init__(self, me_dir = None, options={}, *completekey, **stdin): """ add information to the cmd """ @@ -2095,16 +2095,16 @@ def __init__(self, me_dir = None, options={}, *completekey, **stdin): if self.web: os.system('touch %s' % pjoin(self.me_dir,'Online')) - 
self.load_results_db() + self.load_results_db() self.results.def_web_mode(self.web) self.Gdirs = None - + self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) self.configured = 0 # time for reading the card self._options = {} # for compatibility with extended_cmd - - + + def pass_in_web_mode(self): """configure web data""" self.web = True @@ -2113,22 +2113,22 @@ def pass_in_web_mode(self): if os.environ['MADGRAPH_BASE']: self.options['mg5_path'] = pjoin(os.environ['MADGRAPH_BASE'],'MG5') - ############################################################################ + ############################################################################ def check_output_type(self, path): """ Check that the output path is a valid madevent directory """ - + bin_path = os.path.join(path,'bin') if os.path.isfile(os.path.join(bin_path,'generate_events')): return True - else: + else: return False ############################################################################ def set_configuration(self, amcatnlo=False, final=True, **opt): - """assign all configuration variable from file + """assign all configuration variable from file loop over the different config file if config_file not define """ - - super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, + + super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, final=final, **opt) if not final: @@ -2171,24 +2171,24 @@ def set_configuration(self, amcatnlo=False, final=True, **opt): if not os.path.exists(pjoin(path, 'sys_calc')): logger.info("No valid SysCalc path found") continue - # No else since the next line reinitialize the option to the + # No else since the next line reinitialize the option to the #previous value anyway self.options[key] = os.path.realpath(path) continue else: self.options[key] = None - - + + return self.options ############################################################################ - def do_banner_run(self, line): + def do_banner_run(self, line): """Make a run from the banner file""" - + 
args = self.split_arg(line) #check the validity of the arguments - self.check_banner_run(args) - + self.check_banner_run(args) + # Remove previous cards for name in ['delphes_trigger.dat', 'delphes_card.dat', 'pgs_card.dat', 'pythia_card.dat', 'madspin_card.dat', @@ -2197,20 +2197,20 @@ def do_banner_run(self, line): os.remove(pjoin(self.me_dir, 'Cards', name)) except Exception: pass - + banner_mod.split_banner(args[0], self.me_dir, proc_card=False) - + # Check if we want to modify the run if not self.force: ans = self.ask('Do you want to modify the Cards?', 'n', ['y','n']) if ans == 'n': self.force = True - + # Call Generate events self.exec_cmd('generate_events %s %s' % (self.run_name, self.force and '-f' or '')) - - - + + + ############################################################################ def do_display(self, line, output=sys.stdout): """Display current internal status""" @@ -2223,7 +2223,7 @@ def do_display(self, line, output=sys.stdout): #return valid run_name data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1:] for n in data] - + if data: out = {} for name, tag in data: @@ -2235,11 +2235,11 @@ def do_display(self, line, output=sys.stdout): print('the runs available are:') for run_name, tags in out.items(): print(' run: %s' % run_name) - print(' tags: ', end=' ') + print(' tags: ', end=' ') print(', '.join(tags)) else: print('No run detected.') - + elif args[0] == 'options': outstr = " Run Options \n" outstr += " ----------- \n" @@ -2260,8 +2260,8 @@ def do_display(self, line, output=sys.stdout): if value == default: outstr += " %25s \t:\t%s\n" % (key,value) else: - outstr += " %25s \t:\t%s (user set)\n" % (key,value) - outstr += "\n" + outstr += " %25s \t:\t%s (user set)\n" % (key,value) + outstr += "\n" outstr += " Configuration Options \n" outstr += " --------------------- \n" for key, default in self.options_configuration.items(): @@ -2275,15 +2275,15 @@ def do_display(self, line, 
output=sys.stdout): self.do_print_results(' '.join(args[1:])) else: super(MadEventCmd, self).do_display(line, output) - + def do_save(self, line, check=True, to_keep={}): - """Not in help: Save information to file""" + """Not in help: Save information to file""" args = self.split_arg(line) # Check argument validity if check: self.check_save(args) - + if args[0] == 'options': # First look at options which should be put in MG5DIR/input to_define = {} @@ -2295,7 +2295,7 @@ def do_save(self, line, check=True, to_keep={}): for key, default in self.options_madevent.items(): if self.options[key] != self.options_madevent[key]: to_define[key] = self.options[key] - + if '--all' in args: for key, default in self.options_madgraph.items(): if self.options[key] != self.options_madgraph[key]: @@ -2312,12 +2312,12 @@ def do_save(self, line, check=True, to_keep={}): filepath = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basefile = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basedir = self.me_dir - + if to_keep: to_define = to_keep self.write_configuration(filepath, basefile, basedir, to_define) - - + + def do_edit_cards(self, line): @@ -2326,80 +2326,80 @@ def do_edit_cards(self, line): # Check argument's validity mode = self.check_generate_events(args) self.ask_run_configuration(mode) - + return ############################################################################ - + ############################################################################ def do_restart_gridpack(self, line): """ syntax restart_gridpack --precision=1.0 --restart_zero collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) - + # initialize / remove lhapdf mode #self.run_card = 
banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) #self.configure_directory() - + gensym = gen_ximprove.gensym(self) - + min_precision = 1.0 resubmit_zero=False if '--precision=' in line: s = line.index('--precision=') + len('--precision=') arg=line[s:].split(1)[0] min_precision = float(arg) - + if '--restart_zero' in line: resubmit_zero = True - - + + gensym.resubmit(min_precision, resubmit_zero) self.monitor(run_type='All jobs submitted for gridpack', html=True) #will be done during the refine (more precisely in gen_ximprove) cross, error = sum_html.make_all_html_results(self) self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(gensym.run_statistics)) - + #self.exec_cmd('combine_events', postcmd=False) #self.exec_cmd('store_events', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) self.exec_cmd('create_gridpack', postcmd=False) - - - ############################################################################ + + + ############################################################################ ############################################################################ def do_generate_events(self, line): """Main Commands: launch the full chain """ - + self.banner = None self.Gdirs = None - + args = self.split_arg(line) # Check argument's validity mode = self.check_generate_events(args) switch_mode = self.ask_run_configuration(mode, args) if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir), None, 'parton') else: self.set_run_name(args[0], None, 'parton', True) args.pop(0) - + 
self.run_generate_events(switch_mode, args) self.postprocessing() @@ -2420,8 +2420,8 @@ def postprocessing(self): def rivet_postprocessing(self, rivet_config, postprocess_RIVET, postprocess_CONTUR): - # Check number of Rivet jobs to run - run_dirs = [pjoin(self.me_dir, 'Events',run_name) + # Check number of Rivet jobs to run + run_dirs = [pjoin(self.me_dir, 'Events',run_name) for run_name in self.postprocessing_dirs] nb_rivet = len(run_dirs) @@ -2550,10 +2550,10 @@ def wait_monitoring(Idle, Running, Done): wrapper = open(pjoin(self.me_dir, "Analysis", "contur", "run_contur.sh"), "w") wrapper.write(set_env) - + wrapper.write('{0}\n'.format(contur_cmd)) wrapper.close() - + misc.call(["run_contur.sh"], cwd=(pjoin(self.me_dir, "Analysis", "contur"))) logger.info("Contur outputs are stored in {0}".format(pjoin(self.me_dir, "Analysis", "contur","conturPlot"))) @@ -2572,7 +2572,7 @@ def run_generate_events(self, switch_mode, args): self.do_set('run_mode 2') self.do_set('nb_core 1') - if self.run_card['gridpack'] in self.true: + if self.run_card['gridpack'] in self.true: # Running gridpack warmup gridpack_opts=[('accuracy', 0.01), ('points', 2000), @@ -2593,7 +2593,7 @@ def run_generate_events(self, switch_mode, args): # Regular run mode logger.info('Generating %s events with run name %s' % (self.run_card['nevents'], self.run_name)) - + self.exec_cmd('survey %s %s' % (self.run_name,' '.join(args)), postcmd=False) nb_event = self.run_card['nevents'] @@ -2601,7 +2601,7 @@ def run_generate_events(self, switch_mode, args): self.exec_cmd('refine %s' % nb_event, postcmd=False) if not float(self.results.current['cross']): # Zero cross-section. Try to guess why - text = '''Survey return zero cross section. + text = '''Survey return zero cross section. Typical reasons are the following: 1) A massive s-channel particle has a width set to zero. 
2) The pdf are zero for at least one of the initial state particles @@ -2613,17 +2613,17 @@ def run_generate_events(self, switch_mode, args): raise ZeroResult('See https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/FAQ-General-14') else: bypass_run = True - + #we can bypass the following if scan and first result is zero if not bypass_run: self.exec_cmd('refine %s --treshold=%s' % (nb_event,self.run_card['second_refine_treshold']) , postcmd=False) - + self.exec_cmd('combine_events', postcmd=False,printcmd=False) self.print_results_in_shell(self.results.current) if self.run_card['use_syst']: - if self.run_card['systematics_program'] == 'auto': + if self.run_card['systematics_program'] == 'auto': scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): to_use = 'systematics' @@ -2634,26 +2634,26 @@ def run_generate_events(self, switch_mode, args): else: logger.critical('Unvalid options for systematics_program: bypass computation of systematics variations.') to_use = 'none' - + if to_use == 'systematics': if self.run_card['systematics_arguments'] != ['']: self.exec_cmd('systematics %s %s ' % (self.run_name, - ' '.join(self.run_card['systematics_arguments'])), + ' '.join(self.run_card['systematics_arguments'])), postcmd=False, printcmd=False) else: self.exec_cmd('systematics %s --from_card' % self.run_name, - postcmd=False,printcmd=False) + postcmd=False,printcmd=False) elif to_use == 'syscalc': self.run_syscalc('parton') - - - self.create_plot('parton') - self.exec_cmd('store_events', postcmd=False) + + + self.create_plot('parton') + self.exec_cmd('store_events', postcmd=False) if self.run_card['boost_event'].strip() and self.run_card['boost_event'] != 'False': self.boost_events() - - - self.exec_cmd('reweight -from_cards', postcmd=False) + + + self.exec_cmd('reweight -from_cards', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) if self.run_card['time_of_flight']>=0: self.exec_cmd("add_time_of_flight --threshold=%s" % 
self.run_card['time_of_flight'] ,postcmd=False) @@ -2664,43 +2664,43 @@ def run_generate_events(self, switch_mode, args): self.create_root_file(input , output) self.exec_cmd('madanalysis5_parton --no_default', postcmd=False, printcmd=False) - # shower launches pgs/delphes if needed + # shower launches pgs/delphes if needed self.exec_cmd('shower --no_default', postcmd=False, printcmd=False) self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) self.exec_cmd('rivet --no_default', postcmd=False, printcmd=False) self.store_result() - - if self.allow_notification_center: - misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), - '%s: %s +- %s ' % (self.results.current['run_name'], + + if self.allow_notification_center: + misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), + '%s: %s +- %s ' % (self.results.current['run_name'], self.results.current['cross'], self.results.current['error'])) - + def boost_events(self): - + if not self.run_card['boost_event']: return - + if self.run_card['boost_event'].startswith('lambda'): if not isinstance(self, cmd.CmdShell): raise Exception("boost not allowed online") filter = eval(self.run_card['boost_event']) else: raise Exception - + path = [pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')] - + for p in path: if os.path.exists(p): event_path = p break else: raise Exception("fail to find event file for the boost") - - + + lhe = lhe_parser.EventFile(event_path) with misc.TMP_directory() as tmp_dir: output = lhe_parser.EventFile(pjoin(tmp_dir, os.path.basename(event_path)), 'w') @@ -2711,28 +2711,28 @@ def boost_events(self): event.boost(filter) #write this modify event output.write(str(event)) - output.write('\n') + output.write('\n') lhe.close() - 
files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) - - - - - + files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) + + + + + def do_initMadLoop(self,line): - """Compile and run MadLoop for a certain number of PS point so as to + """Compile and run MadLoop for a certain number of PS point so as to initialize MadLoop (setup the zero helicity and loop filter.)""" - + args = line.split() # Check argument's validity options = self.check_initMadLoop(args) - + if not options['force']: self.ask_edit_cards(['MadLoopParams.dat'], mode='fixed', plot=False) self.exec_cmd('treatcards loop --no_MadLoopInit') if options['refresh']: - for filter in misc.glob('*Filter*', + for filter in misc.glob('*Filter*', pjoin(self.me_dir,'SubProcesses','MadLoop5_resources')): logger.debug("Resetting filter '%s'."%os.path.basename(filter)) os.remove(filter) @@ -2753,14 +2753,14 @@ def do_initMadLoop(self,line): def do_launch(self, line, *args, **opt): """Main Commands: exec generate_events for 2>N and calculate_width for 1>N""" - + if self.ninitial == 1: logger.info("Note that since 2.3. 
The launch for 1>N pass in event generation\n"+ " To have the previous behavior use the calculate_decay_widths function") # self.do_calculate_decay_widths(line, *args, **opt) #else: self.do_generate_events(line, *args, **opt) - + def print_results_in_shell(self, data): """Have a nice results prints in the shell, data should be of type: gen_crossxhtml.OneTagResults""" @@ -2770,7 +2770,7 @@ def print_results_in_shell(self, data): if data['run_statistics']: globalstat = sum_html.RunStatistics() - + logger.info(" " ) logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2786,13 +2786,13 @@ def print_results_in_shell(self, data): logger.warning(globalstat.get_warning_text()) logger.info(" ") - + logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) - + total_time = int(sum(_['cumulative_timing'] for _ in data['run_statistics'].values())) if total_time > 0: logger.info(" Cumulative sequential time for this run: %s"%misc.format_time(total_time)) - + if self.ninitial == 1: logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) else: @@ -2810,18 +2810,18 @@ def print_results_in_shell(self, data): if len(split)!=3: continue scale, cross, error = split - cross_sections[float(scale)] = (float(cross), float(error)) + cross_sections[float(scale)] = (float(cross), float(error)) if len(cross_sections)>0: logger.info(' Pythia8 merged cross-sections are:') for scale in sorted(cross_sections.keys()): logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ (scale,cross_sections[scale][0],cross_sections[scale][1])) - + else: if self.ninitial == 1: logger.info(" Matched width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) else: - logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) + logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) logger.info(" 
Nb of events after matching/merging : %d" % int(data['nb_event_pythia'])) if self.run_card['use_syst'] in self.true and \ (int(self.run_card['ickkw'])==1 or self.run_card['ktdurham']>0.0 @@ -2838,9 +2838,9 @@ def print_results_in_file(self, data, path, mode='w', format='full'): data should be of type: gen_crossxhtml.OneTagResults""" if not data: return - + fsock = open(path, mode) - + if data['run_statistics']: logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2851,7 +2851,7 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if format == "full": fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ (data['run_name'],data['tag'], os.path.basename(self.me_dir))) - + if self.ninitial == 1: fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) else: @@ -2861,20 +2861,20 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if self.ninitial == 1: fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) else: - fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) + fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) fsock.write(" \n" ) elif format == "short": if mode == "w": fsock.write("# run_name tag cross error Nb_event cross_after_matching nb_event_after matching\n") - + if data['cross_pythia'] and data['nb_event_pythia']: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s %(cross_pythia)s %(nb_event_pythia)s\n" else: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s\n" fsock.write(text % data) - - ############################################################################ + + ############################################################################ def do_calculate_decay_widths(self, 
line): """Main Commands: launch decay width calculation and automatic inclusion of calculated widths and BRs in the param_card.""" @@ -2887,21 +2887,21 @@ def do_calculate_decay_widths(self, line): self.Gdirs = None if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], reload_card=True) args.pop(0) self.configure_directory() - + # Running gridpack warmup opts=[('accuracy', accuracy), # default 0.01 ('points', 1000), ('iterations',9)] logger.info('Calculating decay widths with run name %s' % self.run_name) - + self.exec_cmd('survey %s %s' % \ (self.run_name, " ".join(['--' + opt + '=' + str(val) for (opt,val) \ @@ -2910,26 +2910,26 @@ def do_calculate_decay_widths(self, line): self.refine_mode = "old" # specify how to combine event self.exec_cmd('combine_events', postcmd=False) self.exec_cmd('store_events', postcmd=False) - + self.collect_decay_widths() self.print_results_in_shell(self.results.current) - self.update_status('calculate_decay_widths done', - level='parton', makehtml=False) + self.update_status('calculate_decay_widths done', + level='parton', makehtml=False) + - ############################################################################ def collect_decay_widths(self): - """ Collect the decay widths and calculate BRs for all particles, and put - in param_card form. + """ Collect the decay widths and calculate BRs for all particles, and put + in param_card form. """ - + particle_dict = {} # store the results run_name = self.run_name # Looping over the Subprocesses for P_path in SubProcesses.get_subP(self.me_dir): ids = SubProcesses.get_subP_ids(P_path) - # due to grouping we need to compute the ratio factor for the + # due to grouping we need to compute the ratio factor for the # ungroup resutls (that we need here). 
Note that initial particles # grouping are not at the same stage as final particle grouping nb_output = len(ids) / (len(set([p[0] for p in ids]))) @@ -2940,30 +2940,30 @@ def collect_decay_widths(self): particle_dict[particles[0]].append([particles[1:], result/nb_output]) except KeyError: particle_dict[particles[0]] = [[particles[1:], result/nb_output]] - + self.update_width_in_param_card(particle_dict, initial = pjoin(self.me_dir, 'Cards', 'param_card.dat'), output=pjoin(self.me_dir, 'Events', run_name, "param_card.dat")) - + @staticmethod def update_width_in_param_card(decay_info, initial=None, output=None): # Open the param_card.dat and insert the calculated decays and BRs - + if not output: output = initial - + param_card_file = open(initial) param_card = param_card_file.read().split('\n') param_card_file.close() decay_lines = [] line_number = 0 - # Read and remove all decays from the param_card + # Read and remove all decays from the param_card while line_number < len(param_card): line = param_card[line_number] if line.lower().startswith('decay'): - # Read decay if particle in decay_info - # DECAY 6 1.455100e+00 + # Read decay if particle in decay_info + # DECAY 6 1.455100e+00 line = param_card.pop(line_number) line = line.split() particle = 0 @@ -2996,7 +2996,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): break line=param_card[line_number] if particle and particle not in decay_info: - # No decays given, only total width + # No decays given, only total width decay_info[particle] = [[[], width]] else: # Not decay line_number += 1 @@ -3004,7 +3004,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): while not param_card[-1] or param_card[-1].startswith('#'): param_card.pop(-1) - # Append calculated and read decays to the param_card + # Append calculated and read decays to the param_card param_card.append("#\n#*************************") param_card.append("# Decay widths *") 
param_card.append("#*************************") @@ -3018,7 +3018,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): param_card.append("# BR NDA ID1 ID2 ...") brs = [[(val[1]/width).real, val[0]] for val in decay_info[key] if val[1]] for val in sorted(brs, reverse=True): - param_card.append(" %e %i %s # %s" % + param_card.append(" %e %i %s # %s" % (val[0].real, len(val[1]), " ".join([str(v) for v in val[1]]), val[0] * width @@ -3031,7 +3031,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): ############################################################################ def do_multi_run(self, line): - + args = self.split_arg(line) # Check argument's validity mode = self.check_multi_run(args) @@ -3047,7 +3047,7 @@ def do_multi_run(self, line): self.check_param_card(path, run=False) #store it locally to avoid relaunch param_card_iterator, self.param_card_iterator = self.param_card_iterator, [] - + crossoversig = 0 inv_sq_err = 0 nb_event = 0 @@ -3055,8 +3055,8 @@ def do_multi_run(self, line): self.nb_refine = 0 self.exec_cmd('generate_events %s_%s -f' % (main_name, i), postcmd=False) # Update collected value - nb_event += int(self.results[self.run_name][-1]['nb_event']) - self.results.add_detail('nb_event', nb_event , run=main_name) + nb_event += int(self.results[self.run_name][-1]['nb_event']) + self.results.add_detail('nb_event', nb_event , run=main_name) cross = self.results[self.run_name][-1]['cross'] error = self.results[self.run_name][-1]['error'] + 1e-99 crossoversig+=cross/error**2 @@ -3070,7 +3070,7 @@ def do_multi_run(self, line): os.mkdir(pjoin(self.me_dir,'Events', self.run_name)) except Exception: pass - os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' + os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' % {'bin': 
self.dirbin, 'event': pjoin(self.me_dir,'Events'), 'name': self.run_name}) @@ -3084,19 +3084,19 @@ def do_multi_run(self, line): self.create_root_file('%s/unweighted_events.lhe' % self.run_name, '%s/unweighted_events.root' % self.run_name) - - path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") + + path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") self.create_plot('parton', path, pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') ) - - if not os.path.exists('%s.gz' % path): + + if not os.path.exists('%s.gz' % path): misc.gzip(path) self.update_status('', level='parton') - self.print_results_in_shell(self.results.current) - + self.print_results_in_shell(self.results.current) + cpath = pjoin(self.me_dir,'Cards','param_card.dat') if param_card_iterator: @@ -3112,21 +3112,21 @@ def do_multi_run(self, line): path = pjoin(self.me_dir, 'Events','scan_%s.txt' % scan_name) logger.info("write all cross-section results in %s" % path, '$MG:BOLD') param_card_iterator.write_summary(path) - - ############################################################################ + + ############################################################################ def do_treatcards(self, line, mode=None, opt=None): """Advanced commands: create .inc files from param_card.dat/run_card.dat""" if not mode and not opt: args = self.split_arg(line) mode, opt = self.check_treatcards(args) - + # To decide whether to refresh MadLoop's helicity filters, it is necessary # to check if the model parameters where modified or not, before doing - # anything else. + # anything else. need_MadLoopFilterUpdate = False - # Just to record what triggered the reinitialization of MadLoop for a + # Just to record what triggered the reinitialization of MadLoop for a # nice debug message. 
type_of_change = '' if not opt['forbid_MadLoopInit'] and self.proc_characteristics['loop_induced'] \ @@ -3137,10 +3137,10 @@ def do_treatcards(self, line, mode=None, opt=None): (os.path.getmtime(paramDat)-os.path.getmtime(paramInc)) > 0.0: need_MadLoopFilterUpdate = True type_of_change = 'model' - + ML_in = pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat') ML_out = pjoin(self.me_dir,"SubProcesses", - "MadLoop5_resources", "MadLoopParams.dat") + "MadLoop5_resources", "MadLoopParams.dat") if (not os.path.isfile(ML_in)) or (not os.path.isfile(ML_out)) or \ (os.path.getmtime(ML_in)-os.path.getmtime(ML_out)) > 0.0: need_MadLoopFilterUpdate = True @@ -3148,7 +3148,7 @@ def do_treatcards(self, line, mode=None, opt=None): #check if no 'Auto' are present in the file self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) - + if mode in ['param', 'all']: model = self.find_model_name() tmp_model = os.path.basename(model) @@ -3160,9 +3160,9 @@ def do_treatcards(self, line, mode=None, opt=None): check_param_card.check_valid_param_card(mg5_param) opt['param_card'] = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') else: - check_param_card.check_valid_param_card(opt['param_card']) - - logger.debug('write compile file for card: %s' % opt['param_card']) + check_param_card.check_valid_param_card(opt['param_card']) + + logger.debug('write compile file for card: %s' % opt['param_card']) param_card = check_param_card.ParamCard(opt['param_card']) outfile = pjoin(opt['output_dir'], 'param_card.inc') ident_card = pjoin(self.me_dir,'Cards','ident_card.dat') @@ -3185,10 +3185,10 @@ def do_treatcards(self, line, mode=None, opt=None): devnull.close() default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat') - need_mp = self.proc_characteristics['loop_induced'] + need_mp = self.proc_characteristics['loop_induced'] param_card.write_inc_file(outfile, ident_card, default, need_mp=need_mp) - - + + if mode in ['run', 'all']: if not hasattr(self, 'run_card'): 
run_card = banner_mod.RunCard(opt['run_card'], path=pjoin(self.me_dir, 'Cards', 'run_card.dat')) @@ -3202,7 +3202,7 @@ def do_treatcards(self, line, mode=None, opt=None): run_card['lpp2'] = 0 run_card['ebeam1'] = 0 run_card['ebeam2'] = 0 - + # Ensure that the bias parameters has all the required input from the # run_card if run_card['bias_module'].lower() not in ['dummy','none']: @@ -3219,7 +3219,7 @@ def do_treatcards(self, line, mode=None, opt=None): mandatory_file,run_card['bias_module'])) misc.copytree(run_card['bias_module'], pjoin(self.me_dir,'Source','BIAS', os.path.basename(run_card['bias_module']))) - + #check expected parameters for the module. default_bias_parameters = {} start, last = False,False @@ -3244,50 +3244,50 @@ def do_treatcards(self, line, mode=None, opt=None): for pair in line.split(','): if not pair.strip(): continue - x,y =pair.split(':') + x,y =pair.split(':') x=x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y elif ':' in line: x,y = line.split(':') x = x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y for key,value in run_card['bias_parameters'].items(): if key not in default_bias_parameters: logger.warning('%s not supported by the bias module. We discard this entry.', key) else: default_bias_parameters[key] = value - run_card['bias_parameters'] = default_bias_parameters - - - # Finally write the include file + run_card['bias_parameters'] = default_bias_parameters + + + # Finally write the include file run_card.write_include_file(opt['output_dir']) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: - self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, + self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat')) # The writing out of MadLoop filter is potentially dangerous # when running in multi-core with a central disk. 
So it is turned - # off here. If these filters were not initialized then they will + # off here. If these filters were not initialized then they will # have to be re-computed at the beginning of each run. if 'WriteOutFilters' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('WriteOutFilters'): logger.info( -"""You chose to have MadLoop writing out filters. +"""You chose to have MadLoop writing out filters. Beware that this can be dangerous for local multicore runs.""") self.MadLoopparam.set('WriteOutFilters',False, changeifuserset=False) - + # The conservative settings below for 'CTModeInit' and 'ZeroThres' # help adress issues for processes like g g > h z, and g g > h g - # where there are some helicity configuration heavily suppressed - # (by several orders of magnitude) so that the helicity filter + # where there are some helicity configuration heavily suppressed + # (by several orders of magnitude) so that the helicity filter # needs high numerical accuracy to correctly handle this spread in # magnitude. Also, because one cannot use the Born as a reference - # scale, it is better to force quadruple precision *for the + # scale, it is better to force quadruple precision *for the # initialization points only*. This avoids numerical accuracy issues # when setting up the helicity filters and does not significantly # slow down the run. @@ -3298,21 +3298,21 @@ def do_treatcards(self, line, mode=None, opt=None): # It is a bit superficial to use the level 2 which tries to numerically # map matching helicities (because of CP symmetry typically) together. -# It is useless in the context of MC over helicities and it can +# It is useless in the context of MC over helicities and it can # potentially make the helicity double checking fail. self.MadLoopparam.set('HelicityFilterLevel',1, changeifuserset=False) # To be on the safe side however, we ask for 4 consecutive matching # helicity filters. 
self.MadLoopparam.set('CheckCycle',4, changeifuserset=False) - + # For now it is tricky to have each channel performing the helicity # double check. What we will end up doing is probably some kind # of new initialization round at the beginning of each launch - # command, to reset the filters. + # command, to reset the filters. self.MadLoopparam.set('DoubleCheckHelicityFilter',False, changeifuserset=False) - + # Thanks to TIR recycling, TIR is typically much faster for Loop-induced # processes when not doing MC over helicities, so that we place OPP last. if not hasattr(self, 'run_card'): @@ -3349,7 +3349,7 @@ def do_treatcards(self, line, mode=None, opt=None): logger.warning( """You chose to also use a lorentz rotation for stability tests (see parameter NRotations_[DP|QP]). Beware that, for optimization purposes, MadEvent uses manual TIR cache clearing which is not compatible - with the lorentz rotation stability test. The number of these rotations to be used will be reset to + with the lorentz rotation stability test. The number of these rotations to be used will be reset to zero by MadLoop. You can avoid this by changing the parameter 'FORCE_ML_HELICITY_SUM' int he matrix.f files to be .TRUE. so that the sum over helicity configurations is performed within MadLoop (in which case the helicity of final state particles cannot be speicfied in the LHE file.""") @@ -3363,15 +3363,15 @@ def do_treatcards(self, line, mode=None, opt=None): # self.MadLoopparam.set('NRotations_DP',0,changeifuserset=False) # Revert to the above to be slightly less robust but twice faster. 
self.MadLoopparam.set('NRotations_DP',1,changeifuserset=False) - self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) - + self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) + # Finally, the stability tests are slightly less reliable for process - # with less or equal than 4 final state particles because the + # with less or equal than 4 final state particles because the # accessible kinematic is very limited (i.e. lorentz rotations don't # shuffle invariants numerics much). In these cases, we therefore # increase the required accuracy to 10^-7. # This is important for getting g g > z z [QCD] working with a - # ptheavy cut as low as 1 GeV. + # ptheavy cut as low as 1 GeV. if self.proc_characteristics['nexternal']<=4: if ('MLStabThres' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('MLStabThres')>1.0e-7): @@ -3381,12 +3381,12 @@ def do_treatcards(self, line, mode=None, opt=None): than four external legs, so this is not recommended (especially not for g g > z z).""") self.MadLoopparam.set('MLStabThres',1.0e-7,changeifuserset=False) else: - self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) + self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) #write the output file self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses","MadLoop5_resources", "MadLoopParams.dat")) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: # Now Update MadLoop filters if necessary (if modifications were made to # the model parameters). 
@@ -3403,12 +3403,12 @@ def do_treatcards(self, line, mode=None, opt=None): elif not opt['forbid_MadLoopInit'] and \ MadLoopInitializer.need_MadLoopInit(self.me_dir): self.exec_cmd('initMadLoop -f') - - ############################################################################ + + ############################################################################ def do_survey(self, line): """Advanced commands: launch survey for the current process """ - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) @@ -3416,7 +3416,7 @@ def do_survey(self, line): if os.path.exists(pjoin(self.me_dir,'error')): os.remove(pjoin(self.me_dir,'error')) - + self.configure_directory() # Save original random number self.random_orig = self.random @@ -3435,9 +3435,9 @@ def do_survey(self, line): P_zero_result = [] # check the number of times where they are no phase-space # File for the loop (for loop induced) - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources')) and cluster.need_transfer(self.options): - tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', + tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', 'MadLoop5_resources.tar.gz'), 'w:gz', dereference=True) tf.add(pjoin(self.me_dir,'SubProcesses','MadLoop5_resources'), arcname='MadLoop5_resources') @@ -3467,7 +3467,7 @@ def do_survey(self, line): except Exception as error: logger.debug(error) pass - + jobs, P_zero_result = ajobcreator.launch() # Check if all or only some fails if P_zero_result: @@ -3481,60 +3481,60 @@ def do_survey(self, line): self.get_Gdir() for P in P_zero_result: self.Gdirs[0][pjoin(self.me_dir,'SubProcesses',P)] = [] - + self.monitor(run_type='All jobs submitted for survey', html=True) if not self.history or 'survey' in self.history[-1] or self.ninitial ==1 or \ self.run_card['gridpack']: #will be done during the refine (more precisely in gen_ximprove) cross, error = self.make_make_all_html_results() 
self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(ajobcreator.run_statistics)) self.update_status('End survey', 'parton', makehtml=False) ############################################################################ def pass_in_difficult_integration_mode(self, rate=1): """be more secure for the integration to not miss it due to strong cut""" - + # improve survey options if default if self.opts['points'] == self._survey_options['points'][1]: self.opts['points'] = (rate+2) * self._survey_options['points'][1] if self.opts['iterations'] == self._survey_options['iterations'][1]: self.opts['iterations'] = 1 + rate + self._survey_options['iterations'][1] if self.opts['accuracy'] == self._survey_options['accuracy'][1]: - self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) - + self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) + # Modify run_config.inc in order to improve the refine conf_path = pjoin(self.me_dir, 'Source','run_config.inc') files.cp(conf_path, conf_path + '.bk') # text = open(conf_path).read() - min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) - + min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) + text = re.sub('''\(min_events = \d+\)''', '(min_events = %i )' % min_evt, text) text = re.sub('''\(max_events = \d+\)''', '(max_events = %i )' % max_evt, text) fsock = open(conf_path, 'w') fsock.write(text) fsock.close() - + # Compile for name in ['../bin/internal/gen_ximprove', 'all']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) - - - ############################################################################ + + + ############################################################################ def 
do_refine(self, line): """Advanced commands: launch survey for the current process """ - devnull = open(os.devnull, 'w') + devnull = open(os.devnull, 'w') self.nb_refine += 1 args = self.split_arg(line) treshold=None - - + + for a in args: if a.startswith('--treshold='): treshold = float(a.split('=',1)[1]) @@ -3548,8 +3548,8 @@ def do_refine(self, line): break # Check argument's validity self.check_refine(args) - - refine_opt = {'err_goal': args[0], 'split_channels': True} + + refine_opt = {'err_goal': args[0], 'split_channels': True} precision = args[0] if len(args) == 2: refine_opt['max_process']= args[1] @@ -3560,15 +3560,15 @@ def do_refine(self, line): # Update random number self.update_random() self.save_random() - + if self.cluster_mode: logger.info('Creating Jobs') self.update_status('Refine results to %s' % precision, level=None) - + self.total_jobs = 0 - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + # cleanning the previous job for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() @@ -3589,14 +3589,14 @@ def do_refine(self, line): level = 5 if value.has_warning(): level = 10 - logger.log(level, + logger.log(level, value.nice_output(str('/'.join([key[0],'G%s'%key[1]]))). replace(' statistics','')) logger.debug(globalstat.nice_output('combined', no_warning=True)) - + if survey_statistics: x_improve.run_statistics = survey_statistics - + x_improve.launch() # create the ajob for the refinment. 
if not self.history or 'refine' not in self.history[-1]: cross, error = x_improve.update_html() #update html results for survey @@ -3610,9 +3610,9 @@ def do_refine(self, line): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) - + if os.path.exists(pjoin(Pdir, 'ajob1')): cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py @@ -3629,7 +3629,7 @@ def do_refine(self, line): ###self.compile(['all'], cwd=Pdir) alljobs = misc.glob('ajob*', Pdir) - + #remove associated results.dat (ensure to not mix with all data) Gre = re.compile("\s*j=(G[\d\.\w]+)") for job in alljobs: @@ -3637,49 +3637,49 @@ def do_refine(self, line): for Gdir in Gdirs: if os.path.exists(pjoin(Pdir, Gdir, 'results.dat')): os.remove(pjoin(Pdir, Gdir,'results.dat')) - - nb_tot = len(alljobs) + + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), - run_type='Refine number %s on %s (%s/%s)' % + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) - self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, html=True) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except Exception: pass - + if isinstance(x_improve, gen_ximprove.gen_ximprove_v4): # the merge of the events.lhe is handle in the x_improve class - # for splitted runs. (and partly in store_events). + # for splitted runs. (and partly in store_events). 
combine_runs.CombineRuns(self.me_dir) self.refine_mode = "old" else: self.refine_mode = "new" - + cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - self.results.add_detail('run_statistics', + self.results.add_detail('run_statistics', dict(self.results.get_detail('run_statistics'))) self.update_status('finish refine', 'parton', makehtml=False) devnull.close() - - ############################################################################ + + ############################################################################ def do_comine_iteration(self, line): """Not in help: Combine a given iteration combine_iteration Pdir Gdir S|R step - S is for survey + S is for survey R is for refine - step is the iteration number (not very critical)""" + step is the iteration number (not very critical)""" self.set_run_name("tmp") self.configure_directory(html_opening=False) @@ -3695,12 +3695,12 @@ def do_comine_iteration(self, line): gensym.combine_iteration(Pdir, Gdir, int(step)) elif mode == "R": refine = gen_ximprove.gen_ximprove_share(self) - refine.combine_iteration(Pdir, Gdir, int(step)) - - + refine.combine_iteration(Pdir, Gdir, int(step)) - - ############################################################################ + + + + ############################################################################ def do_combine_events(self, line): """Advanced commands: Launch combine events""" start=time.time() @@ -3710,11 +3710,11 @@ def do_combine_events(self, line): self.check_combine_events(args) self.update_status('Combining Events', level='parton') - + if self.run_card['gridpack'] and isinstance(self, GridPackCmd): return GridPackCmd.do_combine_events(self, line) - + # Define The Banner tag = self.run_card['run_tag'] # Update the banner with the pythia card @@ -3727,14 +3727,14 @@ def do_combine_events(self, line): self.banner.change_seed(self.random_orig) if not os.path.exists(pjoin(self.me_dir, 
'Events', self.run_name)): os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) - self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, + self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -3751,12 +3751,12 @@ def do_combine_events(self, line): os.remove(pjoin(Gdir, 'events.lhe')) continue - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec'), result.get('xerru'), result.get('axsec') ) - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.run_card['nevents']) @@ -3765,13 +3765,13 @@ def do_combine_events(self, line): AllEvent.add(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() if len(AllEvent) == 0: - nb_event = 0 + nb_event = 0 else: nb_event = AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.run_card['nevents'], @@ -3791,22 +3791,22 @@ def do_combine_events(self, line): os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) - + if self.run_card['bias_module'].lower() not in ['dummy', 'none'] and nb_event: self.correct_bias() elif 
self.run_card['custom_fcts']: self.correct_bias() logger.info("combination of events done in %s s ", time.time()-start) - + self.to_store.append('event') - - ############################################################################ + + ############################################################################ def correct_bias(self): - """check the first event and correct the weight by the bias + """check the first event and correct the weight by the bias and correct the cross-section. - If the event do not have the bias tag it means that the bias is + If the event do not have the bias tag it means that the bias is one modifying the cross-section/shape so we have nothing to do """ @@ -3834,7 +3834,7 @@ def correct_bias(self): output.write('') output.close() lhe.close() - + # MODIFY THE BANNER i.e. INIT BLOCK # ensure information compatible with normalisation choice total_cross = sum(cross[key] for key in cross) @@ -3846,8 +3846,8 @@ def correct_bias(self): elif self.run_card['event_norm'] == 'unity': total_cross = self.results.current['cross'] * total_cross / nb_event for key in cross: - cross[key] *= total_cross / nb_event - + cross[key] *= total_cross / nb_event + bannerfile = lhe_parser.EventFile(pjoin(self.me_dir, 'Events', self.run_name, '.banner.tmp.gz'),'w') banner = banner_mod.Banner(lhe.banner) banner.modify_init_cross(cross) @@ -3862,12 +3862,12 @@ def correct_bias(self): os.remove(lhe.name) os.remove(bannerfile.name) os.remove(output.name) - - + + self.results.current['cross'] = total_cross self.results.current['error'] = 0 - - ############################################################################ + + ############################################################################ def do_store_events(self, line): """Advanced commands: Launch store events""" @@ -3883,16 +3883,16 @@ def do_store_events(self, line): if not os.path.exists(pjoin(self.me_dir, 'Events', run)): os.mkdir(pjoin(self.me_dir, 'Events', run)) if not 
os.path.exists(pjoin(self.me_dir, 'HTML', run)): - os.mkdir(pjoin(self.me_dir, 'HTML', run)) - + os.mkdir(pjoin(self.me_dir, 'HTML', run)) + # 1) Store overall process information #input = pjoin(self.me_dir, 'SubProcesses', 'results.dat') #output = pjoin(self.me_dir, 'SubProcesses', '%s_results.dat' % run) - #files.cp(input, output) + #files.cp(input, output) # 2) Treat the files present in the P directory - # Ensure that the number of events is different of 0 + # Ensure that the number of events is different of 0 if self.results.current['nb_event'] == 0 and not self.run_card['gridpack']: logger.warning("No event detected. No cleaning performed! This should allow to run:\n" + " cd Subprocesses; ../bin/internal/combine_events\n"+ @@ -3910,18 +3910,18 @@ def do_store_events(self, line): # if os.path.exists(pjoin(G_path, 'results.dat')): # input = pjoin(G_path, 'results.dat') # output = pjoin(G_path, '%s_results.dat' % run) - # files.cp(input, output) + # files.cp(input, output) #except Exception: - # continue + # continue # Store log try: if os.path.exists(pjoin(G_path, 'log.txt')): input = pjoin(G_path, 'log.txt') output = pjoin(G_path, '%s_log.txt' % run) - files.mv(input, output) + files.mv(input, output) except Exception: continue - #try: + #try: # # Grid # for name in ['ftn26']: # if os.path.exists(pjoin(G_path, name)): @@ -3930,7 +3930,7 @@ def do_store_events(self, line): # input = pjoin(G_path, name) # output = pjoin(G_path, '%s_%s' % (run,name)) # files.mv(input, output) - # misc.gzip(pjoin(G_path, output), error=None) + # misc.gzip(pjoin(G_path, output), error=None) #except Exception: # continue # Delete ftn25 to ensure reproducible runs @@ -3940,11 +3940,11 @@ def do_store_events(self, line): # 3) Update the index.html self.gen_card_html() - + # 4) Move the Files present in Events directory E_path = pjoin(self.me_dir, 'Events') O_path = pjoin(self.me_dir, 'Events', run) - + # The events file for name in ['events.lhe', 'unweighted_events.lhe']: finput = 
pjoin(E_path, name) @@ -3960,30 +3960,30 @@ def do_store_events(self, line): # os.remove(pjoin(O_path, '%s.gz' % name)) # input = pjoin(E_path, name) ## output = pjoin(O_path, name) - + self.update_status('End Parton', level='parton', makehtml=False) devnull.close() - - - ############################################################################ + + + ############################################################################ def do_create_gridpack(self, line): """Advanced commands: Create gridpack from present run""" self.update_status('Creating gridpack', level='parton') # compile gen_ximprove misc.compile(['../bin/internal/gen_ximprove'], cwd=pjoin(self.me_dir, "Source")) - + Gdir = self.get_Gdir() Pdir = set([os.path.dirname(G) for G in Gdir]) - for P in Pdir: + for P in Pdir: allG = misc.glob('G*', path=P) for G in allG: if pjoin(P, G) not in Gdir: logger.debug('removing %s', pjoin(P,G)) shutil.rmtree(pjoin(P,G)) - - + + args = self.split_arg(line) self.check_combine_events(args) if not self.run_tag: self.run_tag = 'tag_1' @@ -3996,13 +3996,13 @@ def do_create_gridpack(self, line): cwd=self.me_dir) misc.call(['./bin/internal/clean'], cwd=self.me_dir) misc.call(['./bin/internal/make_gridpack'], cwd=self.me_dir) - files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), + files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), pjoin(self.me_dir, '%s_gridpack.tar.gz' % self.run_name)) os.system("sed -i.bak \"s/\s*.true.*=.*GridRun/ .false. 
= GridRun/g\" %s/Cards/grid_card.dat" \ % self.me_dir) self.update_status('gridpack created', level='gridpack') - - ############################################################################ + + ############################################################################ def do_shower(self, line): """launch the shower""" @@ -4010,7 +4010,7 @@ def do_shower(self, line): if len(args)>1 and args[0] in self._interfaced_showers: chosen_showers = [args.pop(0)] elif '--no_default' in line: - # If '--no_default' was specified in the arguments, then only one + # If '--no_default' was specified in the arguments, then only one # shower will be run, depending on which card is present. # but we each of them are called. (each of them check if the file exists) chosen_showers = list(self._interfaced_showers) @@ -4021,9 +4021,9 @@ def do_shower(self, line): shower_priority = ['pythia8','pythia'] chosen_showers = [sorted(chosen_showers,key=lambda sh: shower_priority.index(sh) if sh in shower_priority else len(shower_priority)+1)[0]] - + for shower in chosen_showers: - self.exec_cmd('%s %s'%(shower,' '.join(args)), + self.exec_cmd('%s %s'%(shower,' '.join(args)), postcmd=False, printcmd=False) def do_madanalysis5_parton(self, line): @@ -4039,11 +4039,11 @@ def do_madanalysis5_parton(self, line): def mg5amc_py8_interface_consistency_warning(options): """ Check the consistency of the mg5amc_py8_interface installed with the current MG5 and Pythia8 versions. 
""" - + # All this is only relevant is Pythia8 is interfaced to MG5 if not options['pythia8_path']: return None - + if not options['mg5amc_py8_interface_path']: return \ """ @@ -4053,7 +4053,7 @@ def mg5amc_py8_interface_consistency_warning(options): Consider installing the MG5_aMC-PY8 interface with the following command: MG5_aMC>install mg5amc_py8_interface """ - + mg5amc_py8_interface_path = options['mg5amc_py8_interface_path'] py8_path = options['pythia8_path'] # If the specified interface path is relative, make it absolut w.r.t MGDIR if @@ -4062,7 +4062,7 @@ def mg5amc_py8_interface_consistency_warning(options): mg5amc_py8_interface_path = pjoin(MG5DIR,mg5amc_py8_interface_path) py8_path = pjoin(MG5DIR,py8_path) - # Retrieve all the on-install and current versions + # Retrieve all the on-install and current versions fsock = open(pjoin(mg5amc_py8_interface_path, 'MG5AMC_VERSION_ON_INSTALL')) MG5_version_on_install = fsock.read().replace('\n','') fsock.close() @@ -4074,7 +4074,7 @@ def mg5amc_py8_interface_consistency_warning(options): MG5_curr_version =misc.get_pkg_info()['version'] try: p = subprocess.Popen(['./get_pythia8_version.py',py8_path], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mg5amc_py8_interface_path) (out, err) = p.communicate() out = out.decode(errors='ignore').replace('\n','') @@ -4084,37 +4084,37 @@ def mg5amc_py8_interface_consistency_warning(options): float(out) except: PY8_curr_version = None - + if not MG5_version_on_install is None and not MG5_curr_version is None: if MG5_version_on_install != MG5_curr_version: return \ """ The current version of MG5_aMC (v%s) is different than the one active when - installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). + installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). 
Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(MG5_curr_version, MG5_version_on_install) - + if not PY8_version_on_install is None and not PY8_curr_version is None: if PY8_version_on_install != PY8_curr_version: return \ """ The current version of Pythia8 (v%s) is different than the one active when - installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). + installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(PY8_curr_version,PY8_version_on_install) - + return None def setup_Pythia8RunAndCard(self, PY8_Card, run_type): """ Setup the Pythia8 Run environment and card. In particular all the process and run specific parameters of the card are automatically set here. This function returns the path where HEPMC events will be output, if any.""" - + HepMC_event_output = None tag = self.run_tag - + PY8_Card.subruns[0].systemSet('Beams:LHEF',"unweighted_events.lhe.gz") hepmc_format = PY8_Card['HEPMCoutput:file'].lower() @@ -4185,7 +4185,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): misc.mkfifo(fifo_path) # Use defaultSet not to overwrite the current userSet status PY8_Card.defaultSet('HEPMCoutput:file',fifo_path) - HepMC_event_output=fifo_path + HepMC_event_output=fifo_path elif hepmc_format in ['','/dev/null','None']: logger.warning('User disabled the HepMC output of Pythia8.') HepMC_event_output = None @@ -4206,7 +4206,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): # only if it is not already user_set. 
if PY8_Card['JetMatching:qCut']==-1.0: PY8_Card.MadGraphSet('JetMatching:qCut',1.5*self.run_card['xqcut'], force=True) - + if PY8_Card['JetMatching:qCut']<(1.5*self.run_card['xqcut']): logger.error( 'The MLM merging qCut parameter you chose (%f) is less than '%PY8_Card['JetMatching:qCut']+ @@ -4233,7 +4233,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): if PY8_Card['JetMatching:qCut'] not in qCutList: qCutList.append(PY8_Card['JetMatching:qCut']) PY8_Card.MadGraphSet('SysCalc:qCutList', qCutList, force=True) - + if PY8_Card['SysCalc:qCutList']!='auto': for scale in PY8_Card['SysCalc:qCutList']: @@ -4244,7 +4244,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): "'sys_matchscale' in the run_card) is less than 1.5*xqcut, where xqcut is"+ ' the run_card parameter (=%f)\n'%self.run_card['xqcut']+ 'It would be better/safer to use a larger qCut or a smaller xqcut.') - + # Specific MLM settings # PY8 should not implement the MLM veto since the driver should do it # if merging scale variation is turned on @@ -4294,18 +4294,18 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): CKKW_cut = 'ktdurham' elif self.run_card['ptlund']>0.0 and self.run_card['ktdurham']<=0.0: PY8_Card.subruns[0].MadGraphSet('Merging:doPTLundMerging',True) - CKKW_cut = 'ptlund' + CKKW_cut = 'ptlund' else: raise InvalidCmd("*Either* the 'ptlund' or 'ktdurham' cut in "+\ " the run_card must be turned on to activate CKKW(L) merging"+ " with Pythia8, but *both* cuts cannot be turned on at the same time."+ "\n ptlund=%f, ktdurham=%f."%(self.run_card['ptlund'],self.run_card['ktdurham'])) - + # Automatically set qWeed to the CKKWL cut if not defined by the user. if PY8_Card['SysCalc:qWeed']==-1.0: PY8_Card.MadGraphSet('SysCalc:qWeed',self.run_card[CKKW_cut], force=True) - + # MadGraphSet sets the corresponding value (in system mode) # only if it is not already user_set. 
if PY8_Card['Merging:TMS']==-1.0: @@ -4319,7 +4319,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): 'The CKKWl merging scale you chose (%f) is less than '%PY8_Card['Merging:TMS']+ 'the %s cut specified in the run_card parameter (=%f).\n'%(CKKW_cut,self.run_card[CKKW_cut])+ 'It is incorrect to use a smaller CKKWl scale than the generation-level %s cut!'%CKKW_cut) - + PY8_Card.MadGraphSet('TimeShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:rapidityOrder',False) @@ -4381,7 +4381,7 @@ def do_pythia8(self, line): try: import madgraph - except ImportError: + except ImportError: import internal.histograms as histograms else: import madgraph.various.histograms as histograms @@ -4400,16 +4400,16 @@ def do_pythia8(self, line): self.check_pythia8(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) - self.check_pythia8(args) + self.check_pythia8(args) # Update the banner with the pythia card if not self.banner or len(self.banner) <=1: # Here the level keyword 'pythia' must not be changed to 'pythia8'. self.banner = banner_mod.recover_banner(self.results, 'pythia') - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1], pythia_version=8, banner=self.banner) @@ -4425,7 +4425,7 @@ def do_pythia8(self, line): #"Please use 'event_norm = average' in the run_card to avoid this problem.") - + if not self.options['mg5amc_py8_interface_path'] or not \ os.path.exists(pjoin(self.options['mg5amc_py8_interface_path'], 'MG5aMC_PY8_interface')): @@ -4444,16 +4444,16 @@ def do_pythia8(self, line): # Again here 'pythia' is just a keyword for the simulation level. 
self.update_status('\033[92mRunning Pythia8 [arXiv:1410.3012]\033[0m', 'pythia8') - - tag = self.run_tag + + tag = self.run_tag # Now write Pythia8 card # Start by reading, starting from the default one so that the 'user_set' # tag are correctly set. - PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', + PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', 'pythia8_card_default.dat')) PY8_Card.read(pjoin(self.me_dir, 'Cards', 'pythia8_card.dat'), setter='user') - + run_type = 'default' merged_run_types = ['MLM','CKKW'] if int(self.run_card['ickkw'])==1: @@ -4471,7 +4471,7 @@ def do_pythia8(self, line): cmd_card = StringIO.StringIO() PY8_Card.write(cmd_card,pjoin(self.me_dir,'Cards','pythia8_card_default.dat'), direct_pythia_input=True) - + # Now setup the preamble to make sure that everything will use the locally # installed tools (if present) even if the user did not add it to its # environment variables. @@ -4486,13 +4486,13 @@ def do_pythia8(self, line): preamble = misc.get_HEPTools_location_setter( pjoin(MG5DIR,'HEPTools'),'lib') preamble += "\n unset PYTHIA8DATA\n" - + open(pythia_cmd_card,'w').write("""! ! It is possible to run this card manually with: ! %s %s ! 
"""%(preamble+pythia_main,os.path.basename(pythia_cmd_card))+cmd_card.getvalue()) - + # launch pythia8 pythia_log = pjoin(self.me_dir , 'Events', self.run_name , '%s_pythia8.log' % tag) @@ -4504,13 +4504,13 @@ def do_pythia8(self, line): shell_exe = None if os.path.exists('/usr/bin/env'): shell_exe = '/usr/bin/env %s'%shell - else: + else: shell_exe = misc.which(shell) if not shell_exe: raise self.InvalidCmd('No s hell could be found in your environment.\n'+ "Make sure that either '%s' is in your path or that the"%shell+\ " command '/usr/bin/env %s' exists and returns a valid path."%shell) - + exe_cmd = "#!%s\n%s"%(shell_exe,' '.join( [preamble+pythia_main, os.path.basename(pythia_cmd_card)])) @@ -4528,7 +4528,7 @@ def do_pythia8(self, line): ( os.path.exists(HepMC_event_output) and \ stat.S_ISFIFO(os.stat(HepMC_event_output).st_mode)) startPY8timer = time.time() - + # Information that will be extracted from this PY8 run PY8_extracted_information={ 'sigma_m':None, 'Nacc':None, 'Ntry':None, 'cross_sections':{} } @@ -4556,7 +4556,7 @@ def do_pythia8(self, line): n_cores = max(int(self.options['cluster_size']),1) elif self.options['run_mode']==2: n_cores = max(int(self.cluster.nb_core),1) - + lhe_file_name = os.path.basename(PY8_Card.subruns[0]['Beams:LHEF']) lhe_file = lhe_parser.EventFile(pjoin(self.me_dir,'Events', self.run_name,PY8_Card.subruns[0]['Beams:LHEF'])) @@ -4574,7 +4574,7 @@ def do_pythia8(self, line): if self.options['run_mode']==2: min_n_events_per_job = 100 elif self.options['run_mode']==1: - min_n_events_per_job = 1000 + min_n_events_per_job = 1000 min_n_core = n_events//min_n_events_per_job n_cores = max(min(min_n_core,n_cores),1) @@ -4584,8 +4584,8 @@ def do_pythia8(self, line): logger.info('Follow Pythia8 shower by running the '+ 'following command (in a separate terminal):\n tail -f %s'%pythia_log) - if self.options['run_mode']==2 and self.options['nb_core']>1: - ret_code = self.cluster.launch_and_wait(wrapper_path, + if 
self.options['run_mode']==2 and self.options['nb_core']>1: + ret_code = self.cluster.launch_and_wait(wrapper_path, argument= [], stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events',self.run_name)) else: @@ -4630,10 +4630,10 @@ def do_pythia8(self, line): wrapper = open(wrapper_path,'w') if self.options['cluster_temp_path'] is None: exe_cmd = \ -"""#!%s +"""#!%s ./%s PY8Card.dat >& PY8_log.txt """ - else: + else: exe_cmd = \ """#!%s ln -s ./events_$1.lhe.gz ./events.lhe.gz @@ -4663,21 +4663,21 @@ def do_pythia8(self, line): # Set it as executable st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) - + # Split the .lhe event file, create event partition partition=[n_available_events//n_cores]*n_cores for i in range(n_available_events%n_cores): partition[i] += 1 - + # Splitting according to the total number of events requested by the user # Will be used to determine the number of events to indicate in the PY8 split cards. partition_for_PY8=[n_events//n_cores]*n_cores for i in range(n_events%n_cores): partition_for_PY8[i] += 1 - - logger.info('Splitting .lhe event file for PY8 parallelization...') - n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) - + + logger.info('Splitting .lhe event file for PY8 parallelization...') + n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) + if n_splits!=len(partition): raise MadGraph5Error('Error during lhe file splitting. Expected %d files but obtained %d.' 
%(len(partition),n_splits)) @@ -4690,7 +4690,7 @@ def do_pythia8(self, line): # Add the necessary run content shutil.move(pjoin(parallelization_dir,lhe_file.name+'_%d.lhe.gz'%split_id), pjoin(parallelization_dir,split_files[-1])) - + logger.info('Submitting Pythia8 jobs...') for i, split_file in enumerate(split_files): # We must write a PY8Card tailored for each split so as to correct the normalization @@ -4706,7 +4706,7 @@ def do_pythia8(self, line): split_PY8_Card.write(pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,'PY8Card.dat'), add_missing=False) in_files = [pjoin(parallelization_dir,os.path.basename(pythia_main)), - pjoin(parallelization_dir,'PY8Card_%d.dat'%i), + pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,split_file)] if self.options['cluster_temp_path'] is None: out_files = [] @@ -4718,35 +4718,35 @@ def do_pythia8(self, line): if os.path.basename(in_file)==split_file: ln(in_file,selected_cwd,name='events.lhe.gz') elif os.path.basename(in_file).startswith('PY8Card'): - ln(in_file,selected_cwd,name='PY8Card.dat') + ln(in_file,selected_cwd,name='PY8Card.dat') else: - ln(in_file,selected_cwd) + ln(in_file,selected_cwd) in_files = [] wrapper_path = os.path.basename(wrapper_path) else: out_files = ['split_%d.tar.gz'%i] selected_cwd = parallelization_dir - self.cluster.submit2(wrapper_path, - argument=[str(i)], cwd=selected_cwd, + self.cluster.submit2(wrapper_path, + argument=[str(i)], cwd=selected_cwd, input_files=in_files, output_files=out_files, required_output=out_files) - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return logger.info('Pythia8 shower jobs: %d Idle, %d Running, %d Done [%s]'\ %(Idle, Running, Done, misc.format_time(time.time() - startPY8timer))) self.cluster.wait(parallelization_dir,wait_monitoring) - + logger.info('Merging results from the split PY8 runs...') if self.options['cluster_temp_path']: # Decompressing the output for i, split_file in 
enumerate(split_files): misc.call(['tar','-xzf','split_%d.tar.gz'%i],cwd=parallelization_dir) os.remove(pjoin(parallelization_dir,'split_%d.tar.gz'%i)) - + # Now merge logs pythia_log_file = open(pythia_log,'w') n_added = 0 @@ -4778,7 +4778,7 @@ def wait_monitoring(Idle, Running, Done): if n_added>0: PY8_extracted_information['sigma_m'] /= float(n_added) pythia_log_file.close() - + # djr plots djr_HwU = None n_added = 0 @@ -4845,7 +4845,7 @@ def wait_monitoring(Idle, Running, Done): if not os.path.isfile(hepmc_file): continue all_hepmc_files.append(hepmc_file) - + if len(all_hepmc_files)>0: hepmc_output = pjoin(self.me_dir,'Events',self.run_name,HepMC_event_output) with misc.TMP_directory() as tmp_dir: @@ -4860,8 +4860,8 @@ def wait_monitoring(Idle, Running, Done): break header.close() tail = open(pjoin(tmp_dir,'tail.hepmc'),'w') - n_tail = 0 - + n_tail = 0 + for line in misc.reverse_readline(all_hepmc_files[-1]): if line.startswith('HepMC::'): n_tail += 1 @@ -4871,7 +4871,7 @@ def wait_monitoring(Idle, Running, Done): tail.close() if n_tail>1: raise MadGraph5Error('HEPMC files should only have one trailing command.') - ###################################################################### + ###################################################################### # This is the most efficient way of putting together HEPMC's, *BUT* # # WARNING: NEED TO RENDER THE CODE BELOW SAFE TOWARDS INJECTION # ###################################################################### @@ -4888,12 +4888,12 @@ def wait_monitoring(Idle, Running, Done): elif sys.platform == 'darwin': # sed on MAC has slightly different synthax than on os.system(' '.join(['sed','-i',"''","'%s;$d'"% - (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) - else: - # other UNIX systems + (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) + else: + # other UNIX systems os.system(' '.join(['sed','-i']+["-e '%id'"%(i+1) for i in range(n_head)]+ ["-e '$d'",hepmc_file])) - + os.system(' 
'.join(['cat',pjoin(tmp_dir,'header.hepmc')]+all_hepmc_files+ [pjoin(tmp_dir,'tail.hepmc'),'>',hepmc_output])) @@ -4915,12 +4915,12 @@ def wait_monitoring(Idle, Running, Done): 'Inclusive cross section:' not in '\n'.join(open(pythia_log,'r').readlines()[-20:]): logger.warning('Fail to produce a pythia8 output. More info in \n %s'%pythia_log) return - + # Plot for Pythia8 successful = self.create_plot('Pythia8') if not successful: logger.warning('Failed to produce Pythia8 merging plots.') - + self.to_store.append('pythia8') # Study matched cross-sections @@ -4931,7 +4931,7 @@ def wait_monitoring(Idle, Running, Done): if self.options['run_mode']==0 or (self.options['run_mode']==2 and self.options['nb_core']==1): PY8_extracted_information['sigma_m'],PY8_extracted_information['Nacc'],\ PY8_extracted_information['Ntry'] = self.parse_PY8_log_file( - pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) + pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) else: logger.warning('Pythia8 cross-section could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1. 
YYYYY') @@ -4944,8 +4944,8 @@ def wait_monitoring(Idle, Running, Done): Ntry = PY8_extracted_information['Ntry'] sigma_m = PY8_extracted_information['sigma_m'] # Compute pythia error - error = self.results[self.run_name].return_tag(self.run_tag)['error'] - try: + error = self.results[self.run_name].return_tag(self.run_tag)['error'] + try: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) except ZeroDivisionError: # Cannot compute error @@ -4966,31 +4966,31 @@ def wait_monitoring(Idle, Running, Done): else: logger.warning('Pythia8 merged cross-sections could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1.XXXXX') - PY8_extracted_information['cross_sections'] = {} - + PY8_extracted_information['cross_sections'] = {} + cross_sections = PY8_extracted_information['cross_sections'] if cross_sections: - # Filter the cross_sections specified an keep only the ones + # Filter the cross_sections specified an keep only the ones # with central parameters and a different merging scale a_float_re = '[\+|-]?\d+(\.\d*)?([EeDd][\+|-]?\d+)?' 
central_merging_re = re.compile( '^\s*Weight_MERGING\s*=\s*(?P%s)\s*$'%a_float_re, - re.IGNORECASE) + re.IGNORECASE) cross_sections = dict( (float(central_merging_re.match(xsec).group('merging')),value) - for xsec, value in cross_sections.items() if not + for xsec, value in cross_sections.items() if not central_merging_re.match(xsec) is None) central_scale = PY8_Card['JetMatching:qCut'] if \ int(self.run_card['ickkw'])==1 else PY8_Card['Merging:TMS'] if central_scale in cross_sections: self.results.add_detail('cross_pythia8', cross_sections[central_scale][0]) self.results.add_detail('error_pythia8', cross_sections[central_scale][1]) - + #logger.info('Pythia8 merged cross-sections are:') #for scale in sorted(cross_sections.keys()): # logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ - # (scale,cross_sections[scale][0],cross_sections[scale][1])) - + # (scale,cross_sections[scale][0],cross_sections[scale][1])) + xsecs_file = open(pjoin(self.me_dir,'Events',self.run_name, '%s_merged_xsecs.txt'%tag),'w') if cross_sections: @@ -5003,9 +5003,9 @@ def wait_monitoring(Idle, Running, Done): xsecs_file.write('Cross-sections could not be read from the'+\ "XML node 'xsection' of the .dat file produced by Pythia8.") xsecs_file.close() - + #Update the banner - # We add directly the pythia command card because it has the full + # We add directly the pythia command card because it has the full # information self.banner.add(pythia_cmd_card) @@ -5022,13 +5022,13 @@ def wait_monitoring(Idle, Running, Done): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + def parse_PY8_log_file(self, log_file_path): """ Parse a log file to extract number of event and cross-section. 
""" pythiare = re.compile("Les Houches User Process\(es\)\s*\d+\s*\|\s*(?P\d+)\s*(?P\d+)\s*(?P\d+)\s*\|\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") pythia_xsec_re = re.compile("Inclusive cross section\s*:\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") sigma_m, Nacc, Ntry = None, None, None - for line in misc.BackRead(log_file_path): + for line in misc.BackRead(log_file_path): info = pythiare.search(line) if not info: # Also try to obtain the cross-section and error from the final xsec line of pythia8 log @@ -5058,7 +5058,7 @@ def parse_PY8_log_file(self, log_file_path): raise self.InvalidCmd("Could not find cross-section and event number information "+\ "in Pythia8 log\n '%s'."%log_file_path) - + def extract_cross_sections_from_DJR(self,djr_output): """Extract cross-sections from a djr XML output.""" import xml.dom.minidom as minidom @@ -5075,11 +5075,11 @@ def extract_cross_sections_from_DJR(self,djr_output): [float(xsec.childNodes[0].data.split()[0]), float(xsec.childNodes[0].data.split()[1])]) for xsec in xsections) - + def do_pythia(self, line): """launch pythia""" - - + + # Check argument's validity args = self.split_arg(line) if '--no_default' in args: @@ -5089,12 +5089,12 @@ def do_pythia(self, line): args.remove('--no_default') else: no_default = False - + if not self.run_name: self.check_pythia(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) self.check_pythia(args) @@ -5102,7 +5102,7 @@ def do_pythia(self, line): logger.error('pythia-pgs require event_norm to be on sum. 
Do not run pythia6') return - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1]) if self.options['automatic_html_opening']: @@ -5114,35 +5114,35 @@ def do_pythia(self, line): self.banner = banner_mod.recover_banner(self.results, 'pythia') pythia_src = pjoin(self.options['pythia-pgs_path'],'src') - + self.results.add_detail('run_mode', 'madevent') self.update_status('Running Pythia', 'pythia') try: os.remove(pjoin(self.me_dir,'Events','pythia.done')) except Exception: - pass - + pass + ## LAUNCHING PYTHIA # check that LHAPATH is define. if not re.search(r'^\s*LHAPATH=%s/PDFsets' % pythia_src, - open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), + open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), re.M): f = open(pjoin(self.me_dir,'Cards','pythia_card.dat'),'a') f.write('\n LHAPATH=%s/PDFsets' % pythia_src) f.close() tag = self.run_tag pythia_log = pjoin(self.me_dir, 'Events', self.run_name , '%s_pythia.log' % tag) - #self.cluster.launch_and_wait('../bin/internal/run_pythia', + #self.cluster.launch_and_wait('../bin/internal/run_pythia', # argument= [pythia_src], stdout= pythia_log, # stderr=subprocess.STDOUT, # cwd=pjoin(self.me_dir,'Events')) output_files = ['pythia_events.hep'] if self.run_card['use_syst']: output_files.append('syst.dat') - if self.run_card['ickkw'] == 1: + if self.run_card['ickkw'] == 1: output_files += ['beforeveto.tree', 'xsecs.tree', 'events.tree'] - + os.environ['PDG_MASS_TBL'] = pjoin(pythia_src,'mass_width_2004.mc') self.cluster.launch_and_wait(pjoin(pythia_src, 'pythia'), input_files=[pjoin(self.me_dir, "Events", "unweighted_events.lhe"), @@ -5152,23 +5152,23 @@ def do_pythia(self, line): stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events')) - + os.remove(pjoin(self.me_dir, "Events", "unweighted_events.lhe")) if not 
os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): logger.warning('Fail to produce pythia output. More info in \n %s' % pythia_log) return - + self.to_store.append('pythia') - + # Find the matched cross-section if int(self.run_card['ickkw']): # read the line from the bottom of the file - #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, + #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, # '%s_pythia.log' % tag)) - pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") - for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, + pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") + for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, '%s_pythia.log' % tag)): info = pythiare.search(line) if not info: @@ -5188,16 +5188,16 @@ def do_pythia(self, line): self.results.add_detail('nb_event_pythia', Nacc) #compute pythia error error = self.results[self.run_name].return_tag(self.run_tag)['error'] - if Nacc: + if Nacc: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) else: error_m = 10000 * sigma_m # works both for fixed number of generated events and fixed accepted events self.results.add_detail('error_pythia', error_m) - break + break #pythia_log.close() - + pydir = pjoin(self.options['pythia-pgs_path'], 'src') eradir = self.options['exrootanalysis_path'] madir = self.options['madanalysis_path'] @@ -5216,12 +5216,12 @@ def do_pythia(self, line): # Creating LHE file self.run_hep2lhe(banner_path) - + if int(self.run_card['ickkw']): misc.gzip(pjoin(self.me_dir,'Events','beforeveto.tree'), - stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + - if self.run_card['use_syst'] in self.true: # Calculate syscalc info based on 
syst.dat try: @@ -5233,7 +5233,7 @@ def do_pythia(self, line): # Store syst.dat misc.gzip(pjoin(self.me_dir,'Events', 'syst.dat'), stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_syst.dat.gz')) - + # Store syscalc.dat if os.path.exists(pjoin(self.me_dir, 'Events', 'syscalc.dat')): filename = pjoin(self.me_dir, 'Events' ,self.run_name, @@ -5253,7 +5253,7 @@ def do_pythia(self, line): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + ################################################################################ def do_remove(self, line): @@ -5263,8 +5263,8 @@ def do_remove(self, line): run, tag, mode = self.check_remove(args) if 'banner' in mode: mode.append('all') - - + + if run == 'all': # Check first if they are not a run with a name run. if os.path.exists(pjoin(self.me_dir, 'Events', 'all')): @@ -5280,7 +5280,7 @@ def do_remove(self, line): logger.info(error) pass # run already clear return - + # Check that run exists if not os.path.exists(pjoin(self.me_dir, 'Events', run)): raise self.InvalidCmd('No run \'%s\' detected' % run) @@ -5294,7 +5294,7 @@ def do_remove(self, line): # Found the file to delete - + to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) to_delete += misc.glob('*', pjoin(self.me_dir, 'HTML', run)) # forbid the banner to be removed @@ -5314,7 +5314,7 @@ def do_remove(self, line): if os.path.exists(pjoin(self.me_dir, 'Events', run, 'unweighted_events.lhe.gz')): to_delete.append('unweighted_events.lhe.gz') if os.path.exists(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')): - to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) + to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) if nb_rm != len(to_delete): logger.warning('Be carefull that partonic information are on the point to be removed.') if 'all' in mode: @@ -5327,8 +5327,8 @@ def do_remove(self, line): if 'delphes' not 
in mode: to_delete = [f for f in to_delete if 'delphes' not in f] if 'parton' not in mode: - to_delete = [f for f in to_delete if 'delphes' in f - or 'pgs' in f + to_delete = [f for f in to_delete if 'delphes' in f + or 'pgs' in f or 'pythia' in f] if not self.force and len(to_delete): question = 'Do you want to delete the following files?\n %s' % \ @@ -5336,7 +5336,7 @@ def do_remove(self, line): ans = self.ask(question, 'y', choices=['y','n']) else: ans = 'y' - + if ans == 'y': for file2rm in to_delete: if os.path.exists(pjoin(self.me_dir, 'Events', run, file2rm)): @@ -5374,7 +5374,7 @@ def do_remove(self, line): if ans == 'y': for file2rm in to_delete: os.remove(file2rm) - + if 'banner' in mode: to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) if tag: @@ -5389,8 +5389,8 @@ def do_remove(self, line): return elif any(['banner' not in os.path.basename(p) for p in to_delete]): if to_delete: - raise MadGraph5Error('''Some output still exists for this run. - Please remove those output first. Do for example: + raise MadGraph5Error('''Some output still exists for this run. + Please remove those output first. Do for example: remove %s all banner ''' % run) else: @@ -5400,7 +5400,7 @@ def do_remove(self, line): return else: logger.info('''The banner is not removed. In order to remove it run: - remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) + remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) # update database. 
self.results.clean(mode, run, tag) @@ -5420,7 +5420,7 @@ def do_plot(self, line): logger.info('plot for run %s' % self.run_name) if not self.force: self.ask_edit_cards(['plot_card.dat'], args, plot=True) - + if any([arg in ['all','parton'] for arg in args]): filename = pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe') if os.path.exists(filename+'.gz'): @@ -5438,8 +5438,8 @@ def do_plot(self, line): except Exception: pass else: - logger.info('No valid files for partonic plot') - + logger.info('No valid files for partonic plot') + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_events.lhe' % self.run_tag) @@ -5452,10 +5452,10 @@ def do_plot(self, line): stdout= "%s.gz" % filename) else: logger.info('No valid files for pythia plot') - - + + if any([arg in ['all','pgs'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) @@ -5464,15 +5464,15 @@ def do_plot(self, line): misc.gzip(filename) else: logger.info('No valid files for pgs plot') - + if any([arg in ['all','delphes'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) if os.path.exists(filename): self.create_plot('Delphes') - misc.gzip(filename) + misc.gzip(filename) else: logger.info('No valid files for delphes plot') @@ -5488,9 +5488,9 @@ def do_syscalc(self, line): if self.ninitial == 1: logger.error('SysCalc can\'t be run for decay processes') return - + logger.info('Calculating systematics for run %s' % self.run_name) - + self.ask_edit_cards(['run_card.dat'], args, plot=False) self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) 
if any([arg in ['all','parton'] for arg in args]): @@ -5504,7 +5504,7 @@ def do_syscalc(self, line): stdout="%s.gz" % filename) else: logger.info('No valid files for parton level systematics run.') - + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_syst.dat' % self.run_tag) @@ -5525,17 +5525,17 @@ def do_syscalc(self, line): else: logger.info('No valid files for pythia level') - + def store_result(self): - """ tar the pythia results. This is done when we are quite sure that + """ tar the pythia results. This is done when we are quite sure that the pythia output will not be use anymore """ if not self.run_name: return - + if not self.to_store: - return - + return + tag = self.run_card['run_tag'] self.update_status('storing files of previous run', level=None,\ error=True) @@ -5546,14 +5546,14 @@ def store_result(self): misc.gzip(pjoin(self.me_dir,'Events',self.run_name,"unweighted_events.lhe")) if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) - + if 'pythia' in self.to_store: self.update_status('Storing Pythia files of previous run', level='pythia', error=True) p = pjoin(self.me_dir,'Events') n = self.run_name t = tag self.to_store.remove('pythia') - misc.gzip(pjoin(p,'pythia_events.hep'), + misc.gzip(pjoin(p,'pythia_events.hep'), stdout=pjoin(p, str(n),'%s_pythia_events.hep' % t),forceexternal=True) if 'pythia8' in self.to_store: @@ -5581,26 +5581,26 @@ def store_result(self): os.system("mv " + file_path + hepmc_fileformat + " " + move_hepmc_path) self.update_status('Done', level='pythia',makehtml=False,error=True) - self.results.save() - + self.results.save() + self.to_store = [] - def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, + def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, run_type='', mode=None, **opt): """ """ argument = [str(arg) for arg in argument] if mode is 
None: mode = self.cluster_mode - + # ensure that exe is executable if os.path.exists(exe) and not os.access(exe, os.X_OK): os.system('chmod +x %s ' % exe) elif (cwd and os.path.exists(pjoin(cwd, exe))) and not \ os.access(pjoin(cwd, exe), os.X_OK): os.system('chmod +x %s ' % pjoin(cwd, exe)) - + if mode == 0: - self.update_status((remaining, 1, + self.update_status((remaining, 1, self.total_jobs - remaining -1, run_type), level=None, force=False) start = time.time() #os.system('cd %s; ./%s' % (cwd,exe)) @@ -5613,24 +5613,24 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, elif mode in [1,2]: exename = os.path.basename(exe) # For condor cluster, create the input/output files - if 'ajob' in exename: + if 'ajob' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat','dname.mg', pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) - + output_files = [] required_output = [] - + #Find the correct PDF input file input_files.append(self.get_pdf_input_filename()) - + #Find the correct ajob Gre = re.compile("\s*j=(G[\d\.\w]+)") origre = re.compile("grid_directory=(G[\d\.\w]+)") - try : + try : fsock = open(exe) except Exception: fsock = open(pjoin(cwd,exe)) @@ -5648,21 +5648,21 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if os.path.isdir(pjoin(cwd,G)): input_files.append(G) required_output.append('%s/results.dat' % G) - + if origre.search(text): G_grid = origre.search(text).groups()[0] input_files.append(pjoin(G_grid, 'ftn26')) - + #submitting - self.cluster.submit2(exe, stdout=stdout, cwd=cwd, + self.cluster.submit2(exe, stdout=stdout, cwd=cwd, input_files=input_files, output_files=output_files, required_output=required_output) elif 
'survey' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5671,7 +5671,7 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [] required_output = [] - + #Find the correct ajob suffix = "_%s" % int(float(argument[0])) if suffix == '_0': @@ -5685,12 +5685,12 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if '.' in argument[0]: offset = int(str(argument[0]).split('.')[1]) else: - offset = 0 - + offset = 0 + if offset ==0 or offset == int(float(argument[0])): if os.path.exists(pjoin(cwd, G, 'input_app.txt')): os.remove(pjoin(cwd, G, 'input_app.txt')) - + if os.path.exists(os.path.realpath(pjoin(cwd, G, 'ftn25'))): if offset == 0 or offset == int(float(argument[0])): os.remove(pjoin(cwd, G, 'ftn25')) @@ -5706,16 +5706,16 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, pass #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, required_output=required_output, **opt) elif "refine_splitted.sh" in exename: input_files = ['madevent','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - 
input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5725,25 +5725,25 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [argument[0]] required_output = [] for G in output_files: - required_output.append('%s/results.dat' % G) + required_output.append('%s/results.dat' % G) input_files.append(pjoin(argument[1], "input_app.txt")) input_files.append(pjoin(argument[1], "ftn26")) - + #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, - required_output=required_output, **opt) + required_output=required_output, **opt) + + - - else: self.cluster.submit(exe, argument=argument, stdout=stdout, cwd=cwd, **opt) - + ############################################################################ def find_madevent_mode(self): """Find if Madevent is in Group mode or not""" - + # The strategy is too look in the files Source/run_configs.inc # if we found: ChanPerJob=3 then it's a group mode. 
file_path = pjoin(self.me_dir, 'Source', 'run_config.inc') @@ -5752,11 +5752,11 @@ def find_madevent_mode(self): return 'group' else: return 'v4' - + ############################################################################ def monitor(self, run_type='monitor', mode=None, html=False): """ monitor the progress of running job """ - + starttime = time.time() if mode is None: @@ -5772,8 +5772,8 @@ def monitor(self, run_type='monitor', mode=None, html=False): else: update_status = lambda idle, run, finish: None update_first = None - try: - self.cluster.wait(self.me_dir, update_status, update_first=update_first) + try: + self.cluster.wait(self.me_dir, update_status, update_first=update_first) except Exception as error: logger.info(error) if not self.force: @@ -5788,24 +5788,24 @@ def monitor(self, run_type='monitor', mode=None, html=False): raise except KeyboardInterrupt as error: self.cluster.remove() - raise - - + raise + - ############################################################################ + + ############################################################################ def configure_directory(self, html_opening=True): - """ All action require before any type of run """ + """ All action require before any type of run """ # Basic check assert os.path.exists(pjoin(self.me_dir,'SubProcesses')) # environmental variables to be included in make_opts self.make_opts_var = {} - + #see when the last file was modified time_mod = max([os.path.getmtime(pjoin(self.me_dir,'Cards','run_card.dat')), os.path.getmtime(pjoin(self.me_dir,'Cards','param_card.dat'))]) - + if self.configured >= time_mod and hasattr(self, 'random') and hasattr(self, 'run_card'): #just ensure that cluster specific are correctly handled if self.cluster: @@ -5820,7 +5820,7 @@ def configure_directory(self, html_opening=True): #open only once the web page # Change current working directory self.launching_dir = os.getcwd() - + # Check if we need the MSSM special treatment model = self.find_model_name() 
if model == 'mssm' or model.startswith('mssm-'): @@ -5828,14 +5828,14 @@ def configure_directory(self, html_opening=True): mg5_param = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') check_param_card.convert_to_mg5card(param_card, mg5_param) check_param_card.check_valid_param_card(mg5_param) - + # limit the number of event to 100k self.check_nb_events() # this is in order to avoid conflicts between runs with and without # lhapdf. not needed anymore the makefile handles it automaticallu #misc.compile(['clean4pdf'], cwd = pjoin(self.me_dir, 'Source')) - + self.make_opts_var['pdlabel1'] = '' self.make_opts_var['pdlabel2'] = '' if self.run_card['pdlabel1'] in ['eva', 'iww']: @@ -5866,7 +5866,7 @@ def configure_directory(self, html_opening=True): self.copy_lep_densities(self.run_card['pdlabel'], pjoin(self.me_dir, 'Source')) self.make_opts_var['pdlabel1'] = 'ee' self.make_opts_var['pdlabel2'] = 'ee' - + # set random number if self.run_card['iseed'] != 0: self.random = int(self.run_card['iseed']) @@ -5885,18 +5885,18 @@ def configure_directory(self, html_opening=True): break else: self.random = random.randint(1, 30107) - + #set random seed for python part of the code if self.run_card['python_seed'] == -2: #-2 means same as run_card import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] elif self.run_card['python_seed'] >= 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] if self.run_card['ickkw'] == 2: logger.info('Running with CKKW matching') self.treat_ckkw_matching() @@ -5905,12 +5905,12 @@ def configure_directory(self, html_opening=True): self.update_make_opts(self.run_card) # reset 
list of Gdirectory self.Gdirs = None - + # create param_card.inc and run_card.inc self.do_treatcards('') - + logger.info("compile Source Directory") - + # Compile for name in [ 'all']:#, '../bin/internal/combine_events']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) @@ -5933,7 +5933,7 @@ def configure_directory(self, html_opening=True): os.remove(pjoin(self.me_dir, 'lib','libbias.a')) force_subproc_clean = True - + # Finally compile the bias module as well if self.run_card['bias_module'] not in ['dummy',None]: logger.debug("Compiling the bias module '%s'"%bias_name) @@ -5945,7 +5945,7 @@ def configure_directory(self, html_opening=True): 'INVALID' in str(bias_module_valid).upper(): raise InvalidCmd("The bias module '%s' cannot be used because of:\n%s"% (bias_name,bias_module_valid)) - + self.compile(arg=[], cwd=os.path.join(self.me_dir, 'Source','BIAS',bias_name)) self.proc_characteristics['bias_module']=bias_name # Update the proc_characterstics file @@ -5954,7 +5954,7 @@ def configure_directory(self, html_opening=True): if force_subproc_clean: # Make sure that madevent will be recompiled - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] for nb_proc,subdir in enumerate(subproc): Pdir = pjoin(self.me_dir, 'SubProcesses',subdir.strip()) @@ -5971,20 +5971,20 @@ def configure_directory(self, html_opening=True): ############################################################################ @staticmethod def check_dir(path, default=''): - """check if the directory exists. if so return the path otherwise the + """check if the directory exists. 
if so return the path otherwise the default""" - + if os.path.isdir(path): return path else: return default - + ############################################################################ def get_Gdir(self, Pdir=None, symfact=None): """get the list of Gdirectory if not yet saved.""" - + if hasattr(self, "Gdirs") and self.Gdirs: if self.me_dir in self.Gdirs[0]: if Pdir is None: @@ -6000,8 +6000,8 @@ def get_Gdir(self, Pdir=None, symfact=None): Pdirs = self.get_Pdir() - Gdirs = {self.me_dir:[]} - mfactors = {} + Gdirs = {self.me_dir:[]} + mfactors = {} for P in Pdirs: Gdirs[P] = [] #for the next line do not use P, since in readonly mode it might not have symfact @@ -6012,7 +6012,7 @@ def get_Gdir(self, Pdir=None, symfact=None): mfactors[pjoin(P, "G%s" % tag)] = mfactor self.Gdirs = (Gdirs, mfactors) return self.get_Gdir(Pdir, symfact=symfact) - + ############################################################################ def set_run_name(self, name, tag=None, level='parton', reload_card=False, allow_new_tag=True): @@ -6030,8 +6030,8 @@ def get_last_tag(self, level): tagRun = self.results[self.run_name][i] if tagRun.pythia or tagRun.shower or tagRun.pythia8 : return tagRun['tag'] - - + + # when are we force to change the tag new_run:previous run requiring changes upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','madanalysis5_hadron','madanalysis5_parton', 'rivet'], 'pythia': ['pythia','pgs','delphes','madanalysis5_hadron'], @@ -6044,7 +6044,7 @@ def get_last_tag(self, level): 'syscalc':[], 'rivet':['rivet']} - if name == self.run_name: + if name == self.run_name: if reload_card: run_card = pjoin(self.me_dir, 'Cards','run_card.dat') self.run_card = banner_mod.RunCard(run_card) @@ -6064,13 +6064,13 @@ def get_last_tag(self, level): break return get_last_tag(self, level) - + # save/clean previous run if self.run_name: self.store_result() # store new name self.run_name = name - + new_tag = False # First call for this run -> set the banner self.banner = 
banner_mod.recover_banner(self.results, level, name) @@ -6079,8 +6079,8 @@ def get_last_tag(self, level): else: # Read run_card run_card = pjoin(self.me_dir, 'Cards','run_card.dat') - self.run_card = banner_mod.RunCard(run_card) - + self.run_card = banner_mod.RunCard(run_card) + if tag: self.run_card['run_tag'] = tag new_tag = True @@ -6093,7 +6093,7 @@ def get_last_tag(self, level): self.results.update('add run %s' % name, 'all', makehtml=False) else: for tag in upgrade_tag[level]: - + if getattr(self.results[self.run_name][-1], tag): # LEVEL is already define in the last tag -> need to switch tag tag = self.get_available_tag() @@ -6103,8 +6103,8 @@ def get_last_tag(self, level): if not new_tag: # We can add the results to the current run tag = self.results[self.run_name][-1]['tag'] - self.run_card['run_tag'] = tag # ensure that run_tag is correct - + self.run_card['run_tag'] = tag # ensure that run_tag is correct + if allow_new_tag and (name in self.results and not new_tag): self.results.def_current(self.run_name) else: @@ -6113,15 +6113,15 @@ def get_last_tag(self, level): self.run_tag = self.run_card['run_tag'] return get_last_tag(self, level) - - - + + + ############################################################################ def check_nb_events(self): - """Find the number of event in the run_card, and check that this is not + """Find the number of event in the run_card, and check that this is not too large""" - + nb_event = int(self.run_card['nevents']) if nb_event > 1000000: logger.warning("Attempting to generate more than 1M events") @@ -6133,20 +6133,20 @@ def check_nb_events(self): return - - ############################################################################ + + ############################################################################ def update_random(self): """ change random number""" - + self.random += 3 if self.random > 30081*30081: # can't use too big random number raise MadGraph5Error('Random seed too large ' + str(self.random) + ' 
> 30081*30081') - if self.run_card['python_seed'] == -2: + if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.random) + random.seed(self.random) random.mg_seedset = self.random - + ############################################################################ def save_random(self): """save random number in appropirate file""" @@ -6155,14 +6155,14 @@ def save_random(self): fsock.writelines('r=%s\n' % self.random) def do_quit(self, *args, **opts): - + return common_run.CommonRunCmd.do_quit(self, *args, **opts) #return CmdExtended.do_quit(self, *args, **opts) - + ############################################################################ def treat_CKKW_matching(self): """check for ckkw""" - + lpp1 = self.run_card['lpp1'] lpp2 = self.run_card['lpp2'] e1 = self.run_card['ebeam1'] @@ -6170,19 +6170,19 @@ def treat_CKKW_matching(self): pd = self.run_card['pdlabel'] lha = self.run_card['lhaid'] xq = self.run_card['xqcut'] - translation = {'e1': e1, 'e2':e2, 'pd':pd, + translation = {'e1': e1, 'e2':e2, 'pd':pd, 'lha':lha, 'xq':xq} if lpp1 or lpp2: - # Remove ':s from pd + # Remove ':s from pd if pd.startswith("'"): pd = pd[1:] if pd.endswith("'"): - pd = pd[:-1] + pd = pd[:-1] if xq >2 or xq ==2: xq = 2 - + # find data file if pd == "lhapdf": issudfile = 'lib/issudgrid-%(e1)s-%(e2)s-%(pd)s-%(lha)s-%(xq)s.dat.gz' @@ -6192,9 +6192,9 @@ def treat_CKKW_matching(self): issudfile = pjoin(self.webbin, issudfile % translation) else: issudfile = pjoin(self.me_dir, issudfile % translation) - + logger.info('Sudakov grid file: %s' % issudfile) - + # check that filepath exists if os.path.exists(issudfile): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') @@ -6203,20 +6203,20 @@ def treat_CKKW_matching(self): msg = 'No sudakov grid file for parameter choice. Start to generate it. 
This might take a while' logger.info(msg) self.update_status('GENERATE SUDAKOV GRID', level='parton') - + for i in range(-2,6): - self.cluster.submit('%s/gensudgrid ' % self.dirbin, + self.cluster.submit('%s/gensudgrid ' % self.dirbin, argument = ['%d'%i], - cwd=self.me_dir, + cwd=self.me_dir, stdout=open(pjoin(self.me_dir, 'gensudgrid%s.log' % i),'w')) self.monitor() for i in range(-2,6): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') os.system('cat %s/gensudgrid%s.log >> %s' % (self.me_dir, path)) misc.gzip(path, stdout=issudfile) - + ############################################################################ - def create_root_file(self, input='unweighted_events.lhe', + def create_root_file(self, input='unweighted_events.lhe', output='unweighted_events.root' ): """create the LHE root file """ self.update_status('Creating root files', level='parton') @@ -6233,14 +6233,14 @@ def create_root_file(self, input='unweighted_events.lhe', totar = False torm = True input = input[:-3] - + try: - misc.call(['%s/ExRootLHEFConverter' % eradir, + misc.call(['%s/ExRootLHEFConverter' % eradir, input, output], cwd=pjoin(self.me_dir, 'Events')) except Exception: logger.warning('fail to produce Root output [problem with ExRootAnalysis]') - + if totar: if os.path.exists('%s.gz' % input): try: @@ -6251,13 +6251,13 @@ def create_root_file(self, input='unweighted_events.lhe', misc.gzip(input) if torm: os.remove(input) - + def run_syscalc(self, mode='parton', event_path=None, output=None): - """create the syscalc output""" + """create the syscalc output""" if self.run_card['use_syst'] not in self.true: return - + scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): return @@ -6265,12 +6265,12 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): if self.run_card['event_norm'] != 'sum': logger.critical('SysCalc works only when event_norm is on \'sum\'.') return - logger.info('running SysCalc on mode %s' % mode) - + logger.info('running 
SysCalc on mode %s' % mode) + # Restore the old default for SysCalc+PY6 if self.run_card['sys_matchscale']=='auto': self.run_card['sys_matchscale'] = "30 50" - + # Check that all pdfset are correctly installed lhaid = [self.run_card.get_lhapdf_id()] if '&&' in self.run_card['sys_pdf']: @@ -6285,20 +6285,20 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.debug(str(error)) logger.warning('Systematic computation requires lhapdf to run. Bypass SysCalc') return - + # Copy all the relevant PDF sets [self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid] - + to_syscalc={'sys_scalefact': self.run_card['sys_scalefact'], 'sys_alpsfact': self.run_card['sys_alpsfact'], 'sys_matchscale': self.run_card['sys_matchscale'], 'sys_scalecorrelation': self.run_card['sys_scalecorrelation'], 'sys_pdf': self.run_card['sys_pdf']} - - tag = self.run_card['run_tag'] + + tag = self.run_card['run_tag'] card = pjoin(self.me_dir, 'bin','internal', 'syscalc_card.dat') template = open(pjoin(self.me_dir, 'bin','internal', 'syscalc_template.dat')).read() - + if '&&' in to_syscalc['sys_pdf']: to_syscalc['sys_pdf'] = to_syscalc['sys_pdf'].split('#',1)[0].replace('&&',' \n ') else: @@ -6311,8 +6311,8 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): new.append(d) else: new[-1] += ' %s' % d - to_syscalc['sys_pdf'] = '\n'.join(new) - + to_syscalc['sys_pdf'] = '\n'.join(new) + if to_syscalc['sys_pdf'].lower() in ['', 'f', 'false', 'none', '.false.']: to_syscalc['sys_pdf'] = '' if to_syscalc['sys_alpsfact'].lower() in ['', 'f', 'false', 'none','.false.']: @@ -6320,17 +6320,17 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): - + # check if the scalecorrelation parameter is define: if not 'sys_scalecorrelation' in self.run_card: self.run_card['sys_scalecorrelation'] = -1 open(card,'w').write(template % self.run_card) - + if not os.path.exists(card): return False - - + + event_dir = pjoin(self.me_dir, 'Events') if not 
event_path: @@ -6353,19 +6353,19 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): raise SysCalcError('qcut value for sys_matchscale lower than qcut in pythia_card. Bypass syscalc') if float(value) < xqcut: raise SysCalcError('qcut value for sys_matchscale lower than xqcut in run_card. Bypass syscalc') - - + + event_path = pjoin(event_dir,'syst.dat') output = pjoin(event_dir, 'syscalc.dat') else: raise self.InvalidCmd('Invalid mode %s' % mode) - + if not os.path.exists(event_path): if os.path.exists(event_path+'.gz'): misc.gunzip(event_path+'.gz') else: raise SysCalcError('Events file %s does not exits' % event_path) - + self.update_status('Calculating systematics for %s level' % mode, level = mode.lower()) try: proc = misc.call([os.path.join(scdir, 'sys_calc'), @@ -6374,7 +6374,7 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): stderr = subprocess.STDOUT, cwd=event_dir) # Wait 5 s to make sure file is finished writing - time.sleep(5) + time.sleep(5) except OSError as error: logger.error('fail to run syscalc: %s. Please check that SysCalc is correctly installed.' % error) else: @@ -6382,11 +6382,11 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.warning('SysCalc Failed. Please read the associate log to see the reason. 
Did you install the associate PDF set?') elif mode == 'parton': files.mv(output, event_path) - + self.update_status('End syscalc for %s level' % mode, level = mode.lower(), makehtml=False) - - return True + + return True action_switcher = AskRun @@ -6399,23 +6399,23 @@ def ask_run_configuration(self, mode=None, args=[]): passing_cmd.append('reweight=ON') if '-M' in args or '--madspin' in args: passing_cmd.append('madspin=ON') - + switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, mode=mode, line_args=args, force=self.force, first_cmd=passing_cmd, return_instance=True) # - self.switch = switch # store the value of the switch for plugin purpose + self.switch = switch # store the value of the switch for plugin purpose if 'dynamical' in switch: mode = 'auto' - + # Now that we know in which mode we are check that all the card #exists (copy default if needed) - + cards = ['param_card.dat', 'run_card.dat'] if switch['shower'] == 'Pythia6': cards.append('pythia_card.dat') if switch['shower'] == 'Pythia8': - cards.append('pythia8_card.dat') + cards.append('pythia8_card.dat') if switch['detector'] in ['PGS','DELPHES+PGS']: cards.append('pgs_card.dat') if switch['detector'] in ['Delphes', 'DELPHES+PGS']: @@ -6438,29 +6438,29 @@ def ask_run_configuration(self, mode=None, args=[]): cards.append('rivet_card.dat') self.keep_cards(cards) - + first_cmd = cmd_switch.get_cardcmd() - + if os.path.isfile(pjoin(self.me_dir,'Cards','MadLoopParams.dat')): cards.append('MadLoopParams.dat') - + if self.force: self.check_param_card(pjoin(self.me_dir,'Cards','param_card.dat' )) return switch - + if 'dynamical' in switch and switch['dynamical']: self.ask_edit_cards(cards, plot=False, mode='auto', first_cmd=first_cmd) else: self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) return switch - + ############################################################################ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None): """Ask the 
question when launching pythia""" - + pythia_suffix = '' if pythia_version==6 else '%d'%pythia_version - + available_mode = ['0', '1'] if pythia_version==6: available_mode.append('2') @@ -6485,10 +6485,10 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = self.ask(question, '0', options) elif not mode: mode = 'auto' - + if mode.isdigit(): mode = name[mode] - + auto = False if mode == 'auto': auto = True @@ -6497,7 +6497,7 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = 'pgs' elif os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')): mode = 'delphes' - else: + else: mode = 'pythia%s'%pythia_suffix logger.info('Will run in mode %s' % mode) # Now that we know in which mode we are check that all the card @@ -6513,15 +6513,15 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) cards.append('delphes_trigger.dat') self.keep_cards(cards, ignore=['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat', 'plot_card.dat']) - + if self.force: return mode - + if not banner: banner = self.banner - + if auto: - self.ask_edit_cards(cards, from_banner=['param', 'run'], + self.ask_edit_cards(cards, from_banner=['param', 'run'], mode='auto', plot=(pythia_version==6), banner=banner ) else: @@ -6529,12 +6529,12 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) plot=(pythia_version==6), banner=banner) return mode - + #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmdShell(MadEventCmd, cmd.CmdShell): - """The command line processor of MadGraph""" + """The command line processor of MadGraph""" @@ -6548,11 +6548,11 @@ class SubProcesses(object): @classmethod def clean(cls): cls.name_to_pdg = {} - + @staticmethod def get_subP(me_dir): """return the list of Subprocesses""" - + out = [] for 
line in open(pjoin(me_dir,'SubProcesses', 'subproc.mg')): if not line: @@ -6560,9 +6560,9 @@ def get_subP(me_dir): name = line.strip() if os.path.exists(pjoin(me_dir, 'SubProcesses', name)): out.append(pjoin(me_dir, 'SubProcesses', name)) - + return out - + @staticmethod @@ -6623,9 +6623,9 @@ def get_subP_ids(path): particles = re.search("/([\d,-]+)/", line) all_ids.append([int(p) for p in particles.group(1).split(',')]) return all_ids - - -#=============================================================================== + + +#=============================================================================== class GridPackCmd(MadEventCmd): """The command for the gridpack --Those are not suppose to be use interactively--""" @@ -6639,7 +6639,7 @@ def __init__(self, me_dir = None, nb_event=0, seed=0, gran=-1, *completekey, **s self.random = seed self.random_orig = self.random self.granularity = gran - + self.options['automatic_html_opening'] = False #write the grid_card.dat on disk self.nb_event = int(nb_event) @@ -6680,7 +6680,7 @@ def write_RunWeb(self, me_dir): def write_gridcard(self, nb_event, seed, gran): """write the grid_card.dat file at appropriate location""" - + # first try to write grid_card within the gridpack. 
print("WRITE GRIDCARD", self.me_dir) if self.readonly: @@ -6689,35 +6689,35 @@ def write_gridcard(self, nb_event, seed, gran): fsock = open('grid_card.dat','w') else: fsock = open(pjoin(self.me_dir, 'Cards', 'grid_card.dat'),'w') - + gridpackcard = banner_mod.GridpackCard() gridpackcard['GridRun'] = True gridpackcard['gevents'] = nb_event gridpackcard['gseed'] = seed gridpackcard['ngran'] = gran - + gridpackcard.write(fsock) ############################################################################ def get_Pdir(self): """get the list of Pdirectory if not yet saved.""" - + if hasattr(self, "Pdirs"): if self.me_dir in self.Pdirs[0]: return self.Pdirs - + if not self.readonly: - self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) + self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] else: - self.Pdirs = [l.strip() - for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + self.Pdirs = [l.strip() + for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] + return self.Pdirs - + def prepare_local_dir(self): """create the P directory structure in the local directory""" - + if not self.readonly: os.chdir(self.me_dir) else: @@ -6726,7 +6726,7 @@ def prepare_local_dir(self): os.mkdir(p) files.cp(pjoin(self.me_dir,'SubProcesses',p,'symfact.dat'), pjoin(p, 'symfact.dat')) - + def launch(self, nb_event, seed): """ launch the generation for the grid """ @@ -6742,13 +6742,13 @@ def launch(self, nb_event, seed): if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(seed) + random.seed(seed) random.mg_seedset = seed elif self.run_card['python_seed'] > 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] # 2) Run the refine for the grid 
self.update_status('Generating Events', level=None) #misc.call([pjoin(self.me_dir,'bin','refine4grid'), @@ -6767,70 +6767,70 @@ def launch(self, nb_event, seed): self.exec_cmd('decay_events -from_cards', postcmd=False) elif self.run_card['use_syst'] and self.run_card['systematics_program'] == 'systematics': self.options['nb_core'] = 1 - self.exec_cmd('systematics %s --from_card' % + self.exec_cmd('systematics %s --from_card' % pjoin('Events', self.run_name, 'unweighted_events.lhe.gz'), postcmd=False,printcmd=False) - + def refine4grid(self, nb_event): """Special refine for gridpack run.""" self.nb_refine += 1 - + precision = nb_event self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) - + # initialize / remove lhapdf mode # self.configure_directory() # All this has been done before self.cluster_mode = 0 # force single machine # Store seed in randinit file, to be read by ranmar.f self.save_random() - + self.update_status('Refine results to %s' % precision, level=None) logger.info("Using random number seed offset = %s" % self.random) refine_opt = {'err_goal': nb_event, 'split_channels': False, - 'ngran':self.granularity, 'readonly': self.readonly} + 'ngran':self.granularity, 'readonly': self.readonly} x_improve = gen_ximprove.gen_ximprove_gridpack(self, refine_opt) x_improve.launch() # create the ajob for the refinment and run those! - self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack - - + self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack + + #bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) #print 'run combine!!!' 
#combine_runs.CombineRuns(self.me_dir) - + return #update html output Presults = sum_html.collect_result(self) cross, error = Presults.xsec, Presults.xerru self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + #self.update_status('finish refine', 'parton', makehtml=False) #devnull.close() - - - + + + return self.total_jobs = 0 - subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if + subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if P.startswith('P') and os.path.isdir(pjoin(self.me_dir,'SubProcesses', P))] devnull = open(os.devnull, 'w') for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) # clean previous run for match in misc.glob('*ajob*', Pdir): if os.path.basename(match)[:4] in ['ajob', 'wait', 'run.', 'done']: os.remove(pjoin(Pdir, match)) - + logfile = pjoin(Pdir, 'gen_ximprove.log') misc.call([pjoin(bindir, 'gen_ximprove')], @@ -6840,40 +6840,40 @@ def refine4grid(self, nb_event): if os.path.exists(pjoin(Pdir, 'ajob1')): alljobs = misc.glob('ajob*', Pdir) - nb_tot = len(alljobs) + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) if os.path.exists(pjoin(self.me_dir,'error')): self.monitor(html=True) raise MadEventError('Error detected in dir %s: %s' % \ (Pdir, open(pjoin(self.me_dir,'error')).read())) - self.monitor(run_type='All job submitted for refine number %s' % + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except 
Exception: pass - + bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) combine_runs.CombineRuns(self.me_dir) - + #update html output cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + self.update_status('finish refine', 'parton', makehtml=False) devnull.close() def do_combine_events(self, line): - """Advanced commands: Launch combine events""" + """Advanced commands: Launch combine events""" if self.readonly: outdir = 'Events' @@ -6895,17 +6895,17 @@ def do_combine_events(self, line): self.banner.add_generation_info(self.results.current['cross'], self.run_card['nevents']) if not hasattr(self, 'random_orig'): self.random_orig = 0 self.banner.change_seed(self.random_orig) - - + + if not os.path.exists(pjoin(outdir, self.run_name)): os.mkdir(pjoin(outdir, self.run_name)) - self.banner.write(pjoin(outdir, self.run_name, + self.banner.write(pjoin(outdir, self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -6915,7 +6915,7 @@ def do_combine_events(self, line): if os.path.exists(pjoin(Gdir, 'events.lhe')): result = sum_html.OneResult('') result.read_results(pjoin(Gdir, 'results.dat')) - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec')*gscalefact[Gdir], result.get('xerru')*gscalefact[Gdir], result.get('axsec')*gscalefact[Gdir] @@ -6924,7 +6924,7 @@ def do_combine_events(self, line): sum_xsec += result.get('xsec')*gscalefact[Gdir] sum_xerru.append(result.get('xerru')*gscalefact[Gdir]) sum_axsec += result.get('axsec')*gscalefact[Gdir] - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(outdir, 
self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.nb_event) @@ -6933,26 +6933,26 @@ def do_combine_events(self, line): AllEvent.add(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() - + self.banner.add_generation_info(sum_xsec, self.nb_event) nb_event = AllEvent.unweight(pjoin(outdir, self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.nb_event, log_level=logging.DEBUG, normalization=self.run_card['event_norm'], proc_charac=self.proc_characteristic) - - + + if partials: for i in range(partials): try: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) self.banner.add_generation_info(sum_xsec, nb_event) if self.run_card['bias_module'].lower() not in ['dummy', 'none']: @@ -6961,7 +6961,7 @@ def do_combine_events(self, line): class MadLoopInitializer(object): """ A container class for the various methods for initializing MadLoop. It is - placed in MadEventInterface because it is used by Madevent for loop-induced + placed in MadEventInterface because it is used by Madevent for loop-induced simulations. 
""" @staticmethod @@ -6974,7 +6974,7 @@ def make_and_run(dir_name,checkRam=False): if os.path.isfile(pjoin(dir_name,'check')): os.remove(pjoin(dir_name,'check')) os.remove(pjoin(dir_name,'check_sa.o')) - os.remove(pjoin(dir_name,'loop_matrix.o')) + os.remove(pjoin(dir_name,'loop_matrix.o')) # Now run make devnull = open(os.devnull, 'w') start=time.time() @@ -6996,7 +6996,7 @@ def make_and_run(dir_name,checkRam=False): stdout=devnull, stderr=devnull, close_fds=True) try: ptimer.execute() - #poll as often as possible; otherwise the subprocess might + #poll as often as possible; otherwise the subprocess might # "sneak" in some extra memory usage while you aren't looking # Accuracy of .2 seconds is enough for the timing. while ptimer.poll(): @@ -7028,7 +7028,7 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, If mu_r > 0.0, then the renormalization constant value will be hardcoded directly in check_sa.f, if is is 0 it will be set to Sqrt(s) and if it is < 0.0 the value in the param_card.dat is used. - If the split_orders target (i.e. the target squared coupling orders for + If the split_orders target (i.e. the target squared coupling orders for the computation) is != -1, it will be changed in check_sa.f via the subroutine CALL SET_COUPLINGORDERS_TARGET(split_orders).""" @@ -7043,12 +7043,12 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, file_path = pjoin(directories[0],'check_sa.f') if not os.path.isfile(file_path): raise MadGraph5Error('Could not find the location of check_sa.f'+\ - ' from the specified path %s.'%str(file_path)) + ' from the specified path %s.'%str(file_path)) file = open(file_path, 'r') check_sa = file.read() file.close() - + file = open(file_path, 'w') check_sa = re.sub(r"READPS = \S+\)","READPS = %s)"%('.TRUE.' 
if read_ps \ else '.FALSE.'), check_sa) @@ -7064,42 +7064,42 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, (("%.17e"%mu_r).replace('e','d')),check_sa) elif mu_r < 0.0: check_sa = re.sub(r"MU_R=SQRTS","",check_sa) - + if split_orders > 0: check_sa = re.sub(r"SET_COUPLINGORDERS_TARGET\(-?\d+\)", - "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) - + "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) + file.write(check_sa) file.close() - @staticmethod + @staticmethod def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ req_files = ['HelFilter.dat','LoopFilter.dat'], attempts = [4,15]): - """ Run the initialization of the process in 'run_dir' with success + """ Run the initialization of the process in 'run_dir' with success characterized by the creation of the files req_files in this directory. The directory containing the driving source code 'check_sa.f'. - The list attempt gives the successive number of PS points the + The list attempt gives the successive number of PS points the initialization should be tried with before calling it failed. Returns the number of PS points which were necessary for the init. Notice at least run_dir or SubProc_dir must be provided. A negative attempt number given in input means that quadprec will be forced for initialization.""" - + # If the user does not want detailed info, then set the dictionary # to a dummy one. 
if infos is None: infos={} - + if SubProc_dir is None and run_dir is None: raise MadGraph5Error('At least one of [SubProc_dir,run_dir] must'+\ ' be provided in run_initialization.') - + # If the user does not specify where is check_sa.f, then it is assumed # to be one levels above run_dir if SubProc_dir is None: SubProc_dir = os.path.abspath(pjoin(run_dir,os.pardir)) - + if run_dir is None: directories =[ dir for dir in misc.glob('P[0-9]*', SubProc_dir) if os.path.isdir(dir) ] @@ -7109,7 +7109,7 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find a valid running directory'+\ ' in %s.'%str(SubProc_dir)) - # Use the presence of the file born_matrix.f to decide if it is a + # Use the presence of the file born_matrix.f to decide if it is a # loop-induced process or not. It's not crucial, but just that because # of the dynamic adjustment of the ref scale used for deciding what are # the zero contributions, more points are neeeded for loop-induced. @@ -7128,9 +7128,9 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ %MLCardPath) else: - MLCard = banner_mod.MadLoopParam(MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) MLCard_orig = banner_mod.MadLoopParam(MLCard) - + # Make sure that LoopFilter really is needed. 
if not MLCard['UseLoopFilter']: try: @@ -7153,11 +7153,11 @@ def need_init(): proc_prefix+fname)) for fname in my_req_files]) or \ not os.path.isfile(pjoin(run_dir,'check')) or \ not os.access(pjoin(run_dir,'check'), os.X_OK) - + # Check if this is a process without born by checking the presence of the # file born_matrix.f is_loop_induced = os.path.exists(pjoin(run_dir,'born_matrix.f')) - + # For loop induced processes, always attempt quadruple precision if # double precision attempts fail and the user didn't specify himself # quadruple precision initializations attempts @@ -7166,11 +7166,11 @@ def need_init(): use_quad_prec = 1 curr_attempt = 1 - MLCard.set('WriteOutFilters',True) - + MLCard.set('WriteOutFilters',True) + while to_attempt!=[] and need_init(): curr_attempt = to_attempt.pop() - # if the attempt is a negative number it means we must force + # if the attempt is a negative number it means we must force # quadruple precision at initialization time if curr_attempt < 0: use_quad_prec = -1 @@ -7183,11 +7183,11 @@ def need_init(): MLCard.set('ZeroThres',1e-9) # Plus one because the filter are written on the next PS point after curr_attempt = abs(curr_attempt+1) - MLCard.set('MaxAttempts',curr_attempt) + MLCard.set('MaxAttempts',curr_attempt) MLCard.write(pjoin(SubProc_dir,'MadLoopParams.dat')) # initialization is performed. 
- MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, + MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, npoints = curr_attempt) compile_time, run_time, ram_usage = \ MadLoopInitializer.make_and_run(run_dir) @@ -7200,7 +7200,7 @@ def need_init(): infos['Process_compilation']==None: infos['Process_compilation'] = compile_time infos['Initialization'] = run_time - + MLCard_orig.write(pjoin(SubProc_dir,'MadLoopParams.dat')) if need_init(): return None @@ -7219,8 +7219,8 @@ def need_init(ML_resources_path, proc_prefix, r_files): MLCardPath = pjoin(proc_dir,'SubProcesses','MadLoopParams.dat') if not os.path.isfile(MLCardPath): raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ - %MLCardPath) - MLCard = banner_mod.MadLoopParam(MLCardPath) + %MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) req_files = ['HelFilter.dat','LoopFilter.dat'] # Make sure that LoopFilter really is needed. @@ -7234,9 +7234,9 @@ def need_init(ML_resources_path, proc_prefix, r_files): req_files.remove('HelFilter.dat') except ValueError: pass - + for v_folder in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)): + '%s*'%subproc_prefix)): # Make sure it is a valid MadLoop directory if not os.path.isdir(v_folder) or not os.path.isfile(\ pjoin(v_folder,'loop_matrix.f')): @@ -7247,7 +7247,7 @@ def need_init(ML_resources_path, proc_prefix, r_files): if need_init(pjoin(proc_dir,'SubProcesses','MadLoop5_resources'), proc_prefix, req_files): return True - + return False @staticmethod @@ -7265,7 +7265,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['treatCardsLoopNoInit'], cwd=pjoin(proc_dir,'Source')) else: interface.do_treatcards('all --no_MadLoopInit') - + # First make sure that IREGI and CUTTOOLS are compiled if needed if os.path.exists(pjoin(proc_dir,'Source','CutTools')): misc.compile(arg=['libcuttools'],cwd=pjoin(proc_dir,'Source')) @@ -7273,8 +7273,8 @@ def init_MadLoop(proc_dir, 
n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['libiregi'],cwd=pjoin(proc_dir,'Source')) # Then make sure DHELAS and MODEL are compiled misc.compile(arg=['libmodel'],cwd=pjoin(proc_dir,'Source')) - misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) - + misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) + # Now initialize the MadLoop outputs logger.info('Initializing MadLoop loop-induced matrix elements '+\ '(this can take some time)...') @@ -7283,7 +7283,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, if MG_options: if interface and hasattr(interface, 'cluster') and isinstance(interface.cluster, cluster.MultiCore): mcore = interface.cluster - else: + else: mcore = cluster.MultiCore(**MG_options) else: mcore = cluster.onecore @@ -7294,10 +7294,10 @@ def run_initialization_wrapper(run_dir, infos, attempts): run_dir=run_dir, infos=infos) else: n_PS = MadLoopInitializer.run_initialization( - run_dir=run_dir, infos=infos, attempts=attempts) + run_dir=run_dir, infos=infos, attempts=attempts) infos['nPS'] = n_PS return 0 - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return @@ -7307,21 +7307,21 @@ def wait_monitoring(Idle, Running, Done): init_info = {} # List all virtual folders while making sure they are valid MadLoop folders VirtualFolders = [f for f in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)) if (os.path.isdir(f) or + '%s*'%subproc_prefix)) if (os.path.isdir(f) or os.path.isfile(pjoin(f,'loop_matrix.f')))] logger.debug("Now Initializing MadLoop matrix element in %d folder%s:"%\ (len(VirtualFolders),'s' if len(VirtualFolders)>1 else '')) - logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in + logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in VirtualFolders)) for v_folder in VirtualFolders: init_info[v_folder] = {} - + # We try all multiples of n_PS from 1 to max_mult, first in DP and then # in QP before 
giving up, or use default values if n_PS is None. max_mult = 3 if n_PS is None: # Then use the default list of number of PS points to try - mcore.submit(run_initialization_wrapper, + mcore.submit(run_initialization_wrapper, [pjoin(v_folder), init_info[v_folder], None]) else: # Use specific set of PS points @@ -7348,8 +7348,8 @@ def wait_monitoring(Idle, Running, Done): '%d PS points (%s), in %.3g(compil.) + %.3g(init.) secs.'%( abs(init['nPS']),'DP' if init['nPS']>0 else 'QP', init['Process_compilation'],init['Initialization'])) - - logger.info('MadLoop initialization finished.') + + logger.info('MadLoop initialization finished.') AskforEditCard = common_run.AskforEditCard @@ -7364,16 +7364,16 @@ def wait_monitoring(Idle, Running, Done): import os import optparse - # Get the directory of the script real path (bin) - # and add it to the current PYTHONPATH + # Get the directory of the script real path (bin) + # and add it to the current PYTHONPATH #root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))) sys.path.insert(0, root_path) - class MyOptParser(optparse.OptionParser): + class MyOptParser(optparse.OptionParser): class InvalidOption(Exception): pass def error(self, msg=''): raise MyOptParser.InvalidOption(msg) - # Write out nice usage message if called with -h or --help + # Write out nice usage message if called with -h or --help usage = "usage: %prog [options] [FILE] " parser = MyOptParser(usage=usage) parser.add_option("-l", "--logging", default='INFO', @@ -7384,7 +7384,7 @@ def error(self, msg=''): help='force to launch debug mode') parser_error = '' done = False - + for i in range(len(sys.argv)-1): try: (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i]) @@ -7394,7 +7394,7 @@ def error(self, msg=''): else: args += sys.argv[len(sys.argv)-i:] if not done: - # raise correct error: + # raise correct error: try: (options, args) = parser.parse_args() except MyOptParser.InvalidOption as error: @@ -7407,8 +7407,8 @@ 
def error(self, msg=''): import subprocess import logging import logging.config - # Set logging level according to the logging level given by options - #logging.basicConfig(level=vars(logging)[options.logging]) + # Set logging level according to the logging level given by options + #logging.basicConfig(level=vars(logging)[options.logging]) import internal import internal.coloring_logging # internal.file = XXX/bin/internal/__init__.py @@ -7431,13 +7431,13 @@ def error(self, msg=''): raise pass - # Call the cmd interface main loop + # Call the cmd interface main loop try: if args: # a single command is provided if '--web' in args: - i = args.index('--web') - args.pop(i) + i = args.index('--web') + args.pop(i) cmd_line = MadEventCmd(me_dir, force_run=True) else: cmd_line = MadEventCmdShell(me_dir, force_run=True) @@ -7457,13 +7457,13 @@ def error(self, msg=''): pass - - - - - - - - + + + + + + + + diff --git a/epochX/cudacpp/ee_mumu.mad/src/cudacpp_src.mk b/epochX/cudacpp/ee_mumu.mad/src/cudacpp_src.mk index d4cc628aec..b4e446bc45 100644 --- a/epochX/cudacpp/ee_mumu.mad/src/cudacpp_src.mk +++ b/epochX/cudacpp/ee_mumu.mad/src/cudacpp_src.mk @@ -1,12 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) is used in the Subprocess and src directories - -THISMK = $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
#------------------------------------------------------------------------------- @@ -16,165 +11,24 @@ SHELL := /bin/bash #------------------------------------------------------------------------------- -#=== Configure common compiler flags for CUDA and C++ - -INCFLAGS = -I. -OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here - -#------------------------------------------------------------------------------- - #=== Configure the C++ compiler -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) $(USE_NVTX) -fPIC -Wall -Wshadow -Wextra +include ../Source/make_opts + +MG_CXXFLAGS += -fPIC -I. $(USE_NVTX) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS+= -ffast-math # see issue #117 +MG_CXXFLAGS += -ffast-math # see issue #117 endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY # Note: AR, CXX and FC are implicitly defined if not set externally # See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html ###RANLIB = ranlib -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -LDFLAGS = -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -LDFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler (note: NVCC is already exported including ccache) - -###$(info NVCC=$(NVCC)) - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ builds (note: NVCC is already exported including ccache) - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif - 
-#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for CUDA and C++ - -# Assuming uname is available, detect if architecture is PowerPC -UNAME_P := $(shell uname -p) - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # BUILD ERROR IF THIS ADDED IN SRC?! -else - ###AR=gcc-ar # needed by -flto - ###RANLIB=gcc-ranlib # needed by -flto - ###CXXFLAGS+= -flto # NB: build error from src/Makefile unless gcc-ar and gcc-ranlib are used - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -#------------------------------------------------------------------------------- - #=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the build flags appropriate to OMPFLAGS ###$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword 
registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -###$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -###$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -###$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -###$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - CXXFLAGS += -DMGONGPU_HAS_NO_CURAND -else ifneq ($(RNDGEN),hasCurand) - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- @@ -182,28 +36,18 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -###$(info Current directory is $(shell pwd)) -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIRREL = ../lib/$(BUILDDIR) - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR=1 is set)) -else - override BUILDDIR = . - override LIBDIRREL = ../lib - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) -endif -######$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) +# Build directory: +BUILDDIR := build.$(DIRTAG) +LIBDIRREL := ../lib/$(BUILDDIR) # Workaround for Mac #375 (I did not manage to fix rpath with @executable_path): use absolute paths for LIBDIR # (NB: this is quite ugly because it creates the directory if it does not exist - to avoid removing src by mistake) -UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Darwin) override LIBDIR = $(shell mkdir -p $(LIBDIRREL); cd $(LIBDIRREL); pwd) ifeq ($(wildcard $(LIBDIR)),) @@ -223,55 +67,35 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -# Target (and build options): debug -debug: OPTFLAGS = -g -O0 -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! 
-name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ #------------------------------------------------------------------------------- cxx_objects=$(addprefix $(BUILDDIR)/, Parameters_sm.o read_slha.o) -ifneq ($(NVCC),) +ifeq ($(AVX),cuda) +COMPILER=$(NVCC) cu_objects=$(addprefix $(BUILDDIR)/, Parameters_sm_cu.o) +else +COMPILER=$(CXX) +cu_objects= endif # Target (and build rules): common (src) library -ifneq ($(NVCC),) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) $(cu_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(NVCC) -shared -o $@ $(cxx_objects) $(cu_objects) $(LDFLAGS) -else -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(CXX) -shared -o $@ $(cxx_objects) $(LDFLAGS) -endif + mkdir -p $(LIBDIR) + $(COMPILER) -shared -o $@ $(cxx_objects) $(cu_objects) $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- @@ -279,19 +103,7 @@ endif .PHONY: clean clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) -else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe -endif - -cleanall: - @echo - $(MAKE) clean -f $(THISMK) - @echo - rm -rf $(LIBDIR)/build.* - rm -rf build.* + $(RM) -f ../lib/build.*/*.so + $(RM) -rf build.* #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/ee_mumu.mad/src/mgOnGpuCxtypes.h b/epochX/cudacpp/ee_mumu.mad/src/mgOnGpuCxtypes.h index ca9a9f00c0..3290d314d6 100644 --- a/epochX/cudacpp/ee_mumu.mad/src/mgOnGpuCxtypes.h +++ b/epochX/cudacpp/ee_mumu.mad/src/mgOnGpuCxtypes.h @@ -21,10 +21,14 @@ // Complex type in cuda: thrust or cucomplex or cxsmpl #ifdef __CUDACC__ 
#if defined MGONGPU_CUCXTYPE_THRUST +#ifdef __CLANG__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" // for icpx2021/clang13 (https://stackoverflow.com/a/15864661) +#endif #include +#ifdef __CLANG__ #pragma clang diagnostic pop +#endif #elif defined MGONGPU_CUCXTYPE_CUCOMPLEX #include #elif not defined MGONGPU_CUCXTYPE_CXSMPL diff --git a/epochX/cudacpp/ee_mumu.sa/src/cudacpp_src.mk b/epochX/cudacpp/ee_mumu.sa/src/cudacpp_src.mk index d4cc628aec..c757875347 100644 --- a/epochX/cudacpp/ee_mumu.sa/src/cudacpp_src.mk +++ b/epochX/cudacpp/ee_mumu.sa/src/cudacpp_src.mk @@ -1,7 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. +# Further modified by: J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. #=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) #=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) 
is used in the Subprocess and src directories @@ -95,50 +95,52 @@ CXXFLAGS += $(OMPFLAGS) # Set the build flags appropriate to each AVX choice (example: "make AVX=none") # [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] # [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) +ifeq ($(NVCC),) + $(info AVX=$(AVX)) + ifeq ($(UNAME_P),ppc64le) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) + endif + else ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' 
are supported on ARM for the moment) + endif + else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + ifeq ($(AVX),none) + override AVXFLAGS = -march=x86-64 # no SIMD (see #588) + else ifeq ($(AVX),sse4) + override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # 
AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif endif + # For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? + CXXFLAGS+= $(AVXFLAGS) endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? -CXXFLAGS+= $(AVXFLAGS) # Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") ###$(info FPTYPE=$(FPTYPE)) @@ -182,11 +184,19 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +ifneq ($(NVCC),) + override DIRTAG = cuda_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +else + override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +endif # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +ifneq ($(NVCC),) + override TAG = cuda_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +else + override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +endif # Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 ###$(info Current directory is $(shell pwd)) @@ -223,35 +233,21 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so # Target (and build options): debug debug: OPTFLAGS = -g -O0 
debug: all.$(TAG) -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) - #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ @@ -278,20 +274,61 @@ endif # Target: clean the builds .PHONY: clean +BUILD_DIRS := $(wildcard build.*) +NUM_BUILD_DIRS := $(words $(BUILD_DIRS)) + clean: ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) +ifeq ($(NUM_BUILD_DIRS),1) + $(info USEBUILDDIR=1, only one src build directory found.) + rm -rf ../lib/$(BUILD_DIRS) + rm -rf $(BUILD_DIRS) +else ifeq ($(NUM_BUILD_DIRS),0) + $(error USEBUILDDIR=1, but no src build directories are found.) else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe + $(error Multiple src BUILDDIR's found! Use 'cleannone', 'cleansse4', 'cleanavx2', 'clean512y','clean512z', 'cleancuda' or 'cleanall'.) +endif +else + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + rm -f $(BUILDDIR)/*.o $(BUILDDIR)/*.exe endif cleanall: @echo - $(MAKE) clean -f $(THISMK) + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + rm -f $(BUILDDIR)/*.o $(BUILDDIR)/*.exe @echo - rm -rf $(LIBDIR)/build.* + rm -rf ../lib/build.* rm -rf build.* +# Target: clean different builds + +cleannone: + rm -rf ../lib/build.none_* + rm -rf build.none_* + +cleansse4: + rm -rf ../lib/build.sse4_* + rm -rf build.sse4_* + +cleanavx2: + rm -rf ../lib/build.avx2_* + rm -rf build.avx2_* + +clean512y: + rm -rf ../lib/build.512y_* + rm -rf build.512y_* + +clean512z: + rm -rf ../lib/build.512z_* + rm -rf build.512z_* + +cleancuda: + rm -rf ../lib/build.cuda_* + rm -rf build.cuda_* + +cleandir: + rm -f ./*.o ./*.exe + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_tt.mad/CODEGEN_mad_gg_tt_log.txt b/epochX/cudacpp/gg_tt.mad/CODEGEN_mad_gg_tt_log.txt index 3326a8488f..0eabc39d85 100644 --- a/epochX/cudacpp/gg_tt.mad/CODEGEN_mad_gg_tt_log.txt +++ 
b/epochX/cudacpp/gg_tt.mad/CODEGEN_mad_gg_tt_log.txt @@ -52,7 +52,7 @@ Note that you can still compile and run aMC@NLO with the built-in PDFs Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt import /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt.mg The import format was not given, so we guess it as command set stdout_level DEBUG @@ -62,7 +62,7 @@ generate g g > t t~ No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005394458770751953  +DEBUG: model prefixing takes 0.005516529083251953  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -175,7 +175,7 @@ INFO: Generating Helas calls for process: g g > t t~ WEIGHTED<=2 @1 INFO: Processing color information for process: g g > t t~ @1 INFO: Creating files in directory P1_gg_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -191,16 +191,16 @@ INFO: Created files CPPProcess.h and CPPProcess.cc in directory ./. 
INFO: Generating Feynman diagrams for Process: g g > t t~ WEIGHTED<=2 @1 INFO: Finding symmetric diagrams for subprocess group gg_ttx Generated helas calls for 1 subprocesses (3 diagrams) in 0.006 s -Wrote files for 10 helas calls in 0.102 s +Wrote files for 10 helas calls in 0.100 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates FFV1 routines -ALOHA: aloha creates 2 routines in 0.142 s +ALOHA: aloha creates 2 routines in 0.146 s DEBUG: Entering PLUGIN_ProcessExporter.convert_model (create the model) [output.py at line 202]  ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates FFV1 routines -ALOHA: aloha creates 4 routines in 0.138 s +ALOHA: aloha creates 4 routines in 0.129 s VVV1 FFV1 FFV1 @@ -219,12 +219,14 @@ save configuration file to /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CO INFO: Use Fortran compiler gfortran INFO: Use c++ compiler g++ INFO: Generate web pages +DEBUG: standardise /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt/Source/make_opts (fix f2py3 and sort make_opts_variables) before applying patch.common DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt; patch -p4 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common patching file Source/genps.inc +patching file Source/make_opts patching file Source/makefile patching file SubProcesses/makefile +patching file bin/internal/banner.py patching file bin/internal/gen_ximprove.py -Hunk #1 succeeded at 391 (offset 6 lines). 
patching file bin/internal/madevent_interface.py DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt/SubProcesses/P1_gg_ttx; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f @@ -237,9 +239,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m1.677s -user 0m1.453s -sys 0m0.213s +real 0m1.684s +user 0m1.475s +sys 0m0.215s ************************************************************ * * * W E L C O M E to * @@ -265,7 +267,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt treatcards run quit INFO: @@ -295,7 +297,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". 
Set another one in ./input/mg5_configuration.txt treatcards param quit INFO: diff --git a/epochX/cudacpp/gg_tt.mad/Source/make_opts b/epochX/cudacpp/gg_tt.mad/Source/make_opts index e4b87ee6ad..435bed0dc7 100644 --- a/epochX/cudacpp/gg_tt.mad/Source/make_opts +++ b/epochX/cudacpp/gg_tt.mad/Source/make_opts @@ -1,7 +1,7 @@ DEFAULT_CPP_COMPILER=g++ DEFAULT_F2PY_COMPILER=f2py3 DEFAULT_F_COMPILER=gfortran -GLOBAL_FLAG=-O3 -ffast-math -fbounds-check +GLOBAL_FLAG=-O3 -ffast-math MACFLAG= MG5AMC_VERSION=SpecifiedByMG5aMCAtRunTime PYTHIA8_PATH=NotInstalled @@ -13,31 +13,53 @@ BIASLIBDIR=../../../lib/ BIASLIBRARY=libbias.$(libext) # Rest of the makefile -ifeq ($(origin FFLAGS),undefined) -FFLAGS= -w -fPIC -#FFLAGS+= -g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none -endif -FFLAGS += $(GLOBAL_FLAG) +#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) + +# Detect O/S kernel (Linux, Darwin...) +UNAME_S := $(shell uname -s) + +# Detect architecture (x86_64, ppc64le...) +UNAME_P := $(shell uname -p) + +#------------------------------------------------------------------------------- # REMOVE MACFLAG IF NOT ON MAC OR FOR F2PY -UNAME := $(shell uname -s) ifdef f2pymode MACFLAG= else -ifneq ($(UNAME), Darwin) +ifneq ($(UNAME_S), Darwin) MACFLAG= endif endif +############################################################ +# Default compiler flags +# To change optimisation level, override these as follows: +# make CXXFLAGS="-O0 -g" +# or export them as environment variables +# For debugging Fortran, one could e.g. 
use: +# FCFLAGS="-g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none" +############################################################ +FCFLAGS ?= $(GLOBAL_FLAG) -fbounds-check +CXXFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG +NVCCFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG -use_fast_math -lineinfo +LDFLAGS ?= $(STDLIB) -ifeq ($(origin CXXFLAGS),undefined) -CXXFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +ifneq ($(FFLAGS),) +# Madgraph used to use FFLAGS, so the user probably tries to change the flags specifically for madgraph: +FCFLAGS = $(FFLAGS) endif -ifeq ($(origin CFLAGS),undefined) -CFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +# Madgraph-specific flags: +WARNFLAGS = -Wall -Wshadow -Wextra +ifeq (,$(findstring -std=,$(CXXFLAGS))) +CXXSTANDARD= -std=c++17 endif +MG_FCFLAGS += -fPIC -w +MG_CXXFLAGS += -fPIC $(CXXSTANDARD) $(WARNFLAGS) $(MACFLAG) +MG_NVCCFLAGS += -fPIC $(CXXSTANDARD) --forward-unknown-to-host-compiler $(WARNFLAGS) +MG_LDFLAGS += $(MACFLAG) # Set FC unless it's defined by an environment variable ifeq ($(origin FC),default) @@ -49,45 +71,40 @@ endif # Increase the number of allowed charcters in a Fortran line ifeq ($(FC), ftn) -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler else VERS="$(shell $(FC) --version | grep ifort -i)" ifeq ($(VERS), "") -FFLAGS+= -ffixed-line-length-132 +MG_FCFLAGS += -ffixed-line-length-132 else -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler endif endif -UNAME := $(shell uname -s) -ifeq ($(origin LDFLAGS), undefined) -LDFLAGS=$(STDLIB) $(MACFLAG) -endif - # Options: dynamic, lhapdf # Option dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) dylibext=dylib else dylibext=so endif ifdef dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) libext=dylib -FFLAGS+= -fno-common -LDFLAGS += -bundle +MG_FCFLAGS += -fno-common +MG_LDFLAGS += -bundle define CREATELIB $(FC) -dynamiclib 
-undefined dynamic_lookup -o $(1) $(2) endef else libext=so -FFLAGS+= -fPIC -LDFLAGS += -shared +MG_FCFLAGS += -fPIC +MG_LDFLAGS += -shared define CREATELIB -$(FC) $(FFLAGS) $(LDFLAGS) -o $(1) $(2) +$(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MG_LDFLAGS) $(LDFLAGS) -o $(1) $(2) endef endif else @@ -101,17 +118,9 @@ endif # Option lhapdf ifneq ($(lhapdf),) -CXXFLAGS += $(shell $(lhapdf) --cppflags) +MG_CXXFLAGS += $(shell $(lhapdf) --cppflags) alfas_functions=alfas_functions_lhapdf llhapdf+= $(shell $(lhapdf) --cflags --libs) -lLHAPDF -# check if we need to activate c++11 (for lhapdf6.2) -ifeq ($(origin CXX),default) -ifeq ($lhapdfversion$lhapdfsubversion,62) -CXX=$(DEFAULT_CPP_COMPILER) -std=c++11 -else -CXX=$(DEFAULT_CPP_COMPILER) -endif -endif else alfas_functions=alfas_functions llhapdf= @@ -120,4 +129,207 @@ endif # Helper function to check MG5 version define CHECK_MG5AMC_VERSION python -c 'import re; from distutils.version import StrictVersion; print StrictVersion("$(MG5AMC_VERSION)") >= StrictVersion("$(1)") if re.match("^[\d\.]+$$","$(MG5AMC_VERSION)") else True;' -endef \ No newline at end of file +endef + +#------------------------------------------------------------------------------- + +# Set special cases for non-gcc/clang builds +# AVX below gets overridden from outside in architecture-specific builds +AVX ?= none +# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] +# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] +$(info AVX=$(AVX)) +ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + endif +else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for 
clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif +endif + +# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? +MG_CXXFLAGS+= $(AVXFLAGS) + +#------------------------------------------------------------------------------- + +#=== Configure the CUDA compiler if available + +# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) +# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below +ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside + $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") + override CUDA_HOME=disabled +endif + +# If CUDA_HOME is not set, try to set it from the location of nvcc +ifndef CUDA_HOME + CUDA_HOME = $(patsubst %bin/nvcc,%,$(shell which nvcc 2>/dev/null)) + $(info CUDA_HOME="$(CUDA_HOME)") +endif + +# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists +ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) + NVCC = $(CUDA_HOME)/bin/nvcc + USE_NVTX ?=-DUSE_NVTX + # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html + # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ + # Default: use compute capability 70 (Volta architecture), and embed PTX to support later architectures, too. + # Set MADGRAPH_CUDA_ARCHITECTURE to the desired value to change the default. + # Build for multiple architectures using a space-separated list, e.g. 
MADGRAPH_CUDA_ARCHITECTURE="70 80" + MADGRAPH_CUDA_ARCHITECTURE ?= 70 + # Generate PTX for the first architecture: + CUARCHFLAGS := --generate-code arch=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)),code=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)) + # Generate device code for all architectures: + CUARCHFLAGS += $(foreach arch,$(MADGRAPH_CUDA_ARCHITECTURE), --generate-code arch=compute_$(arch),code=sm_$(arch)) + + CUINC = -I$(CUDA_HOME)/include/ + CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! + MG_LDFLAGS += $(CURANDLIBFLAGS) + MG_NVCCFLAGS += $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) + +else ifeq ($(AVX),cuda) + $(error nvcc is not visible in PATH. Either add it to PATH or export CUDA_HOME to compile with cuda) + ifeq ($(AVX),cuda) + $(error Cannot compile for cuda without NVCC) + endif +endif + +# Set the host C++ compiler for nvcc via "-ccbin " +# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." is not supported) +MG_NVCCFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) + +# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) +ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) +MG_NVCCFLAGS += -allow-unsupported-compiler +endif + +#------------------------------------------------------------------------------- + +#=== Configure ccache for C++ and CUDA builds + +# Enable ccache if USECCACHE=1 +ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) + override CXX:=ccache $(CXX) +endif + +ifneq ($(NVCC),) + ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) + override NVCC:=ccache $(NVCC) + endif +endif + +#------------------------------------------------------------------------------- + +#=== Configure PowerPC-specific compiler flags for C++ and CUDA + +# PowerPC-specific CXX / CUDA compiler flags (being reviewed) +ifeq ($(UNAME_P),ppc64le) + MG_CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none 
and sse4 + MG_NVCCFLAGS+= -Xcompiler -mno-float128 + + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + endif +endif + +#------------------------------------------------------------------------------- +#=== Apple-specific compiler/linker options + +# Add -std=c++17 explicitly to avoid build errors on macOS +# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" +ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) +MG_CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 +endif + +ifeq ($(UNAME_S),Darwin) +STDLIB = -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) +MG_LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" +else +MG_LDFLAGS += -Xlinker --no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +endif + +#------------------------------------------------------------------------------- + +#=== C++/CUDA-specific flags for floating-point types and random generators to use + +# Set the default FPTYPE (floating point type) choice +FPTYPE ?= m + +# Set the default HELINL (inline helicities?) choice +HELINL ?= 0 + +# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice +HRDCOD ?= 0 + +# Set the default RNDGEN (random number generator) choice +ifeq ($(NVCC),) + RNDGEN ?= hasNoCurand +else + RNDGEN ?= hasCurand +endif + +# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so sub-makes don't go back to the defaults +export AVX +export AVXFLAGS +export FPTYPE +export HELINL +export HRDCOD +export RNDGEN + +#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN + +# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") +# $(info FPTYPE=$(FPTYPE)) +ifeq ($(FPTYPE),d) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE +else ifeq ($(FPTYPE),f) + COMMONFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT +else ifeq ($(FPTYPE),m) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT +else + $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) +endif + +# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") +# $(info HELINL=$(HELINL)) +ifeq ($(HELINL),1) + COMMONFLAGS += -DMGONGPU_INLINE_HELAMPS +else ifneq ($(HELINL),0) + $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") +# $(info HRDCOD=$(HRDCOD)) +ifeq ($(HRDCOD),1) + COMMONFLAGS += -DMGONGPU_HARDCODE_PARAM +else ifneq ($(HRDCOD),0) + $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") +$(info RNDGEN=$(RNDGEN)) +ifeq ($(RNDGEN),hasNoCurand) + override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND + override CURANDLIBFLAGS = +else ifeq ($(RNDGEN),hasCurand) + CXXFLAGSCURAND = $(CUINC) +else + $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) +endif + +MG_CXXFLAGS += $(COMMONFLAGS) +MG_NVCCFLAGS += $(COMMONFLAGS) + 
+#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_tt.mad/Source/makefile b/epochX/cudacpp/gg_tt.mad/Source/makefile index 00c73099a0..c9c219d1f7 100644 --- a/epochX/cudacpp/gg_tt.mad/Source/makefile +++ b/epochX/cudacpp/gg_tt.mad/Source/makefile @@ -10,8 +10,8 @@ include make_opts # Source files -PROCESS= hfill.o matrix.o myamp.o -DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o +PROCESS = hfill.o matrix.o myamp.o +DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o HBOOK = hfill.o hcurve.o hbook1.o hbook2.o GENERIC = $(alfas_functions).o transpole.o invarients.o hfill.o pawgraphs.o ran1.o \ rw_events.o rw_routines.o kin_functions.o open_file.o basecode.o setrun.o \ @@ -22,7 +22,7 @@ GENSUDGRID = gensudgrid.o is-sud.o setrun_gen.o rw_routines.o open_file.o # Locally compiled libraries -LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) +LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) # Binaries @@ -32,6 +32,12 @@ BINARIES = $(BINDIR)gen_ximprove $(BINDIR)gensudgrid $(BINDIR)combine_runs all: $(LIBRARIES) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(LIBDIR)libbias.$(libext) +%.o: %.f *.inc + $(FC) -I. $(MG_FCFLAGS) $(FCFLAGS) -c $< -o $@ + +DiscreteSampler.o discretesampler.mod: DiscreteSampler.f *.inc + $(FC) -I. $(MG_FCFLAGS) $(FCFLAGS) -c $< -o DiscreteSampler.o + # Libraries $(LIBDIR)libdsample.$(libext): $(DSAMPLE) @@ -39,40 +45,39 @@ $(LIBDIR)libdsample.$(libext): $(DSAMPLE) $(LIBDIR)libgeneric.$(libext): $(GENERIC) $(call CREATELIB, $@, $^) $(LIBDIR)libdhelas.$(libext): DHELAS - cd DHELAS; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libpdf.$(libext): PDF make_opts - cd PDF; make; cd .. 
+ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" ifneq (,$(filter edff chff, $(pdlabel1) $(pdlabel2))) $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make ; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" else $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make -f makefile_dummy; cd ../../ -endif + $(MAKE) -C $< -f makefile_dummy FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" +endif $(LIBDIR)libcernlib.$(libext): CERNLIB - cd CERNLIB; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" # The bias library is here the dummy by default; compilation of other ones specified in the run_card will be done by MG5aMC directly. $(LIBDIR)libbias.$(libext): BIAS/dummy - cd BIAS/dummy; make; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libmodel.$(libext): MODEL param_card.inc - cd MODEL; make + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" param_card.inc: ../Cards/param_card.dat ../bin/madevent treatcards param + touch $@ # madevent doesn't update the time stamp if there's nothing to do -$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o - $(FC) $(LDFLAGS) -o $@ $^ -#$(BINDIR)combine_events: $(COMBINE) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) run_card.inc $(LIBDIR)libbias.$(libext) -# $(FC) -o $@ $(COMBINE) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC $(llhapdf) $(LDFLAGS) -lbias +$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o + $(FC) $(MG_LDFLAGS) $(LDFLAGS) -o $@ $^ $(BINDIR)gensudgrid: $(GENSUDGRID) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libcernlib.$(libext) - $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(LDFLAGS) + $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) 
-lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(MG_LDFLAGS) $(LDFLAGS) # Dependencies -dsample.o: DiscreteSampler.o dsample.f genps.inc StringCast.o +dsample.o: DiscreteSampler.o discretesampler.mod dsample.f genps.inc StringCast.o DiscreteSampler.o: StringCast.o invarients.o: invarients.f genps.inc setrun.o: setrun.f nexternal.inc leshouche.inc genps.inc @@ -85,6 +90,7 @@ rw_events.o: rw_events.f run_config.inc run_card.inc: ../Cards/run_card.dat ../bin/madevent treatcards run + touch $@ # madevent doesn't update the time stamp if there's nothing to do clean4pdf: rm -f ../lib/libpdf.$(libext) @@ -120,7 +126,7 @@ $(LIBDIR)libiregi.a: $(IREGIDIR) cd $(IREGIDIR); make ln -sf ../Source/$(IREGIDIR)libiregi.a $(LIBDIR)libiregi.a -cleanSource: +clean: $(RM) *.o $(LIBRARIES) $(BINARIES) cd PDF; make clean; cd .. cd PDF/gammaUPC; make clean; cd ../../ @@ -132,11 +138,3 @@ cleanSource: cd BIAS/ptj_bias; make clean; cd ../.. if [ -d $(CUTTOOLSDIR) ]; then cd $(CUTTOOLSDIR); make clean; cd ..; fi if [ -d $(IREGIDIR) ]; then cd $(IREGIDIR); make clean; cd ..; fi - -clean: cleanSource - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make clean; cd -; done; - -cleanavx: - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; -cleanall: cleanSource # THIS IS THE ONE - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; diff --git a/epochX/cudacpp/gg_tt.mad/SubProcesses/Bridge.h b/epochX/cudacpp/gg_tt.mad/SubProcesses/Bridge.h index bf8b5e024d..c263f39a62 100644 --- a/epochX/cudacpp/gg_tt.mad/SubProcesses/Bridge.h +++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/Bridge.h @@ -236,7 +236,7 @@ namespace mg5amcCpu #ifdef __CUDACC__ if( ( m_nevt < s_gputhreadsmin ) || ( m_nevt % s_gputhreadsmin != 0 ) ) throw std::runtime_error( "Bridge constructor: nevt should be a multiple of " + std::to_string( s_gputhreadsmin ) ); - while( m_nevt != m_gpublocks * m_gputhreads ) + while( m_nevt != static_cast( m_gpublocks * m_gputhreads ) ) { m_gputhreads /= 
2; if( m_gputhreads < s_gputhreadsmin ) @@ -266,7 +266,7 @@ namespace mg5amcCpu template void Bridge::set_gpugrid( const int gpublocks, const int gputhreads ) { - if( m_nevt != gpublocks * gputhreads ) + if( m_nevt != static_cast( gpublocks * gputhreads ) ) throw std::runtime_error( "Bridge: gpublocks*gputhreads must equal m_nevt in set_gpugrid" ); m_gpublocks = gpublocks; m_gputhreads = gputhreads; diff --git a/epochX/cudacpp/gg_tt.mad/SubProcesses/MadgraphTest.h b/epochX/cudacpp/gg_tt.mad/SubProcesses/MadgraphTest.h index ef40624c88..b0f2250c25 100644 --- a/epochX/cudacpp/gg_tt.mad/SubProcesses/MadgraphTest.h +++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/MadgraphTest.h @@ -199,10 +199,6 @@ class MadgraphTest : public testing::TestWithParam } }; -// Since we link both the CPU-only and GPU tests into the same executable, we prevent -// a multiply defined symbol by only compiling this in the non-CUDA phase: -#ifndef __CUDACC__ - /// Compare momenta and matrix elements. /// This uses an implementation of TestDriverBase to run a madgraph workflow, /// and compares momenta and matrix elements with a reference file. 
@@ -307,6 +303,4 @@ TEST_P( MadgraphTest, CompareMomentaAndME ) } } -#endif // __CUDACC__ - #endif /* MADGRAPHTEST_H_ */ diff --git a/epochX/cudacpp/gg_tt.mad/SubProcesses/MatrixElementKernels.cc b/epochX/cudacpp/gg_tt.mad/SubProcesses/MatrixElementKernels.cc index 74b5239ebf..2d6f27cd5d 100644 --- a/epochX/cudacpp/gg_tt.mad/SubProcesses/MatrixElementKernels.cc +++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/MatrixElementKernels.cc @@ -196,6 +196,9 @@ namespace mg5amcGpu void MatrixElementKernelDevice::setGrid( const int gpublocks, const int gputhreads ) { + m_gpublocks = gpublocks; + m_gputhreads = gputhreads; + if( m_gpublocks == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gpublocks must be > 0 in setGrid" ); if( m_gputhreads == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gputhreads must be > 0 in setGrid" ); if( this->nevt() != m_gpublocks * m_gputhreads ) throw std::runtime_error( "MatrixElementKernelDevice: nevt mismatch in setGrid" ); diff --git a/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/check_sa.cc b/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/check_sa.cc +++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/counters.cc b/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/counters.cc +++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git 
a/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk index 509307506b..706c03702e 100644 --- a/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk @@ -1,56 +1,41 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: use ':=' to ensure that the value of CUDACPP_MAKEFILE is not modified further down after including make_opts -#=== NB: use 'override' to ensure that the value can not be modified from the outside -override CUDACPP_MAKEFILE := $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) -###$(info CUDACPP_MAKEFILE='$(CUDACPP_MAKEFILE)') - -#=== NB: different names (e.g. cudacpp.mk and cudacpp_src.mk) are used in the Subprocess and src directories -override CUDACPP_SRC_MAKEFILE = cudacpp_src.mk - -#------------------------------------------------------------------------------- - -#=== Use bash in the Makefile (https://www.gnu.org/software/make/manual/html_node/Choosing-the-Shell.html) - -SHELL := /bin/bash - -#------------------------------------------------------------------------------- - -#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) - -# Detect O/S kernel (Linux, Darwin...) -UNAME_S := $(shell uname -s) -###$(info UNAME_S='$(UNAME_S)') - -# Detect architecture (x86_64, ppc64le...) 
-UNAME_P := $(shell uname -p) -###$(info UNAME_P='$(UNAME_P)') - -#------------------------------------------------------------------------------- - -#=== Include the common MG5aMC Makefile options - -# OM: this is crucial for MG5aMC flag consistency/documentation -# AV: temporarely comment this out because it breaks cudacpp builds -ifneq ($(wildcard ../../Source/make_opts),) -include ../../Source/make_opts -endif +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. + +# This makefile extends the Fortran makefile called "makefile" + +CUDACPP_SRC_MAKEFILE = cudacpp_src.mk + +# Self-invocation with adapted flags: +cppnative: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=native AVXFLAGS="-march=native" cppbuild +cppnone: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=none AVXFLAGS= cppbuild +cppsse4: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=sse4 AVXFLAGS=-march=nehalem cppbuild +cppavx2: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=avx2 AVXFLAGS=-march=haswell cppbuild +cpp512y: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512y AVXFLAGS="-march=skylake-avx512 -mprefer-vector-width=256" cppbuild +cpp512z: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512z AVXFLAGS="-march=skylake-avx512 -DMGONGPU_PVW512" cppbuild +cuda: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=cuda cudabuild #------------------------------------------------------------------------------- #=== Configure common compiler flags for C++ and CUDA +# NB: The base flags are defined in the fortran "makefile" + +# Include directories +INCFLAGS = -I. -I../../src -INCFLAGS = -I. 
-OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here +MG_CXXFLAGS += $(INCFLAGS) +MG_NVCCFLAGS += $(INCFLAGS) # Dependency on src directory -MG5AMC_COMMONLIB = mg5amc_common -LIBFLAGS = -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -INCFLAGS += -I../../src +MG5AMC_COMMONLIB = mg5amc_common # Compiler-specific googletest build directory (#125 and #738) ifneq ($(shell $(CXX) --version | grep '^Intel(R) oneAPI DPC++/C++ Compiler'),) @@ -99,356 +84,42 @@ endif #------------------------------------------------------------------------------- -#=== Configure the C++ compiler - -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) -Wall -Wshadow -Wextra -ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS += -ffast-math # see issue #117 -endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY - -# Optionally add debug flags to display the full list of flags (eg on Darwin) -###CXXFLAGS+= -v - -# Note: AR, CXX and FC are implicitly defined if not set externally -# See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html - -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler - -# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) -# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below -ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside - $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") - override CUDA_HOME=disabled -endif - -# If CUDA_HOME is not set, try to set it from the location of nvcc -ifndef CUDA_HOME - CUDA_HOME = $(patsubst 
%bin/nvcc,%,$(shell which nvcc 2>/dev/null)) - $(warning CUDA_HOME was not set: using "$(CUDA_HOME)") -endif - -# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists -ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) - NVCC = $(CUDA_HOME)/bin/nvcc - USE_NVTX ?=-DUSE_NVTX - # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html - # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ - # Default: use compute capability 70 for V100 (CERN lxbatch, CERN itscrd, Juwels Cluster). - # Embed device code for 70, and PTX for 70+. - # Export MADGRAPH_CUDA_ARCHITECTURE (comma-separated list) to use another value or list of values (see #533). - # Examples: use 60 for P100 (Piz Daint), 80 for A100 (Juwels Booster, NVidia raplab/Curiosity). - MADGRAPH_CUDA_ARCHITECTURE ?= 70 - ###CUARCHFLAGS = -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=compute_$(MADGRAPH_CUDA_ARCHITECTURE) -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=sm_$(MADGRAPH_CUDA_ARCHITECTURE) # Older implementation (AV): go back to this one for multi-GPU support #533 - ###CUARCHFLAGS = --gpu-architecture=compute_$(MADGRAPH_CUDA_ARCHITECTURE) --gpu-code=sm_$(MADGRAPH_CUDA_ARCHITECTURE),compute_$(MADGRAPH_CUDA_ARCHITECTURE) # Newer implementation (SH): cannot use this as-is for multi-GPU support #533 - comma:=, - CUARCHFLAGS = $(foreach arch,$(subst $(comma), ,$(MADGRAPH_CUDA_ARCHITECTURE)),-gencode arch=compute_$(arch),code=compute_$(arch) -gencode arch=compute_$(arch),code=sm_$(arch)) - CUINC = -I$(CUDA_HOME)/include/ - ifeq ($(RNDGEN),hasNoCurand) - CURANDLIBFLAGS= - else - CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! 
- endif - CUOPTFLAGS = -lineinfo - CUFLAGS = $(foreach opt, $(OPTFLAGS), -Xcompiler $(opt)) $(CUOPTFLAGS) $(INCFLAGS) $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) -use_fast_math - ###CUFLAGS += -Xcompiler -Wall -Xcompiler -Wextra -Xcompiler -Wshadow - ###NVCC_VERSION = $(shell $(NVCC) --version | grep 'Cuda compilation tools' | cut -d' ' -f5 | cut -d, -f1) - CUFLAGS += -std=c++17 # need CUDA >= 11.2 (see #333): this is enforced in mgOnGpuConfig.h - # Without -maxrregcount: baseline throughput: 6.5E8 (16384 32 12) up to 7.3E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 160 # improves throughput: 6.9E8 (16384 32 12) up to 7.7E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 128 # improves throughput: 7.3E8 (16384 32 12) up to 7.6E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 96 # degrades throughput: 4.1E8 (16384 32 12) up to 4.5E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 64 # degrades throughput: 1.7E8 (16384 32 12) flat at 1.7E8 (65536 128 12) -else ifneq ($(origin REQUIRE_CUDA),undefined) - # If REQUIRE_CUDA is set but no cuda is found, stop here (e.g. for CI tests on GPU #443) - $(error No cuda installation found (set CUDA_HOME or make nvcc visible in PATH)) -else - # No cuda. Switch cuda compilation off and go to common random numbers in C++ - $(warning CUDA_HOME is not set or is invalid: export CUDA_HOME to compile with cuda) - override NVCC= - override USE_NVTX= - override CUINC= - override CURANDLIBFLAGS= -endif -export NVCC -export CUFLAGS - -# Set the host C++ compiler for nvcc via "-ccbin " -# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." 
is not supported) -CUFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) - -# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) -ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) -CUFLAGS += -allow-unsupported-compiler -endif - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ and CUDA builds - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif -ifneq ($(NVCC),) - ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) - override NVCC:=ccache $(NVCC) - endif -endif - -#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for C++ and CUDA - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # would increase to none=4.08-4.12E6, sse4=4.99-5.03E6! -else - ###CXXFLAGS+= -flto # also on Intel this would increase throughputs by a factor 2 to 4... - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -# PowerPC-specific CUDA compiler flags (to be reviewed!) 
-ifeq ($(UNAME_P),ppc64le) - CUFLAGS+= -Xcompiler -mno-float128 -endif - -#------------------------------------------------------------------------------- - #=== Configure defaults and check if user-defined choices exist for OMPFLAGS, AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the default OMPFLAGS choice -ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on Intel (was ok without nvcc but not ok with nvcc before #578) -else ifneq ($(shell $(CXX) --version | egrep '^(clang)'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on clang (was not ok without or with nvcc before #578) -###else ifneq ($(shell $(CXX) --version | egrep '^(Apple clang)'),) # AV for Mac (Apple clang compiler) -else ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) +OMPFLAGS ?= -fopenmp +ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) override OMPFLAGS = # AV disable OpenMP MT on Apple clang (builds fail in the CI #578) -###override OMPFLAGS = -fopenmp # OM reenable OpenMP MT on Apple clang? 
(AV Oct 2023: this still fails in the CI) -else -override OMPFLAGS = -fopenmp # enable OpenMP MT by default on all other platforms -###override OMPFLAGS = # disable OpenMP MT on all other platforms (default before #575) -endif - -# Set the default AVX (vectorization) choice -ifeq ($(AVX),) - ifeq ($(UNAME_P),ppc64le) - ###override AVX = none - override AVX = sse4 - else ifeq ($(UNAME_P),arm) - ###override AVX = none - override AVX = sse4 - else ifeq ($(wildcard /proc/cpuinfo),) - override AVX = none - $(warning Using AVX='$(AVX)' because host SIMD features cannot be read from /proc/cpuinfo) - else ifeq ($(shell grep -m1 -c avx512vl /proc/cpuinfo)$(shell $(CXX) --version | grep ^clang),1) - override AVX = 512y - ###$(info Using AVX='$(AVX)' as no user input exists) - else - override AVX = avx2 - ifneq ($(shell grep -m1 -c avx512vl /proc/cpuinfo),1) - $(warning Using AVX='$(AVX)' because host does not support avx512vl) - else - $(warning Using AVX='$(AVX)' because this is faster than avx512vl for clang) - endif - endif -else - ###$(info Using AVX='$(AVX)' according to user input) -endif - -# Set the default FPTYPE (floating point type) choice -ifeq ($(FPTYPE),) - override FPTYPE = d -endif - -# Set the default HELINL (inline helicities?) choice -ifeq ($(HELINL),) - override HELINL = 0 -endif - -# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice -ifeq ($(HRDCOD),) - override HRDCOD = 0 -endif - -# Set the default RNDGEN (random number generator) choice -ifeq ($(RNDGEN),) - ifeq ($(NVCC),) - override RNDGEN = hasNoCurand - else ifeq ($(RNDGEN),) - override RNDGEN = hasCurand - endif endif -# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so that it is not necessary to pass them to the src Makefile too -export AVX -export FPTYPE -export HELINL -export HRDCOD -export RNDGEN +# Export here, so sub makes don't fall back to the defaults: export OMPFLAGS -#------------------------------------------------------------------------------- - -#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN - -# Set the build flags appropriate to OMPFLAGS -$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override 
AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS - CUFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM - CUFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND -else ifeq ($(RNDGEN),hasCurand) - override CXXFLAGSCURAND = -else - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- #=== Configure build directories and build lockfiles === -# Build directory "short" tag (defines target and path to the optional build directory) -# (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) - -# Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) -# (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) - -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIR = ../../lib/$(BUILDDIR) - override LIBDIRRPATH = '$$ORIGIN/../$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is set = 1)) -else - override BUILDDIR = . - override LIBDIR = ../../lib - override LIBDIRRPATH = '$$ORIGIN/$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) +# Build directory "short" tag (defines target and path to the build directory) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +CUDACPP_BUILDDIR = build.$(DIRTAG) +CUDACPP_LIBDIR := ../../lib/$(CUDACPP_BUILDDIR) +LIBDIRRPATH := '$$ORIGIN:$$ORIGIN/../$(CUDACPP_LIBDIR)' +ifneq ($(AVX),) + $(info Building CUDACPP in CUDACPP_BUILDDIR=$(CUDACPP_BUILDDIR). 
Libs in $(CUDACPP_LIBDIR)) endif -###override INCDIR = ../../include -###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) -# On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH +# On Linux, set rpath to CUDACPP_LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables or shared libraries ($ORIGIN on Linux) -# On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary +# On Darwin, building libraries with absolute paths in CUDACPP_LIBDIR makes this unnecessary ifeq ($(UNAME_S),Darwin) override CXXLIBFLAGSRPATH = override CULIBFLAGSRPATH = - override CXXLIBFLAGSRPATH2 = - override CULIBFLAGSRPATH2 = else # RPATH to cuda/cpp libs when linking executables override CXXLIBFLAGSRPATH = -Wl,-rpath,$(LIBDIRRPATH) override CULIBFLAGSRPATH = -Xlinker -rpath,$(LIBDIRRPATH) - # RPATH to common lib when linking cuda/cpp libs - override CXXLIBFLAGSRPATH2 = -Wl,-rpath,'$$ORIGIN' - override CULIBFLAGSRPATH2 = -Xlinker -rpath,'$$ORIGIN' endif # Setting LD_LIBRARY_PATH or DYLD_LIBRARY_PATH in the RUNTIME is no longer necessary (neither on Linux nor on Mac) @@ -458,107 +129,68 @@ override RUNTIME = #=== Makefile TARGETS and build rules below #=============================================================================== -cxx_main=$(BUILDDIR)/check.exe -fcxx_main=$(BUILDDIR)/fcheck.exe +cxx_main=$(CUDACPP_BUILDDIR)/check.exe +fcxx_main=$(CUDACPP_BUILDDIR)/fcheck.exe -ifneq ($(NVCC),) -cu_main=$(BUILDDIR)/gcheck.exe -fcu_main=$(BUILDDIR)/fgcheck.exe -else -cu_main= -fcu_main= -endif - -testmain=$(BUILDDIR)/runTest.exe +cu_main=$(CUDACPP_BUILDDIR)/gcheck.exe +fcu_main=$(CUDACPP_BUILDDIR)/fgcheck.exe ifneq ($(GTESTLIBS),) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) $(testmain) -else -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) 
+testmain=$(CUDACPP_BUILDDIR)/runTest.exe +cutestmain=$(CUDACPP_BUILDDIR)/runTest_cuda.exe endif -# Target (and build options): debug -MAKEDEBUG= -debug: OPTFLAGS = -g -O0 -debug: CUOPTFLAGS = -G -debug: MAKEDEBUG := debug -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -$(BUILDDIR)/.build.$(TAG): - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @if [ "$(oldtagsb)" != "" ]; then echo "Cannot build for tag=$(TAG) as old builds exist for other tags:"; echo " $(oldtagsb)"; echo "Please run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @touch $(BUILDDIR)/.build.$(TAG) +cppbuild: $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(cxx_main) $(fcxx_main) $(testmain) +cudabuild: $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(cu_main) $(fcu_main) $(cutestmain) # Generic target and build rules: objects from CUDA compilation -ifneq ($(NVCC),) -$(BUILDDIR)/%.o : %.cu *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cu *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c $< -o $@ -$(BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ -endif +$(CUDACPP_BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ # Generic target and build rules: objects from C++ compilation # (NB do not include CUINC here! 
add it only for NVTX or curand #679) -$(BUILDDIR)/%.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Apply special build flags only to CrossSectionKernel.cc and gCrossSectionKernel.cu (no fast math, see #117 and #516) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS := $(filter-out -ffast-math,$(CXXFLAGS)) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math +$(CUDACPP_BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math ifneq ($(NVCC),) -$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -fno-fast-math +$(CUDACPP_BUILDDIR)/gCrossSectionKernels.o: NVCCFLAGS += -Xcompiler -fno-fast-math endif endif # Apply special build flags only to check_sa.o and gcheck_sa.o (NVTX in timermap.h, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) -$(BUILDDIR)/gcheck_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) # Apply special build flags only to check_sa and CurandRandomNumberKernel (curand headers, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gcheck_sa.o: CUFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gCurandRandomNumberKernel.o: CUFLAGS += $(CXXFLAGSCURAND) -ifeq ($(RNDGEN),hasCurand) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CUINC) -endif +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) 
+$(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) + # Avoid "warning: builtin __has_trivial_... is deprecated; use __is_trivially_... instead" in nvcc with icx2023 (#592) ifneq ($(shell $(CXX) --version | egrep '^(Intel)'),) ifneq ($(NVCC),) -CUFLAGS += -Xcompiler -Wno-deprecated-builtins +MG_NVCCFLAGS += -Xcompiler -Wno-deprecated-builtins endif endif -# Avoid clang warning "overriding '-ffp-contract=fast' option with '-ffp-contract=on'" (#516) -# This patch does remove the warning, but I prefer to keep it disabled for the moment... -###ifneq ($(shell $(CXX) --version | egrep '^(clang|Apple clang|Intel)'),) -###$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -Wno-overriding-t-option -###ifneq ($(NVCC),) -###$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -Wno-overriding-t-option -###endif -###endif - #### Apply special build flags only to CPPProcess.cc (-flto) ###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += -flto -#### Apply special build flags only to CPPProcess.cc (AVXFLAGS) -###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += $(AVXFLAGS) - #------------------------------------------------------------------------------- -# Target (and build rules): common (src) library -commonlib : $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc $(BUILDDIR)/.build.$(TAG) - $(MAKE) -C ../../src $(MAKEDEBUG) -f $(CUDACPP_SRC_MAKEFILE) +$(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc + $(MAKE) AVX=$(AVX) AVXFLAGS="$(AVXFLAGS)" -C ../../src -f $(CUDACPP_SRC_MAKEFILE) #------------------------------------------------------------------------------- @@ -566,162 +198,123 @@ processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') ###$(info processid_short=$(processid_short)) MG5AMC_CXXLIB = mg5amc_$(processid_short)_cpp -cxx_objects_lib=$(BUILDDIR)/CPPProcess.o $(BUILDDIR)/MatrixElementKernels.o $(BUILDDIR)/BridgeKernels.o $(BUILDDIR)/CrossSectionKernels.o 
-cxx_objects_exe=$(BUILDDIR)/CommonRandomNumberKernel.o $(BUILDDIR)/RamboSamplingKernels.o +cxx_objects_lib=$(CUDACPP_BUILDDIR)/CPPProcess.o $(CUDACPP_BUILDDIR)/MatrixElementKernels.o $(CUDACPP_BUILDDIR)/BridgeKernels.o $(CUDACPP_BUILDDIR)/CrossSectionKernels.o +cxx_objects_exe=$(CUDACPP_BUILDDIR)/CommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/RamboSamplingKernels.o -ifneq ($(NVCC),) MG5AMC_CULIB = mg5amc_$(processid_short)_cuda -cu_objects_lib=$(BUILDDIR)/gCPPProcess.o $(BUILDDIR)/gMatrixElementKernels.o $(BUILDDIR)/gBridgeKernels.o $(BUILDDIR)/gCrossSectionKernels.o -cu_objects_exe=$(BUILDDIR)/gCommonRandomNumberKernel.o $(BUILDDIR)/gRamboSamplingKernels.o -endif +cu_objects_lib=$(CUDACPP_BUILDDIR)/gCPPProcess.o $(CUDACPP_BUILDDIR)/gMatrixElementKernels.o $(CUDACPP_BUILDDIR)/gBridgeKernels.o $(CUDACPP_BUILDDIR)/gCrossSectionKernels.o +cu_objects_exe=$(CUDACPP_BUILDDIR)/gCommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/gRamboSamplingKernels.o # Target (and build rules): C++ and CUDA shared libraries -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) - $(CXX) -shared -o $@ $(cxx_objects_lib) $(CXXLIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) - -ifneq ($(NVCC),) -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) - $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -endif +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) + $(CXX) -shared -o $@ 
$(cxx_objects_lib) $(CXXLIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) $(MG_LDFLAGS) $(LDFLAGS) -#------------------------------------------------------------------------------- - -# Target (and build rules): Fortran include files -###$(INCDIR)/%.inc : ../%.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### \cp $< $@ +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) + $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) #------------------------------------------------------------------------------- # Target (and build rules): C++ and CUDA standalone executables -$(cxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cxx_main): $(BUILDDIR)/check_sa.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o - $(CXX) -o $@ $(BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o $(CURANDLIBFLAGS) -ifneq ($(NVCC),) +$(cxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(cxx_main): $(CUDACPP_BUILDDIR)/check_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) + ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(cu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(cu_main): LIBFLAGS 
+= -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(cu_main): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(cu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cu_main): $(BUILDDIR)/gcheck_sa.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o - $(NVCC) -o $@ $(BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o $(CURANDLIBFLAGS) +$(cu_main): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif +$(cu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(cu_main): $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- - -# Generic target and build rules: objects from Fortran compilation -$(BUILDDIR)/%.o : %.f *.inc - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(FC) -I. -c $< -o $@ - -# Generic target and build rules: objects from Fortran compilation -###$(BUILDDIR)/%.o : %.f *.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi -### $(FC) -I. -I$(INCDIR) -c $< -o $@ - -# Target (and build rules): Fortran standalone executables -###$(BUILDDIR)/fcheck_sa.o : $(INCDIR)/fbridge.inc +# Check executables: ifeq ($(UNAME_S),Darwin) -$(fcxx_main): LIBFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 +$(fcxx_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif -$(fcxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcxx_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) - $(CXX) -o $@ $(BUILDDIR)/fcheck_sa.o $(OMPFLAGS) $(BUILDDIR)/fsampler.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) +$(fcxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(fcxx_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(cxx_objects_exe) $(OMPFLAGS) $(CUDACPP_BUILDDIR)/fsampler.o -lgfortran -L$(CUDACPP_LIBDIR) $(MG_LDFLAGS) $(LDFLAGS) -ifneq ($(NVCC),) ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(fcu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(fcu_main): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(fcu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(fcu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') endif ifeq ($(UNAME_S),Darwin) -$(fcu_main): LIBFLAGS += -L$(shell dirname 
$(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 -endif -$(fcu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcu_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) - $(NVCC) -o $@ $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) +$(fcu_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif +$(fcu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(fcu_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(cu_objects_exe) -lgfortran $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- # Target (and build rules): test objects and test executable -$(BUILDDIR)/testxxx.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx_cu.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx_cu.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -endif +$(testmain) $(cutestmain): $(GTESTLIBS) +$(testmain) $(cutestmain): INCFLAGS += $(GTESTINC) +$(testmain) $(cutestmain): MG_LDFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main -$(BUILDDIR)/testmisc.o: $(GTESTLIBS) 
-$(BUILDDIR)/testmisc.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(CUDACPP_BUILDDIR)/testxxx.o $(CUDACPP_BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) testxxx_cc_ref.txt +$(testmain): $(CUDACPP_BUILDDIR)/testxxx.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions +$(cutestmain): $(CUDACPP_BUILDDIR)/testxxx_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testmisc_cu.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests -endif -$(BUILDDIR)/runTest.o: $(GTESTLIBS) -$(BUILDDIR)/runTest.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/runTest.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/runTest.o +$(CUDACPP_BUILDDIR)/testmisc.o $(CUDACPP_BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/testmisc.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(cutestmain): $(CUDACPP_BUILDDIR)/testmisc_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests + + +$(CUDACPP_BUILDDIR)/runTest.o $(CUDACPP_BUILDDIR)/runTest_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/runTest.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/runTest.o +$(cutestmain): $(CUDACPP_BUILDDIR)/runTest_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/runTest_cu.o + -ifneq ($(NVCC),) -$(BUILDDIR)/runTest_cu.o: $(GTESTLIBS) -$(BUILDDIR)/runTest_cu.o: INCFLAGS += $(GTESTINC) ifneq ($(shell $(CXX) --version | grep ^Intel),) 
-$(testmain): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(testmain): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cutestmain): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cutestmain): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(testmain): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(testmain): $(BUILDDIR)/runTest_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/runTest_cu.o +$(cutestmain): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif -$(testmain): $(GTESTLIBS) -$(testmain): INCFLAGS += $(GTESTINC) -$(testmain): LIBFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main ifneq ($(OMPFLAGS),) ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -$(testmain): LIBFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) +$(testmain): MG_LDFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -$(testmain): LIBFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 +$(testmain): MG_LDFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 ###else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) ###$(testmain): LIBFLAGS += ???? 
# OMP is not supported yet by cudacpp for Apple clang (see #578 and #604) else -$(testmain): LIBFLAGS += -lgomp +$(testmain): MG_LDFLAGS += -lgomp endif endif -ifeq ($(NVCC),) # link only runTest.o -$(testmain): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) - $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -ldl -pthread $(LIBFLAGS) -else # link both runTest.o and runTest_cu.o -$(testmain): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) - $(NVCC) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) -ldl $(LIBFLAGS) -lcuda -endif +$(testmain): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(testmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) + $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -pthread $(MG_LDFLAGS) $(LDFLAGS) + +$(cutestmain): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cutestmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) + $(NVCC) -o $@ $(cu_objects_lib) $(cu_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -lcuda $(MG_LDFLAGS) $(LDFLAGS) # Use target gtestlibs to build only googletest ifneq ($(GTESTLIBS),) @@ -731,72 +324,15 @@ endif # Use flock (Linux only, no Mac) to allow 'make -j' if googletest has not yet been downloaded https://stackoverflow.com/a/32666215 $(GTESTLIBS): ifneq ($(shell which flock 2>/dev/null),) - @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - flock $(BUILDDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) + flock $(TESTDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) else if [ -d $(TESTDIR) ]; then $(MAKE) -C $(TESTDIR); fi endif #------------------------------------------------------------------------------- -# Target: build all targets in all AVX modes (each AVX mode in a separate build directory) -# Split the avxall target into five separate targets to allow parallel 'make -j avxall' builds -# (Hack: add a fbridge.inc dependency to avxall, to ensure it is only copied once for all AVX modes) -avxnone: - @echo - $(MAKE) USEBUILDDIR=1 AVX=none -f $(CUDACPP_MAKEFILE) - -avxsse4: - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 -f $(CUDACPP_MAKEFILE) - -avxavx2: - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 -f $(CUDACPP_MAKEFILE) - -avx512y: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y -f $(CUDACPP_MAKEFILE) - -avx512z: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z -f $(CUDACPP_MAKEFILE) - -ifeq ($(UNAME_P),ppc64le) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else ifeq ($(UNAME_P),arm) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 avxavx2 avx512y avx512z -avxall: avxnone avxsse4 avxavx2 avx512y avx512z -endif - -#------------------------------------------------------------------------------- - -# Target: clean the builds -.PHONY: clean - -clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(BUILDDIR) -else - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe - rm -f $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(LIBDIR)/lib$(MG5AMC_CULIB).so -endif - $(MAKE) -C ../../src clean -f $(CUDACPP_SRC_MAKEFILE) -### rm -rf $(INCDIR) - -cleanall: - @echo - $(MAKE) USEBUILDDIR=0 clean -f $(CUDACPP_MAKEFILE) - @echo - $(MAKE) USEBUILDDIR=0 -C ../../src cleanall -f $(CUDACPP_SRC_MAKEFILE) - rm -rf build.* - # Target: clean the builds as well as the gtest installation(s) 
-distclean: cleanall +distclean: clean cleansrc ifneq ($(wildcard $(TESTDIRCOMMON)),) $(MAKE) -C $(TESTDIRCOMMON) clean endif @@ -848,50 +384,55 @@ endif #------------------------------------------------------------------------------- -# Target: check (run the C++ test executable) +# Target: check/gcheck (run the C++ test executable) # [NB THIS IS WHAT IS USED IN THE GITHUB CI!] -ifneq ($(NVCC),) -check: runTest cmpFcheck cmpFGcheck -else check: runTest cmpFcheck -endif +gcheck: + $(MAKE) AVX=cuda runTest cmpFGcheck # Target: runTest (run the C++ test executable runTest.exe) -runTest: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/runTest.exe +ifneq ($(AVX),cuda) +runTest: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest.exe +else +runTest: cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest_cuda.exe +endif + # Target: runCheck (run the C++ standalone executable check.exe, with a small number of events) -runCheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/check.exe -p 2 32 2 +runCheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe -p 2 32 2 # Target: runGcheck (run the CUDA standalone executable gcheck.exe, with a small number of events) -runGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/gcheck.exe -p 2 32 2 +runGcheck: AVX=cuda +runGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe -p 2 32 2 # Target: runFcheck (run the Fortran standalone executable - with C++ MEs - fcheck.exe, with a small number of events) -runFcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 +runFcheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 # Target: runFGcheck (run the Fortran standalone executable - with CUDA MEs - fgcheck.exe, with a small number of events) -runFGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 +runFGcheck: AVX=cuda +runFGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 # Target: cmpFcheck (compare ME results from the C++ and Fortran with C++ MEs standalone executables, with 
a small number of events) -cmpFcheck: all.$(TAG) +cmpFcheck: cppbuild @echo - @echo "$(BUILDDIR)/check.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi # Target: cmpFGcheck (compare ME results from the CUDA and Fortran with CUDA MEs standalone executables, with a small number of events) -cmpFGcheck: all.$(TAG) +cmpFGcheck: AVX=cuda +cmpFGcheck: + $(MAKE) AVX=cuda cudabuild @echo - @echo "$(BUILDDIR)/gcheck.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fgcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi -# Target: memcheck (run the CUDA standalone executable gcheck.exe with a small number of events through cuda-memcheck) -memcheck: all.$(TAG) - $(RUNTIME) $(CUDA_HOME)/bin/cuda-memcheck --check-api-memory-access yes --check-deprecated-instr yes --check-device-heap yes --demangle full --language c --leak-check full --racecheck-report all --report-api-errors all --show-backtrace yes --tool memcheck --track-unused-memory yes $(BUILDDIR)/gcheck.exe -p 2 32 2 - -#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_tt.mad/SubProcesses/makefile b/epochX/cudacpp/gg_tt.mad/SubProcesses/makefile index d572486c2e..b1b09cd05e 100644 --- a/epochX/cudacpp/gg_tt.mad/SubProcesses/makefile +++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/makefile @@ -1,27 +1,30 @@ SHELL := /bin/bash -include ../../Source/make_opts -FFLAGS+= -w +# Include general setup +OPTIONS_MAKEFILE := ../../Source/make_opts +include $(OPTIONS_MAKEFILE) # Enable the C preprocessor https://gcc.gnu.org/onlinedocs/gfortran/Preprocessing-Options.html -FFLAGS+= -cpp +MG_FCFLAGS += -cpp +MG_CXXFLAGS += -I. -# Compile counters with -O3 as in the cudacpp makefile (avoid being "unfair" to Fortran #740) -CXXFLAGS = -O3 -Wall -Wshadow -Wextra +all: help cppnative + +# Target if user does not specify target +help: + $(info No target specified.) 
+ $(info Viable targets are 'cppnative' (default), 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z' and 'cuda') + $(info Or 'cppall' for all C++ targets) + $(info Or 'ALL' for all C++ and cuda targets) -# Add -std=c++17 explicitly to avoid build errors on macOS -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 -endif -# Enable ccache if USECCACHE=1 +# Enable ccache for C++ if USECCACHE=1 (do not enable it for Fortran since it is not supported for Fortran) ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) override CXX:=ccache $(CXX) endif -ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) - override FC:=ccache $(FC) -endif +###ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) +### override FC:=ccache $(FC) +###endif # Load additional dependencies of the bias module, if present ifeq (,$(wildcard ../bias_dependencies)) @@ -46,34 +49,25 @@ else MADLOOP_LIB = endif -LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias - -processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') -CUDACPP_MAKEFILE=cudacpp.mk -# NB1 Using ":=" below instead of "=" is much faster (it only runs the subprocess once instead of many times) -# NB2 Use '|&' in CUDACPP_BUILDDIR to avoid confusing errors about googletest #507 -# NB3 Do not add a comment inlined "CUDACPP_BUILDDIR=$(shell ...) # comment" as otherwise a trailing space is included... -# NB4 The variables relevant to the cudacpp Makefile must be explicitly passed to $(shell...) 
-CUDACPP_MAKEENV:=$(shell echo '$(.VARIABLES)' | tr " " "\n" | egrep "(USEBUILDDIR|AVX|FPTYPE|HELINL|HRDCOD)") -###$(info CUDACPP_MAKEENV=$(CUDACPP_MAKEENV)) -###$(info $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))")) -CUDACPP_BUILDDIR:=$(shell $(MAKE) $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))") -f $(CUDACPP_MAKEFILE) -pn 2>&1 | awk '/Building/{print $$3}' | sed s/BUILDDIR=//) -ifeq ($(CUDACPP_BUILDDIR),) -$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) -else -$(info CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)') -endif -CUDACPP_COMMONLIB=mg5amc_common -CUDACPP_CXXLIB=mg5amc_$(processid_short)_cpp -CUDACPP_CULIB=mg5amc_$(processid_short)_cuda - +LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias LIBS = $(LIBDIR)libbias.$(libext) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(MADLOOP_LIB) $(LOOP_LIBS) ifneq ("$(wildcard ../../Source/RUNNING)","") LINKLIBS += -lrunning - LIBS += $(LIBDIR)librunning.$(libext) + LIBS += $(LIBDIR)librunning.$(libext) endif +SOURCEDIR_GUARD:=../../Source/.timestamp_guard +# We use $(SOURCEDIR_GUARD) to figure out if Source is out of date. The Source makefile doesn't correctly +# update all files, so we need a proxy that is updated every time we run "$(MAKE) -C ../../Source". 
+$(SOURCEDIR_GUARD) ../../Source/discretesampler.mod &: ../../Source/*.f ../../Cards/param_card.dat ../../Cards/run_card.dat +ifneq ($(shell which flock 2>/dev/null),) + flock ../../Source/.lock -c "$(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD)" +else + $(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD) +endif + +$(LIBS): $(SOURCEDIR_GUARD) # Source files @@ -91,82 +85,83 @@ PROCESS= myamp.o genps.o unwgt.o setcuts.o get_color.o \ DSIG=driver.o $(patsubst %.f, %.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) DSIG_cudacpp=driver_cudacpp.o $(patsubst %.f, %_cudacpp.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) -SYMMETRY = symmetry.o idenparts.o +SYMMETRY = symmetry.o idenparts.o -# Binaries +# cudacpp targets: +CUDACPP_MAKEFILE := cudacpp.mk +ifneq (,$(wildcard $(CUDACPP_MAKEFILE))) +include $(CUDACPP_MAKEFILE) +endif -ifeq ($(UNAME),Darwin) -LDFLAGS += -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) -LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" -else -LDFLAGS += -Wl,--no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +ifeq ($(CUDACPP_BUILDDIR),) +$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) 
endif +CUDACPP_COMMONLIB=mg5amc_common +CUDACPP_CXXLIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so +CUDACPP_CULIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so -all: $(PROG)_fortran $(CUDACPP_BUILDDIR)/$(PROG)_cpp # also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (#503) +# Set up OpenMP if supported +OMPFLAGS ?= -fopenmp ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp LINKLIBS += -liomp5 # see #578 LINKLIBS += -lintlc # undefined reference to `_intel_fast_memcpy' else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -override OMPFLAGS = -fopenmp $(CUDACPP_BUILDDIR)/$(PROG)_cpp: LINKLIBS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -override OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang -else -override OMPFLAGS = -fopenmp +OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang endif -$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o - $(FC) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) - -$(LIBS): .libs -.libs: ../../Cards/param_card.dat ../../Cards/run_card.dat - cd ../../Source; make - touch $@ +# Binaries -$(CUDACPP_BUILDDIR)/.cudacpplibs: - $(MAKE) -f $(CUDACPP_MAKEFILE) - touch $@ +$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) # On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables ($ORIGIN on Linux) # On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary -ifeq ($(UNAME_S),Darwin) - override LIBFLAGSRPATH = -else ifeq ($(USEBUILDDIR),1) 
- override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' -else - override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/$(LIBDIR)' +ifneq ($(UNAME_S),Darwin) + LIBFLAGSRPATH := -Wl,-rpath,'$$ORIGIN:$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' endif -.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link +.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link madevent_cppnone_link madevent_cppsse4_link madevent_cppavx2_link madevent_cpp512y_link madevent_cpp512z_link clean cleanall cleansrc madevent_fortran_link: $(PROG)_fortran rm -f $(PROG) ln -s $(PROG)_fortran $(PROG) -madevent_cpp_link: $(CUDACPP_BUILDDIR)/$(PROG)_cpp - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) +madevent_cppnone_link: AVX=none +madevent_cppnone_link: cppnone + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -madevent_cuda_link: $(CUDACPP_BUILDDIR)/$(PROG)_cuda - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) +madevent_cppavx2_link: AVX=avx2 +madevent_cppavx2_link: cppavx2 + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512y_link: AVX=512y +madevent_cpp512y_link: cpp512y + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512z_link: AVX=512z +madevent_cpp512z_link: cpp512z + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cuda_link: AVX=cuda +madevent_cuda_link: cuda + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) -# Building $(PROG)_cpp also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (improved patch for cpp-only builds #503) -$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o $(CUDACPP_BUILDDIR)/.cudacpplibs - $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CXXLIB) $(LIBFLAGSRPATH) $(LDFLAGS) - if [ -f 
$(LIBDIR)/$(CUDACPP_BUILDDIR)/lib$(CUDACPP_CULIB).* ]; then $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CULIB) $(LIBFLAGSRPATH) $(LDFLAGS); fi +$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(LIBS) $(CUDACPP_CXXLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) -$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(CUDACPP_BUILDDIR)/$(PROG)_cpp +$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(LIBS) $(CUDACPP_CULIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) counters.o: counters.cc timer.h - $(CXX) $(CXXFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ ompnumthreads.o: ompnumthreads.cc ompnumthreads.h - $(CXX) -I. 
$(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) $(OMPFLAGS) @@ -174,27 +169,14 @@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) gensym: $(SYMMETRY) configs.inc $(LIBS) $(FC) -o gensym $(SYMMETRY) -L$(LIBDIR) $(LINKLIBS) $(LDFLAGS) -###ifeq (,$(wildcard fbridge.inc)) # Pointless: fbridge.inc always exists as this is the cudacpp-modified makefile! -###$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat -### cd ../../Source/MODEL; make -### -###$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat -### cd ../../Source; make -### -###$(LIBDIR)libpdf.$(libext): -### cd ../../Source/PDF; make -### -###$(LIBDIR)libgammaUPC.$(libext): -### cd ../../Source/PDF/gammaUPC; make -###endif # Add source so that the compiler finds the DiscreteSampler module. $(MATRIX): %.o: %.f - $(FC) $(FFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC -%.o: %.f - $(FC) $(FFLAGS) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC + $(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC +%.o $(CUDACPP_BUILDDIR)/%.o: %.f + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -I../../Source/ -I../../Source/PDF/gammaUPC -c $< -o $@ %_cudacpp.o: %.f - $(FC) $(FFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ # Dependencies @@ -215,60 +197,39 @@ unwgt.o: genps.inc nexternal.inc symswap.inc cluster.inc run.inc message.inc \ initcluster.o: message.inc # Extra dependencies on discretesampler.mod +../../Source/discretesampler.mod: ../../Source/DiscreteSampler.f -auto_dsig.o: .libs -driver.o: .libs -driver_cudacpp.o: .libs -$(MATRIX): .libs -genps.o: .libs +auto_dsig.o: ../../Source/discretesampler.mod +driver.o: 
../../Source/discretesampler.mod +driver_cudacpp.o: ../../Source/discretesampler.mod +$(MATRIX): ../../Source/discretesampler.mod +genps.o: ../../Source/discretesampler.mod # Cudacpp avxall targets -UNAME_P := $(shell uname -p) ifeq ($(UNAME_P),ppc64le) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else ifeq ($(UNAME_P),arm) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else -avxall: avxnone avxsse4 avxavx2 avx512y avx512z +cppall: cppnative cppnone cppsse4 cppavx2 cpp512y cpp512z endif -avxnone: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=none - -avxsse4: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 - -avxavx2: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 - -avx512y: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y - -avx512z: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z - -###endif - -# Clean (NB: 'make clean' in Source calls 'make clean' in all P*) - -clean: # Clean builds: fortran in this Pn; cudacpp executables for one AVX in this Pn - $(RM) *.o gensym $(PROG) $(PROG)_fortran $(PROG)_forhel $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(CUDACPP_BUILDDIR)/$(PROG)_cuda +ALL: cppall cuda -cleanavxs: clean # Clean builds: fortran in this Pn; cudacpp for all AVX in this Pn and in src - $(MAKE) -f $(CUDACPP_MAKEFILE) cleanall - rm -f $(CUDACPP_BUILDDIR)/.cudacpplibs - rm -f .libs +# Clean all architecture-specific builds in this P* directory +clean: + $(RM) *.o gensym $(PROG) $(PROG)_* + $(RM) -rf build.*/*{.o,.so,.exe,.dylib,madevent_*} + @for dir in $$(ls -d build.* 2> /dev/null); do if [ -z "$$(ls -A $${dir})" ]; then echo "Removing $${dir}"; $(RM) -r $${dir}; else echo "Not removing $${dir}; not empty"; fi; done -cleanall: # Clean builds: fortran in all P* and in Source; cudacpp for all AVX in all P* and in src - make -C ../../Source cleanall - rm -rf $(LIBDIR)libbias.$(libext) - rm -f ../../Source/*.mod 
../../Source/*/*.mod +# Clean common source directories and all architecture-specific builds in all P* directories +cleanall: cleansrc + for PROCESS in ../P[0-9]*; do $(MAKE) -C $${PROCESS} clean; done -distclean: cleanall # Clean all fortran and cudacpp builds as well as the googletest installation - $(MAKE) -f $(CUDACPP_MAKEFILE) distclean +# Clean common source directories (NB these are a dependency for all P* directories) +cleansrc: + make -C ../../Source clean + $(RM) -f $(SOURCEDIR_GUARD) ../../Source/{*.mod,.lock} ../../Source/*/*.mod + $(RM) -r $(LIBDIR)libbias.$(libext) + if [ -d ../../src ]; then $(MAKE) -C ../../src -f cudacpp_src.mk clean; fi diff --git a/epochX/cudacpp/gg_tt.mad/SubProcesses/runTest.cc b/epochX/cudacpp/gg_tt.mad/SubProcesses/runTest.cc index d4a760a71b..6c77775fb2 100644 --- a/epochX/cudacpp/gg_tt.mad/SubProcesses/runTest.cc +++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/runTest.cc @@ -243,18 +243,20 @@ struct CUDATest : public CUDA_CPU_TestBase // Use two levels of macros to force stringification at the right level // (see https://gcc.gnu.org/onlinedocs/gcc-3.0.1/cpp_3.html#SEC17 and https://stackoverflow.com/a/3419392) // Google macro is in https://github.com/google/googletest/blob/master/googletest/include/gtest/gtest-param-test.h +/* clang-format off */ #define TESTID_CPU( s ) s##_CPU #define XTESTID_CPU( s ) TESTID_CPU( s ) #define MG_INSTANTIATE_TEST_SUITE_CPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); #define TESTID_GPU( s ) s##_GPU #define XTESTID_GPU( s ) TESTID_GPU( s ) #define MG_INSTANTIATE_TEST_SUITE_GPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( 
prefix, \ + test_suite_name, \ + testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); +/* clang-format on */ #ifdef __CUDACC__ MG_INSTANTIATE_TEST_SUITE_GPU( XTESTID_GPU( MG_EPOCH_PROCESS_ID ), MadgraphTest ); diff --git a/epochX/cudacpp/gg_tt.mad/SubProcesses/testxxx.cc b/epochX/cudacpp/gg_tt.mad/SubProcesses/testxxx.cc index 3361fe5aa9..1d315f6d75 100644 --- a/epochX/cudacpp/gg_tt.mad/SubProcesses/testxxx.cc +++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/testxxx.cc @@ -40,7 +40,7 @@ namespace mg5amcCpu { std::string FPEhandlerMessage = "unknown"; int FPEhandlerIevt = -1; - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU): '" << FPEhandlerMessage << "' ievt=" << FPEhandlerIevt << std::endl; @@ -71,11 +71,10 @@ TEST( XTESTID( MG_EPOCH_PROCESS_ID ), testxxx ) constexpr bool testEvents = !dumpEvents; // run the test? constexpr fptype toleranceXXXs = std::is_same::value ? 1.E-15 : 1.E-5; // Constant parameters - constexpr int neppM = MemoryAccessMomenta::neppM; // AOSOA layout constexpr int np4 = CPPProcess::np4; - const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') - assert( nevt % neppM == 0 ); // nevt must be a multiple of neppM - assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV + const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') + assert( nevt % MemoryAccessMomenta::neppM == 0 ); // nevt must be a multiple of neppM + assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV // Fill in the input momenta #ifdef __CUDACC__ mg5amcGpu::PinnedHostBufferMomenta hstMomenta( nevt ); // AOSOA[npagM][npar=4][np4=4][neppM] diff --git a/epochX/cudacpp/gg_tt.mad/bin/internal/banner.py b/epochX/cudacpp/gg_tt.mad/bin/internal/banner.py index bd1517985f..b408679c2f 100755 --- a/epochX/cudacpp/gg_tt.mad/bin/internal/banner.py +++ 
b/epochX/cudacpp/gg_tt.mad/bin/internal/banner.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,7 +53,7 @@ MADEVENT = False import madgraph.various.misc as misc import madgraph.iolibs.file_writers as file_writers - import madgraph.iolibs.files as files + import madgraph.iolibs.files as files import models.check_param_card as param_card_reader from madgraph import MG5DIR, MadGraph5Error, InvalidCmd @@ -80,36 +80,36 @@ class Banner(dict): 'mgproccard': 'MGProcCard', 'mgruncard': 'MGRunCard', 'ma5card_parton' : 'MA5Card_parton', - 'ma5card_hadron' : 'MA5Card_hadron', + 'ma5card_hadron' : 'MA5Card_hadron', 'mggenerationinfo': 'MGGenerationInfo', 'mgpythiacard': 'MGPythiaCard', 'mgpgscard': 'MGPGSCard', 'mgdelphescard': 'MGDelphesCard', 'mgdelphestrigger': 'MGDelphesTrigger', 'mgshowercard': 'MGShowerCard' } - + forbid_cdata = ['initrwgt'] - + def __init__(self, banner_path=None): """ """ if isinstance(banner_path, Banner): dict.__init__(self, banner_path) self.lhe_version = banner_path.lhe_version - return + return else: dict.__init__(self) - + #Look at the version if MADEVENT: self['mgversion'] = '#%s\n' % open(pjoin(MEDIR, 'MGMEVersion.txt')).read() else: info = misc.get_pkg_info() self['mgversion'] = info['version']+'\n' - + self.lhe_version = None - + if banner_path: self.read_banner(banner_path) @@ -123,7 +123,7 @@ def __init__(self, banner_path=None): 
'mgruncard':'run_card.dat', 'mgpythiacard':'pythia_card.dat', 'mgpgscard' : 'pgs_card.dat', - 'mgdelphescard':'delphes_card.dat', + 'mgdelphescard':'delphes_card.dat', 'mgdelphestrigger':'delphes_trigger.dat', 'mg5proccard':'proc_card_mg5.dat', 'mgproccard': 'proc_card.dat', @@ -137,10 +137,10 @@ def __init__(self, banner_path=None): 'mgshowercard':'shower_card.dat', 'pythia8':'pythia8_card.dat', 'ma5card_parton':'madanalysis5_parton_card.dat', - 'ma5card_hadron':'madanalysis5_hadron_card.dat', + 'ma5card_hadron':'madanalysis5_hadron_card.dat', 'run_settings':'' } - + def read_banner(self, input_path): """read a banner""" @@ -151,7 +151,7 @@ def read_banner(self, input_path): def split_iter(string): return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL)) input_path = split_iter(input_path) - + text = '' store = False for line in input_path: @@ -170,13 +170,13 @@ def split_iter(string): text += line else: text += '%s%s' % (line, '\n') - - #reaching end of the banner in a event file avoid to read full file + + #reaching end of the banner in a event file avoid to read full file if "
" in line: break elif "" in line: break - + def __getattribute__(self, attr): """allow auto-build for the run_card/param_card/... """ try: @@ -187,23 +187,23 @@ def __getattribute__(self, attr): return self.charge_card(attr) - + def change_lhe_version(self, version): """change the lhe version associate to the banner""" - + version = float(version) if version < 3: version = 1 elif version > 3: raise Exception("Not Supported version") self.lhe_version = version - + def get_cross(self, witherror=False): """return the cross-section of the file""" if "init" not in self: raise Exception - + text = self["init"].split('\n') cross = 0 error = 0 @@ -217,13 +217,13 @@ def get_cross(self, witherror=False): return cross else: return cross, math.sqrt(error) - + def scale_init_cross(self, ratio): """modify the init information with the associate scale""" assert "init" in self - + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -231,29 +231,29 @@ def scale_init_cross(self, ratio): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break pid = int(pid) - + line = " %+13.7e %+13.7e %+13.7e %i" % \ (ratio*float(xsec), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + def get_pdg_beam(self): """return the pdg of each beam""" - + assert "init" in self - + all_lines = self["init"].split('\n') pdg1,pdg2,_ = all_lines[0].split(None, 2) return int(pdg1), int(pdg2) - + def load_basic(self, medir): """ Load the proc_card /param_card and run_card """ - + self.add(pjoin(medir,'Cards', 'param_card.dat')) self.add(pjoin(medir,'Cards', 'run_card.dat')) if os.path.exists(pjoin(medir, 'SubProcesses', 'procdef_mg5.dat')): @@ -261,29 +261,29 @@ def load_basic(self, medir): self.add(pjoin(medir,'Cards', 'proc_card_mg5.dat')) else: self.add(pjoin(medir,'Cards', 'proc_card.dat')) - + def change_seed(self, 
seed): """Change the seed value in the banner""" # 0 = iseed p = re.compile(r'''^\s*\d+\s*=\s*iseed''', re.M) new_seed_str = " %s = iseed" % seed self['mgruncard'] = p.sub(new_seed_str, self['mgruncard']) - + def add_generation_info(self, cross, nb_event): """add info on MGGeneration""" - + text = """ # Number of Events : %s # Integrated weight (pb) : %s """ % (nb_event, cross) self['MGGenerationInfo'] = text - + ############################################################################ # SPLIT BANNER ############################################################################ def split(self, me_dir, proc_card=True): """write the banner in the Cards directory. - proc_card argument is present to avoid the overwrite of proc_card + proc_card argument is present to avoid the overwrite of proc_card information""" for tag, text in self.items(): @@ -305,37 +305,37 @@ def check_pid(self, pid2label): """special routine removing width/mass of particles not present in the model This is usefull in case of loop model card, when we want to use the non loop model.""" - + if not hasattr(self, 'param_card'): self.charge_card('slha') - + for tag in ['mass', 'decay']: block = self.param_card.get(tag) for data in block: pid = data.lhacode[0] - if pid not in list(pid2label.keys()): + if pid not in list(pid2label.keys()): block.remove((pid,)) def get_lha_strategy(self): """get the lha_strategy: how the weight have to be handle by the shower""" - + if not self["init"]: raise Exception("No init block define") - + data = self["init"].split('\n')[0].split() if len(data) != 10: misc.sprint(len(data), self['init']) raise Exception("init block has a wrong format") return int(float(data[-2])) - + def set_lha_strategy(self, value): """set the lha_strategy: how the weight have to be handle by the shower""" - + if not (-4 <= int(value) <= 4): six.reraise(Exception, "wrong value for lha_strategy", value) if not self["init"]: raise Exception("No init block define") - + all_lines = 
self["init"].split('\n') data = all_lines[0].split() if len(data) != 10: @@ -351,13 +351,13 @@ def modify_init_cross(self, cross, allow_zero=False): assert isinstance(cross, dict) # assert "all" in cross assert "init" in self - + cross = dict(cross) for key in cross.keys(): if isinstance(key, str) and key.isdigit() and int(key) not in cross: cross[int(key)] = cross[key] - - + + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -365,7 +365,7 @@ def modify_init_cross(self, cross, allow_zero=False): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break @@ -383,23 +383,23 @@ def modify_init_cross(self, cross, allow_zero=False): (float(cross[pid]), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + ############################################################################ # WRITE BANNER ############################################################################ def write(self, output_path, close_tag=True, exclude=[]): """write the banner""" - + if isinstance(output_path, str): ff = open(output_path, 'w') else: ff = output_path - + if MADEVENT: header = open(pjoin(MEDIR, 'Source', 'banner_header.txt')).read() else: header = open(pjoin(MG5DIR,'Template', 'LO', 'Source', 'banner_header.txt')).read() - + if not self.lhe_version: self.lhe_version = self.get('run_card', 'lhe_version', default=1.0) if float(self.lhe_version) < 3: @@ -412,7 +412,7 @@ def write(self, output_path, close_tag=True, exclude=[]): for tag in [t for t in self.ordered_items if t in list(self.keys())]+ \ [t for t in self.keys() if t not in self.ordered_items]: - if tag in ['init'] or tag in exclude: + if tag in ['init'] or tag in exclude: continue capitalized_tag = self.capitalized_items[tag] if tag in self.capitalized_items else tag start_data, stop_data = '', '' @@ -422,19 +422,19 @@ def write(self, 
output_path, close_tag=True, exclude=[]): stop_data = ']]>\n' out = '<%(tag)s>%(start_data)s\n%(text)s\n%(stop_data)s\n' % \ {'tag':capitalized_tag, 'text':self[tag].strip(), - 'start_data': start_data, 'stop_data':stop_data} + 'start_data': start_data, 'stop_data':stop_data} try: ff.write(out) except: ff.write(out.encode('utf-8')) - - + + if not '/header' in exclude: out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) if 'init' in self and not 'init' in exclude: text = self['init'] @@ -444,22 +444,22 @@ def write(self, output_path, close_tag=True, exclude=[]): ff.write(out) except: ff.write(out.encode('utf-8')) - + if close_tag: - out = '\n' + out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) return ff - - + + ############################################################################ # BANNER ############################################################################ def add(self, path, tag=None): """Add the content of the file to the banner""" - + if not tag: card_name = os.path.basename(path) if 'param_card' in card_name: @@ -505,33 +505,33 @@ def add_text(self, tag, text): if tag == 'param_card': tag = 'slha' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' - + self[tag.lower()] = text - - + + def charge_card(self, tag): """Build the python object associated to the card""" - + if tag in ['param_card', 'param']: tag = 'slha' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag - + if tag == 
'slha': param_card = self[tag].split('\n') self.param_card = param_card_reader.ParamCard(param_card) @@ -544,56 +544,56 @@ def charge_card(self, tag): self.proc_card = ProcCard(proc_card) return self.proc_card elif tag =='mgshowercard': - shower_content = self[tag] + shower_content = self[tag] if MADEVENT: import internal.shower_card as shower_card else: import madgraph.various.shower_card as shower_card self.shower_card = shower_card.ShowerCard(shower_content, True) - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.shower_card.testing = False return self.shower_card elif tag =='foanalyse': - analyse_content = self[tag] + analyse_content = self[tag] if MADEVENT: import internal.FO_analyse_card as FO_analyse_card else: import madgraph.various.FO_analyse_card as FO_analyse_card - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True) self.FOanalyse_card.testing = False return self.FOanalyse_card - + def get_detail(self, tag, *arg, **opt): """return a specific """ - + if tag in ['param_card', 'param']: tag = 'slha' attr_tag = 'param_card' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], '%s not recognized' % tag - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) + 
self.charge_card(attr_tag) card = getattr(self, attr_tag) if len(arg) == 0: @@ -613,7 +613,7 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 2 and tag == 'slha': try: return card[arg[0]].get(arg[1:]) @@ -621,15 +621,15 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 0: return card else: raise Exception("Unknow command") - + #convenient alias get = get_detail - + def set(self, tag, *args): """modify one of the cards""" @@ -637,27 +637,27 @@ def set(self, tag, *args): tag = 'slha' attr_tag = 'param_card' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized' - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) - + self.charge_card(attr_tag) + card = getattr(self, attr_tag) if len(args) ==2: if tag == 'mg5proccard': @@ -666,20 +666,20 @@ def set(self, tag, *args): card[args[0]] = args[1] else: card[args[:-1]] = args[-1] - - + + @misc.multiple_try() def add_to_file(self, path, seed=None, out=None): """Add the banner to a file and change the associate seed in the banner""" if seed is not None: self.set("run_card", "iseed", seed) - + if not out: path_out = "%s.tmp" % path else: path_out = out - + ff = self.write(path_out, close_tag=False, exclude=['MGGenerationInfo', '/header', 'init']) ff.write("## END BANNER##\n") @@ -698,44 +698,44 @@ def add_to_file(self, path, seed=None, out=None): files.mv(path_out, path) - + def 
split_banner(banner_path, me_dir, proc_card=True): """a simple way to split a banner""" - + banner = Banner(banner_path) banner.split(me_dir, proc_card) - + def recover_banner(results_object, level, run=None, tag=None): """as input we receive a gen_crossxhtml.AllResults object. This define the current banner and load it """ - + if not run: - try: - _run = results_object.current['run_name'] - _tag = results_object.current['tag'] + try: + _run = results_object.current['run_name'] + _tag = results_object.current['tag'] except Exception: return Banner() else: _run = run if not tag: - try: - _tag = results_object[run].tags[-1] + try: + _tag = results_object[run].tags[-1] except Exception as error: if os.path.exists( pjoin(results_object.path,'Events','%s_banner.txt' % (run))): tag = None else: - return Banner() + return Banner() else: _tag = tag - - path = results_object.path - if tag: + + path = results_object.path + if tag: banner_path = pjoin(path,'Events',run,'%s_%s_banner.txt' % (run, tag)) else: banner_path = pjoin(results_object.path,'Events','%s_banner.txt' % (run)) - + if not os.path.exists(banner_path): if level != "parton" and tag != _tag: return recover_banner(results_object, level, _run, results_object[_run].tags[0]) @@ -754,12 +754,12 @@ def recover_banner(results_object, level, run=None, tag=None): return Banner(lhe.banner) # security if the banner was remove (or program canceled before created it) - return Banner() - + return Banner() + banner = Banner(banner_path) - - - + + + if level == 'pythia': if 'mgpythiacard' in banner: del banner['mgpythiacard'] @@ -768,13 +768,13 @@ def recover_banner(results_object, level, run=None, tag=None): if tag in banner: del banner[tag] return banner - + class InvalidRunCard(InvalidCmd): pass class ProcCard(list): """Basic Proccard object""" - + history_header = \ '#************************************************************\n' + \ '#* MadGraph5_aMC@NLO *\n' + \ @@ -798,10 +798,10 @@ class ProcCard(list): '#* run as 
./bin/mg5_aMC filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - - - - + + + + def __init__(self, init=None): """ initialize a basic proc_card""" self.info = {'model': 'sm', 'generate':None, @@ -810,13 +810,13 @@ def __init__(self, init=None): if init: self.read(init) - + def read(self, init): """read the proc_card and save the information""" - + if isinstance(init, str): #path to file init = open(init, 'r') - + store_line = '' for line in init: line = line.rstrip() @@ -828,28 +828,28 @@ def read(self, init): store_line = "" if store_line: raise Exception("WRONG CARD FORMAT") - - + + def move_to_last(self, cmd): """move an element to the last history.""" for line in self[:]: if line.startswith(cmd): self.remove(line) list.append(self, line) - + def append(self, line): """"add a line in the proc_card perform automatically cleaning""" - + line = line.strip() cmds = line.split() if len(cmds) == 0: return - + list.append(self, line) - + # command type: cmd = cmds[0] - + if cmd == 'output': # Remove previous outputs from history self.clean(allow_for_removal = ['output'], keep_switch=True, @@ -875,7 +875,7 @@ def append(self, line): elif cmds[1] == 'proc_v4': #full cleaning self[:] = [] - + def clean(self, to_keep=['set','add','load'], remove_bef_last=None, @@ -884,13 +884,13 @@ def clean(self, to_keep=['set','add','load'], keep_switch=False): """Remove command in arguments from history. All command before the last occurrence of 'remove_bef_last' - (including it) will be removed (but if another options tells the opposite). + (including it) will be removed (but if another options tells the opposite). 'to_keep' is a set of line to always keep. - 'to_remove' is a set of line to always remove (don't care about remove_bef_ + 'to_remove' is a set of line to always remove (don't care about remove_bef_ status but keep_switch acts.). 
- if 'allow_for_removal' is define only the command in that list can be + if 'allow_for_removal' is define only the command in that list can be remove of the history for older command that remove_bef_lb1. all parameter - present in to_remove are always remove even if they are not part of this + present in to_remove are always remove even if they are not part of this list. keep_switch force to keep the statement remove_bef_??? which changes starts the removal mode. @@ -900,8 +900,8 @@ def clean(self, to_keep=['set','add','load'], if __debug__ and allow_for_removal: for arg in to_keep: assert arg not in allow_for_removal - - + + nline = -1 removal = False #looping backward @@ -912,7 +912,7 @@ def clean(self, to_keep=['set','add','load'], if not removal and remove_bef_last: if self[nline].startswith(remove_bef_last): removal = True - switch = True + switch = True # if this is the switch and is protected pass to the next element if switch and keep_switch: @@ -923,12 +923,12 @@ def clean(self, to_keep=['set','add','load'], if any([self[nline].startswith(arg) for arg in to_remove]): self.pop(nline) continue - + # Only if removal mode is active! 
if removal: if allow_for_removal: # Only a subset of command can be removed - if any([self[nline].startswith(arg) + if any([self[nline].startswith(arg) for arg in allow_for_removal]): self.pop(nline) continue @@ -936,10 +936,10 @@ def clean(self, to_keep=['set','add','load'], # All command have to be remove but protected self.pop(nline) continue - + # update the counter to pass to the next element nline -= 1 - + def get(self, tag, default=None): if isinstance(tag, int): list.__getattr__(self, tag) @@ -954,32 +954,32 @@ def get(self, tag, default=None): except ValueError: name, content = line[7:].split(None,1) out.append((name, content)) - return out + return out else: return self.info[tag] - + def write(self, path): """write the proc_card to a given path""" - + fsock = open(path, 'w') fsock.write(self.history_header) for line in self: while len(line) > 70: - sub, line = line[:70]+"\\" , line[70:] + sub, line = line[:70]+"\\" , line[70:] fsock.write(sub+"\n") else: fsock.write(line+"\n") - -class InvalidCardEdition(InvalidCmd): pass - + +class InvalidCardEdition(InvalidCmd): pass + class ConfigFile(dict): """ a class for storing/dealing with input file. - """ + """ def __init__(self, finput=None, **opt): """initialize a new instance. input can be an instance of MadLoopParam, - a file, a path to a file, or simply Nothing""" - + a file, a path to a file, or simply Nothing""" + if isinstance(finput, self.__class__): dict.__init__(self) for key in finput.__dict__: @@ -989,7 +989,7 @@ def __init__(self, finput=None, **opt): return else: dict.__init__(self) - + # Initialize it with all the default value self.user_set = set() self.auto_set = set() @@ -1000,15 +1000,15 @@ def __init__(self, finput=None, **opt): self.comments = {} # comment associated to parameters. can be display via help message # store the valid options for a given parameter. 
self.allowed_value = {} - + self.default_setup() self.plugin_input(finput) - + # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, **opt) - + @@ -1028,7 +1028,7 @@ def __add__(self, other): base = self.__class__(self) #base = copy.copy(self) base.update((key.lower(),value) for key, value in other.items()) - + return base def __radd__(self, other): @@ -1036,26 +1036,26 @@ def __radd__(self, other): new = copy.copy(other) new.update((key, value) for key, value in self.items()) return new - + def __contains__(self, key): return dict.__contains__(self, key.lower()) def __iter__(self): - + for name in super(ConfigFile, self).__iter__(): yield self.lower_to_case[name.lower()] - - + + #iter = super(ConfigFile, self).__iter__() #misc.sprint(iter) #return (self.lower_to_case[name] for name in iter) - + def keys(self): return [name for name in self] - + def items(self): return [(name,self[name]) for name in self] - + @staticmethod def warn(text, level, raiseerror=False): """convenient proxy to raiseerror/print warning""" @@ -1071,11 +1071,11 @@ def warn(text, level, raiseerror=False): log = lambda t: logger.log(level, t) elif level: log = level - + return log(text) def post_set(self, name, value, change_userdefine, raiseerror): - + if value is None: value = self[name] @@ -1087,25 +1087,25 @@ def post_set(self, name, value, change_userdefine, raiseerror): return getattr(self, 'post_set_%s' % name)(value, change_userdefine, raiseerror) else: raise - + def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): """set the attribute and set correctly the type if the value is a string. change_userdefine on True if we have to add the parameter in user_set """ - + if not len(self): #Should never happen but when deepcopy/pickle self.__init__() - + name = name.strip() - lower_name = name.lower() - + lower_name = name.lower() + # 0. 
check if this parameter is a system only one if change_userdefine and lower_name in self.system_only: text='%s is a private entry which can not be modify by the user. Keep value at %s' % (name,self[name]) self.warn(text, 'critical', raiseerror) return - + #1. check if the parameter is set to auto -> pass it to special if lower_name in self: targettype = type(dict.__getitem__(self, lower_name)) @@ -1115,22 +1115,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): self.user_set.remove(lower_name) #keep old value. self.post_set(lower_name, 'auto', change_userdefine, raiseerror) - return + return elif lower_name in self.auto_set: self.auto_set.remove(lower_name) - + # 2. Find the type of the attribute that we want if lower_name in self.list_parameter: targettype = self.list_parameter[lower_name] - - - + + + if isinstance(value, str): # split for each comma/space value = value.strip() if value.startswith('[') and value.endswith(']'): value = value[1:-1] - #do not perform split within a " or ' block + #do not perform split within a " or ' block data = re.split(r"((? bad input dropped.append(val) - + if not new_values: text= "value '%s' for entry '%s' is not valid. Preserving previous value: '%s'.\n" \ % (value, name, self[lower_name]) text += "allowed values are any list composed of the following entries: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) - return self.warn(text, 'warning', raiseerror) - elif dropped: + return self.warn(text, 'warning', raiseerror) + elif dropped: text = "some value for entry '%s' are not valid. 
Invalid items are: '%s'.\n" \ % (name, dropped) text += "value will be set to %s" % new_values - text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) + text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) self.warn(text, 'warning') values = new_values # make the assignment - dict.__setitem__(self, lower_name, values) + dict.__setitem__(self, lower_name, values) if change_userdefine: self.user_set.add(lower_name) #check for specific action - return self.post_set(lower_name, None, change_userdefine, raiseerror) + return self.post_set(lower_name, None, change_userdefine, raiseerror) elif lower_name in self.dict_parameter: - targettype = self.dict_parameter[lower_name] + targettype = self.dict_parameter[lower_name] full_reset = True #check if we just update the current dict or not - + if isinstance(value, str): value = value.strip() # allowed entry: @@ -1209,7 +1209,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): # name , value => just add the entry # name value => just add the entry # {name1:value1, name2:value2} => full reset - + # split for each comma/space if value.startswith('{') and value.endswith('}'): new_value = {} @@ -1219,23 +1219,23 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): x, y = pair.split(':') x, y = x.strip(), y.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] new_value[x] = y value = new_value elif ',' in value: x,y = value.split(',') value = {x.strip():y.strip()} full_reset = False - + elif ':' in value: x,y = value.split(':') value = {x.strip():y.strip()} - full_reset = False + full_reset = False else: x,y = value.split() value = {x:y} - full_reset = False - + full_reset = False + if isinstance(value, dict): for key in value: value[key] = self.format_variable(value[key], targettype, name=name) @@ -1248,7 +1248,7 @@ def __setitem__(self, 
name, value, change_userdefine=False,raiseerror=False): if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - elif name in self: + elif name in self: targettype = type(self[name]) else: logger.debug('Trying to add argument %s in %s. ' % (name, self.__class__.__name__) +\ @@ -1256,22 +1256,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): suggestions = [k for k in self.keys() if k.startswith(name[0].lower())] if len(suggestions)>0: logger.debug("Did you mean one of the following: %s"%suggestions) - self.add_param(lower_name, self.format_variable(UnknownType(value), + self.add_param(lower_name, self.format_variable(UnknownType(value), UnknownType, name)) self.lower_to_case[lower_name] = name if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - + value = self.format_variable(value, targettype, name=name) #check that the value is allowed: if lower_name in self.allowed_value and '*' not in self.allowed_value[lower_name]: valid = False allowed = self.allowed_value[lower_name] - + # check if the current value is allowed or not (set valid to True) if value in allowed: - valid=True + valid=True elif isinstance(value, str): value = value.lower().strip() allowed = [str(v).lower() for v in allowed] @@ -1279,7 +1279,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): i = allowed.index(value) value = self.allowed_value[lower_name][i] valid=True - + if not valid: # act if not valid: text = "value '%s' for entry '%s' is not valid. 
Preserving previous value: '%s'.\n" \ @@ -1303,7 +1303,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, if __debug__: if lower_name in self: raise Exception("Duplicate case for %s in %s" % (name,self.__class__)) - + dict.__setitem__(self, lower_name, value) self.lower_to_case[lower_name] = name if isinstance(value, list): @@ -1318,12 +1318,12 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, elif isinstance(value, dict): allvalues = list(value.values()) if any([type(allvalues[0]) != type(v) for v in allvalues]): - raise Exception("All entry should have the same type") - self.dict_parameter[lower_name] = type(allvalues[0]) + raise Exception("All entry should have the same type") + self.dict_parameter[lower_name] = type(allvalues[0]) if '__type__' in value: del value['__type__'] dict.__setitem__(self, lower_name, value) - + if allowed and allowed != ['*']: self.allowed_value[lower_name] = allowed if lower_name in self.list_parameter: @@ -1333,8 +1333,8 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, assert value in allowed or '*' in allowed #elif isinstance(value, bool) and allowed != ['*']: # self.allowed_value[name] = [True, False] - - + + if system: self.system_only.add(lower_name) if comment: @@ -1342,7 +1342,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, def do_help(self, name): """return a minimal help for the parameter""" - + out = "## Information on parameter %s from class %s\n" % (name, self.__class__.__name__) if name.lower() in self: out += "## current value: %s (parameter should be of type %s)\n" % (self[name], type(self[name])) @@ -1351,7 +1351,7 @@ def do_help(self, name): else: out += "## Unknown for this class\n" if name.lower() in self.user_set: - out += "## This value is considered as being set by the user\n" + out += "## This value is considered as being set by the user\n" else: out += "## This value is considered as 
being set by the system\n" if name.lower() in self.allowed_value: @@ -1359,17 +1359,17 @@ def do_help(self, name): out += "Allowed value are: %s\n" % ','.join([str(p) for p in self.allowed_value[name.lower()]]) else: out += "Suggested value are : %s\n " % ','.join([str(p) for p in self.allowed_value[name.lower()] if p!='*']) - + logger.info(out) return out @staticmethod def guess_type_from_value(value): "try to guess the type of the string --do not use eval as it might not be safe" - + if not isinstance(value, str): return str(value.__class__.__name__) - + #use ast.literal_eval to be safe since value is untrusted # add a timeout to mitigate infinite loop, memory stack attack with misc.stdchannel_redirected(sys.stdout, os.devnull): @@ -1388,7 +1388,7 @@ def guess_type_from_value(value): @staticmethod def format_variable(value, targettype, name="unknown"): """assign the value to the attribute for the given format""" - + if isinstance(targettype, str): if targettype in ['str', 'int', 'float', 'bool']: targettype = eval(targettype) @@ -1412,7 +1412,7 @@ def format_variable(value, targettype, name="unknown"): (name, type(value), targettype, value)) else: raise InvalidCmd("Wrong input type for %s found %s and expecting %s for value %s" %\ - (name, type(value), targettype, value)) + (name, type(value), targettype, value)) else: if targettype != UnknownType: value = value.strip() @@ -1441,8 +1441,8 @@ def format_variable(value, targettype, name="unknown"): value = int(value) elif value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} - value =int(value[:-1]) * convert[value[-1]] - elif '/' in value or '*' in value: + value =int(value[:-1]) * convert[value[-1]] + elif '/' in value or '*' in value: try: split = re.split('(\*|/)',value) v = float(split[0]) @@ -1461,7 +1461,7 @@ def format_variable(value, targettype, name="unknown"): try: value = float(value.replace('d','e')) except ValueError: - raise InvalidCmd("%s can not be mapped to an 
integer" % value) + raise InvalidCmd("%s can not be mapped to an integer" % value) try: new_value = int(value) except ValueError: @@ -1471,7 +1471,7 @@ def format_variable(value, targettype, name="unknown"): value = new_value else: raise InvalidCmd("incorect input: %s need an integer for %s" % (value,name)) - + elif targettype == float: if value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} @@ -1496,33 +1496,33 @@ def format_variable(value, targettype, name="unknown"): value = v else: raise InvalidCmd("type %s is not handle by the card" % targettype) - + return value - - + + def __getitem__(self, name): - + lower_name = name.lower() if __debug__: if lower_name not in self: if lower_name in [key.lower() for key in self] : raise Exception("Some key are not lower case %s. Invalid use of the class!"\ % [key for key in self if key.lower() != key]) - + if lower_name in self.auto_set: return 'auto' - + return dict.__getitem__(self, name.lower()) - + get = __getitem__ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): """convenient way to change attribute. changeifuserset=False means that the value is NOT change is the value is not on default. - user=True, means that the value will be marked as modified by the user - (potentially preventing future change to the value) + user=True, means that the value will be marked as modified by the user + (potentially preventing future change to the value) """ # changeifuserset=False -> we need to check if the user force a value. 
@@ -1530,8 +1530,8 @@ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): if name.lower() in self.user_set: #value modified by the user -> do nothing return - self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) - + self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) + class RivetCard(ConfigFile): @@ -1706,7 +1706,7 @@ def setRelevantParamCard(self, f_params, f_relparams): yexec_dict = {} yexec_line = exec_line + "yaxis_relvar = " + self['yaxis_relvar'] exec(yexec_line, locals(), yexec_dict) - if self['yaxis_label'] == "": + if self['yaxis_label'] == "": self['yaxis_label'] = "yaxis_relvar" f_relparams.write("{0} = {1}\n".format(self['yaxis_label'], yexec_dict['yaxis_relvar'])) else: @@ -1715,11 +1715,11 @@ def setRelevantParamCard(self, f_params, f_relparams): class ProcCharacteristic(ConfigFile): """A class to handle information which are passed from MadGraph to the madevent - interface.""" - + interface.""" + def default_setup(self): """initialize the directory to the default value""" - + self.add_param('loop_induced', False) self.add_param('has_isr', False) self.add_param('has_fsr', False) @@ -1735,16 +1735,16 @@ def default_setup(self): self.add_param('pdg_initial1', [0]) self.add_param('pdg_initial2', [0]) self.add_param('splitting_types',[], typelist=str) - self.add_param('perturbation_order', [], typelist=str) - self.add_param('limitations', [], typelist=str) - self.add_param('hel_recycling', False) + self.add_param('perturbation_order', [], typelist=str) + self.add_param('limitations', [], typelist=str) + self.add_param('hel_recycling', False) self.add_param('single_color', True) - self.add_param('nlo_mixed_expansion', True) + self.add_param('nlo_mixed_expansion', True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if 
isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1752,49 +1752,49 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: if '#' in line: line = line.split('#',1)[0] if not line: continue - + if '=' in line: key, value = line.split('=',1) self[key.strip()] = value - + def write(self, outputpath): """write the file""" template ="# Information about the process #\n" template +="#########################################\n" - + fsock = open(outputpath, 'w') fsock.write(template) - + for key, value in self.items(): fsock.write(" %s = %s \n" % (key, value)) - - fsock.close() - + + fsock.close() + class GridpackCard(ConfigFile): """an object for the GridpackCard""" - + def default_setup(self): """default value for the GridpackCard""" - + self.add_param("GridRun", True) self.add_param("gevents", 2500) self.add_param("gseed", 1) - self.add_param("ngran", -1) - + self.add_param("ngran", -1) + def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1802,7 +1802,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -1812,19 +1812,19 @@ def read(self, finput): self[line[1].strip()] = line[0].replace('\'','').strip() def write(self, output_file, template=None): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'grid_card_default.dat') else: template = pjoin(MEDIR, 'Cards', 'grid_card_default.dat') - + text = 
"" - for line in open(template,'r'): + for line in open(template,'r'): nline = line.split('#')[0] nline = nline.split('!')[0] comment = line[len(nline):] @@ -1832,19 +1832,19 @@ def write(self, output_file, template=None): if len(nline) != 2: text += line elif nline[1].strip() in self: - text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) + text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) else: logger.info('Adding missing parameter %s to current run_card (with default value)' % nline[1].strip()) - text += line - + text += line + if isinstance(output_file, str): fsock = open(output_file,'w') else: fsock = output_file - + fsock.write(text) fsock.close() - + class PY8Card(ConfigFile): """ Implements the Pythia8 card.""" @@ -1868,7 +1868,7 @@ def add_default_subruns(self, type): def default_setup(self): """ Sets up the list of available PY8 parameters.""" - + # Visible parameters # ================== self.add_param("Main:numberOfEvents", -1) @@ -1877,11 +1877,11 @@ def default_setup(self): self.add_param("JetMatching:qCut", -1.0, always_write_to_card=False) self.add_param("JetMatching:doShowerKt",False,always_write_to_card=False) # -1 means that it is automatically set. - self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) + self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) # for CKKWL merging self.add_param("Merging:TMS", -1.0, always_write_to_card=False) self.add_param("Merging:Process", '', always_write_to_card=False) - # -1 means that it is automatically set. + # -1 means that it is automatically set. self.add_param("Merging:nJetMax", -1, always_write_to_card=False) # for both merging, chose whether to also consider different merging # scale values for the extra weights related to scale and PDF variations. 
@@ -1918,10 +1918,10 @@ def default_setup(self): comment='This allows to turn on/off hadronization alltogether.') self.add_param("partonlevel:mpi", True, hidden=True, always_write_to_card=False, comment='This allows to turn on/off MPI alltogether.') - self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, + self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, always_write_to_card=False, comment='This parameter is automatically set to True by MG5aMC when doing MLM merging with PY8.') - + # for MLM merging self.add_param("JetMatching:merge", False, hidden=True, always_write_to_card=False, comment='Specifiy if we are merging sample of different multiplicity.') @@ -1931,9 +1931,9 @@ def default_setup(self): comment='Value of the merging scale below which one does not even write the HepMC event.') self.add_param("JetMatching:doVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. in SysCalc).') - self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) + self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) self.add_param("JetMatching:setMad", False, hidden=True, always_write_to_card=False, - comment='Specify one must read inputs from the MadGraph banner.') + comment='Specify one must read inputs from the MadGraph banner.') self.add_param("JetMatching:coneRadius", 1.0, hidden=True, always_write_to_card=False) self.add_param("JetMatching:nQmatch",4,hidden=True, always_write_to_card=False) # for CKKWL merging (common with UMEPS, UNLOPS) @@ -1946,7 +1946,7 @@ def default_setup(self): self.add_param("Merging:applyVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. 
in SysCalc).') self.add_param("Merging:includeWeightInXsection", True, hidden=True, always_write_to_card=False, - comment='If turned off, then the option belows forces PY8 to keep the original weight.') + comment='If turned off, then the option belows forces PY8 to keep the original weight.') self.add_param("Merging:muRen", 91.188, hidden=True, always_write_to_card=False, comment='Set renormalization scales of the 2->2 process.') self.add_param("Merging:muFacInME", 91.188, hidden=True, always_write_to_card=False, @@ -1958,7 +1958,7 @@ def default_setup(self): # To be added in subruns for CKKWL self.add_param("Merging:mayRemoveDecayProducts", False, hidden=True, always_write_to_card=False) self.add_param("Merging:doKTMerging", False, hidden=True, always_write_to_card=False) - self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) + self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) self.add_param("Merging:doPTLundMerging", False, hidden=True, always_write_to_card=False) # Special Pythia8 paremeters useful to simplify the shower. @@ -1975,33 +1975,33 @@ def default_setup(self): # Add parameters controlling the subruns execution flow. # These parameters should not be part of PY8SubRun daughter. self.add_default_subruns('parameters') - + def __init__(self, *args, **opts): - # Parameters which are not printed in the card unless they are - # 'user_set' or 'system_set' or part of the + # Parameters which are not printed in the card unless they are + # 'user_set' or 'system_set' or part of the # self.hidden_params_to_always_print set. self.hidden_param = [] self.hidden_params_to_always_write = set() self.visible_params_to_always_write = set() # List of parameters that should never be written out given the current context. self.params_to_never_write = set() - + # Parameters which have been set by the system (i.e. 
MG5 itself during # the regular course of the shower interface) self.system_set = set() - + # Add attributes controlling the subruns execution flow. # These attributes should not be part of PY8SubRun daughter. self.add_default_subruns('attributes') - - # Parameters which have been set by the + + # Parameters which have been set by the super(PY8Card, self).__init__(*args, **opts) - def add_param(self, name, value, hidden=False, always_write_to_card=True, + def add_param(self, name, value, hidden=False, always_write_to_card=True, comment=None): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. The option 'hidden' decides whether the parameter should be visible to the user. The option 'always_write_to_card' decides whether it should @@ -2017,7 +2017,7 @@ def add_param(self, name, value, hidden=False, always_write_to_card=True, self.hidden_params_to_always_write.add(name) else: if always_write_to_card: - self.visible_params_to_always_write.add(name) + self.visible_params_to_always_write.add(name) if not comment is None: if not isinstance(comment, str): raise MadGraph5Error("Option 'comment' must be a string, not"+\ @@ -2036,7 +2036,7 @@ def add_subrun(self, py8_subrun): self.subruns[py8_subrun['Main:subrun']] = py8_subrun if not 'LHEFInputs:nSubruns' in self.user_set: self['LHEFInputs:nSubruns'] = max(self.subruns.keys()) - + def userSet(self, name, value, **opts): """Set an attribute of this card, following a user_request""" self.__setitem__(name, value, change_userdefine=True, **opts) @@ -2044,10 +2044,10 @@ def userSet(self, name, value, **opts): self.system_set.remove(name.lower()) def vetoParamWriteOut(self, name): - """ Forbid the writeout of a specific parameter of this card when the + """ Forbid the writeout of a specific parameter of this card when the "write" function will be invoked.""" 
self.params_to_never_write.add(name.lower()) - + def systemSet(self, name, value, **opts): """Set an attribute of this card, independently of a specific user request and only if not already user_set.""" @@ -2058,7 +2058,7 @@ def systemSet(self, name, value, **opts): if force or name.lower() not in self.user_set: self.__setitem__(name, value, change_userdefine=False, **opts) self.system_set.add(name.lower()) - + def MadGraphSet(self, name, value, **opts): """ Sets a card attribute, but only if it is absent or not already user_set.""" @@ -2068,18 +2068,18 @@ def MadGraphSet(self, name, value, **opts): force = False if name.lower() not in self or (force or name.lower() not in self.user_set): self.__setitem__(name, value, change_userdefine=False, **opts) - self.system_set.add(name.lower()) - + self.system_set.add(name.lower()) + def defaultSet(self, name, value, **opts): self.__setitem__(name, value, change_userdefine=False, **opts) - + @staticmethod def pythia8_formatting(value, formatv=None): """format the variable into pythia8 card convention. 
The type is detected by default""" if not formatv: if isinstance(value,UnknownType): - formatv = 'unknown' + formatv = 'unknown' elif isinstance(value, bool): formatv = 'bool' elif isinstance(value, int): @@ -2095,7 +2095,7 @@ def pythia8_formatting(value, formatv=None): formatv = 'str' else: assert formatv - + if formatv == 'unknown': # No formatting then return str(value) @@ -2116,7 +2116,7 @@ def pythia8_formatting(value, formatv=None): elif formatv == 'float': return '%.10e' % float(value) elif formatv == 'shortfloat': - return '%.3f' % float(value) + return '%.3f' % float(value) elif formatv == 'str': return "%s" % value elif formatv == 'list': @@ -2124,9 +2124,9 @@ def pythia8_formatting(value, formatv=None): return ','.join([PY8Card.pythia8_formatting(arg, 'shortfloat') for arg in value]) else: return ','.join([PY8Card.pythia8_formatting(arg) for arg in value]) - - def write(self, output_file, template, read_subrun=False, + + def write(self, output_file, template, read_subrun=False, print_only_visible=False, direct_pythia_input=False, add_missing=True): """ Write the card to output_file using a specific template. 
> 'print_only_visible' specifies whether or not the hidden parameters @@ -2143,28 +2143,28 @@ def write(self, output_file, template, read_subrun=False, or p.lower() in self.user_set] # Filter against list of parameters vetoed for write-out visible_param = [p for p in visible_param if p.lower() not in self.params_to_never_write] - + # Now the hidden param which must be written out if print_only_visible: hidden_output_param = [] else: hidden_output_param = [p for p in self if p.lower() in self.hidden_param and not p.lower() in self.user_set and - (p.lower() in self.hidden_params_to_always_write or + (p.lower() in self.hidden_params_to_always_write or p.lower() in self.system_set)] # Filter against list of parameters vetoed for write-out hidden_output_param = [p for p in hidden_output_param if p not in self.params_to_never_write] - + if print_only_visible: subruns = [] else: if not read_subrun: subruns = sorted(self.subruns.keys()) - + # Store the subruns to write in a dictionary, with its ID in key # and the corresponding stringstream in value subruns_to_write = {} - + # Sort these parameters nicely so as to put together parameters # belonging to the same group (i.e. prefix before the ':' in their name). def group_params(params): @@ -2191,7 +2191,7 @@ def group_params(params): # First dump in a temporary_output (might need to have a second pass # at the very end to update 'LHEFInputs:nSubruns') output = StringIO.StringIO() - + # Setup template from which to read if isinstance(template, str): if os.path.isfile(template): @@ -2199,7 +2199,7 @@ def group_params(params): elif '\n' in template: tmpl = StringIO.StringIO(template) else: - raise Exception("File input '%s' not found." % file_input) + raise Exception("File input '%s' not found." 
% file_input) elif template is None: # Then use a dummy empty StringIO, hence skipping the reading tmpl = StringIO.StringIO() @@ -2257,8 +2257,8 @@ def group_params(params): # Remove all of its variables (so that nothing is overwritten) DummySubrun.clear() DummySubrun.write(subruns_to_write[int(value)], - tmpl, read_subrun=True, - print_only_visible=print_only_visible, + tmpl, read_subrun=True, + print_only_visible=print_only_visible, direct_pythia_input=direct_pythia_input) logger.info('Adding new unknown subrun with ID %d.'% @@ -2267,7 +2267,7 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - + # Change parameters which must be output if param in visible_param: new_value = PY8Card.pythia8_formatting(self[param]) @@ -2286,10 +2286,10 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - - # Substitute the value. + + # Substitute the value. # If it is directly the pytia input, then don't write the param if it - # is not in the list of visible_params_to_always_write and was + # is not in the list of visible_params_to_always_write and was # not user_set or system_set if ((not direct_pythia_input) or (param.lower() in self.visible_params_to_always_write) or @@ -2304,16 +2304,16 @@ def group_params(params): output.write(template%(param_entry, value_entry.replace(value,new_value))) - + # Proceed to next line last_pos = tmpl.tell() line = tmpl.readline() - + # If add_missing is False, make sure to empty the list of remaining parameters if not add_missing: visible_param = [] hidden_output_param = [] - + # Now output the missing parameters. Warn about visible ones. if len(visible_param)>0 and not template is None: output.write( @@ -2343,12 +2343,12 @@ def group_params(params): """%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else '')) for param in hidden_output_param: if param.lower() in self.comments: - comment = '\n'.join('! %s'%c for c in + comment = '\n'.join('! 
%s'%c for c in self.comments[param.lower()].split('\n')) output.write(comment+'\n') output.write('%s=%s\n'%(param,PY8Card.pythia8_formatting(self[param]))) - - # Don't close the file if we were reading a subrun, but simply write + + # Don't close the file if we were reading a subrun, but simply write # output and return now if read_subrun: output_file.write(output.getvalue()) @@ -2382,12 +2382,12 @@ def group_params(params): out.close() else: output_file.write(output.getvalue()) - + def read(self, file_input, read_subrun=False, setter='default'): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file. - The setter option choses the authority that sets potential - modified/new parameters. It can be either: + The setter option choses the authority that sets potential + modified/new parameters. It can be either: 'default' or 'user' or 'system'""" if isinstance(file_input, str): if "\n" in file_input: @@ -2423,8 +2423,8 @@ def read(self, file_input, read_subrun=False, setter='default'): raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%\ line) if '!' in value: - value,_ = value.split('!',1) - + value,_ = value.split('!',1) + # Read a subrun if detected: if param=='Main:subrun': if read_subrun: @@ -2451,7 +2451,7 @@ def read(self, file_input, read_subrun=False, setter='default'): last_pos = finput.tell() line = finput.readline() continue - + # Read parameter. The case of a parameter not defined in the card is # handled directly in ConfigFile. @@ -2478,7 +2478,7 @@ def add_default_subruns(self, type): def __init__(self, *args, **opts): """ Initialize a subrun """ - + # Force user to set it manually. 
subrunID = -1 if 'subrun_id' in opts: @@ -2489,7 +2489,7 @@ def __init__(self, *args, **opts): def default_setup(self): """Sets up the list of available PY8SubRun parameters.""" - + # Add all default PY8Card parameters super(PY8SubRun, self).default_setup() # Make sure they are all hidden @@ -2501,33 +2501,33 @@ def default_setup(self): self.add_param("Main:subrun", -1) self.add_param("Beams:LHEF", "events.lhe.gz") - + class RunBlock(object): """ Class for a series of parameter in the run_card that can be either visible or hidden. - name: allow to set in the default run_card $name to set where that + name: allow to set in the default run_card $name to set where that block need to be inserted template_on: information to include is block is active template_off: information to include is block is not active on_fields/off_fields: paramater associated to the block - can be specify but are otherwise automatically but + can be specify but are otherwise automatically but otherwise determined from the template. - + function: status(self,run_card) -> return which template need to be used check_validity(self, runcard) -> sanity check - create_default_for_process(self, run_card, proc_characteristic, - history, proc_def) + create_default_for_process(self, run_card, proc_characteristic, + history, proc_def) post_set_XXXX(card, value, change_userdefine, raiseerror) -> fct called when XXXXX is set post_set(card, value, change_userdefine, raiseerror, **opt) -> fct called when a parameter is changed - -> no access to parameter name + -> no access to parameter name -> not called if post_set_XXXX is defined """ - - + + def __init__(self, name, template_on, template_off, on_fields=False, off_fields=False): self.name = name @@ -2550,7 +2550,7 @@ def fields(self): def find_fields_from_template(template): """ return the list of fields from a template. 
checking line like %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ - + return re.findall(r"^\s*%\((.*)\)s\s*=\s*\1", template, re.M) def get_template(self, card): @@ -2565,7 +2565,7 @@ def get_unused_template(self, card): if self.status(card): return self.template_off else: - return self.template_on + return self.template_on def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -2594,20 +2594,20 @@ def manage_parameters(self, card, written, to_write): written.add(name) if name in to_write: to_write.remove(name) - + def check_validity(self, runcard): """run self consistency check here --avoid to use runcard[''] = xxx here since it can trigger post_set function""" return def create_default_for_process(self, run_card, proc_characteristic, history, proc_def): - return + return # @staticmethod # def post_set(card, value, change_userdefine, raiseerror, **opt): # """default action to run when a parameter of the block is defined. # Here we do not know which parameter is modified. if this is needed. 
# then one need to define post_set_XXXXX(card, value, change_userdefine, raiseerror) -# and then only that function is used +# and then only that function is used # """ # # if 'pdlabel' in card.user_set: @@ -2621,7 +2621,7 @@ class RunCard(ConfigFile): blocks = [] parameter_in_block = {} - allowed_lep_densities = {} + allowed_lep_densities = {} default_include_file = 'run_card.inc' default_autodef_file = 'run.inc' donewarning = [] @@ -2637,7 +2637,7 @@ def plugin_input(self, finput): curr_dir = os.path.dirname(os.path.dirname(finput.name)) elif isinstance(finput, str): curr_dir = os.path.dirname(os.path.dirname(finput)) - + if curr_dir: if os.path.exists(pjoin(curr_dir, 'bin', 'internal', 'plugin_run_card')): # expected format {} passing everything as optional argument @@ -2646,7 +2646,7 @@ def plugin_input(self, finput): continue opts = dict(eval(line)) self.add_param(**opts) - + @classmethod def fill_post_set_from_blocks(cls): """set the post_set function for any parameter defined in a run_block""" @@ -2659,8 +2659,8 @@ def fill_post_set_from_blocks(cls): elif hasattr(block, 'post_set'): setattr(cls, 'post_set_%s' % parameter, block.post_set) cls.parameter_in_block[parameter] = block - - + + def __new__(cls, finput=None, **opt): cls.fill_post_set_from_blocks() @@ -2718,9 +2718,9 @@ def __new__(cls, finput=None, **opt): return super(RunCard, cls).__new__(cls, finput, **opt) def __init__(self, *args, **opts): - + # The following parameter are updated in the defaultsetup stage. - + #parameter for which no warning should be raised if not define self.hidden_param = [] # in which include file the parameer should be written @@ -2739,11 +2739,11 @@ def __init__(self, *args, **opts): self.cuts_parameter = {} # parameter added where legacy requires an older value. 
self.system_default = {} - + self.display_block = [] # set some block to be displayed self.fct_mod = {} # {param: (fct_pointer, *argument, **opts)} - self.cut_class = {} + self.cut_class = {} self.warned=False @@ -2776,11 +2776,11 @@ def get_lepton_densities(cls): else: cls.allowed_lep_densities[identity].append(name) - def add_param(self, name, value, fortran_name=None, include=True, + def add_param(self, name, value, fortran_name=None, include=True, hidden=False, legacy=False, cut=False, system=False, sys_default=None, autodef=False, fct_mod=None, **opts): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. fortran_name: defines what is the associate name in the f77 code include: defines if we have to put the value in the include file @@ -2795,7 +2795,7 @@ def add_param(self, name, value, fortran_name=None, include=True, fct_mod: defines a function to run if the parameter is modify in the include file options of **opts: - allowed: list of valid options. '*' means anything else should be allowed. - empty list means anything possible as well. + empty list means anything possible as well. 
- comment: add comment for writing/help - typelist: type of the list if default is empty """ @@ -2823,9 +2823,9 @@ def add_param(self, name, value, fortran_name=None, include=True, self.fct_mod[name] = fct_mod def read(self, finput, consistency=True, unknown_warning=True, **opt): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -2836,7 +2836,7 @@ def read(self, finput, consistency=True, unknown_warning=True, **opt): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -2864,8 +2864,8 @@ def add_unknown_entry(self, name, value, unknow_warning): This is based on the guess_entry_fromname for the various syntax providing input. This then call add_param accordingly. - This function does not returns anything. - """ + This function does not returns anything. + """ if name == "dsqrt_q2fact1" and not self.LO: raise InvalidRunCard("Looks like you passed a LO run_card for a NLO run. Please correct") @@ -2903,7 +2903,7 @@ def add_unknown_entry(self, name, value, unknow_warning): " The type was assigned to %s. 
\n"+\ " The definition of that variable will %sbe automatically added to fortran file %s\n"+\ " The value of that variable will %sbe passed to the fortran code via fortran file %s",\ - name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, + name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, "" if opts.get('autodef', False) else "not", "" if opts.get('autodef', False) in [True,False] else opts.get('autodef'), "" if opts.get('include', True) else "not", "" if opts.get('include', True) in [True,False] else opts.get('include')) RunCard.donewarning.append(name) @@ -2923,19 +2923,19 @@ def valid_line(self, line, tmp): return False elif line.strip().startswith('%'): parameter = line[line.find('(')+1:line.find(')')] - + try: cond = self.cuts_parameter[parameter] except KeyError: return True - - + + if template_options.get(cond, default) or cond is True: return True else: - return False + return False else: - return True + return True def reset_simd(self, old_value, new_value, name, *args, **opts): @@ -2946,28 +2946,28 @@ def make_clean(self,old_value, new_value, name, dir): raise Exception('pass make clean for ', dir) def make_Ptouch(self,old_value, new_value, name, reset): - raise Exception('pass Ptouch for ', reset) - + raise Exception('pass Ptouch for ', reset) + def write(self, output_file, template=None, python_template=False, write_hidden=False, template_options=None, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" - to_write = set(self.user_set) + to_write = set(self.user_set) written = set() if not template: raise Exception if not template_options: template_options = collections.defaultdict(str) - + if python_template: text = open(template,'r').read() - text = text.split('\n') + text = text.split('\n') # remove if templating - text = [l if not l.startswith('#IF') else 
l[l.find(')# ')+2:] + text = [l if not l.startswith('#IF') else l[l.find(')# ')+2:] for l in text if self.valid_line(l, template_options)] text ='\n'.join(text) - + if python_template and not to_write: import string if self.blocks: @@ -2981,14 +2981,14 @@ def write(self, output_file, template=None, python_template=False, if not self.list_parameter: text = text % self else: - data = dict((key.lower(),value) for key, value in self.items()) + data = dict((key.lower(),value) for key, value in self.items()) for name in self.list_parameter: if self.list_parameter[name] != str: data[name] = ', '.join(str(v) for v in data[name]) else: data[name] = "['%s']" % "', '".join(str(v) for v in data[name]) text = text % data - else: + else: text = "" for line in open(template,'r'): nline = line.split('#')[0] @@ -3005,11 +3005,11 @@ def write(self, output_file, template=None, python_template=False, this_group = this_group[0] text += this_group.get_template(self) % self this_group.manage_parameters(self, written, to_write) - + elif len(nline) != 2: text += line elif nline[1].strip() in self: - + name = nline[1].strip().lower() value = self[name] if name in self.list_parameter: @@ -3026,15 +3026,15 @@ def write(self, output_file, template=None, python_template=False, else: endline = '' text += ' %s\t= %s %s%s' % (value, name, comment, endline) - written.add(name) + written.add(name) if name in to_write: to_write.remove(name) else: logger.info('Adding missing parameter %s to current %s (with default value)', (name, self.filename)) - written.add(name) - text += line + written.add(name) + text += line for b in self.blocks: if b.status(self): @@ -3057,7 +3057,7 @@ def write(self, output_file, template=None, python_template=False, else: #partial writting -> add only what is needed to_add = [] - for line in b.get_template(self).split('\n'): + for line in b.get_template(self).split('\n'): nline = line.split('#')[0] nline = nline.split('!')[0] nline = nline.split('=') @@ -3072,8 +3072,8 @@ 
def write(self, output_file, template=None, python_template=False, continue #already include before else: to_add.append(line % {nline[1].strip():value, name:value}) - written.add(name) - + written.add(name) + if name in to_write: to_write.remove(name) else: @@ -3095,13 +3095,13 @@ def write(self, output_file, template=None, python_template=False, text += '\n'.join(to_add) if to_write or write_hidden: - text+="""#********************************************************************* + text+="""#********************************************************************* # Additional hidden parameters #********************************************************************* -""" +""" if write_hidden: # - # do not write hidden parameter not hidden for this template + # do not write hidden parameter not hidden for this template # if python_template: written = written.union(set(re.findall('\%\((\w*)\)s', open(template,'r').read(), re.M))) @@ -3129,7 +3129,7 @@ def get_last_value_include(self, output_dir): if inc file does not exist we will return the current value (i.e. set has no change) """ - #remember that + #remember that # default_include_file is a class variable # self.includepath is on the form include_path : [list of param ] out = {} @@ -3165,7 +3165,7 @@ def get_value_from_include(self, path, list_of_params, output_dir): with open(pjoin(output_dir,path), 'r') as fsock: text = fsock.read() - + for name in list_of_params: misc.sprint(name, name in self.fortran_name) misc.sprint(self.fortran_name[name] if name in self.fortran_name[name] else name) @@ -3191,11 +3191,11 @@ def get_value_from_include(self, path, list_of_params, output_dir): misc.sprint(self.fortran_name) misc.sprint(text) raise Exception - return out + return out def get_default(self, name, default=None, log_level=None): - """return self[name] if exist otherwise default. log control if we + """return self[name] if exist otherwise default. 
log control if we put a warning or not if we use the default value""" lower_name = name.lower() @@ -3216,13 +3216,13 @@ def get_default(self, name, default=None, log_level=None): log_level = 20 if not default: default = dict.__getitem__(self, name.lower()) - + logger.log(log_level, '%s missed argument %s. Takes default: %s' % (self.filename, name, default)) self[name] = default return default else: - return self[name] + return self[name] def mod_inc_pdlabel(self, value): """flag pdlabel has 'dressed' if one of the special lepton PDF with beamstralung. @@ -3237,16 +3237,16 @@ def edit_dummy_fct_from_file(self, filelist, outdir): filelist is a list of input files (given by the user) containing a series of function to be placed in replacement of standard (typically dummy) functions of the code. - This use LO/NLO class attribute that defines which function name need to - be placed in which file. + This use LO/NLO class attribute that defines which function name need to + be placed in which file. First time this is used, a backup of the original file is done in order to - recover if the user remove some of those files. + recover if the user remove some of those files. The function present in the file are determined automatically via regular expression. and only that function is replaced in the associated file. 
- function in the filelist starting with user_ will also be include within the + function in the filelist starting with user_ will also be include within the dummy_fct.f file """ @@ -3269,7 +3269,7 @@ def edit_dummy_fct_from_file(self, filelist, outdir): fsock = file_writers.FortranWriter(tmp,'w') function_text = fsock.remove_routine(text, fct) fsock.close() - test = open(tmp,'r').read() + test = open(tmp,'r').read() if fct not in self.dummy_fct_file: if fct.startswith('user_'): self.dummy_fct_file[fct] = self.dummy_fct_file['user_'] @@ -3315,22 +3315,22 @@ def guess_entry_fromname(self, name, value): - vartype: type of the variable - name: name of the variable (stripped from metadata) - options: additional options for the add_param - rules: - - if name starts with str_, int_, float_, bool_, list_, dict_ then + rules: + - if name starts with str_, int_, float_, bool_, list_, dict_ then - vartype is set accordingly - name is strip accordingly - otherwise guessed from value (which is string) - if name contains min/max - vartype is set to float - options has an added {'cut':True} - - suffixes like + - suffixes like - will be removed from named - will be added in options (for add_param) as {'cut':True} see add_param documentation for the list of supported options - if include is on False set autodef to False (i.e. 
enforce it False for future change) """ - # local function + # local function def update_typelist(value, name, opts): """convert a string to a list and update opts to keep track of the type """ value = value.strip() @@ -3358,7 +3358,7 @@ def update_typelist(value, name, opts): opts[key] = val name = name.replace("<%s=%s>" %(key,val), '') - # get vartype + # get vartype # first check that name does not force it supported_type = ["str", "float", "int", "bool", "list", "dict"] if "_" in name and name.split("_")[0].lower() in supported_type: @@ -3406,13 +3406,13 @@ def f77_formatting(value, formatv=None): value = str(value).lower() else: assert formatv - + if formatv == 'bool': if str(value) in ['1','T','.true.','True']: return '.true.' else: return '.false.' - + elif formatv == 'int': try: return str(int(value)) @@ -3422,12 +3422,12 @@ def f77_formatting(value, formatv=None): return str(int(fl)) else: raise - + elif formatv == 'float': if isinstance(value, str): value = value.replace('d','e') return ('%.10e' % float(value)).replace('e','d') - + elif formatv == 'str': # Check if it is a list if value.strip().startswith('[') and value.strip().endswith(']'): @@ -3437,20 +3437,20 @@ def f77_formatting(value, formatv=None): enumerate(elements)] else: return "'%s'" % value - - + + def check_validity(self, log_level=30): """check that parameter missing in the card are set to the expected value""" for name, value in self.system_default.items(): self.set(name, value, changeifuserset=False) - + for name in self.includepath[False]: to_bypass = self.hidden_param + list(self.legacy_parameter.keys()) if name not in to_bypass: - self.get_default(name, log_level=log_level) + self.get_default(name, log_level=log_level) for name in self.legacy_parameter: if self[name] != self.legacy_parameter[name]: @@ -3458,28 +3458,28 @@ def check_validity(self, log_level=30): for block in self.blocks: block.check_validity(self) - + def update_system_parameter_for_include(self): - """update hidden 
system only parameter for the correct writtin in the + """update hidden system only parameter for the correct writtin in the include""" return - + def write_include_file(self, output_dir, output_file=None): """Write the various include file in output_dir. The entry True of self.includepath will be written in run_card.inc The entry False will not be written anywhere output_file allows testing by providing stream. - This also call the function to add variable definition for the - variable with autodef=True (handle by write_autodef function) + This also call the function to add variable definition for the + variable with autodef=True (handle by write_autodef function) """ - + # ensure that all parameter are coherent and fix those if needed self.check_validity() - + #ensusre that system only parameter are correctly set self.update_system_parameter_for_include() @@ -3490,10 +3490,10 @@ def write_include_file(self, output_dir, output_file=None): self.write_autodef(output_dir, output_file=None) # check/fix status of customised functions self.edit_dummy_fct_from_file(self["custom_fcts"], os.path.dirname(output_dir)) - + for incname in self.includepath: self.write_one_include_file(output_dir, incname, output_file) - + for name,value in value_in_old_include.items(): if value != self[name]: self.fct_mod[name][0](value, self[name], name, *self.fct_mod[name][1],**self.fct_mod[name][2]) @@ -3515,13 +3515,13 @@ def write_one_include_file(self, output_dir, incname, output_file=None): fsock = file_writers.FortranWriter(pjoin(output_dir,pathinc+'.tmp')) - for key in self.includepath[incname]: + for key in self.includepath[incname]: #define the fortran name if key in self.fortran_name: fortran_name = self.fortran_name[key] else: fortran_name = key - + if incname in self.include_as_parameter: fsock.writelines('INTEGER %s\n' % fortran_name) #get the value with warning if the user didn't set it @@ -3534,7 +3534,7 @@ def write_one_include_file(self, output_dir, incname, 
output_file=None): # in case of a list, add the length of the list as 0th # element in fortran. Only in case of integer or float # list (not for bool nor string) - targettype = self.list_parameter[key] + targettype = self.list_parameter[key] if targettype is bool: pass elif targettype is int: @@ -3550,7 +3550,7 @@ def write_one_include_file(self, output_dir, incname, output_file=None): elif isinstance(value, dict): for fortran_name, onevalue in value.items(): line = '%s = %s \n' % (fortran_name, self.f77_formatting(onevalue)) - fsock.writelines(line) + fsock.writelines(line) elif isinstance(incname,str) and 'compile' in incname: if incname in self.include_as_parameter: line = 'PARAMETER (%s=%s)' %( fortran_name, value) @@ -3585,7 +3585,7 @@ def write_autodef(self, output_dir, output_file=None): filetocheck = dict(self.definition_path) if True not in self.definition_path: filetocheck[True] = [] - + for incname in filetocheck: if incname is True: @@ -3598,7 +3598,7 @@ def write_autodef(self, output_dir, output_file=None): if output_file: fsock = output_file input = fsock.getvalue() - + else: input = open(pjoin(output_dir,pathinc),'r').read() # do not define fsock here since we might not need to overwrite it @@ -3608,7 +3608,7 @@ def write_autodef(self, output_dir, output_file=None): previous = re.findall(re_pat, input, re.M) # now check which one needed to be added (and remove those identicaly defined) to_add = [] - for key in filetocheck[incname]: + for key in filetocheck[incname]: curr_type = self[key].__class__.__name__ length = "" if curr_type in [list, "list"]: @@ -3640,10 +3640,10 @@ def write_autodef(self, output_dir, output_file=None): fsock.truncate(0) fsock.seek(0) - # remove outdated lines + # remove outdated lines lines = input.split('\n') if previous: - out = [line for line in lines if not re.search(re_pat, line, re.M) or + out = [line for line in lines if not re.search(re_pat, line, re.M) or re.search(re_pat, line, re.M).groups() not in previous] else: 
out = lines @@ -3662,7 +3662,7 @@ def write_autodef(self, output_dir, output_file=None): stop = out.index('C STOP USER COMMON BLOCK') out = out[:start]+ out[stop+1:] #add new common-block - if self.definition_path[incname]: + if self.definition_path[incname]: out.append("C START USER COMMON BLOCK") if isinstance(pathinc , str): filename = os.path.basename(pathinc).split('.',1)[0] @@ -3675,10 +3675,10 @@ def write_autodef(self, output_dir, output_file=None): filename = filename.upper() out.append(" COMMON/USER_CUSTOM_%s/%s" %(filename,','.join( self.definition_path[incname]))) out.append('C STOP USER COMMON BLOCK') - + if not output_file: fsock.writelines(out) - fsock.close() + fsock.close() else: # for iotest out = ["%s\n" %l for l in out] @@ -3702,7 +3702,7 @@ def get_idbmup(lpp): def get_banner_init_information(self): """return a dictionary with the information needed to write the first line of the block of the lhe file.""" - + output = {} output["idbmup1"] = self.get_idbmup(self['lpp1']) output["idbmup2"] = self.get_idbmup(self['lpp2']) @@ -3713,7 +3713,7 @@ def get_banner_init_information(self): output["pdfsup1"] = self.get_pdf_id(self["pdlabel"]) output["pdfsup2"] = self.get_pdf_id(self["pdlabel"]) return output - + def get_pdf_id(self, pdf): if pdf == "lhapdf": lhaid = self["lhaid"] @@ -3721,19 +3721,19 @@ def get_pdf_id(self, pdf): return lhaid[0] else: return lhaid - else: + else: try: return {'none': 0, 'iww': 0, 'eva':0, 'edff':0, 'chff':0, 'cteq6_m':10000,'cteq6_l':10041,'cteq6l1':10042, 'nn23lo':246800,'nn23lo1':247000,'nn23nlo':244800 - }[pdf] + }[pdf] except: - return 0 - + return 0 + def get_lhapdf_id(self): return self.get_pdf_id(self['pdlabel']) - def remove_all_cut(self): + def remove_all_cut(self): """remove all the cut""" for name in self.cuts_parameter: @@ -3749,7 +3749,7 @@ def remove_all_cut(self): elif 'eta' in name: self[name] = -1 else: - self[name] = 0 + self[name] = 0 
################################################################################################ ### Define various template subpart for the LO Run_card @@ -3767,11 +3767,11 @@ def remove_all_cut(self): %(nb_proton1)s = nb_proton1 # number of proton for the first beam %(nb_neutron1)s = nb_neutron1 # number of neutron for the first beam %(mass_ion1)s = mass_ion1 # mass of the heavy ion (first beam) -# Note that seting differently the two beams only work if you use +# Note that seting differently the two beams only work if you use # group_subprocess=False when generating your matrix-element %(nb_proton2)s = nb_proton2 # number of proton for the second beam %(nb_neutron2)s = nb_neutron2 # number of neutron for the second beam - %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) + %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ template_off = "# To see heavy ion options: type \"update ion_pdf\"" @@ -3834,11 +3834,11 @@ def remove_all_cut(self): # Frame for polarization ------------------------------------------------------------------------------------ template_on = \ """#********************************************************************* -# Frame where to evaluate the matrix-element (not the cut!) for polarization +# Frame where to evaluate the matrix-element (not the cut!) for polarization #********************************************************************* %(me_frame)s = me_frame ! list of particles to sum-up to define the rest-frame ! in which to evaluate the matrix-element - ! [1,2] means the partonic center of mass + ! 
[1,2] means the partonic center of mass """ template_off = "" frame_block = RunBlock('frame', template_on=template_on, template_off=template_off) @@ -3891,7 +3891,7 @@ def remove_all_cut(self): # CONTROL The extra running scale (not QCD) * # Such running is NOT include in systematics computation * #*********************************************************************** - %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale + %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale %(mue_ref_fixed)s = mue_ref_fixed ! scale to use if fixed scale mode %(mue_over_ref)s = mue_over_ref ! ratio to mur if dynamical scale """ @@ -3908,10 +3908,10 @@ def remove_all_cut(self): %(tmin_for_channel)s = tmin_for_channel ! limit the non-singular reach of --some-- channel of integration related to T-channel diagram (value between -1 and 0), -1 is no impact %(survey_splitting)s = survey_splitting ! for loop-induced control how many core are used at survey for the computation of a single iteration. %(survey_nchannel_per_job)s = survey_nchannel_per_job ! control how many Channel are integrated inside a single job on cluster/multicore - %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) + %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) +#********************************************************************* +# Compilation flag. #********************************************************************* -# Compilation flag. -#********************************************************************* %(global_flag)s = global_flag ! fortran optimization flag use for the all code. %(aloha_flag)s = aloha_flag ! fortran optimization flag for aloha function. Suggestions: '-ffast-math' %(matrix_flag)s = matrix_flag ! fortran optimization flag for matrix.f function. 
Suggestions: '-O3' @@ -3948,7 +3948,7 @@ def check_validity(self, card): if card['pdlabel'] != card['pdlabel1']: dict.__setitem__(card, 'pdlabel', card['pdlabel1']) elif card['pdlabel1'] in sum(card.allowed_lep_densities.values(),[]): - raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") + raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel2'] in sum(card.allowed_lep_densities.values(),[]): raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel1'] == 'none': @@ -3962,7 +3962,7 @@ def check_validity(self, card): dict.__setitem__(card, 'pdlabel2', card['pdlabel']) if abs(card['lpp1']) == 1 == abs(card['lpp2']) and card['pdlabel1'] != card['pdlabel2']: - raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") + raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -4028,7 +4028,7 @@ def post_set(card, value, change_userdefine, raiseerror, name='unknown', **opt): if name == 'fixed_fac_scale2' and 'fixed_fac_scale1' not in card.user_set: dict.__setitem__(card, 'fixed_fac_scale1', card['fixed_fac_scale']) if name == 'fixed_fac_scale1' and 'fixed_fac_scale2' not in card.user_set: - dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) + dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) def status(self, card): @@ -4061,32 +4061,32 @@ def status(self, card): class RunCardLO(RunCard): """an object to handle in a nice way the run_card information""" - + blocks = [heavy_ion_block, beam_pol_block, syscalc_block, ecut_block, frame_block, eva_scale_block, mlm_block, ckkw_block, psoptim_block, pdlabel_block, fixedfacscale, running_block] dummy_fct_file = {"dummy_cuts": 
pjoin("SubProcesses","dummy_fct.f"), "get_dummy_x1": pjoin("SubProcesses","dummy_fct.f"), - "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), + "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), "dummy_boostframe": pjoin("SubProcesses","dummy_fct.f"), "user_dynamical_scale": pjoin("SubProcesses","dummy_fct.f"), "bias_wgt_custom": pjoin("SubProcesses","dummy_fct.f"), "user_": pjoin("SubProcesses","dummy_fct.f") # all function starting by user will be added to that file } - + include_as_parameter = ['vector.inc'] if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_lo.dat") - + def default_setup(self): """default value for the run_card.dat""" - + self.add_param("run_tag", "tag_1", include=False) self.add_param("gridpack", False) self.add_param("time_of_flight", -1.0, include=False) - self.add_param("nevents", 10000) + self.add_param("nevents", 10000) self.add_param("iseed", 0) self.add_param("python_seed", -2, include=False, hidden=True, comment="controlling python seed [handling in particular the final unweighting].\n -1 means use default from random module.\n -2 means set to same value as iseed") self.add_param("lpp1", 1, fortran_name="lpp(1)", allowed=[-1,1,0,2,3,9,-2,-3,4,-4], @@ -4106,7 +4106,7 @@ def default_setup(self): self.add_param('nb_neutron1', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(1)", comment='For heavy ion physics nb of neutron in the ion (for both beam but if group_subprocess was False)') self.add_param('nb_neutron2', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(2)", - comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') + comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') self.add_param('mass_ion1', -1.0, hidden=True, fortran_name="mass_ion(1)", allowed=[-1,0, 0.938, 207.9766521*0.938, 0.000511, 0.105, '*'], comment='For heavy ion physics mass in GeV of the ion (of beam 1)') 
@@ -4133,11 +4133,11 @@ def default_setup(self): self.add_param("mue_over_ref", 1.0, hidden=True, comment='ratio mu_other/mu for dynamical scale') self.add_param("ievo_eva",0,hidden=True, allowed=[0,1],fortran_name="ievo_eva", comment='eva: 0 for EW pdf muf evolution by q^2; 1 for evo by pT^2') - + # Bias module options self.add_param("bias_module", 'None', include=False, hidden=True) self.add_param('bias_parameters', {'__type__':1.0}, include='BIAS/bias.inc', hidden=True) - + #matching self.add_param("scalefact", 1.0) self.add_param("ickkw", 0, allowed=[0,1], hidden=True, comment="\'0\' for standard fixed order computation.\n\'1\' for MLM merging activates alphas and pdf re-weighting according to a kt clustering of the QCD radiation.") @@ -4221,7 +4221,7 @@ def default_setup(self): self.add_param("mmaa", 0.0, cut='aa') self.add_param("mmll", 0.0, cut='ll') self.add_param("mmjjmax", -1.0, cut='jj') - self.add_param("mmbbmax", -1.0, cut='bb') + self.add_param("mmbbmax", -1.0, cut='bb') self.add_param("mmaamax", -1.0, cut='aa') self.add_param("mmllmax", -1.0, cut='ll') self.add_param("mmnl", 0.0, cut='LL') @@ -4231,9 +4231,9 @@ def default_setup(self): self.add_param("ptllmax", -1.0, cut='ll') self.add_param("xptj", 0.0, cut='jj') self.add_param("xptb", 0.0, cut='bb') - self.add_param("xpta", 0.0, cut='aa') + self.add_param("xpta", 0.0, cut='aa') self.add_param("xptl", 0.0, cut='ll') - # ordered pt jet + # ordered pt jet self.add_param("ptj1min", 0.0, cut='jj') self.add_param("ptj1max", -1.0, cut='jj') self.add_param("ptj2min", 0.0, cut='jj') @@ -4241,7 +4241,7 @@ def default_setup(self): self.add_param("ptj3min", 0.0, cut='jjj') self.add_param("ptj3max", -1.0, cut='jjj') self.add_param("ptj4min", 0.0, cut='j'*4) - self.add_param("ptj4max", -1.0, cut='j'*4) + self.add_param("ptj4max", -1.0, cut='j'*4) self.add_param("cutuse", 0, cut='jj') # ordered pt lepton self.add_param("ptl1min", 0.0, cut='l'*2) @@ -4249,7 +4249,7 @@ def default_setup(self): 
self.add_param("ptl2min", 0.0, cut='l'*2) self.add_param("ptl2max", -1.0, cut='l'*2) self.add_param("ptl3min", 0.0, cut='l'*3) - self.add_param("ptl3max", -1.0, cut='l'*3) + self.add_param("ptl3max", -1.0, cut='l'*3) self.add_param("ptl4min", 0.0, cut='l'*4) self.add_param("ptl4max", -1.0, cut='l'*4) # Ht sum of jets @@ -4257,7 +4257,7 @@ def default_setup(self): self.add_param("htjmax", -1.0, cut='j'*2) self.add_param("ihtmin", 0.0, cut='J'*2) self.add_param("ihtmax", -1.0, cut='J'*2) - self.add_param("ht2min", 0.0, cut='J'*3) + self.add_param("ht2min", 0.0, cut='J'*3) self.add_param("ht3min", 0.0, cut='J'*3) self.add_param("ht4min", 0.0, cut='J'*4) self.add_param("ht2max", -1.0, cut='J'*3) @@ -4267,7 +4267,7 @@ def default_setup(self): self.add_param("ptgmin", 0.0, cut='aj') self.add_param("r0gamma", 0.4, hidden=True) self.add_param("xn", 1.0, hidden=True) - self.add_param("epsgamma", 1.0, hidden=True) + self.add_param("epsgamma", 1.0, hidden=True) self.add_param("isoem", True, hidden=True) self.add_param("xetamin", 0.0, cut='jj') self.add_param("deltaeta", 0.0, cut='j'*2) @@ -4280,7 +4280,7 @@ def default_setup(self): self.add_param("use_syst", True) self.add_param('systematics_program', 'systematics', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics, syscalc') self.add_param('systematics_arguments', ['--mur=0.5,1,2', '--muf=0.5,1,2', '--pdf=errorset'], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. 
Look at the help of the systematics function for more details.') - + self.add_param("sys_scalefact", "0.5 1 2", include=False, hidden=True) self.add_param("sys_alpsfact", "None", include=False, hidden=True) self.add_param("sys_matchscale", "auto", include=False, hidden=True) @@ -4315,8 +4315,8 @@ def default_setup(self): self.add_param('aloha_flag', '', include=False, hidden=True, comment='global fortran compilation flag, suggestion: -ffast-math', fct_mod=(self.make_clean, ('Source/DHELAS'),{})) self.add_param('matrix_flag', '', include=False, hidden=True, comment='fortran compilation flag for the matrix-element files, suggestion -O3', - fct_mod=(self.make_Ptouch, ('matrix'),{})) - self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', + fct_mod=(self.make_Ptouch, ('matrix'),{})) + self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', fortran_name='VECSIZE_MEMMAX', fct_mod=(self.reset_simd,(),{})) # parameter allowing to define simple cut via the pdg @@ -4329,24 +4329,24 @@ def default_setup(self): self.add_param('eta_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False) - + self.add_param('pdg_cut',[0], system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], system=True) # store pt min self.add_param('ptmax4pdg',[-1.], system=True) self.add_param('Emin4pdg',[0.], system=True) # store pt min - self.add_param('Emax4pdg',[-1.], system=True) + self.add_param('Emax4pdg',[-1.], system=True) self.add_param('etamin4pdg',[0.], system=True) # store pt min - self.add_param('etamax4pdg',[-1.], system=True) + self.add_param('etamax4pdg',[-1.], system=True) self.add_param('mxxmin4pdg',[-1.], system=True) self.add_param('mxxpart_antipart', [False], system=True) - - - + + + def check_validity(self): """ """ 
- + super(RunCardLO, self).check_validity() - + #Make sure that nhel is only either 0 (i.e. no MC over hel) or #1 (MC over hel with importance sampling). In particular, it can #no longer be > 1. @@ -4357,12 +4357,12 @@ def check_validity(self): "not %s." % self['nhel']) if int(self['maxjetflavor']) > 6: raise InvalidRunCard('maxjetflavor should be lower than 5! (6 is partly supported)') - + if len(self['pdgs_for_merging_cut']) > 1000: raise InvalidRunCard("The number of elements in "+\ "'pdgs_for_merging_cut' should not exceed 1000.") - + # some cut need to be deactivated in presence of isolation if self['ptgmin'] > 0: if self['pta'] > 0: @@ -4370,18 +4370,18 @@ def check_validity(self): self['pta'] = 0.0 if self['draj'] > 0: logger.warning('draj cut discarded since photon isolation is used') - self['draj'] = 0.0 - - # special treatment for gridpack use the gseed instead of the iseed + self['draj'] = 0.0 + + # special treatment for gridpack use the gseed instead of the iseed if self['gridrun']: self['iseed'] = self['gseed'] - + #Some parameter need to be fixed when using syscalc #if self['use_syst']: # if self['scalefact'] != 1.0: # logger.warning('Since use_syst=T, changing the value of \'scalefact\' to 1') # self['scalefact'] = 1.0 - + # CKKW Treatment if self['ickkw'] > 0: if self['ickkw'] != 1: @@ -4399,7 +4399,7 @@ def check_validity(self): raise InvalidRunCard('maxjetflavor at 6 is NOT supported for matching!') if self['ickkw'] == 2: # add warning if ckkw selected but the associate parameter are empty - self.get_default('highestmult', log_level=20) + self.get_default('highestmult', log_level=20) self.get_default('issgridfile', 'issudgrid.dat', log_level=20) if self['xqcut'] > 0: if self['ickkw'] == 0: @@ -4412,13 +4412,13 @@ def check_validity(self): if self['drjl'] != 0: if 'drjl' in self.user_set: logger.warning('Since icckw>0, changing the value of \'drjl\' to 0') - self['drjl'] = 0 - if not self['auto_ptj_mjj']: + self['drjl'] = 0 + if not 
self['auto_ptj_mjj']: if self['mmjj'] > self['xqcut']: logger.warning('mmjj > xqcut (and auto_ptj_mjj = F). MMJJ set to 0') - self['mmjj'] = 0.0 - - # check validity of the pdf set + self['mmjj'] = 0.0 + + # check validity of the pdf set # note that pdlabel is automatically set to lhapdf if pdlabel1 or pdlabel2 is set to lhapdf if self['pdlabel'] == 'lhapdf': #add warning if lhaid not define @@ -4426,7 +4426,7 @@ def check_validity(self): mod = False for i in [1,2]: - lpp = 'lpp%i' %i + lpp = 'lpp%i' %i pdlabelX = 'pdlabel%i' % i if self[lpp] == 0: # nopdf if self[pdlabelX] != 'none': @@ -4459,12 +4459,12 @@ def check_validity(self): raise InvalidRunCard( "Heavy ion mode is only supported for lpp1=1/2") if self['lpp2'] not in [1,2]: if self['nb_proton2'] !=1 or self['nb_neutron2'] !=0: - raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") + raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") # check that fixed_fac_scale(1/2) is setting as expected # if lpp=2/3/4 -> default is that beam in fixed scale - # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are + # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are # check that both fixed_fac_scale1/2 are defined together # ensure that fixed_fac_scale1 and fixed_fac_scale2 are setup as needed if 'fixed_fac_scale1' in self.user_set: @@ -4475,13 +4475,13 @@ def check_validity(self): elif 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale1 are defined but not fixed_fac_scale2. The value of fixed_fac_scale2 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale2'] = self['fixed_fac_scale'] - elif self['lpp2'] !=0: + elif self['lpp2'] !=0: raise Exception('fixed_fac_scale2 not defined while fixed_fac_scale1 is. Please fix your run_card.') elif 'fixed_fac_scale2' in self.user_set: if 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale2 are defined but not fixed_fac_scale1. 
The value of fixed_fac_scale1 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale1'] = self['fixed_fac_scale'] - elif self['lpp1'] !=0: + elif self['lpp1'] !=0: raise Exception('fixed_fac_scale1 not defined while fixed_fac_scale2 is. Please fix your run_card.') else: if 'fixed_fac_scale' in self.user_set: @@ -4500,12 +4500,12 @@ def check_validity(self): logger.warning('fixed_fac_scale1 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale1']) logger.warning('fixed_fac_scale2 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale2']) - # check if lpp = + # check if lpp = if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]): for i in [1,2]: if abs(self['lpp%s' % i ]) in [3,4] and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: logger.warning("Vector boson from lepton PDF is using fixed scale value of muf [dsqrt_q2fact%s]. Looks like you kept the default value (Mz). Is this really the cut-off that you want to use?" % i) - + if abs(self['lpp%s' % i ]) == 2 and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: if self['pdlabel'] in ['edff','chff']: logger.warning("Since 3.5.0 exclusive photon-photon processes in ultraperipheral proton and nuclear collisions from gamma-UPC (arXiv:2207.03012) will ignore the factorisation scale.") @@ -4515,10 +4515,10 @@ def check_validity(self): if six.PY2 and self['hel_recycling']: self['hel_recycling'] = False - logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. + logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. In general this optimization speeds up the computation by a factor of two.""") - + # check that ebeam is bigger than the associated mass. 
for i in [1,2]: if self['lpp%s' % i ] not in [1,2]: @@ -4529,13 +4529,13 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") - elif self['ebeam%i' % i] < self['mass_ion%i' % i]: + raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + elif self['ebeam%i' % i] < self['mass_ion%i' % i]: if self['ebeam%i' %i] == 0: logger.warning("At rest ion mode set: Energy beam set to %s" % self['mass_ion%i' % i]) self.set('ebeam%i' %i, self['mass_ion%i' % i]) - - + + # check the tmin_for_channel is negative if self['tmin_for_channel'] == 0: raise InvalidRunCard('tmin_for_channel can not be set to 0.') @@ -4543,15 +4543,15 @@ def check_validity(self): logger.warning('tmin_for_channel should be negative. Will be using -%f instead' % self['tmin_for_channel']) self.set('tmin_for_channel', -self['tmin_for_channel']) - + def update_system_parameter_for_include(self): """system parameter need to be setupe""" - + # polarization self['frame_id'] = sum(2**(n) for n in self['me_frame']) - + # set the pdg_for_cut fortran parameter - pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + + pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + list(self['e_min_pdg'].keys()) +list(self['e_max_pdg'].keys()) + list(self['eta_min_pdg'].keys()) +list(self['eta_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys()) + list(self['mxx_only_part_antipart'].keys())) @@ -4559,15 +4559,15 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different pdgs are allowed for pdg specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. 
Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative pdg code') - - + + if any(pdg in pdg_to_cut for pdg in [1,2,3,4,5,21,22,11,13,15]): raise Exception("Can not use PDG related cut for light quark/b quark/lepton/gluon/photon") - + if pdg_to_cut: self['pdg_cut'] = list(pdg_to_cut) self['ptmin4pdg'] = [] @@ -4595,7 +4595,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -4605,11 +4605,11 @@ def update_system_parameter_for_include(self): self['ptmax4pdg'] = [-1.] self['Emax4pdg'] = [-1.] self['etamax4pdg'] =[-1.] - self['mxxmin4pdg'] =[0.] + self['mxxmin4pdg'] =[0.] self['mxxpart_antipart'] = [False] - - - + + + def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules process 1->N all cut set on off. 
@@ -4626,7 +4626,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if proc_characteristic['loop_induced']: self['nhel'] = 1 self['pdgs_for_merging_cut'] = proc_characteristic['colored_pdgs'] - + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() @@ -4636,7 +4636,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): # check for beam_id beam_id = set() beam_id_split = [set(), set()] - for proc in proc_def: + for proc in proc_def: for oneproc in proc: for i,leg in enumerate(oneproc['legs']): if not leg['state']: @@ -4654,20 +4654,20 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): maxjetflavor = max([4]+[abs(i) for i in beam_id if -7< i < 7]) self['maxjetflavor'] = maxjetflavor self['asrwgtflavor'] = maxjetflavor - + if any(i in beam_id for i in [1,-1,2,-2,3,-3,4,-4,5,-5,21,22]): # check for e p collision if any(id in beam_id for id in [11,-11,13,-13]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [11,-11,13,-13]): - self['lpp1'] = 0 - self['lpp2'] = 1 - self['ebeam1'] = '1k' - self['ebeam2'] = '6500' + self['lpp1'] = 0 + self['lpp2'] = 1 + self['ebeam1'] = '1k' + self['ebeam2'] = '6500' else: - self['lpp1'] = 1 - self['lpp2'] = 0 - self['ebeam1'] = '6500' + self['lpp1'] = 1 + self['lpp2'] = 0 + self['ebeam1'] = '6500' self['ebeam2'] = '1k' # UPC for p p collision @@ -4677,7 +4677,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam1'] = '6500' self['ebeam2'] = '6500' self['pdlabel'] = 'edff' - + elif any(id in beam_id for id in [11,-11,13,-13]): self['lpp1'] = 0 self['lpp2'] = 0 @@ -4688,7 +4688,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('ecut') self.display_block.append('beam_pol') - + # check for possibility of eva eva_in_b1 = any(i in beam_id_split[0] for i in [23,24,-24]) #,12,-12,14,-14]) @@ -4701,10 
+4701,10 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['nhel'] = 1 self['pdlabel'] = 'eva' self['fixed_fac_scale'] = True - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') elif eva_in_b1: - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') self['pdlabel1'] = 'eva' self['fixed_fac_scale1'] = True self['nhel'] = 1 @@ -4724,7 +4724,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['pdlabel2'] = 'eva' self['fixed_fac_scale2'] = True self['nhel'] = 1 - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') for i in beam_id_split[0]: if abs(i) == 11: self['lpp1'] = math.copysign(3,i) @@ -4740,34 +4740,34 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if any(i in beam_id for i in [22,23,24,-24,12,-12,14,-14]): self.display_block.append('eva_scale') - # automatic polarisation of the beam if neutrino beam + # automatic polarisation of the beam if neutrino beam if any(id in beam_id for id in [12,-12,14,-14,16,-16]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [12,14,16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = -100 if not all(id in [12,14,16] for id in beam_id_split[0]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1]. %s') elif any(id in beam_id_split[0] for id in [-12,-14,-16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[0]): - logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1].') + logger.warning('Issue with default beam setup of neutrino in the run_card. 
Please check it up [polbeam1].') if any(id in beam_id_split[1] for id in [12,14,16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = -100 if not all(id in [12,14,16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') elif any(id in beam_id_split[1] for id in [-12,-14,-16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') - + # Check if need matching min_particle = 99 max_particle = 0 @@ -4798,12 +4798,12 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - + break + if matching: self['ickkw'] = 1 self['xqcut'] = 30 - #self['use_syst'] = False + #self['use_syst'] = False self['drjj'] = 0 self['drjl'] = 0 self['sys_alpsfact'] = "0.5 1 2" @@ -4811,8 +4811,8 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('mlm') self.display_block.append('ckkw') self['dynamical_scale_choice'] = -1 - - + + # For interference module, the systematics are wrong. 
# automatically set use_syst=F and set systematics_program=none no_systematics = False @@ -4826,14 +4826,14 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): continue break - + if interference or no_systematics: self['use_syst'] = False self['systematics_program'] = 'none' if interference: self['dynamical_scale_choice'] = 3 self['sde_strategy'] = 2 - + # set default integration strategy # interference case is already handle above # here pick strategy 2 if only one QCD color flow @@ -4852,7 +4852,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if pure_lepton and proton_initial: self['sde_strategy'] = 1 else: - # check if multi-jet j + # check if multi-jet j is_multijet = True for proc in proc_def: if any(abs(j.get('id')) not in jet_id for j in proc[0]['legs']): @@ -4860,7 +4860,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): break if is_multijet: self['sde_strategy'] = 2 - + # if polarization is used, set the choice of the frame in the run_card # But only if polarization is used for massive particles for plist in proc_def: @@ -4870,7 +4870,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): model = proc.get('model') particle = model.get_particle(l.get('id')) if particle.get('mass').lower() != 'zero': - self.display_block.append('frame') + self.display_block.append('frame') break else: continue @@ -4894,15 +4894,15 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): proc = proc_list[0] if proc['forbidden_onsh_s_channels']: self['sde_strategy'] = 1 - + if 'fix_scale' in proc_characteristic['limitations']: self['fixed_ren_scale'] = 1 self['fixed_fac_scale'] = 1 if self['ickkw'] == 1: logger.critical("MLM matching/merging not compatible with the model! 
You need to use another method to remove the double counting!") self['ickkw'] = 0 - - # define class of particles present to hide all the cuts associated to + + # define class of particles present to hide all the cuts associated to # not present class cut_class = collections.defaultdict(int) for proc in proc_def: @@ -4925,41 +4925,41 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): one_proc_cut['L'] += 1 elif abs(pdg) in [12,14,16]: one_proc_cut['n'] += 1 - one_proc_cut['L'] += 1 + one_proc_cut['L'] += 1 elif str(oneproc.get('model').get_particle(pdg)['mass']) != 'ZERO': one_proc_cut['H'] += 1 - + for key, nb in one_proc_cut.items(): cut_class[key] = max(cut_class[key], nb) self.cut_class = dict(cut_class) self.cut_class[''] = True #avoid empty - + # If model has running functionality add the additional parameter model = proc_def[0][0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Read file input/default_run_card_lo.dat # This has to be LAST !! 
if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'run_card.dat') python_template = True else: template = pjoin(MEDIR, 'Cards', 'run_card_default.dat') python_template = False - + hid_lines = {'default':True}#collections.defaultdict(itertools.repeat(True).next) if isinstance(output_file, str): @@ -4975,9 +4975,9 @@ def write(self, output_file, template=None, python_template=False, hid_lines[k1+k2] = True super(RunCardLO, self).write(output_file, template=template, - python_template=python_template, + python_template=python_template, template_options=hid_lines, - **opt) + **opt) class InvalidMadAnalysis5Card(InvalidCmd): @@ -4986,19 +4986,19 @@ class InvalidMadAnalysis5Card(InvalidCmd): class MadAnalysis5Card(dict): """ A class to store a MadAnalysis5 card. 
Very basic since it is basically free format.""" - + _MG5aMC_escape_tag = '@MG5aMC' - + _default_hadron_inputs = ['*.hepmc', '*.hep', '*.stdhep', '*.lhco','*.root'] _default_parton_inputs = ['*.lhe'] _skip_analysis = False - + @classmethod def events_can_be_reconstructed(cls, file_path): """ Checks from the type of an event file whether it can be reconstructed or not.""" return not (file_path.endswith('.lhco') or file_path.endswith('.lhco.gz') or \ file_path.endswith('.root') or file_path.endswith('.root.gz')) - + @classmethod def empty_analysis(cls): """ A method returning the structure of an empty analysis """ @@ -5012,7 +5012,7 @@ def empty_reconstruction(cls): 'reco_output':'lhe'} def default_setup(self): - """define the default value""" + """define the default value""" self['mode'] = 'parton' self['inputs'] = [] # None is the default stdout level, it will be set automatically by MG5aMC @@ -5025,8 +5025,8 @@ def default_setup(self): # of this class and some other property could be added to this dictionary # in the future. self['analyses'] = {} - # The recasting structure contains on set of commands and one set of - # card lines. + # The recasting structure contains on set of commands and one set of + # card lines. 
self['recasting'] = {'commands':[],'card':[]} # Add the default trivial reconstruction to use an lhco input # This is just for the structure @@ -5035,7 +5035,7 @@ def default_setup(self): 'root_input': MadAnalysis5Card.empty_reconstruction()} self['reconstruction']['lhco_input']['reco_output']='lhco' - self['reconstruction']['root_input']['reco_output']='root' + self['reconstruction']['root_input']['reco_output']='root' # Specify in which order the analysis/recasting were specified self['order'] = [] @@ -5049,7 +5049,7 @@ def __init__(self, finput=None,mode=None): return else: dict.__init__(self) - + # Initialize it with all the default value self.default_setup() if not mode is None: @@ -5058,15 +5058,15 @@ def __init__(self, finput=None,mode=None): # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, mode=mode) - + def read(self, input, mode=None): """ Read an MA5 card""" - + if mode not in [None,'parton','hadron']: raise MadGraph5Error('A MadAnalysis5Card can be read online the modes'+ "'parton' or 'hadron'") card_mode = mode - + if isinstance(input, (file, StringIO.StringIO)): input_stream = input elif isinstance(input, str): @@ -5099,10 +5099,10 @@ def read(self, input, mode=None): except ValueError: option = line[len(self._MG5aMC_escape_tag):] option = option.strip() - + if option=='inputs': self['inputs'].extend([v.strip() for v in value.split(',')]) - + elif option == 'skip_analysis': self._skip_analysis = True @@ -5118,7 +5118,7 @@ def read(self, input, mode=None): except: raise InvalidMadAnalysis5Card( "MA5 output level specification '%s' is incorrect."%str(value)) - + elif option=='analysis_name': current_type = 'analyses' current_name = value @@ -5127,7 +5127,7 @@ def read(self, input, mode=None): "Analysis '%s' already defined in MadAnalysis5 card"%current_name) else: self[current_type][current_name] = MadAnalysis5Card.empty_analysis() - + elif option=='set_reconstructions': try: reconstructions = 
eval(value) @@ -5142,7 +5142,7 @@ def read(self, input, mode=None): "analysis in a MadAnalysis5 card.") self[current_type][current_name]['reconstructions']=reconstructions continue - + elif option=='reconstruction_name': current_type = 'reconstruction' current_name = value @@ -5161,7 +5161,7 @@ def read(self, input, mode=None): raise InvalidMadAnalysis5Card( "Option '%s' can only take the values 'lhe' or 'root'"%option) self['reconstruction'][current_name]['reco_output'] = value.lower() - + elif option.startswith('recasting'): current_type = 'recasting' try: @@ -5171,11 +5171,11 @@ def read(self, input, mode=None): if len(self['recasting'][current_name])>0: raise InvalidMadAnalysis5Card( "Only one recasting can be defined in MadAnalysis5 hadron card") - + else: raise InvalidMadAnalysis5Card( "Unreckognized MG5aMC instruction in MadAnalysis5 card: '%s'"%option) - + if option in ['analysis_name','reconstruction_name'] or \ option.startswith('recasting'): self['order'].append((current_type,current_name)) @@ -5209,7 +5209,7 @@ def read(self, input, mode=None): self['inputs'] = self._default_hadron_inputs else: self['inputs'] = self._default_parton_inputs - + # Make sure at least one reconstruction is specified for each hadron # level analysis and that it exists. if self['mode']=='hadron': @@ -5221,7 +5221,7 @@ def read(self, input, mode=None): analysis['reconstructions']): raise InvalidMadAnalysis5Card('A reconstructions specified in'+\ " analysis '%s' is not defined."%analysis_name) - + def write(self, output): """ Write an MA5 card.""" @@ -5232,7 +5232,7 @@ def write(self, output): else: raise MadGraph5Error('Incorrect input for the write function of'+\ ' the MadAnalysis5Card card. 
Received argument type is: %s'%str(type(output))) - + output_lines = [] if self._skip_analysis: output_lines.append('%s skip_analysis'%self._MG5aMC_escape_tag) @@ -5240,11 +5240,11 @@ def write(self, output): if not self['stdout_lvl'] is None: output_lines.append('%s stdout_lvl=%s'%(self._MG5aMC_escape_tag,self['stdout_lvl'])) for definition_type, name in self['order']: - + if definition_type=='analyses': output_lines.append('%s analysis_name = %s'%(self._MG5aMC_escape_tag,name)) output_lines.append('%s set_reconstructions = %s'%(self._MG5aMC_escape_tag, - str(self['analyses'][name]['reconstructions']))) + str(self['analyses'][name]['reconstructions']))) elif definition_type=='reconstruction': output_lines.append('%s reconstruction_name = %s'%(self._MG5aMC_escape_tag,name)) elif definition_type=='recasting': @@ -5254,23 +5254,23 @@ def write(self, output): output_lines.extend(self[definition_type][name]) elif definition_type in ['reconstruction']: output_lines.append('%s reco_output = %s'%(self._MG5aMC_escape_tag, - self[definition_type][name]['reco_output'])) + self[definition_type][name]['reco_output'])) output_lines.extend(self[definition_type][name]['commands']) elif definition_type in ['analyses']: - output_lines.extend(self[definition_type][name]['commands']) - + output_lines.extend(self[definition_type][name]['commands']) + output_stream.write('\n'.join(output_lines)) - + return - - def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, + + def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, UFO_model_path=None, run_tag=''): - """ Returns a list of tuples ('AnalysisTag',['commands']) specifying - the commands of the MadAnalysis runs required from this card. - At parton-level, the number of such commands is the number of analysis + """ Returns a list of tuples ('AnalysisTag',['commands']) specifying + the commands of the MadAnalysis runs required from this card. 
+ At parton-level, the number of such commands is the number of analysis asked for. In the future, the idea is that the entire card can be processed in one go from MA5 directly.""" - + if isinstance(inputs_arg, list): inputs = inputs_arg elif isinstance(inputs_arg, str): @@ -5278,21 +5278,21 @@ def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, else: raise MadGraph5Error("The function 'get_MA5_cmds' can only take "+\ " a string or a list for the argument 'inputs_arg'") - + if len(inputs)==0: raise MadGraph5Error("The function 'get_MA5_cmds' must have "+\ " at least one input specified'") - + if run_dir_path is None: run_dir_path = os.path.dirname(inputs_arg) - + cmds_list = [] - + UFO_load = [] # first import the UFO if provided if UFO_model_path: UFO_load.append('import %s'%UFO_model_path) - + def get_import(input, type=None): """ Generates the MA5 import commands for that event file. """ dataset_name = os.path.basename(input).split('.')[0] @@ -5304,7 +5304,7 @@ def get_import(input, type=None): if not type is None: res.append('set %s.type = %s'%(dataset_name, type)) return res - + fifo_status = {'warned_fifo':False,'fifo_used_up':False} def warn_fifo(input): if not input.endswith('.fifo'): @@ -5317,7 +5317,7 @@ def warn_fifo(input): logger.warning('Only the first MA5 analysis/reconstructions can be run on a fifo. Subsequent runs will skip fifo inputs.') fifo_status['warned_fifo'] = True return True - + # Then the event file(s) input(s) inputs_load = [] for input in inputs: @@ -5325,16 +5325,16 @@ def warn_fifo(input): if len(inputs) > 1: inputs_load.append('set main.stacking_method = superimpose') - + submit_command = 'submit %s'%submit_folder+'_%s' - + # Keep track of the reconstruction outpus in the MA5 workflow # Keys are reconstruction names and values are .lhe.gz reco file paths. 
# We put by default already the lhco/root ones present reconstruction_outputs = { - 'lhco_input':[f for f in inputs if + 'lhco_input':[f for f in inputs if f.endswith('.lhco') or f.endswith('.lhco.gz')], - 'root_input':[f for f in inputs if + 'root_input':[f for f in inputs if f.endswith('.root') or f.endswith('.root.gz')]} # If a recasting card has to be written out, chose here its path @@ -5343,7 +5343,7 @@ def warn_fifo(input): # Make sure to only run over one analysis over each fifo. for definition_type, name in self['order']: - if definition_type == 'reconstruction': + if definition_type == 'reconstruction': analysis_cmds = list(self['reconstruction'][name]['commands']) reco_outputs = [] for i_input, input in enumerate(inputs): @@ -5365,8 +5365,8 @@ def warn_fifo(input): analysis_cmds.append( submit_command%('reco_%s_%d'%(name,i_input+1))) analysis_cmds.append('remove reco_events') - - reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) + + reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) for rec_out in reco_outputs] if len(reco_outputs)>0: cmds_list.append(('_reco_%s'%name,analysis_cmds)) @@ -5386,7 +5386,7 @@ def warn_fifo(input): analysis_cmds = ['set main.mode = parton'] else: analysis_cmds = [] - analysis_cmds.extend(sum([get_import(rec_out) for + analysis_cmds.extend(sum([get_import(rec_out) for rec_out in reconstruction_outputs[reco]],[])) analysis_cmds.extend(self['analyses'][name]['commands']) analysis_cmds.append(submit_command%('%s_%s'%(name,reco))) @@ -5427,12 +5427,12 @@ def warn_fifo(input): %(mue_ref_fixed)s = mue_ref_fixed ! 
scale to use if fixed scale mode """ running_block_nlo = RunBlock('RUNNING', template_on=template_on, template_off="") - + class RunCardNLO(RunCard): """A class object for the run_card for a (aMC@)NLO pocess""" - + LO = False - + blocks = [running_block_nlo] dummy_fct_file = {"dummy_cuts": pjoin("SubProcesses","dummy_fct.f"), @@ -5443,11 +5443,11 @@ class RunCardNLO(RunCard): if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_nlo.dat") - - + + def default_setup(self): """define the default value""" - + self.add_param('run_tag', 'tag_1', include=False) self.add_param('nevents', 10000) self.add_param('req_acc', -1.0, include=False) @@ -5455,27 +5455,27 @@ def default_setup(self): self.add_param("time_of_flight", -1.0, include=False) self.add_param('event_norm', 'average') #FO parameter - self.add_param('req_acc_fo', 0.01, include=False) + self.add_param('req_acc_fo', 0.01, include=False) self.add_param('npoints_fo_grid', 5000, include=False) self.add_param('niters_fo_grid', 4, include=False) - self.add_param('npoints_fo', 10000, include=False) + self.add_param('npoints_fo', 10000, include=False) self.add_param('niters_fo', 6, include=False) #seed and collider self.add_param('iseed', 0) - self.add_param('lpp1', 1, fortran_name='lpp(1)') - self.add_param('lpp2', 1, fortran_name='lpp(2)') + self.add_param('lpp1', 1, fortran_name='lpp(1)') + self.add_param('lpp2', 1, fortran_name='lpp(2)') self.add_param('ebeam1', 6500.0, fortran_name='ebeam(1)') - self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') + self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') self.add_param('pdlabel', 'nn23nlo', allowed=['lhapdf', 'emela', 'cteq6_m','cteq6_d','cteq6_l','cteq6l1', 'nn23lo','nn23lo1','nn23nlo','ct14q00','ct14q07','ct14q14','ct14q21'] +\ - sum(self.allowed_lep_densities.values(),[]) ) + sum(self.allowed_lep_densities.values(),[]) ) self.add_param('lhaid', [244600],fortran_name='lhaPDFid') self.add_param('pdfscheme', 0) # whether to include or not 
photon-initiated processes in lepton collisions self.add_param('photons_from_lepton', True) self.add_param('lhapdfsetname', ['internal_use_only'], system=True) - # stuff for lepton collisions - # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set - # whether the current PDF set has or not beamstrahlung + # stuff for lepton collisions + # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set + # whether the current PDF set has or not beamstrahlung self.add_param('has_bstrahl', False, system=True) # renormalisation scheme of alpha self.add_param('alphascheme', 0, system=True) @@ -5486,31 +5486,31 @@ def default_setup(self): # w contribution included or not in the running of alpha self.add_param('w_run', 1, system=True) #shower and scale - self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') + self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') self.add_param('shower_scale_factor',1.0) self.add_param('mcatnlo_delta', False) self.add_param('fixed_ren_scale', False) self.add_param('fixed_fac_scale', False) self.add_param('fixed_extra_scale', True, hidden=True, system=True) # set system since running from Ellis-Sexton scale not implemented - self.add_param('mur_ref_fixed', 91.118) + self.add_param('mur_ref_fixed', 91.118) self.add_param('muf1_ref_fixed', -1.0, hidden=True) - self.add_param('muf_ref_fixed', 91.118) + self.add_param('muf_ref_fixed', 91.118) self.add_param('muf2_ref_fixed', -1.0, hidden=True) - self.add_param('mue_ref_fixed', 91.118, hidden=True) - self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', + self.add_param('mue_ref_fixed', 91.118, hidden=True) + self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', allowed = [-2,-1,0,1,2,3,10], comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n 
'3' is HT/2, '0' allows to use the user_hook definition (need to be defined via custom_fct entry) ") self.add_param('fixed_qes_scale', False, hidden=True) self.add_param('qes_ref_fixed', -1.0, hidden=True) self.add_param('mur_over_ref', 1.0) - self.add_param('muf_over_ref', 1.0) - self.add_param('muf1_over_ref', -1.0, hidden=True) + self.add_param('muf_over_ref', 1.0) + self.add_param('muf1_over_ref', -1.0, hidden=True) self.add_param('muf2_over_ref', -1.0, hidden=True) self.add_param('mue_over_ref', 1.0, hidden=True, system=True) # forbid the user to modigy due to incorrect handling of the Ellis-Sexton scale self.add_param('qes_over_ref', -1.0, hidden=True) self.add_param('reweight_scale', [True], fortran_name='lscalevar') - self.add_param('rw_rscale_down', -1.0, hidden=True) + self.add_param('rw_rscale_down', -1.0, hidden=True) self.add_param('rw_rscale_up', -1.0, hidden=True) - self.add_param('rw_fscale_down', -1.0, hidden=True) + self.add_param('rw_fscale_down', -1.0, hidden=True) self.add_param('rw_fscale_up', -1.0, hidden=True) self.add_param('rw_rscale', [1.0,2.0,0.5], fortran_name='scalevarR') self.add_param('rw_fscale', [1.0,2.0,0.5], fortran_name='scalevarF') @@ -5523,60 +5523,60 @@ def default_setup(self): #technical self.add_param('folding', [1,1,1], include=False) - + #merging self.add_param('ickkw', 0, allowed=[-1,0,3,4], comment=" - 0: No merging\n - 3: FxFx Merging : http://amcatnlo.cern.ch/FxFx_merging.htm\n - 4: UNLOPS merging (No interface within MG5aMC)\n - -1: NNLL+NLO jet-veto computation. 
See arxiv:1412.8408 [hep-ph]") self.add_param('bwcutoff', 15.0) - #cuts + #cuts self.add_param('jetalgo', 1.0) - self.add_param('jetradius', 0.7) + self.add_param('jetradius', 0.7) self.add_param('ptj', 10.0 , cut=True) - self.add_param('etaj', -1.0, cut=True) - self.add_param('gamma_is_j', True) + self.add_param('etaj', -1.0, cut=True) + self.add_param('gamma_is_j', True) self.add_param('ptl', 0.0, cut=True) - self.add_param('etal', -1.0, cut=True) + self.add_param('etal', -1.0, cut=True) self.add_param('drll', 0.0, cut=True) - self.add_param('drll_sf', 0.0, cut=True) + self.add_param('drll_sf', 0.0, cut=True) self.add_param('mll', 0.0, cut=True) - self.add_param('mll_sf', 30.0, cut=True) - self.add_param('rphreco', 0.1) - self.add_param('etaphreco', -1.0) - self.add_param('lepphreco', True) - self.add_param('quarkphreco', True) + self.add_param('mll_sf', 30.0, cut=True) + self.add_param('rphreco', 0.1) + self.add_param('etaphreco', -1.0) + self.add_param('lepphreco', True) + self.add_param('quarkphreco', True) self.add_param('ptgmin', 20.0, cut=True) - self.add_param('etagamma', -1.0) + self.add_param('etagamma', -1.0) self.add_param('r0gamma', 0.4) - self.add_param('xn', 1.0) + self.add_param('xn', 1.0) self.add_param('epsgamma', 1.0) - self.add_param('isoem', True) + self.add_param('isoem', True) self.add_param('maxjetflavor', 4, hidden=True) - self.add_param('pineappl', False) + self.add_param('pineappl', False) self.add_param('lhe_version', 3, hidden=True, include=False) - + # customization self.add_param("custom_fcts",[],typelist="str", include=False, comment="list of files containing function that overwritte dummy function of the code (like adding cuts/...)") #internal variable related to FO_analyse_card self.add_param('FO_LHE_weight_ratio',1e-3, hidden=True, system=True) - self.add_param('FO_LHE_postprocessing',['grouping','random'], + self.add_param('FO_LHE_postprocessing',['grouping','random'], hidden=True, system=True, include=False) - + # parameter 
allowing to define simple cut via the pdg self.add_param('pt_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('pt_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True) - + #hidden parameter that are transfer to the fortran code self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min self.add_param('ptmax4pdg',[-1.], hidden=True, system=True) self.add_param('mxxmin4pdg',[0.], hidden=True, system=True) self.add_param('mxxpart_antipart', [False], hidden=True, system=True) - + def check_validity(self): """check the validity of the various input""" - + super(RunCardNLO, self).check_validity() # for lepton-lepton collisions, ignore 'pdlabel' and 'lhaid' @@ -5588,12 +5588,12 @@ def check_validity(self): # for dressed lepton collisions, check that the lhaid is a valid one if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]) + ['emela']: raise InvalidRunCard('pdlabel %s not allowed for dressed-lepton collisions' % self['pdlabel']) - + elif self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' self['reweight_pdf']=[False] logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') - + if self['lpp1'] == 0 == self['lpp2']: if self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' @@ -5601,8 +5601,8 @@ def check_validity(self): logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') # For FxFx merging, make sure that the following parameters are set correctly: - if self['ickkw'] == 3: - # 1. Renormalization and factorization (and ellis-sexton scales) are not fixed + if self['ickkw'] == 3: + # 1. 
Renormalization and factorization (and ellis-sexton scales) are not fixed scales=['fixed_ren_scale','fixed_fac_scale','fixed_QES_scale'] for scale in scales: if self[scale]: @@ -5615,7 +5615,7 @@ def check_validity(self): self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency in FxFx merging, dynamical_scale_choice has been set to -1 (default)''' ,'$MG:BOLD') - + # 2. Use kT algorithm for jets with pseudo-code size R=1.0 jetparams=['jetradius','jetalgo'] for jetparam in jetparams: @@ -5628,8 +5628,8 @@ def check_validity(self): self["dynamical_scale_choice"] = [-1] self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency with the jet veto, the scale which will be used is ptj. dynamical_scale_choice will be set at -1.''' - ,'$MG:BOLD') - + ,'$MG:BOLD') + # For interface to PINEAPPL, need to use LHAPDF and reweighting to get scale uncertainties if self['pineappl'] and self['pdlabel'].lower() != 'lhapdf': raise InvalidRunCard('PineAPPL generation only possible with the use of LHAPDF') @@ -5661,7 +5661,7 @@ def check_validity(self): if (self['rw_fscale_down'] != -1.0 and ['rw_fscale_down'] not in self['rw_fscale']) or\ (self['rw_fscale_up'] != -1.0 and ['rw_fscale_up'] not in self['rw_fscale']): self['rw_fscale']=[1.0,self['rw_fscale_up'],self['rw_fscale_down']] - + # PDF reweighting check if any(self['reweight_pdf']): # check that we use lhapdf if reweighting is ON @@ -5672,7 +5672,7 @@ def check_validity(self): if self['pdlabel'] != "lhapdf": self['reweight_pdf']=[self['reweight_pdf'][0]] self['lhaid']=[self['lhaid'][0]] - + # make sure set have reweight_scale and dyn_scale_choice of length 1 when fixed scales: if self['fixed_ren_scale'] and self['fixed_fac_scale']: self['reweight_scale']=[self['reweight_scale'][0]] @@ -5685,7 +5685,7 @@ def check_validity(self): self['reweight_pdf']=self['reweight_pdf']*len(self['lhaid']) logger.warning("Setting 'reweight_pdf' for all 'lhaid' to %s" % self['reweight_pdf'][0]) 
if len(self['reweight_scale']) == 1 and len(self['dynamical_scale_choice']) != 1: - self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) + self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) logger.warning("Setting 'reweight_scale' for all 'dynamical_scale_choice' to %s" % self['reweight_pdf'][0]) # Check that there are no identical elements in lhaid or dynamical_scale_choice @@ -5693,7 +5693,7 @@ def check_validity(self): raise InvalidRunCard("'lhaid' has two or more identical entries. They have to be all different for the code to work correctly.") if len(self['dynamical_scale_choice']) != len(set(self['dynamical_scale_choice'])): raise InvalidRunCard("'dynamical_scale_choice' has two or more identical entries. They have to be all different for the code to work correctly.") - + # Check that lenght of lists are consistent if len(self['reweight_pdf']) != len(self['lhaid']): raise InvalidRunCard("'reweight_pdf' and 'lhaid' lists should have the same length") @@ -5730,7 +5730,7 @@ def check_validity(self): if len(self['folding']) != 3: raise InvalidRunCard("'folding' should contain exactly three integers") for ifold in self['folding']: - if ifold not in [1,2,4,8]: + if ifold not in [1,2,4,8]: raise InvalidRunCard("The three 'folding' parameters should be equal to 1, 2, 4, or 8.") # Check MC@NLO-Delta if self['mcatnlo_delta'] and not self['parton_shower'].lower() == 'pythia8': @@ -5746,11 +5746,11 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938 GeV") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + raise InvalidRunCard("Energy for beam %i lower than proton mass. 
Please fix this") def update_system_parameter_for_include(self): - + # set the pdg_for_cut fortran parameter pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys())+ list(self['mxx_only_part_antipart'].keys())) @@ -5758,12 +5758,12 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different PDGs are allowed for PDG specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative PDG codes') - - + + if any(pdg in pdg_to_cut for pdg in [21,22,11,13,15]+ list(range(self['maxjetflavor']+1))): # Note that this will double check in the fortran code raise Exception("Can not use PDG related cuts for massless SM particles/leptons") @@ -5790,7 +5790,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -5800,12 +5800,12 @@ def update_system_parameter_for_include(self): self['mxxpart_antipart'] = [False] def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', 'run_card.dat') python_template = True else: @@ -5818,7 +5818,7 @@ def 
write(self, output_file, template=None, python_template=False, **opt): def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules - e+ e- beam -> lpp:0 ebeam:500 + e+ e- beam -> lpp:0 ebeam:500 p p beam -> set maxjetflavor automatically process with tagged photons -> gamma_is_j = false process without QED splittings -> gamma_is_j = false, recombination = false @@ -5844,19 +5844,19 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam2'] = 500 else: self['lpp1'] = 0 - self['lpp2'] = 0 - + self['lpp2'] = 0 + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() # check for tagged photons tagged_particles = set() - + # If model has running functionality add the additional parameter model = proc_def[0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Check if need matching min_particle = 99 @@ -5885,7 +5885,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: idsmin = [l['id'] for l in procmin['legs']] break - + for procmax in proc_def: if len(procmax['legs']) != max_particle: continue @@ -5901,9 +5901,9 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - - if matching: + break + + if matching: self['ickkw'] = 3 self['fixed_ren_scale'] = False self["fixed_fac_scale"] = False @@ -5911,17 +5911,17 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self["jetalgo"] = 1 self["jetradius"] = 1 self["parton_shower"] = "PYTHIA8" - + # Read file input/default_run_card_nlo.dat # This has to be LAST !! if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + class MadLoopParam(ConfigFile): """ a class for storing/dealing with the file MadLoopParam.dat contains a parser to read it, facilities to write a new file,... 
""" - + _ID_reduction_tool_map = {1:'CutTools', 2:'PJFry++', 3:'IREGI', @@ -5929,10 +5929,10 @@ class MadLoopParam(ConfigFile): 5:'Samurai', 6:'Ninja', 7:'COLLIER'} - + def default_setup(self): """initialize the directory to the default value""" - + self.add_param("MLReductionLib", "6|7|1") self.add_param("IREGIMODE", 2) self.add_param("IREGIRECY", True) @@ -5954,7 +5954,7 @@ def default_setup(self): self.add_param("HelicityFilterLevel", 2) self.add_param("LoopInitStartOver", False) self.add_param("HelInitStartOver", False) - self.add_param("UseQPIntegrandForNinja", True) + self.add_param("UseQPIntegrandForNinja", True) self.add_param("UseQPIntegrandForCutTools", True) self.add_param("COLLIERMode", 1) self.add_param("COLLIERComputeUVpoles", True) @@ -5966,9 +5966,9 @@ def default_setup(self): self.add_param("COLLIERUseInternalStabilityTest",True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -5976,7 +5976,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % input) - + previous_line= '' for line in finput: if previous_line.startswith('#'): @@ -5985,20 +5985,20 @@ def read(self, finput): if len(value) and value[0] not in ['#', '!']: self.__setitem__(name, value, change_userdefine=True) previous_line = line - - + + def write(self, outputpath, template=None,commentdefault=False): - + if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', + template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', 'Cards', 'MadLoopParams.dat') else: template = pjoin(MEDIR, 'Cards', 'MadLoopParams_default.dat') fsock = open(template, 'r') template = fsock.readlines() fsock.close() - + if isinstance(outputpath, str): output = open(outputpath, 'w') else: @@ -6019,7 
+6019,7 @@ def f77format(value): return value else: raise Exception("Can not format input %s" % type(value)) - + name = '' done = set() for line in template: @@ -6034,12 +6034,12 @@ def f77format(value): elif line.startswith('#'): name = line[1:].split()[0] output.write(line) - - - - - -class eMELA_info(ConfigFile): + + + + + +class eMELA_info(ConfigFile): """ a class for eMELA (LHAPDF-like) info files """ path = '' @@ -6053,7 +6053,7 @@ def __init__(self, finput, me_dir): def read(self, finput): - if isinstance(finput, file): + if isinstance(finput, file): lines = finput.open().read().split('\n') self.path = finput.name else: @@ -6066,7 +6066,7 @@ def read(self, finput): k, v = l.split(':', 1) # ignore further occurrences of : try: self[k.strip()] = eval(v) - except (NameError, SyntaxError): + except (NameError, SyntaxError): self[k.strip()] = v def default_setup(self): @@ -6091,7 +6091,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): +"powers of alpha should be reweighted a posteriori") - logger.info('Updating variables according to %s' % self.path) + logger.info('Updating variables according to %s' % self.path) # Flavours in the running of alpha nd, nu, nl = self['eMELA_ActiveFlavoursAlpha'] self.log_and_update(banner, 'run_card', 'ndnq_run', nd) @@ -6130,8 +6130,8 @@ def update_epdf_emela_variables(self, banner, uvscheme): logger.warning('Cannot treat the following renormalisation schemes for ME and PDFs: %d, %d' \ % (uvscheme, uvscheme_pdf)) - # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref - # also check that the com energy is equal to qref, otherwise print a + # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref + # also check that the com energy is equal to qref, otherwise print a # warning if uvscheme_pdf == 1: qref = self['eMELA_AlphaQref'] @@ -6144,23 +6144,23 @@ def update_epdf_emela_variables(self, banner, uvscheme): # LL / NLL PDF (0/1) pdforder = self['eMELA_PerturbativeOrder'] - # pdfscheme = 
0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) + # pdfscheme = 0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) # 4->mixed (leptonic); 5-> nobeta (leptonic); 6->delta (leptonic) # if LL, use nobeta scheme unless LEGACYLLPDF > 0 if pdforder == 0: if 'eMELA_LEGACYLLPDF' not in self.keys() or self['eMELA_LEGACYLLPDF'] in [-1, 0]: self.log_and_update(banner, 'run_card', 'pdfscheme', 5) - elif self['eMELA_LEGACYLLPDF'] == 1: + elif self['eMELA_LEGACYLLPDF'] == 1: # mixed self.log_and_update(banner, 'run_card', 'pdfscheme', 4) - elif self['eMELA_LEGACYLLPDF'] == 2: + elif self['eMELA_LEGACYLLPDF'] == 2: # eta self.log_and_update(banner, 'run_card', 'pdfscheme', 2) - elif self['eMELA_LEGACYLLPDF'] == 3: + elif self['eMELA_LEGACYLLPDF'] == 3: # beta self.log_and_update(banner, 'run_card', 'pdfscheme', 3) elif pdforder == 1: - # for NLL, use eMELA_FactorisationSchemeInt = 0/1 + # for NLL, use eMELA_FactorisationSchemeInt = 0/1 # for delta/MSbar if self['eMELA_FactorisationSchemeInt'] == 0: # MSbar @@ -6177,7 +6177,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): - + def log_and_update(self, banner, card, par, v): """update the card parameter par to value v diff --git a/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py b/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py index 5fd170d18d..cc842aa50f 100755 --- a/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py +++ b/epochX/cudacpp/gg_tt.mad/bin/internal/gen_ximprove.py @@ -2,18 +2,18 @@ # # Copyright (c) 2014 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. 
# -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch # ################################################################################ """ A python file to replace the fortran script gen_ximprove. - This script analyses the result of the survey/ previous refine and + This script analyses the result of the survey/ previous refine and creates the jobs for the following script. """ from __future__ import division @@ -66,77 +66,77 @@ class gensym(object): """a class to call the fortran gensym executable and handle it's output in order to create the various job that are needed for the survey""" - + #convenient shortcut for the formatting of variable @ staticmethod def format_variable(*args): return bannermod.ConfigFile.format_variable(*args) - + combining_job = 2 # number of channel by ajob - splitted_grid = False + splitted_grid = False min_iterations = 3 mode= "survey" - + def __init__(self, cmd, opt=None): - + try: super(gensym, self).__init__(cmd, opt) except TypeError: pass - - # Run statistics, a dictionary of RunStatistics(), with + + # Run statistics, a dictionary of RunStatistics(), with self.run_statistics = {} - + self.cmd = cmd self.run_card = cmd.run_card self.me_dir = cmd.me_dir - - + + # dictionary to keep track of the precision when combining iteration self.cross = collections.defaultdict(int) self.abscross = collections.defaultdict(int) self.sigma = collections.defaultdict(int) self.chi2 = collections.defaultdict(int) - + self.splitted_grid = False if self.cmd.proc_characteristics['loop_induced']: nexternal = self.cmd.proc_characteristics['nexternal'] self.splitted_grid = max(2, (nexternal-2)**2) if hasattr(self.cmd, "opts") and self.cmd.opts['accuracy'] == 0.1: self.cmd.opts['accuracy'] = 0.02 - + if isinstance(cmd.cluster, cluster.MultiCore) and 
self.splitted_grid > 1: self.splitted_grid = int(cmd.cluster.nb_core**0.5) if self.splitted_grid == 1 and cmd.cluster.nb_core >1: self.splitted_grid = 2 - + #if the user defines it in the run_card: if self.run_card['survey_splitting'] != -1: self.splitted_grid = self.run_card['survey_splitting'] if self.run_card['survey_nchannel_per_job'] != 1 and 'survey_nchannel_per_job' in self.run_card.user_set: - self.combining_job = self.run_card['survey_nchannel_per_job'] + self.combining_job = self.run_card['survey_nchannel_per_job'] elif self.run_card['hard_survey'] > 1: self.combining_job = 1 - - + + self.splitted_Pdir = {} self.splitted_for_dir = lambda x,y: self.splitted_grid self.combining_job_for_Pdir = lambda x: self.combining_job self.lastoffset = {} - + done_warning_zero_coupling = False def get_helicity(self, to_submit=True, clean=True): """launch a single call to madevent to get the list of non zero helicity""" - - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc P_zero_result = [] nb_tot_proc = len(subproc) - job_list = {} - - + job_list = {} + + for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.' 
% \ (nb_proc+1,nb_tot_proc), level=None) @@ -154,7 +154,7 @@ def get_helicity(self, to_submit=True, clean=True): p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts - + (stdout, _) = p.communicate(''.encode()) stdout = stdout.decode('ascii',errors='ignore') if stdout: @@ -166,11 +166,11 @@ def get_helicity(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir, 'error')): os.remove(pjoin(self.me_dir, 'error')) continue # bypass bad process - + self.cmd.compile(['madevent_forhel'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'madevent_forhel')): - raise Exception('Error make madevent_forhel not successful') - + raise Exception('Error make madevent_forhel not successful') + if not os.path.exists(pjoin(Pdir, 'Hel')): os.mkdir(pjoin(Pdir, 'Hel')) ff = open(pjoin(Pdir, 'Hel', 'input_app.txt'),'w') @@ -180,15 +180,15 @@ def get_helicity(self, to_submit=True, clean=True): try: os.remove(pjoin(Pdir, 'Hel','results.dat')) except Exception: - pass + pass # Launch gensym - p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, + p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=pjoin(Pdir,'Hel'), shell=True) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(" ".encode()) stdout = stdout.decode('ascii',errors='ignore') if os.path.exists(pjoin(self.me_dir, 'error')): - raise Exception(pjoin(self.me_dir,'error')) + raise Exception(pjoin(self.me_dir,'error')) # note a continue is not enough here, we have in top to link # the matrixX_optim.f to matrixX_orig.f to let the code to work # after this error. 
@@ -203,7 +203,7 @@ def get_helicity(self, to_submit=True, clean=True): zero_gc = list() all_zampperhel = set() all_bad_amps_perhel = set() - + for line in stdout.splitlines(): if "=" not in line and ":" not in line: continue @@ -229,22 +229,22 @@ def get_helicity(self, to_submit=True, clean=True): "%s\n" % (' '.join(zero_gc)) +\ "This will slow down the computation. Please consider using restricted model:\n" +\ "https://answers.launchpad.net/mg5amcnlo/+faq/2312") - - + + all_good_hels = collections.defaultdict(list) for me_index, hel in all_hel: - all_good_hels[me_index].append(int(hel)) - + all_good_hels[me_index].append(int(hel)) + #print(all_hel) if self.run_card['hel_zeroamp']: all_bad_amps = collections.defaultdict(list) for me_index, amp in all_zamp: all_bad_amps[me_index].append(int(amp)) - + all_bad_amps_perhel = collections.defaultdict(list) for me_index, hel, amp in all_zampperhel: - all_bad_amps_perhel[me_index].append((int(hel),int(amp))) - + all_bad_amps_perhel[me_index].append((int(hel),int(amp))) + elif all_zamp: nb_zero = sum(int(a[1]) for a in all_zamp) if zero_gc: @@ -254,7 +254,7 @@ def get_helicity(self, to_submit=True, clean=True): else: logger.warning("The optimization detected that you have %i zero matrix-element for this SubProcess: %s.\n" % nb_zero +\ "This part can optimize if you set the flag hel_zeroamp to True in the run_card.") - + #check if we need to do something and write associate information" data = [all_hel, all_zamp, all_bad_amps_perhel] if not self.run_card['hel_zeroamp']: @@ -266,14 +266,14 @@ def get_helicity(self, to_submit=True, clean=True): old_data = open(pjoin(Pdir,'Hel','selection')).read() if old_data == data: continue - - + + with open(pjoin(Pdir,'Hel','selection'),'w') as fsock: - fsock.write(data) - - + fsock.write(data) + + for matrix_file in misc.glob('matrix*orig.f', Pdir): - + split_file = matrix_file.split('/') me_index = split_file[-1][len('matrix'):-len('_orig.f')] @@ -289,11 +289,11 @@ def 
get_helicity(self, to_submit=True, clean=True): #good_hels = sorted(list(good_hels)) good_hels = [str(x) for x in sorted(all_good_hels[me_index])] if self.run_card['hel_zeroamp']: - + bad_amps = [str(x) for x in sorted(all_bad_amps[me_index])] bad_amps_perhel = [x for x in sorted(all_bad_amps_perhel[me_index])] else: - bad_amps = [] + bad_amps = [] bad_amps_perhel = [] if __debug__: mtext = open(matrix_file).read() @@ -310,7 +310,7 @@ def get_helicity(self, to_submit=True, clean=True): recycler.set_input(matrix_file) recycler.set_output(out_file) - recycler.set_template(templ_file) + recycler.set_template(templ_file) recycler.generate_output_file() del recycler @@ -321,19 +321,19 @@ def get_helicity(self, to_submit=True, clean=True): return {}, P_zero_result - + def launch(self, to_submit=True, clean=True): """ """ if not hasattr(self, 'subproc'): - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc - + P_zero_result = [] # check the number of times where they are no phase-space - + nb_tot_proc = len(subproc) - job_list = {} + job_list = {} for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.
(previous processes already running)' % \ (nb_proc+1,nb_tot_proc), level=None) @@ -341,7 +341,7 @@ def launch(self, to_submit=True, clean=True): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) logger.info(' %s ' % subdir) - + # clean previous run if clean: for match in misc.glob('*ajob*', Pdir): @@ -349,17 +349,17 @@ def launch(self, to_submit=True, clean=True): os.remove(match) for match in misc.glob('G*', Pdir): if os.path.exists(pjoin(match,'results.dat')): - os.remove(pjoin(match, 'results.dat')) + os.remove(pjoin(match, 'results.dat')) if os.path.exists(pjoin(match, 'ftn25')): - os.remove(pjoin(match, 'ftn25')) - + os.remove(pjoin(match, 'ftn25')) + #compile gensym self.cmd.compile(['gensym'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'gensym')): - raise Exception('Error make gensym not successful') - + raise Exception('Error make gensym not successful') + # Launch gensym - p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, + p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(''.encode()) @@ -367,8 +367,8 @@ def launch(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir,'error')): files.mv(pjoin(self.me_dir,'error'), pjoin(Pdir,'ajob.no_ps.log')) P_zero_result.append(subdir) - continue - + continue + jobs = stdout.split() job_list[Pdir] = jobs try: @@ -386,8 +386,8 @@ def launch(self, to_submit=True, clean=True): continue else: if done: - raise Exception('Parsing error in gensym: %s' % stdout) - job_list[Pdir] = l.split() + raise Exception('Parsing error in gensym: %s' % stdout) + job_list[Pdir] = l.split() done = True if not done: raise Exception('Parsing error in gensym: %s' % stdout) @@ -408,16 +408,16 @@ def launch(self, to_submit=True, clean=True): if to_submit: self.submit_to_cluster(job_list) job_list = {} - + return job_list, P_zero_result - + def resubmit(self, 
min_precision=1.0, resubmit_zero=False): """collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - + job_list, P_zero_result = self.launch(to_submit=False, clean=False) - + for P , jobs in dict(job_list).items(): misc.sprint(jobs) to_resub = [] @@ -434,7 +434,7 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): elif max(one_result.xerru, one_result.xerrc)/one_result.xsec > min_precision: to_resub.append(job) else: - to_resub.append(job) + to_resub.append(job) if to_resub: for G in to_resub: try: @@ -442,19 +442,19 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): except Exception as error: misc.sprint(error) pass - misc.sprint(to_resub) + misc.sprint(to_resub) self.submit_to_cluster({P: to_resub}) - - - - - - - - - - - + + + + + + + + + + + def submit_to_cluster(self, job_list): """ """ @@ -467,7 +467,7 @@ def submit_to_cluster(self, job_list): nexternal = self.cmd.proc_characteristics['nexternal'] current = open(pjoin(path, "nexternal.inc")).read() ext = re.search(r"PARAMETER \(NEXTERNAL=(\d+)\)", current).group(1) - + if self.run_card['job_strategy'] == 2: self.splitted_grid = 2 if nexternal == int(ext): @@ -498,18 +498,18 @@ def submit_to_cluster(self, job_list): return self.submit_to_cluster_no_splitting(job_list) else: return self.submit_to_cluster_splitted(job_list) - - + + def submit_to_cluster_no_splitting(self, job_list): """submit the survey without the parralelization. 
This is the old mode which is still usefull in single core""" - - # write the template file for the parameter file + + # write the template file for the parameter file self.write_parameter(parralelization=False, Pdirs=list(job_list.keys())) - - + + # launch the job with the appropriate grouping - for Pdir, jobs in job_list.items(): + for Pdir, jobs in job_list.items(): jobs = list(jobs) i=0 while jobs: @@ -518,16 +518,16 @@ def submit_to_cluster_no_splitting(self, job_list): for _ in range(self.combining_job_for_Pdir(Pdir)): if jobs: to_submit.append(jobs.pop(0)) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=to_submit, cwd=pjoin(self.me_dir,'SubProcesses' , Pdir)) - + def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): """prepare the input_file for submitting the channel""" - + if 'SubProcesses' not in Pdir: Pdir = pjoin(self.me_dir, 'SubProcesses', Pdir) @@ -535,8 +535,8 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): self.splitted_Pdir[(Pdir, G)] = int(nb_job) - # 1. write the new input_app.txt - run_card = self.cmd.run_card + # 1. write the new input_app.txt + run_card = self.cmd.run_card options = {'event' : submit_ps, 'maxiter': 1, 'miniter': 1, @@ -545,29 +545,29 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): else run_card['nhel'], 'gridmode': -2, 'channel' : G - } - + } + Gdir = pjoin(Pdir, 'G%s' % G) - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + # 2. check that ftn25 exists. - assert os.path.exists(pjoin(Gdir, "ftn25")) - - + assert os.path.exists(pjoin(Gdir, "ftn25")) + + # 3. 
Submit the new jobs #call back function - packet = cluster.Packet((Pdir, G, step+1), + packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, (Pdir, G, step+1)) - + if step ==0: - self.lastoffset[(Pdir, G)] = 0 - - # resubmit the new jobs + self.lastoffset[(Pdir, G)] = 0 + + # resubmit the new jobs for i in range(int(nb_job)): name = "G%s_%s" % (G,i+1) self.lastoffset[(Pdir, G)] += 1 - offset = self.lastoffset[(Pdir, G)] + offset = self.lastoffset[(Pdir, G)] self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'refine_splitted.sh'), argument=[name, 'G%s'%G, offset], cwd= Pdir, @@ -575,9 +575,9 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): def submit_to_cluster_splitted(self, job_list): - """ submit the version of the survey with splitted grid creation - """ - + """ submit the version of the survey with splitted grid creation + """ + #if self.splitted_grid <= 1: # return self.submit_to_cluster_no_splitting(job_list) @@ -592,7 +592,7 @@ def submit_to_cluster_splitted(self, job_list): for job in jobs: packet = cluster.Packet((Pdir, job, 1), self.combine_iteration, (Pdir, job, 1)) - for i in range(self.splitted_for_dir(Pdir, job)): + for i in range(self.splitted_for_dir(Pdir, job)): self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[i+1, job], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), @@ -601,15 +601,15 @@ def submit_to_cluster_splitted(self, job_list): def combine_iteration(self, Pdir, G, step): grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - - # Compute the number of events used for this run. + + # Compute the number of events used for this run. nb_events = grid_calculator.target_evt Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) - + # 4. 
make the submission of the next iteration # Three cases - less than 3 iteration -> continue # - more than 3 and less than 5 -> check error @@ -627,15 +627,15 @@ def combine_iteration(self, Pdir, G, step): need_submit = False else: need_submit = True - + elif step >= self.cmd.opts['iterations']: need_submit = False elif self.cmd.opts['accuracy'] < 0: #check for luminosity raise Exception("Not Implemented") elif self.abscross[(Pdir,G)] == 0: - need_submit = False - else: + need_submit = False + else: across = self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) tot_across = self.get_current_axsec() if across == 0: @@ -646,20 +646,20 @@ def combine_iteration(self, Pdir, G, step): need_submit = True else: need_submit = False - - + + if cross: grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_events,mode=self.mode, conservative_factor=5.0) - - xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) - if float(cross)!=0.0 and float(error)!=0.0 else 8) + + xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) + if float(cross)!=0.0 and float(error)!=0.0 else 8) if need_submit: message = "%%s/G%%s is at %%%s +- %%.3g pb. 
Now submitting iteration #%s."%(xsec_format, step+1) logger.info(message%\ - (os.path.basename(Pdir), G, float(cross), + (os.path.basename(Pdir), G, float(cross), float(error)*float(cross))) self.resubmit_survey(Pdir,G, Gdirs, step) elif cross: @@ -670,26 +670,26 @@ def combine_iteration(self, Pdir, G, step): newGpath = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(newGpath): os.mkdir(newGpath) - + # copy the new grid: - files.cp(pjoin(Gdirs[0], 'ftn25'), + files.cp(pjoin(Gdirs[0], 'ftn25'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, 'ftn26')) - + # copy the events fsock = open(pjoin(newGpath, 'events.lhe'), 'w') for Gdir in Gdirs: - fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) - + fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) + # copy one log - files.cp(pjoin(Gdirs[0], 'log.txt'), + files.cp(pjoin(Gdirs[0], 'log.txt'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G)) - - + + # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) else: logger.info("Survey finished for %s/G%s [0 cross]", os.path.basename(Pdir),G) - + Gdir = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(Gdir): os.mkdir(Gdir) @@ -697,21 +697,21 @@ def combine_iteration(self, Pdir, G, step): files.cp(pjoin(Gdirs[0], 'log.txt'), Gdir) # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) - + return 0 def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): """ exclude_sub_jobs is to remove some of the subjobs if a numerical issue is detected in one of them. Warning is issue when this occurs. """ - + # 1. 
create an object to combine the grid information and fill it grid_calculator = combine_grid.grid_information(self.run_card['nhel']) - + for i in range(self.splitted_for_dir(Pdir, G)): if i in exclude_sub_jobs: continue - path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) fsock = misc.mult_try_open(pjoin(path, 'results.dat')) one_result = grid_calculator.add_results_information(fsock) fsock.close() @@ -723,9 +723,9 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): fsock.close() os.remove(pjoin(path, 'results.dat')) #os.remove(pjoin(path, 'grid_information')) - - - + + + #2. combine the information about the total crossection / error # start by keep the interation in memory cross, across, sigma = grid_calculator.get_cross_section() @@ -736,12 +736,12 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): if maxwgt: nunwgt = grid_calculator.get_nunwgt(maxwgt) # Make sure not to apply the security below during the first step of the - # survey. Also, disregard channels with a contribution relative to the + # survey. Also, disregard channels with a contribution relative to the # total cross-section smaller than 1e-8 since in this case it is unlikely # that this channel will need more than 1 event anyway. 
apply_instability_security = False rel_contrib = 0.0 - if (self.__class__ != gensym or step > 1): + if (self.__class__ != gensym or step > 1): Pdir_across = 0.0 Gdir_across = 0.0 for (mPdir,mG) in self.abscross.keys(): @@ -750,7 +750,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): (self.sigma[(mPdir,mG)]+1e-99)) if mG == G: Gdir_across += (self.abscross[(mPdir,mG)]/ - (self.sigma[(mPdir,mG)]+1e-99)) + (self.sigma[(mPdir,mG)]+1e-99)) rel_contrib = abs(Gdir_across/(Pdir_across+1e-99)) if rel_contrib > (1.0e-8) and \ nunwgt < 2 and len(grid_calculator.results) > 1: @@ -770,14 +770,14 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): exclude_sub_jobs = list(exclude_sub_jobs) exclude_sub_jobs.append(th_maxwgt[-1][1]) grid_calculator.results.run_statistics['skipped_subchannel'] += 1 - + # Add some monitoring of the problematic events - gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) + gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) if os.path.isfile(pjoin(gPath,'events.lhe')): lhe_file = lhe_parser.EventFile(pjoin(gPath,'events.lhe')) discardedPath = pjoin(Pdir,'DiscardedUnstableEvents') if not os.path.exists(discardedPath): - os.mkdir(discardedPath) + os.mkdir(discardedPath) if os.path.isdir(discardedPath): # Keep only the event with a maximum weight, as it surely # is the problematic one. 
@@ -790,10 +790,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): lhe_file.close() evtRecord.write(pjoin(gPath,'events.lhe').read()) evtRecord.close() - + return self.combine_grid(Pdir, G, step, exclude_sub_jobs) - + if across !=0: if sigma != 0: self.cross[(Pdir,G)] += cross**3/sigma**2 @@ -814,10 +814,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): self.chi2[(Pdir,G)] = 0 cross = self.cross[(Pdir,G)] error = 0 - + else: error = 0 - + grid_calculator.results.compute_values(update_statistics=True) if (str(os.path.basename(Pdir)), G) in self.run_statistics: self.run_statistics[(str(os.path.basename(Pdir)), G)]\ @@ -825,8 +825,8 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): else: self.run_statistics[(str(os.path.basename(Pdir)), G)] = \ grid_calculator.results.run_statistics - - self.warnings_from_statistics(G, grid_calculator.results.run_statistics) + + self.warnings_from_statistics(G, grid_calculator.results.run_statistics) stats_msg = grid_calculator.results.run_statistics.nice_output( '/'.join([os.path.basename(Pdir),'G%s'%G])) @@ -836,7 +836,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): # Clean up grid_information to avoid border effects in case of a crash for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) - try: + try: os.remove(pjoin(path, 'grid_information')) except OSError as oneerror: if oneerror.errno != 2: @@ -850,7 +850,7 @@ def warnings_from_statistics(self,G,stats): return EPS_fraction = float(stats['exceptional_points'])/stats['n_madloop_calls'] - + msg = "Channel %s has encountered a fraction of %.3g\n"+ \ "of numerically unstable loop matrix element computations\n"+\ "(which could not be rescued using quadruple precision).\n"+\ @@ -861,16 +861,16 @@ def warnings_from_statistics(self,G,stats): elif EPS_fraction > 0.01: logger.critical((msg%(G,EPS_fraction)).replace('might', 'can')) raise Exception((msg%(G,EPS_fraction)).replace('might', 'can')) 
- + def get_current_axsec(self): - + across = 0 for (Pdir,G) in self.abscross: across += self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) return across - + def write_results(self, grid_calculator, cross, error, Pdir, G, step): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -888,7 +888,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step): maxwgt = grid_calculator.get_max_wgt() nunwgt = grid_calculator.get_nunwgt() luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -897,20 +897,20 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s %s 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross), fstr(maxwgt)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - + def resubmit_survey(self, Pdir, G, Gdirs, step): """submit the next iteration of the survey""" # 1. write the new input_app.txt to double the number of points - run_card = self.cmd.run_card + run_card = self.cmd.run_card options = {'event' : 2**(step) * self.cmd.opts['points'] / self.splitted_grid, 'maxiter': 1, 'miniter': 1, @@ -919,18 +919,18 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): else run_card['nhel'], 'gridmode': -2, 'channel' : '' - } - + } + if int(options['helicity']) == 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + for Gdir in Gdirs: - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + + #2. 
resubmit the new jobs packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, \ - (Pdir, G, step+1)) + (Pdir, G, step+1)) nb_step = len(Gdirs) * (step+1) for i,subdir in enumerate(Gdirs): subdir = subdir.rsplit('_',1)[1] @@ -938,34 +938,34 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): offset = nb_step+i+1 offset=str(offset) tag = "%s.%s" % (subdir, offset) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[tag, G], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), packet_member=packet) - + def write_parameter_file(self, path, options): """ """ - + template =""" %(event)s %(maxiter)s %(miniter)s !Number of events and max and min iterations %(accuracy)s !Accuracy %(gridmode)s !Grid Adjustment 0=none, 2=adjust 1 !Suppress Amplitude 1=yes %(helicity)s !Helicity Sum/event 0=exact - %(channel)s """ + %(channel)s """ options['event'] = int(options['event']) open(path, 'w').write(template % options) - - + + def write_parameter(self, parralelization, Pdirs=None): """Write the parameter of the survey run""" run_card = self.cmd.run_card - + options = {'event' : self.cmd.opts['points'], 'maxiter': self.cmd.opts['iterations'], 'miniter': self.min_iterations, @@ -975,36 +975,36 @@ def write_parameter(self, parralelization, Pdirs=None): 'gridmode': 2, 'channel': '' } - + if int(options['helicity'])== 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + if parralelization: options['gridmode'] = -2 options['maxiter'] = 1 #this is automatic in dsample anyway options['miniter'] = 1 #this is automatic in dsample anyway options['event'] /= self.splitted_grid - + if not Pdirs: Pdirs = self.subproc - + for Pdir in Pdirs: - path =pjoin(Pdir, 'input_app.txt') + path =pjoin(Pdir, 'input_app.txt') self.write_parameter_file(path, options) - - -class gen_ximprove(object): - - + + +class gen_ximprove(object): + + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the 
number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job @@ -1022,7 +1022,7 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_gridpack) elif cls.force_class == 'loop_induced': return super(gen_ximprove, cls).__new__(gen_ximprove_share) - + if cmd.proc_characteristics['loop_induced']: return super(gen_ximprove, cls).__new__(gen_ximprove_share) elif gen_ximprove.format_variable(cmd.run_card['gridpack'], bool): @@ -1031,31 +1031,31 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_share) else: return super(gen_ximprove, cls).__new__(gen_ximprove_v4) - - + + def __init__(self, cmd, opt=None): - + try: super(gen_ximprove, self).__init__(cmd, opt) except TypeError: pass - + self.run_statistics = {} self.cmd = cmd self.run_card = cmd.run_card run_card = self.run_card self.me_dir = cmd.me_dir - + #extract from the run_card the information that we need. 
self.gridpack = run_card['gridpack'] self.nhel = run_card['nhel'] if "nhel_refine" in run_card: self.nhel = run_card["nhel_refine"] - + if self.run_card['refine_evt_by_job'] != -1: self.max_request_event = run_card['refine_evt_by_job'] - - + + # Default option for the run self.gen_events = True self.parralel = False @@ -1066,7 +1066,7 @@ def __init__(self, cmd, opt=None): # parameter for the gridpack run self.nreq = 2000 self.iseed = 4321 - + # placeholder for information self.results = 0 #updated in launch/update_html @@ -1074,16 +1074,16 @@ def __init__(self, cmd, opt=None): self.configure(opt) elif isinstance(opt, bannermod.GridpackCard): self.configure_gridpack(opt) - + def __call__(self): return self.launch() - + def launch(self): - """running """ - + """running """ + #start the run self.handle_seed() - self.results = sum_html.collect_result(self.cmd, + self.results = sum_html.collect_result(self.cmd, main_dir=pjoin(self.cmd.me_dir,'SubProcesses')) #main_dir is for gridpack readonly mode if self.gen_events: # We run to provide a given number of events @@ -1095,15 +1095,15 @@ def launch(self): def configure(self, opt): """Defines some parameter of the run""" - + for key, value in opt.items(): if key in self.__dict__: targettype = type(getattr(self, key)) setattr(self, key, self.format_variable(value, targettype, key)) else: raise Exception('%s not define' % key) - - + + # special treatment always do outside the loop to avoid side effect if 'err_goal' in opt: if self.err_goal < 1: @@ -1113,24 +1113,24 @@ def configure(self, opt): logger.info("Generating %s unweighted events." 
% self.err_goal) self.gen_events = True self.err_goal = self.err_goal * self.gen_events_security # security - + def handle_seed(self): """not needed but for gridpack --which is not handle here for the moment""" return - - + + def find_job_for_event(self): """return the list of channel that need to be improved""" - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) - - goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 + + goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) - all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) - + all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) + to_refine = [] for C in all_channels: if C.get('axsec') == 0: @@ -1141,61 +1141,61 @@ def find_job_for_event(self): elif C.get('xerr') > max(C.get('axsec'), (1/(100*math.sqrt(self.err_goal)))*all_channels[-1].get('axsec')): to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 
'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru + - class gen_ximprove_v4(gen_ximprove): - + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + super(gen_ximprove_v4, self).__init__(cmd, opt) - + if cmd.opts['accuracy'] < cmd._survey_options['accuracy'][1]: self.increase_precision(cmd._survey_options['accuracy'][1]/cmd.opts['accuracy']) @@ -1203,7 +1203,7 @@ def reset_multijob(self): for path in misc.glob(pjoin('*', '*','multijob.dat'), pjoin(self.me_dir, 'SubProcesses')): open(path,'w').write('0\n') - + def write_multijob(self, Channel, nb_split): """ """ if nb_split <=1: @@ -1211,7 +1211,7 @@ def write_multijob(self, Channel, nb_split): f = open(pjoin(self.me_dir, 'SubProcesses', Channel.get('name'), 'multijob.dat'), 'w') f.write('%i\n' % nb_split) f.close() - + def increase_precision(self, rate=3): #misc.sprint(rate) if rate < 3: @@ -1222,25 +1222,25 @@ def increase_precision(self, rate=3): rate = rate -2 self.max_event_in_iter = int((rate+1) * 10000) self.min_events = int(rate+2) * 2500 - self.gen_events_security = 1 + 0.1 * (rate+2) - + 
self.gen_events_security = 1 + 0.1 * (rate+2) + if int(self.nhel) == 1: self.min_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//3) self.max_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//2) - - + + alphabet = "abcdefghijklmnopqrstuvwxyz" def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() #reset the potential multijob of previous run self.reset_multijob() - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. @@ -1257,17 +1257,17 @@ def get_job_for_event(self): else: for i in range(len(to_refine) //3): new_order.append(to_refine[i]) - new_order.append(to_refine[-2*i-1]) + new_order.append(to_refine[-2*i-1]) new_order.append(to_refine[-2*i-2]) if len(to_refine) % 3 == 1: - new_order.append(to_refine[i+1]) + new_order.append(to_refine[i+1]) elif len(to_refine) % 3 == 2: - new_order.append(to_refine[i+2]) + new_order.append(to_refine[i+2]) #ensure that the reordering is done nicely assert set([id(C) for C in to_refine]) == set([id(C) for C in new_order]) - to_refine = new_order - - + to_refine = new_order + + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target @@ -1279,7 +1279,7 @@ def get_job_for_event(self): nb_split = self.max_splitting nb_split=max(1, nb_split) - + #2. estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1296,21 +1296,21 @@ def get_job_for_event(self): nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. 
Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + # write the multi-job information self.write_multijob(C, nb_split) - + packet = cluster.Packet((C.parent_name, C.name), combine_runs.CombineRuns, (pjoin(self.me_dir, 'SubProcesses', C.parent_name)), {"subproc": C.name, "nb_split":nb_split}) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1321,7 +1321,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': packet, + 'packet': packet, } if nb_split == 1: @@ -1334,19 +1334,19 @@ def get_job_for_event(self): if self.keep_grid_for_refine: new_info['base_directory'] = info['directory'] jobs.append(new_info) - - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def create_ajob(self, template, jobs, write_dir=None): """create the ajob""" - + if not jobs: return if not write_dir: write_dir = pjoin(self.me_dir, 'SubProcesses') - + #filter the job according to their SubProcess directory # no mix submition P2job= collections.defaultdict(list) for j in jobs: @@ -1355,11 +1355,11 @@ def create_ajob(self, template, jobs, write_dir=None): for P in P2job.values(): self.create_ajob(template, P, write_dir) return - - + + #Here we can assume that all job are for the same directory. 
path = pjoin(write_dir, jobs[0]['P_dir']) - + template_text = open(template, 'r').read() # special treatment if needed to combine the script # computes how many submition miss one job @@ -1384,8 +1384,8 @@ def create_ajob(self, template, jobs, write_dir=None): skip1=0 combining_job =1 nb_sub = len(jobs) - - + + nb_use = 0 for i in range(nb_sub): script_number = i+1 @@ -1404,14 +1404,14 @@ def create_ajob(self, template, jobs, write_dir=None): info["base_directory"] = "./" fsock.write(template_text % info) nb_use += nb_job - + fsock.close() return script_number def get_job_for_precision(self): """create the ajob to achieve a give precision on the total cross-section""" - + assert self.err_goal <=1 xtot = abs(self.results.xsec) logger.info("Working on precision: %s %%" %(100*self.err_goal)) @@ -1428,46 +1428,46 @@ def get_job_for_precision(self): rerr *=rerr if not len(to_refine): return - - # change limit since most don't contribute + + # change limit since most don't contribute limit = math.sqrt((self.err_goal * xtot)**2 - rerr/math.sqrt(len(to_refine))) for C in to_refine[:]: cerr = C.mfactor*(C.xerru + len(to_refine)*C.xerrc) if cerr < limit: to_refine.remove(C) - + # all the channel are now selected. create the channel information logger.info('need to improve %s channels' % len(to_refine)) - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. # loop over the channel to refine for C in to_refine: - + #1. 
Determine how many events we need in each iteration yerr = C.mfactor*(C.xerru+len(to_refine)*C.xerrc) nevents = 0.2*C.nevents*(yerr/limit)**2 - + nb_split = int((nevents*(C.nunwgt/C.nevents)/self.max_request_event/ (2**self.min_iter-1))**(2/3)) nb_split = max(nb_split, 1) - # **(2/3) to slow down the increase in number of jobs + # **(2/3) to slow down the increase in number of jobs if nb_split > self.max_splitting: nb_split = self.max_splitting - + if nb_split >1: nevents = nevents / nb_split self.write_multijob(C, nb_split) # forbid too low/too large value nevents = min(self.min_event_in_iter, max(self.max_event_in_iter, nevents)) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1487,38 +1487,38 @@ def get_job_for_precision(self): new_info['offset'] = i+1 new_info['directory'] += self.alphabet[i % 26] + str((i+1)//26) jobs.append(new_info) - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 
'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru @@ -1528,27 +1528,27 @@ class gen_ximprove_v4_nogridupdate(gen_ximprove_v4): # some hardcoded value which impact the generation gen_events_security = 1.1 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 400 # split jobs if a channel if it needs more than that + max_request_event = 400 # split jobs if a channel if it needs more than that max_event_in_iter = 500 min_event_in_iter = 250 - max_splitting = 260 # maximum duplication of a given channel - min_iter = 2 + max_splitting = 260 # maximum duplication of a given channel + min_iter = 2 max_iter = 6 keep_grid_for_refine = True - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + gen_ximprove.__init__(cmd, opt) - + if cmd.proc_characteristics['loopinduced'] and \ cmd.proc_characteristics['nexternal'] > 2: self.increase_parralelization(cmd.proc_characteristics['nexternal']) - + def increase_parralelization(self, nexternal): - self.max_splitting = 1000 - + self.max_splitting = 1000 + if self.run_card['refine_evt_by_job'] != -1: pass elif nexternal == 3: @@ -1563,27 +1563,27 @@ def increase_parralelization(self, nexternal): class gen_ximprove_share(gen_ximprove, gensym): """Doing the refine in multicore. Each core handle a couple of PS point.""" - nb_ps_by_job = 2000 + nb_ps_by_job = 2000 mode = "refine" gen_events_security = 1.15 # Note the real security is lower since we stop the jobs if they are at 96% # of this target. 
def __init__(self, *args, **opts): - + super(gen_ximprove_share, self).__init__(*args, **opts) self.generated_events = {} self.splitted_for_dir = lambda x,y : self.splitted_Pdir[(x,y)] - + def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - + goal_lum, to_refine = self.find_job_for_event() self.goal_lum = goal_lum - + # loop over the channel to refine to find the number of PS point to launch total_ps_points = 0 channel_to_ps_point = [] @@ -1593,7 +1593,7 @@ def get_job_for_event(self): os.remove(pjoin(self.me_dir, "SubProcesses",C.parent_name, C.name, "events.lhe")) except: pass - + #1. Compute the number of points are needed to reach target needed_event = goal_lum*C.get('axsec') if needed_event == 0: @@ -1609,18 +1609,18 @@ def get_job_for_event(self): nb_split = 1 if nb_split > self.max_splitting: nb_split = self.max_splitting - nevents = self.max_event_in_iter * self.max_splitting + nevents = self.max_event_in_iter * self.max_splitting else: nevents = self.max_event_in_iter * nb_split if nevents > self.max_splitting*self.max_event_in_iter: logger.warning("Channel %s/%s has a very low efficiency of unweighting. 
Might not be possible to reach target" % \ (C.name, C.parent_name)) - nevents = self.max_event_in_iter * self.max_splitting - - total_ps_points += nevents - channel_to_ps_point.append((C, nevents)) - + nevents = self.max_event_in_iter * self.max_splitting + + total_ps_points += nevents + channel_to_ps_point.append((C, nevents)) + if self.cmd.options["run_mode"] == 1: if self.cmd.options["cluster_size"]: nb_ps_by_job = total_ps_points /int(self.cmd.options["cluster_size"]) @@ -1634,7 +1634,7 @@ def get_job_for_event(self): nb_ps_by_job = total_ps_points / self.cmd.options["nb_core"] else: nb_ps_by_job = self.nb_ps_by_job - + nb_ps_by_job = int(max(nb_ps_by_job, 500)) for C, nevents in channel_to_ps_point: @@ -1648,20 +1648,20 @@ def get_job_for_event(self): self.create_resubmit_one_iter(C.parent_name, C.name[1:], submit_ps, nb_job, step=0) needed_event = goal_lum*C.get('xsec') logger.debug("%s/%s : need %s event. Need %s split job of %s points", C.parent_name, C.name, needed_event, nb_job, submit_ps) - - + + def combine_iteration(self, Pdir, G, step): - + grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - + # collect all the generated_event Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) assert len(grid_calculator.results) == len(Gdirs) == self.splitted_for_dir(Pdir, G) - - + + # Check how many events are going to be kept after un-weighting. needed_event = cross * self.goal_lum if needed_event == 0: @@ -1671,19 +1671,19 @@ def combine_iteration(self, Pdir, G, step): if self.err_goal >=1: if needed_event > self.gen_events_security * self.err_goal: needed_event = int(self.gen_events_security * self.err_goal) - + if (Pdir, G) in self.generated_events: old_nunwgt, old_maxwgt = self.generated_events[(Pdir, G)] else: old_nunwgt, old_maxwgt = 0, 0 - + if old_nunwgt == 0 and os.path.exists(pjoin(Pdir,"G%s" % G, "events.lhe")): # possible for second refine. 
lhe = lhe_parser.EventFile(pjoin(Pdir,"G%s" % G, "events.lhe")) old_nunwgt = lhe.unweight(None, trunc_error=0.005, log_level=0) old_maxwgt = lhe.max_wgt - - + + maxwgt = max(grid_calculator.get_max_wgt(), old_maxwgt) new_evt = grid_calculator.get_nunwgt(maxwgt) @@ -1695,35 +1695,35 @@ def combine_iteration(self, Pdir, G, step): one_iter_nb_event = max(grid_calculator.get_nunwgt(),1) drop_previous_iteration = False # compare the number of events to generate if we discard the previous iteration - n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) n_target_combined = (needed_event-nunwgt) / efficiency if n_target_one_iter < n_target_combined: # the last iteration alone has more event that the combine iteration. - # it is therefore interesting to drop previous iteration. + # it is therefore interesting to drop previous iteration. drop_previous_iteration = True nunwgt = one_iter_nb_event maxwgt = grid_calculator.get_max_wgt() new_evt = nunwgt - efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) - + efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + try: if drop_previous_iteration: raise IOError output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'a') except IOError: output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'w') - + misc.call(["cat"] + [pjoin(d, "events.lhe") for d in Gdirs], stdout=output_file) output_file.close() # For large number of iteration. check the number of event by doing the # real unweighting. 
- if nunwgt < 0.6 * needed_event and step > self.min_iter: + if nunwgt < 0.6 * needed_event and step > self.min_iter: lhe = lhe_parser.EventFile(output_file.name) old_nunwgt =nunwgt nunwgt = lhe.unweight(None, trunc_error=0.01, log_level=0) - - + + self.generated_events[(Pdir, G)] = (nunwgt, maxwgt) # misc.sprint("Adding %s event to %s. Currently at %s" % (new_evt, G, nunwgt)) @@ -1742,21 +1742,21 @@ def combine_iteration(self, Pdir, G, step): nevents = grid_calculator.results[0].nevents if nevents == 0: # possible if some integral returns 0 nevents = max(g.nevents for g in grid_calculator.results) - + need_ps_point = (needed_event - nunwgt)/(efficiency+1e-99) - need_job = need_ps_point // nevents + 1 - + need_job = need_ps_point // nevents + 1 + if step < self.min_iter: # This is normal but check if we are on the good track - job_at_first_iter = nb_split_before/2**(step-1) + job_at_first_iter = nb_split_before/2**(step-1) expected_total_job = job_at_first_iter * (2**self.min_iter-1) done_job = job_at_first_iter * (2**step-1) expected_remaining_job = expected_total_job - done_job - logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) + logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) # increase if needed but not too much need_job = min(need_job, expected_remaining_job*1.25) - + nb_job = (need_job-0.5)//(2**(self.min_iter-step)-1) + 1 nb_job = max(1, nb_job) grid_calculator.write_grid_for_submission(Pdir,G, @@ -1768,7 +1768,7 @@ def combine_iteration(self, Pdir, G, step): nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) #self.create_job(Pdir, G, nb_job, nevents, step) - + elif step < self.max_iter: if step + 1 == self.max_iter: need_job = 1.20 * need_job # avoid to have just too few event. 
@@ -1777,21 +1777,21 @@ def combine_iteration(self, Pdir, G, step): grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_job*nevents ,mode=self.mode, conservative_factor=self.max_iter) - - + + logger.info("%s/G%s is at %i/%i ('%.2g%%') event. Resubmit %i job at iteration %i." \ % (os.path.basename(Pdir), G, int(nunwgt),int(needed_event)+1, (float(nunwgt)/needed_event)*100.0 if needed_event>0.0 else 0.0, nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) - - + + return 0 - - + + def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -1807,7 +1807,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency nevents = nunwgt # make the unweighting to compute the number of events: luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -1816,23 +1816,23 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s 0.0 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - - - + + + class gen_ximprove_gridpack(gen_ximprove_v4): - - min_iter = 1 + + min_iter = 1 max_iter = 13 - max_request_event = 1e12 # split jobs if a channel if it needs more than that + max_request_event = 1e12 # split jobs if a channel if it needs more than that max_event_in_iter = 4000 min_event_in_iter = 500 combining_job = sys.maxsize @@ -1844,7 +1844,7 @@ def __new__(cls, *args, **opts): return super(gen_ximprove_gridpack, cls).__new__(cls, *args, **opts) def __init__(self, *args, **opts): - + self.ngran = -1 self.gscalefact = {} self.readonly = False 
@@ -1855,23 +1855,23 @@ def __init__(self, *args, **opts): self.readonly = opts['readonly'] super(gen_ximprove_gridpack,self).__init__(*args, **opts) if self.ngran == -1: - self.ngran = 1 - + self.ngran = 1 + def find_job_for_event(self): """return the list of channel that need to be improved""" import random - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) self.gscalefact = {} - + xtot = self.results.axsec - goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 + goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 # logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) all_channels.sort(key=lambda x : x.get('luminosity'), reverse=True) - + to_refine = [] for C in all_channels: tag = C.get('name') @@ -1885,27 +1885,27 @@ def find_job_for_event(self): #need to generate events logger.debug('request events for ', C.get('name'), 'cross=', C.get('axsec'), 'needed events = ', goal_lum * C.get('axsec')) - to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + to_refine.append(C) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. - + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target needed_event = max(goal_lum*C.get('axsec'), self.ngran) nb_split = 1 - + #2. 
estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1920,13 +1920,13 @@ def get_job_for_event(self): # forbid too low/too large value nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': os.path.basename(C.parent_name), + 'P_dir': os.path.basename(C.parent_name), 'offset': 1, # need to be change for splitted job 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'nevents': nevents, #int(nevents*self.gen_events_security)+1, @@ -1938,7 +1938,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': None, + 'packet': None, } if self.readonly: @@ -1946,11 +1946,11 @@ def get_job_for_event(self): info['base_directory'] = basedir jobs.append(info) - - write_dir = '.' if self.readonly else None - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) - + + write_dir = '.' if self.readonly else None + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) + done = [] for j in jobs: if j['P_dir'] in done: @@ -1967,22 +1967,22 @@ def get_job_for_event(self): write_dir = '.' 
if self.readonly else pjoin(self.me_dir, 'SubProcesses') self.check_events(goal_lum, to_refine, jobs, write_dir) - + def check_events(self, goal_lum, to_refine, jobs, Sdir): """check that we get the number of requested events if not resubmit.""" - + new_jobs = [] - + for C, job_info in zip(to_refine, jobs): - P = job_info['P_dir'] + P = job_info['P_dir'] G = job_info['channel'] axsec = C.get('axsec') - requested_events= job_info['requested_event'] - + requested_events= job_info['requested_event'] + new_results = sum_html.OneResult((P,G)) new_results.read_results(pjoin(Sdir,P, 'G%s'%G, 'results.dat')) - + # need to resubmit? if new_results.get('nunwgt') < requested_events: pwd = pjoin(os.getcwd(),job_info['P_dir'],'G%s'%G) if self.readonly else \ @@ -1992,10 +1992,10 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): job_info['offset'] += 1 new_jobs.append(job_info) files.mv(pjoin(pwd, 'events.lhe'), pjoin(pwd, 'events.lhe.previous')) - + if new_jobs: - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) + done = [] for j in new_jobs: if j['P_dir'] in done: @@ -2015,9 +2015,9 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): files.put_at_end(pjoin(pwd, 'events.lhe'),pjoin(pwd, 'events.lhe.previous')) return self.check_events(goal_lum, to_refine, new_jobs, Sdir) - - - - + + + + diff --git a/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py b/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py index cb6bf4ca57..8abba3f33f 100755 --- a/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py +++ b/epochX/cudacpp/gg_tt.mad/bin/internal/madevent_interface.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # 
automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,10 +53,10 @@ # Special logger for the Cmd Interface logger = logging.getLogger('madevent.stdout') # -> stdout logger_stderr = logging.getLogger('madevent.stderr') # ->stderr - + try: import madgraph -except ImportError as error: +except ImportError as error: # import from madevent directory MADEVENT = True import internal.extended_cmd as cmd @@ -92,7 +92,7 @@ import madgraph.various.lhe_parser as lhe_parser # import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code import models.check_param_card as check_param_card - from madgraph.iolibs.files import ln + from madgraph.iolibs.files import ln from madgraph import InvalidCmd, MadGraph5Error, MG5DIR, ReadWrite @@ -113,10 +113,10 @@ class CmdExtended(common_run.CommonRunCmd): next_possibility = { 'start': [], } - + debug_output = 'ME5_debug' error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n' - error_debug += 'More information is found in \'%(debug)s\'.\n' + error_debug += 'More information is found in \'%(debug)s\'.\n' error_debug += 'Please attach this file to your report.' 
config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n' @@ -124,18 +124,18 @@ class CmdExtended(common_run.CommonRunCmd): keyboard_stop_msg = """stopping all operation in order to quit MadGraph5_aMC@NLO please enter exit""" - + # Define the Error InvalidCmd = InvalidCmd ConfigurationError = MadGraph5Error def __init__(self, me_dir, options, *arg, **opt): """Init history and line continuation""" - + # Tag allowing/forbiding question self.force = False - - # If possible, build an info line with current version number + + # If possible, build an info line with current version number # and date, from the VERSION text file info = misc.get_pkg_info() info_line = "" @@ -150,7 +150,7 @@ def __init__(self, me_dir, options, *arg, **opt): else: version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip() info_line = "#* VERSION %s %s *\n" % \ - (version, (24 - len(version)) * ' ') + (version, (24 - len(version)) * ' ') # Create a header for the history file. # Remember to fill in time at writeout time! 
@@ -177,7 +177,7 @@ def __init__(self, me_dir, options, *arg, **opt): '#* run as ./bin/madevent.py filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - + if info_line: info_line = info_line[1:] @@ -203,11 +203,11 @@ def __init__(self, me_dir, options, *arg, **opt): "* *\n" + \ "************************************************************") super(CmdExtended, self).__init__(me_dir, options, *arg, **opt) - + def get_history_header(self): - """return the history header""" + """return the history header""" return self.history_header % misc.get_time_info() - + def stop_on_keyboard_stop(self): """action to perform to close nicely on a keyboard interupt""" try: @@ -219,20 +219,20 @@ def stop_on_keyboard_stop(self): self.add_error_log_in_html(KeyboardInterrupt) except: pass - + def postcmd(self, stop, line): """ Update the status of the run for finishing interactive command """ - - stop = super(CmdExtended, self).postcmd(stop, line) + + stop = super(CmdExtended, self).postcmd(stop, line) # relaxing the tag forbidding question self.force = False - + if not self.use_rawinput: return stop - + if self.results and not self.results.current: return stop - + arg = line.split() if len(arg) == 0: return stop @@ -240,41 +240,41 @@ def postcmd(self, stop, line): return stop if isinstance(self.results.status, str) and self.results.status == 'Stop by the user': self.update_status('%s Stop by the user' % arg[0], level=None, error=True) - return stop + return stop elif not self.results.status: return stop elif str(arg[0]) in ['exit','quit','EOF']: return stop - + try: - self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], + self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], level=None, error=True) except Exception: misc.sprint('update_status fails') pass - - + + def nice_user_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() - return cmd.Cmd.nice_user_error(self, error, line) - + return cmd.Cmd.nice_user_error(self, error, line) + def nice_config_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() stop = cmd.Cmd.nice_config_error(self, error, line) - - + + try: debug_file = open(self.debug_output, 'a') debug_file.write(open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat'))) debug_file.close() except: - pass + pass return stop - + def nice_error_handling(self, error, line): """If a ME run is currently running add a link in the html output""" @@ -294,7 +294,7 @@ def nice_error_handling(self, error, line): proc_card = pjoin(self.me_dir,'Cards','proc_card_mg5.dat') if os.path.exists(proc_card): self.banner.add(proc_card) - + out_dir = pjoin(self.me_dir, 'Events', self.run_name) if not os.path.isdir(out_dir): os.mkdir(out_dir) @@ -307,7 +307,7 @@ def nice_error_handling(self, error, line): else: pass else: - self.add_error_log_in_html() + self.add_error_log_in_html() stop = cmd.Cmd.nice_error_handling(self, error, line) try: debug_file = open(self.debug_output, 'a') @@ -316,14 +316,14 @@ def nice_error_handling(self, error, line): except: pass return stop - - + + #=============================================================================== # HelpToCmd #=============================================================================== class HelpToCmd(object): """ The Series of help routine for the MadEventCmd""" - + def help_pythia(self): logger.info("syntax: pythia [RUN] [--run_options]") logger.info("-- run pythia on RUN (current one by default)") @@ -352,29 +352,29 @@ def help_banner_run(self): logger.info(" Path should be the path of a valid banner.") 
logger.info(" RUN should be the name of a run of the current directory") self.run_options_help([('-f','answer all question by default'), - ('--name=X', 'Define the name associated with the new run')]) - + ('--name=X', 'Define the name associated with the new run')]) + def help_open(self): logger.info("syntax: open FILE ") logger.info("-- open a file with the appropriate editor.") logger.info(' If FILE belongs to index.html, param_card.dat, run_card.dat') logger.info(' the path to the last created/used directory is used') logger.info(' The program used to open those files can be chosen in the') - logger.info(' configuration file ./input/mg5_configuration.txt') - - + logger.info(' configuration file ./input/mg5_configuration.txt') + + def run_options_help(self, data): if data: logger.info('-- local options:') for name, info in data: logger.info(' %s : %s' % (name, info)) - + logger.info("-- session options:") - logger.info(" Note that those options will be kept for the current session") + logger.info(" Note that those options will be kept for the current session") logger.info(" --cluster : Submit to the cluster. 
Current cluster: %s" % self.options['cluster_type']) logger.info(" --multicore : Run in multi-core configuration") logger.info(" --nb_core=X : limit the number of core to use to X.") - + def help_generate_events(self): logger.info("syntax: generate_events [run_name] [options]",) @@ -398,16 +398,16 @@ def help_initMadLoop(self): logger.info(" -f : Bypass the edition of MadLoopParams.dat.",'$MG:color:BLUE') logger.info(" -r : Refresh of the existing filters (erasing them if already present).",'$MG:color:BLUE') logger.info(" --nPS= : Specify how many phase-space points should be tried to set up the filters.",'$MG:color:BLUE') - + def help_calculate_decay_widths(self): - + if self.ninitial != 1: logger.warning("This command is only valid for processes of type A > B C.") logger.warning("This command can not be run in current context.") logger.warning("") - + logger.info("syntax: calculate_decay_widths [run_name] [options])") logger.info("-- Calculate decay widths and enter widths and BRs in param_card") logger.info(" for a series of processes of type A > B C ...") @@ -428,8 +428,8 @@ def help_survey(self): logger.info("-- evaluate the different channel associate to the process") self.run_options_help([("--" + key,value[-1]) for (key,value) in \ self._survey_options.items()]) - - + + def help_restart_gridpack(self): logger.info("syntax: restart_gridpack --precision= --restart_zero") @@ -439,14 +439,14 @@ def help_launch(self): logger.info("syntax: launch [run_name] [options])") logger.info(" --alias for either generate_events/calculate_decay_widths") logger.info(" depending of the number of particles in the initial state.") - + if self.ninitial == 1: logger.info("For this directory this is equivalent to calculate_decay_widths") self.help_calculate_decay_widths() else: logger.info("For this directory this is equivalent to $generate_events") self.help_generate_events() - + def help_refine(self): logger.info("syntax: refine require_precision [max_channel] [--run_options]") 
logger.info("-- refine the LAST run to achieve a given precision.") @@ -454,14 +454,14 @@ def help_refine(self): logger.info(' or the required relative error') logger.info(' max_channel:[5] maximal number of channel per job') self.run_options_help([]) - + def help_combine_events(self): """ """ logger.info("syntax: combine_events [run_name] [--tag=tag_name] [--run_options]") logger.info("-- Combine the last run in order to write the number of events") logger.info(" asked in the run_card.") self.run_options_help([]) - + def help_store_events(self): """ """ logger.info("syntax: store_events [--run_options]") @@ -481,7 +481,7 @@ def help_import(self): logger.info("syntax: import command PATH") logger.info("-- Execute the command present in the file") self.run_options_help([]) - + def help_syscalc(self): logger.info("syntax: syscalc [RUN] [%s] [-f | --tag=]" % '|'.join(self._plot_mode)) logger.info("-- calculate systematics information for the RUN (current run by default)") @@ -506,18 +506,18 @@ class AskRun(cmd.ControlSwitch): ('madspin', 'Decay onshell particles'), ('reweight', 'Add weights to events for new hypp.') ] - + def __init__(self, question, line_args=[], mode=None, force=False, *args, **opt): - + self.check_available_module(opt['mother_interface'].options) self.me_dir = opt['mother_interface'].me_dir super(AskRun,self).__init__(self.to_control, opt['mother_interface'], *args, **opt) - - + + def check_available_module(self, options): - + self.available_module = set() if options['pythia-pgs_path']: self.available_module.add('PY6') @@ -540,32 +540,32 @@ def check_available_module(self, options): self.available_module.add('Rivet') else: logger.warning("Rivet program installed but no parton shower with hepmc output detected.\n Please install pythia8") - + if not MADEVENT or ('mg5_path' in options and options['mg5_path']): self.available_module.add('MadSpin') if misc.has_f2py() or options['f2py_compiler']: self.available_module.add('reweight') -# old mode to 
activate the shower +# old mode to activate the shower def ans_parton(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if value is None: self.set_all_off() else: logger.warning('Invalid command: parton=%s' % value) - - + + # -# HANDLING SHOWER +# HANDLING SHOWER # def get_allowed_shower(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_shower'): return self.allowed_shower - + self.allowed_shower = [] if 'PY6' in self.available_module: self.allowed_shower.append('Pythia6') @@ -574,9 +574,9 @@ def get_allowed_shower(self): if self.allowed_shower: self.allowed_shower.append('OFF') return self.allowed_shower - + def set_default_shower(self): - + if 'PY6' in self.available_module and\ os.path.exists(pjoin(self.me_dir,'Cards','pythia_card.dat')): self.switch['shower'] = 'Pythia6' @@ -590,10 +590,10 @@ def set_default_shower(self): def check_value_shower(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_shower(): return True - + value =value.lower() if value in ['py6','p6','pythia_6'] and 'PY6' in self.available_module: return 'Pythia6' @@ -601,13 +601,13 @@ def check_value_shower(self, value): return 'Pythia8' else: return False - - -# old mode to activate the shower + + +# old mode to activate the shower def ans_pythia(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if 'PY6' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return @@ -621,13 +621,13 @@ def ans_pythia(self, value=None): self.set_switch('shower', 'OFF') else: logger.warning('Invalid command: pythia=%s' % value) - - + + def consistency_shower_detector(self, vshower, vdetector): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower == 'OFF': @@ -635,35 +635,35 @@ def consistency_shower_detector(self, vshower, vdetector): return 'OFF' if vshower == 'Pythia8' and vdetector == 'PGS': return 'OFF' - + return None - + # # HANDLING DETECTOR # def get_allowed_detector(self): """return valid entry for the switch""" - + if hasattr(self, 'allowed_detector'): - return self.allowed_detector - + return self.allowed_detector + self.allowed_detector = [] if 'PGS' in self.available_module: self.allowed_detector.append('PGS') if 'Delphes' in self.available_module: self.allowed_detector.append('Delphes') - + if self.allowed_detector: self.allowed_detector.append('OFF') - return self.allowed_detector + return self.allowed_detector def set_default_detector(self): - + self.set_default_shower() #ensure that this one is called first! - + if 'PGS' in self.available_module and self.switch['shower'] == 'Pythia6'\ and os.path.exists(pjoin(self.me_dir,'Cards','pgs_card.dat')): self.switch['detector'] = 'PGS' @@ -674,16 +674,16 @@ def set_default_detector(self): self.switch['detector'] = 'OFF' else: self.switch['detector'] = 'Not Avail.' - -# old mode to activate pgs + +# old mode to activate pgs def ans_pgs(self, value=None): """None: means that the user type 'pgs' - value: means that the user type pgs=value""" - + value: means that the user type pgs=value""" + if 'PGS' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return - + if value is None: self.set_all_off() self.switch['shower'] = 'Pythia6' @@ -696,16 +696,16 @@ def ans_pgs(self, value=None): else: logger.warning('Invalid command: pgs=%s' % value) - + # old mode to activate Delphes def ans_delphes(self, value=None): """None: means that the user type 'delphes' - value: means that the user type delphes=value""" - + value: means that the user type delphes=value""" + if 'Delphes' not in self.available_module: logger.warning('Delphes not available. Ignore commmand') return - + if value is None: self.set_all_off() if 'PY6' in self.available_module: @@ -718,15 +718,15 @@ def ans_delphes(self, value=None): elif value == 'off': self.set_switch('detector', 'OFF') else: - logger.warning('Invalid command: pgs=%s' % value) + logger.warning('Invalid command: pgs=%s' % value) def consistency_detector_shower(self,vdetector, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ - + if vdetector == 'PGS' and vshower != 'Pythia6': return 'Pythia6' if vdetector == 'Delphes' and vshower not in ['Pythia6', 'Pythia8']: @@ -744,28 +744,28 @@ def consistency_detector_shower(self,vdetector, vshower): # def get_allowed_analysis(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_analysis'): return self.allowed_analysis - + self.allowed_analysis = [] if 'ExRoot' in self.available_module: self.allowed_analysis.append('ExRoot') if 'MA4' in self.available_module: self.allowed_analysis.append('MadAnalysis4') if 'MA5' in self.available_module: - self.allowed_analysis.append('MadAnalysis5') + self.allowed_analysis.append('MadAnalysis5') if 'Rivet' in self.available_module: - self.allowed_analysis.append('Rivet') - + self.allowed_analysis.append('Rivet') + if self.allowed_analysis: self.allowed_analysis.append('OFF') - + return 
self.allowed_analysis - + def check_analysis(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_analysis(): return True if value.lower() in ['ma4', 'madanalysis4', 'madanalysis_4','4']: @@ -786,30 +786,30 @@ def consistency_shower_analysis(self, vshower, vanalysis): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'OFF' #new value for analysis - + return None - + def consistency_analysis_shower(self, vanalysis, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'Pythia8' #new value for analysis - + return None def set_default_analysis(self): """initialise the switch for analysis""" - + if 'MA4' in self.available_module and \ os.path.exists(pjoin(self.me_dir,'Cards','plot_card.dat')): self.switch['analysis'] = 'MadAnalysis4' @@ -818,46 +818,46 @@ def set_default_analysis(self): or os.path.exists(pjoin(self.me_dir,'Cards', 'madanalysis5_hadron_card.dat'))): self.switch['analysis'] = 'MadAnalysis5' elif 'ExRoot' in self.available_module: - self.switch['analysis'] = 'ExRoot' - elif self.get_allowed_analysis(): + self.switch['analysis'] = 'ExRoot' + elif self.get_allowed_analysis(): self.switch['analysis'] = 'OFF' else: self.switch['analysis'] = 'Not Avail.' 
- + # # MADSPIN handling # def get_allowed_madspin(self): """ ON|OFF|onshell """ - + if hasattr(self, 'allowed_madspin'): return self.allowed_madspin - + self.allowed_madspin = [] if 'MadSpin' in self.available_module: self.allowed_madspin = ['OFF',"ON",'onshell',"full"] return self.allowed_madspin - + def check_value_madspin(self, value): """handle alias and valid option not present in get_allowed_madspin""" - + if value.upper() in self.get_allowed_madspin(): return True elif value.lower() in self.get_allowed_madspin(): return True - + if 'MadSpin' not in self.available_module: return False - + if value.lower() in ['madspin', 'full']: return 'full' elif value.lower() in ['none']: return 'none' - - + + def set_default_madspin(self): """initialise the switch for madspin""" - + if 'MadSpin' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): self.switch['madspin'] = 'ON' @@ -865,10 +865,10 @@ def set_default_madspin(self): self.switch['madspin'] = 'OFF' else: self.switch['madspin'] = 'Not Avail.' 
- + def get_cardcmd_for_madspin(self, value): """set some command to run before allowing the user to modify the cards.""" - + if value == 'onshell': return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"] elif value in ['full', 'madspin']: @@ -877,36 +877,36 @@ def get_cardcmd_for_madspin(self, value): return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"] else: return [] - + # # ReWeight handling # def get_allowed_reweight(self): """ return the list of valid option for reweight=XXX """ - + if hasattr(self, 'allowed_reweight'): return getattr(self, 'allowed_reweight') - + if 'reweight' not in self.available_module: self.allowed_reweight = [] return self.allowed_reweight = ['OFF', 'ON'] - + # check for plugin mode plugin_path = self.mother_interface.plugin_path opts = misc.from_plugin_import(plugin_path, 'new_reweight', warning=False) self.allowed_reweight += opts - + def set_default_reweight(self): """initialise the switch for reweight""" - + if 'reweight' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')): self.switch['reweight'] = 'ON' else: self.switch['reweight'] = 'OFF' else: - self.switch['reweight'] = 'Not Avail.' + self.switch['reweight'] = 'Not Avail.' 
#=============================================================================== # CheckValidForCmd @@ -916,14 +916,14 @@ class CheckValidForCmd(object): def check_banner_run(self, args): """check the validity of line""" - + if len(args) == 0: self.help_banner_run() raise self.InvalidCmd('banner_run requires at least one argument.') - + tag = [a[6:] for a in args if a.startswith('--tag=')] - - + + if os.path.exists(args[0]): type ='banner' format = self.detect_card_type(args[0]) @@ -931,7 +931,7 @@ def check_banner_run(self, args): raise self.InvalidCmd('The file is not a valid banner.') elif tag: args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) + (args[0], tag)) if not os.path.exists(args[0]): raise self.InvalidCmd('No banner associates to this name and tag.') else: @@ -939,7 +939,7 @@ def check_banner_run(self, args): type = 'run' banners = misc.glob('*_banner.txt', pjoin(self.me_dir,'Events', args[0])) if not banners: - raise self.InvalidCmd('No banner associates to this name.') + raise self.InvalidCmd('No banner associates to this name.') elif len(banners) == 1: args[0] = banners[0] else: @@ -947,8 +947,8 @@ def check_banner_run(self, args): tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners] tag = self.ask('which tag do you want to use?', tags[0], tags) args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) - + (args[0], tag)) + run_name = [arg[7:] for arg in args if arg.startswith('--name=')] if run_name: try: @@ -970,14 +970,14 @@ def check_banner_run(self, args): except Exception: pass self.set_run_name(name) - + def check_history(self, args): """check the validity of line""" - + if len(args) > 1: self.help_history() raise self.InvalidCmd('\"history\" command takes at most one argument') - + if not len(args): return elif args[0] != 'clean': @@ -985,16 +985,16 @@ def check_history(self, args): if dirpath and not os.path.exists(dirpath) or \ os.path.isdir(args[0]): raise 
self.InvalidCmd("invalid path %s " % dirpath) - + def check_save(self, args): """ check the validity of the line""" - + if len(args) == 0: args.append('options') if args[0] not in self._save_opts: raise self.InvalidCmd('wrong \"save\" format') - + if args[0] != 'options' and len(args) != 2: self.help_save() raise self.InvalidCmd('wrong \"save\" format') @@ -1003,7 +1003,7 @@ def check_save(self, args): if not os.path.exists(basename): raise self.InvalidCmd('%s is not a valid path, please retry' % \ args[1]) - + if args[0] == 'options': has_path = None for arg in args[1:]: @@ -1024,9 +1024,9 @@ def check_save(self, args): has_path = True if not has_path: if '--auto' in arg and self.options['mg5_path']: - args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) + args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) else: - args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) + args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) def check_set(self, args): """ check the validity of the line""" @@ -1039,20 +1039,20 @@ def check_set(self, args): self.help_set() raise self.InvalidCmd('Possible options for set are %s' % \ self._set_options) - + if args[0] in ['stdout_level']: if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \ and not args[1].isdigit(): raise self.InvalidCmd('output_level needs ' + \ - 'a valid level') - + 'a valid level') + if args[0] in ['timeout']: if not args[1].isdigit(): - raise self.InvalidCmd('timeout values should be a integer') - + raise self.InvalidCmd('timeout values should be a integer') + def check_open(self, args): """ check the validity of the line """ - + if len(args) != 1: self.help_open() raise self.InvalidCmd('OPEN command requires exactly one argument') @@ -1069,7 +1069,7 @@ def check_open(self, args): raise self.InvalidCmd('No MadEvent path defined. 
Unable to associate this name to a file') else: return True - + path = self.me_dir if os.path.isfile(os.path.join(path,args[0])): args[0] = os.path.join(path,args[0]) @@ -1078,7 +1078,7 @@ def check_open(self, args): elif os.path.isfile(os.path.join(path,'HTML',args[0])): args[0] = os.path.join(path,'HTML',args[0]) # special for card with _default define: copy the default and open it - elif '_card.dat' in args[0]: + elif '_card.dat' in args[0]: name = args[0].replace('_card.dat','_card_default.dat') if os.path.isfile(os.path.join(path,'Cards', name)): files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0])) @@ -1086,13 +1086,13 @@ def check_open(self, args): else: raise self.InvalidCmd('No default path for this file') elif not os.path.isfile(args[0]): - raise self.InvalidCmd('No default path for this file') - + raise self.InvalidCmd('No default path for this file') + def check_initMadLoop(self, args): """ check initMadLoop command arguments are valid.""" - + opt = {'refresh': False, 'nPS': None, 'force': False} - + for arg in args: if arg in ['-r','--refresh']: opt['refresh'] = True @@ -1105,14 +1105,14 @@ def check_initMadLoop(self, args): except ValueError: raise InvalidCmd("The number of attempts specified "+ "'%s' is not a valid integer."%n_attempts) - + return opt - + def check_treatcards(self, args): """check that treatcards arguments are valid [param|run|all] [--output_dir=] [--param_card=] [--run_card=] """ - + opt = {'output_dir':pjoin(self.me_dir,'Source'), 'param_card':pjoin(self.me_dir,'Cards','param_card.dat'), 'run_card':pjoin(self.me_dir,'Cards','run_card.dat'), @@ -1129,14 +1129,14 @@ def check_treatcards(self, args): if os.path.isfile(value): card_name = self.detect_card_type(value) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' % (card_name, key)) opt[key] = value elif 
os.path.isfile(pjoin(self.me_dir,value)): card_name = self.detect_card_type(pjoin(self.me_dir,value)) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' - % (card_name, key)) + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + % (card_name, key)) opt[key] = value else: raise self.InvalidCmd('No such file: %s ' % value) @@ -1154,14 +1154,14 @@ def check_treatcards(self, args): else: self.help_treatcards() raise self.InvalidCmd('Unvalid argument %s' % arg) - - return mode, opt - - + + return mode, opt + + def check_survey(self, args, cmd='survey'): """check that the argument for survey are valid""" - - + + self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) @@ -1183,41 +1183,41 @@ def check_survey(self, args, cmd='survey'): self.help_survey() raise self.InvalidCmd('Too many argument for %s command' % cmd) elif not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], None,'parton', True) args.pop(0) - + return True def check_generate_events(self, args): """check that the argument for generate_events are valid""" - + run = None if args and args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['auto','parton', 'pythia', 'pgs', 'delphes']: self.help_generate_events() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. 
Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + #if len(args) > 1: # self.help_generate_events() # raise self.InvalidCmd('Too many argument for generate_events command: %s' % cmd) - + return run def check_calculate_decay_widths(self, args): """check that the argument for calculate_decay_widths are valid""" - + if self.ninitial != 1: raise self.InvalidCmd('Can only calculate decay widths for decay processes A > B C ...') @@ -1232,7 +1232,7 @@ def check_calculate_decay_widths(self, args): if len(args) > 1: self.help_calculate_decay_widths() raise self.InvalidCmd('Too many argument for calculate_decay_widths command: %s' % cmd) - + return accuracy @@ -1241,25 +1241,25 @@ def check_multi_run(self, args): """check that the argument for survey are valid""" run = None - + if not len(args): self.help_multi_run() raise self.InvalidCmd("""multi_run command requires at least one argument for the number of times that it call generate_events command""") - + if args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['parton', 'pythia', 'pgs', 'delphes']: self.help_multi_run() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. 
To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + elif not args[0].isdigit(): self.help_multi_run() @@ -1267,7 +1267,7 @@ def check_multi_run(self, args): #pass nb run to an integer nb_run = args.pop(0) args.insert(0, int(nb_run)) - + return run @@ -1284,7 +1284,7 @@ def check_refine(self, args): self.help_refine() raise self.InvalidCmd('require_precision argument is require for refine cmd') - + if not self.run_name: if self.results.lastrun: self.set_run_name(self.results.lastrun) @@ -1296,17 +1296,17 @@ def check_refine(self, args): else: try: [float(arg) for arg in args] - except ValueError: - self.help_refine() + except ValueError: + self.help_refine() raise self.InvalidCmd('refine arguments are suppose to be number') - + return True - + def check_combine_events(self, arg): """ Check the argument for the combine events command """ - + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] elif not self.run_tag: @@ -1314,53 +1314,53 @@ def check_combine_events(self, arg): else: tag = self.run_tag self.run_tag = tag - + if len(arg) > 1: self.help_combine_events() raise self.InvalidCmd('Too many argument for combine_events command') - + if len(arg) == 1: self.set_run_name(arg[0], self.run_tag, 'parton', True) - + if not self.run_name: if not self.results.lastrun: raise self.InvalidCmd('No run_name currently define. 
Unable to run combine') else: self.set_run_name(self.results.lastrun) - + return True - + def check_pythia(self, args): """Check the argument for pythia command - syntax: pythia [NAME] + syntax: pythia [NAME] Note that other option are already removed at this point """ - + mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia', 'pgs', 'delphes']: self.help_pythia() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') - + if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' raise self.InvalidCmd(error_msg) - - - + + + tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1368,8 +1368,8 @@ def check_pythia(self, args): if self.results.lastrun: args.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): @@ -1388,21 +1388,21 @@ def check_pythia(self, args): files.ln(input_file, os.path.dirname(output_file)) else: misc.gunzip(input_file, keep=True, stdout=output_file) - + args.append(mode) - + def check_pythia8(self, args): """Check the argument for pythia command - syntax: pythia8 [NAME] + syntax: pythia8 [NAME] Note that other option are already removed at this point - """ + """ mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia','pythia8','delphes']: self.help_pythia8() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') @@ -1410,7 +1410,7 @@ def check_pythia8(self, args): if not self.options['pythia8_path']: logger.info('Retry reading configuration file to find pythia8 path') self.set_configuration() - + if not self.options['pythia8_path'] or not \ os.path.exists(pjoin(self.options['pythia8_path'],'bin','pythia8-config')): error_msg = 'No valid pythia8 path set.\n' @@ -1421,7 +1421,7 @@ def check_pythia8(self, args): raise self.InvalidCmd(error_msg) tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1430,11 +1430,11 @@ def check_pythia8(self, args): args.insert(0, self.results.lastrun) else: raise self.InvalidCmd('No run name currently define. '+ - 'Please add this information.') - + 'Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ - not os.path.exists(pjoin(self.me_dir,'Events',args[0], + not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): raise self.InvalidCmd('No events file corresponding to %s run. 
' % args[0]) @@ -1451,9 +1451,9 @@ def check_pythia8(self, args): else: raise self.InvalidCmd('No event file corresponding to %s run. ' % self.run_name) - + args.append(mode) - + def check_remove(self, args): """Check that the remove command is valid""" @@ -1484,33 +1484,33 @@ def check_plot(self, args): madir = self.options['madanalysis_path'] td = self.options['td_path'] - + if not madir or not td: logger.info('Retry to read configuration file to find madanalysis/td') self.set_configuration() madir = self.options['madanalysis_path'] - td = self.options['td_path'] - + td = self.options['td_path'] + if not madir: error_msg = 'No valid MadAnalysis path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) + raise self.InvalidCmd(error_msg) if not td: error_msg = 'No valid td path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') + raise self.InvalidCmd('No run name currently define. Please add this information.') args.append('all') return - + if args[0] not in self._plot_mode: self.set_run_name(args[0], level='plot') del args[0] @@ -1518,45 +1518,45 @@ def check_plot(self, args): args.append('all') elif not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + for arg in args: if arg not in self._plot_mode and arg != self.run_name: self.help_plot() - raise self.InvalidCmd('unknown options %s' % arg) - + raise self.InvalidCmd('unknown options %s' % arg) + def check_syscalc(self, args): """Check the argument for the syscalc command syscalc run_name modes""" scdir = self.options['syscalc_path'] - + if not scdir: logger.info('Retry to read configuration file to find SysCalc') self.set_configuration() scdir = self.options['syscalc_path'] - + if not scdir: error_msg = 'No valid SysCalc path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' error_msg += 'Please note that you need to compile SysCalc first.' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. Please add this information.') args.append('all') return #deal options tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] - + if args[0] not in self._syscalc_mode: self.set_run_name(args[0], tag=tag, level='syscalc') del args[0] @@ -1564,61 +1564,61 @@ def check_syscalc(self, args): args.append('all') elif not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. 
Please add this information.') elif tag and tag != self.run_tag: self.set_run_name(self.run_name, tag=tag, level='syscalc') - + for arg in args: if arg not in self._syscalc_mode and arg != self.run_name: self.help_syscalc() - raise self.InvalidCmd('unknown options %s' % arg) + raise self.InvalidCmd('unknown options %s' % arg) if self.run_card['use_syst'] not in self.true: raise self.InvalidCmd('Run %s does not include ' % self.run_name + \ 'systematics information needed for syscalc.') - - + + def check_pgs(self, arg, no_default=False): """Check the argument for pythia command - syntax is "pgs [NAME]" + syntax is "pgs [NAME]" Note that other option are already remove at this point """ - + # If not pythia-pgs path if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] - - + + if len(arg) == 0 and not self.run_name: if self.results.lastrun: arg.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(arg) == 1 and self.run_name == arg[0]: arg.pop(0) - + if not len(arg) and \ not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): if not no_default: self.help_pgs() raise self.InvalidCmd('''No file file pythia_events.hep currently available Please specify a valid run_name''') - - lock = None + + lock = None if len(arg) == 1: prev_tag = self.set_run_name(arg[0], tag, 'pgs') if not os.path.exists(pjoin(self.me_dir,'Events',self.run_name,'%s_pythia_events.hep.gz' % prev_tag)): @@ -1626,25 +1626,25 @@ def check_pgs(self, arg, no_default=False): else: input_file = pjoin(self.me_dir,'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag) output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep') - lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), + lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), argument=['-c', input_file]) else: - if tag: + if tag: self.run_card['run_tag'] = tag self.set_run_name(self.run_name, tag, 'pgs') - - return lock + + return lock def check_display(self, args): """check the validity of line syntax is "display XXXXX" """ - + if len(args) < 1 or args[0] not in self._display_opts: self.help_display() raise self.InvalidCmd - + if args[0] == 'variable' and len(args) !=2: raise self.InvalidCmd('variable need a variable name') @@ -1654,39 +1654,39 @@ def check_display(self, args): def check_import(self, args): """check the validity of line""" - + if not args: self.help_import() raise self.InvalidCmd('wrong \"import\" format') - + if args[0] != 'command': args.insert(0,'command') - - + + if not len(args) == 2 or not os.path.exists(args[1]): raise self.InvalidCmd('PATH is mandatory for import command\n') - + #=============================================================================== # CompleteForCmd #=============================================================================== class CompleteForCmd(CheckValidForCmd): """ The Series of help 
routine for the MadGraphCmd""" - - + + def complete_banner_run(self, text, line, begidx, endidx, formatting=True): "Complete the banner run command" try: - - + + args = self.split_arg(line[0:begidx], error=False) - + if args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args \ - if a.endswith(os.path.sep)])) - - + if a.endswith(os.path.sep)])) + + if len(args) > 1: # only options are possible tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events' , args[1])) @@ -1697,9 +1697,9 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): else: return self.list_completion(text, tags) return self.list_completion(text, tags +['--name=','-f'], line) - + # First argument - possibilites = {} + possibilites = {} comp = self.path_completion(text, os.path.join('.',*[a for a in args \ if a.endswith(os.path.sep)])) @@ -1711,10 +1711,10 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): run_list = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) run_list = [n.rsplit('/',2)[1] for n in run_list] possibilites['RUN Name'] = self.list_completion(text, run_list) - + return self.deal_multiple_categories(possibilites, formatting) - - + + except Exception as error: print(error) @@ -1732,12 +1732,12 @@ def complete_history(self, text, line, begidx, endidx): if len(args) == 1: return self.path_completion(text) - - def complete_open(self, text, line, begidx, endidx): + + def complete_open(self, text, line, begidx, endidx): """ complete the open command """ args = self.split_arg(line[0:begidx]) - + # Directory continuation if os.path.sep in args[-1] + text: return self.path_completion(text, @@ -1751,10 +1751,10 @@ def complete_open(self, text, line, begidx, endidx): if os.path.isfile(os.path.join(path,'README')): possibility.append('README') if os.path.isdir(os.path.join(path,'Cards')): - possibility += [f for f in os.listdir(os.path.join(path,'Cards')) + 
possibility += [f for f in os.listdir(os.path.join(path,'Cards')) if f.endswith('.dat')] if os.path.isdir(os.path.join(path,'HTML')): - possibility += [f for f in os.listdir(os.path.join(path,'HTML')) + possibility += [f for f in os.listdir(os.path.join(path,'HTML')) if f.endswith('.html') and 'default' not in f] else: possibility.extend(['./','../']) @@ -1763,7 +1763,7 @@ def complete_open(self, text, line, begidx, endidx): if os.path.exists('MG5_debug'): possibility.append('MG5_debug') return self.list_completion(text, possibility) - + def complete_set(self, text, line, begidx, endidx): "Complete the set command" @@ -1784,27 +1784,27 @@ def complete_set(self, text, line, begidx, endidx): elif len(args) >2 and args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args if a.endswith(os.path.sep)]), - only_dirs = True) - + only_dirs = True) + def complete_survey(self, text, line, begidx, endidx): """ Complete the survey command """ - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + return self.list_completion(text, self._run_options, line) - + complete_refine = complete_survey complete_combine_events = complete_survey complite_store = complete_survey complete_generate_events = complete_survey complete_create_gridpack = complete_survey - + def complete_generate_events(self, text, line, begidx, endidx): """ Complete the generate events""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() @@ -1813,17 +1813,17 @@ def complete_generate_events(self, text, line, begidx, endidx): return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) def 
complete_initMadLoop(self, text, line, begidx, endidx): "Complete the initMadLoop command" - + numbers = [str(i) for i in range(10)] opts = ['-f','-r','--nPS='] - + args = self.split_arg(line[0:begidx], error=False) if len(line) >=6 and line[begidx-6:begidx]=='--nPS=': return self.list_completion(text, numbers, line) @@ -1840,18 +1840,18 @@ def complete_launch(self, *args, **opts): def complete_calculate_decay_widths(self, text, line, begidx, endidx): """ Complete the calculate_decay_widths command""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + opts = self._run_options + self._calculate_decay_options return self.list_completion(text, opts, line) - + def complete_display(self, text, line, begidx, endidx): - """ Complete the display command""" - + """ Complete the display command""" + args = self.split_arg(line[0:begidx], error=False) if len(args) >= 2 and args[1] =='results': start = line.find('results') @@ -1860,44 +1860,44 @@ def complete_display(self, text, line, begidx, endidx): def complete_multi_run(self, text, line, begidx, endidx): """complete multi run command""" - + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: data = [str(i) for i in range(0,20)] return self.list_completion(text, data, line) - + if line.endswith('run=') and not text: return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - - - + + + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - + def complete_plot(self, text, line, begidx, endidx): """ Complete 
the plot command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1: return self.list_completion(text, self._plot_mode) else: return self.list_completion(text, self._plot_mode + list(self.results.keys())) - + def complete_syscalc(self, text, line, begidx, endidx, formatting=True): """ Complete the syscalc command """ - + output = {} args = self.split_arg(line[0:begidx], error=False) - + if len(args) <=1: output['RUN_NAME'] = self.list_completion(list(self.results.keys())) output['MODE'] = self.list_completion(text, self._syscalc_mode) @@ -1907,12 +1907,12 @@ def complete_syscalc(self, text, line, begidx, endidx, formatting=True): if run in self.results: tags = ['--tag=%s' % tag['tag'] for tag in self.results[run]] output['options'] += tags - + return self.deal_multiple_categories(output, formatting) - + def complete_remove(self, text, line, begidx, endidx): """Complete the remove command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1 and (text.startswith('--t')): run = args[1] @@ -1932,8 +1932,8 @@ def complete_remove(self, text, line, begidx, endidx): data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1] for n in data] return self.list_completion(text, ['all'] + data) - - + + def complete_shower(self,text, line, begidx, endidx): "Complete the shower command" args = self.split_arg(line[0:begidx], error=False) @@ -1941,7 +1941,7 @@ def complete_shower(self,text, line, begidx, endidx): return self.list_completion(text, self._interfaced_showers) elif len(args)>1 and args[1] in self._interfaced_showers: return getattr(self, 'complete_%s' % text)\ - (text, args[1],line.replace(args[0]+' ',''), + (text, args[1],line.replace(args[0]+' ',''), begidx-len(args[0])-1, endidx-len(args[0])-1) def complete_pythia8(self,text, line, begidx, endidx): @@ -1955,11 +1955,11 @@ def complete_pythia8(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = 
self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_madanalysis5_parton(self,text, line, begidx, endidx): @@ -1978,19 +1978,19 @@ def complete_madanalysis5_parton(self,text, line, begidx, endidx): else: tmp2 = self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) - return tmp1 + tmp2 + return tmp1 + tmp2 elif '--MA5_stdout_lvl=' in line and not any(arg.startswith( '--MA5_stdout_lvl=') for arg in args): - return self.list_completion(text, - ['--MA5_stdout_lvl=%s'%opt for opt in + return self.list_completion(text, + ['--MA5_stdout_lvl=%s'%opt for opt in ['logging.INFO','logging.DEBUG','logging.WARNING', 'logging.CRITICAL','90']], line) else: - return self.list_completion(text, ['-f', + return self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) def complete_pythia(self,text, line, begidx, endidx): - "Complete the pythia command" + "Complete the pythia command" args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: @@ -2001,16 +2001,16 @@ def complete_pythia(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_pgs(self,text, line, begidx, endidx): "Complete the pythia command" - args = self.split_arg(line[0:begidx], error=False) + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: 
#return valid run_name data = misc.glob(pjoin('*', '*_pythia_events.hep.gz'), pjoin(self.me_dir, 'Events')) @@ -2019,23 +2019,23 @@ def complete_pgs(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--tag=' ,'--no_default'], line) - return tmp1 + tmp2 + return tmp1 + tmp2 else: - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--tag=','--no_default'], line) - complete_delphes = complete_pgs - complete_rivet = complete_pgs + complete_delphes = complete_pgs + complete_rivet = complete_pgs #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCmd): - """The command line processor of Mad Graph""" - + """The command line processor of Mad Graph""" + LO = True # Truth values @@ -2063,7 +2063,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm cluster_mode = 0 queue = 'madgraph' nb_core = None - + next_possibility = { 'start': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]', 'calculate_decay_widths [OPTIONS]', @@ -2080,9 +2080,9 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm 'pgs': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'], 'delphes' : ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'] } - + asking_for_run = AskRun - + ############################################################################ def __init__(self, me_dir = None, options={}, *completekey, **stdin): """ add information to the cmd """ @@ -2095,16 +2095,16 @@ def __init__(self, me_dir = None, options={}, *completekey, **stdin): if self.web: os.system('touch %s' % pjoin(self.me_dir,'Online')) - 
self.load_results_db() + self.load_results_db() self.results.def_web_mode(self.web) self.Gdirs = None - + self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) self.configured = 0 # time for reading the card self._options = {} # for compatibility with extended_cmd - - + + def pass_in_web_mode(self): """configure web data""" self.web = True @@ -2113,22 +2113,22 @@ def pass_in_web_mode(self): if os.environ['MADGRAPH_BASE']: self.options['mg5_path'] = pjoin(os.environ['MADGRAPH_BASE'],'MG5') - ############################################################################ + ############################################################################ def check_output_type(self, path): """ Check that the output path is a valid madevent directory """ - + bin_path = os.path.join(path,'bin') if os.path.isfile(os.path.join(bin_path,'generate_events')): return True - else: + else: return False ############################################################################ def set_configuration(self, amcatnlo=False, final=True, **opt): - """assign all configuration variable from file + """assign all configuration variable from file loop over the different config file if config_file not define """ - - super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, + + super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, final=final, **opt) if not final: @@ -2171,24 +2171,24 @@ def set_configuration(self, amcatnlo=False, final=True, **opt): if not os.path.exists(pjoin(path, 'sys_calc')): logger.info("No valid SysCalc path found") continue - # No else since the next line reinitialize the option to the + # No else since the next line reinitialize the option to the #previous value anyway self.options[key] = os.path.realpath(path) continue else: self.options[key] = None - - + + return self.options ############################################################################ - def do_banner_run(self, line): + def do_banner_run(self, line): """Make a run from the banner file""" - + 
args = self.split_arg(line) #check the validity of the arguments - self.check_banner_run(args) - + self.check_banner_run(args) + # Remove previous cards for name in ['delphes_trigger.dat', 'delphes_card.dat', 'pgs_card.dat', 'pythia_card.dat', 'madspin_card.dat', @@ -2197,20 +2197,20 @@ def do_banner_run(self, line): os.remove(pjoin(self.me_dir, 'Cards', name)) except Exception: pass - + banner_mod.split_banner(args[0], self.me_dir, proc_card=False) - + # Check if we want to modify the run if not self.force: ans = self.ask('Do you want to modify the Cards?', 'n', ['y','n']) if ans == 'n': self.force = True - + # Call Generate events self.exec_cmd('generate_events %s %s' % (self.run_name, self.force and '-f' or '')) - - - + + + ############################################################################ def do_display(self, line, output=sys.stdout): """Display current internal status""" @@ -2223,7 +2223,7 @@ def do_display(self, line, output=sys.stdout): #return valid run_name data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1:] for n in data] - + if data: out = {} for name, tag in data: @@ -2235,11 +2235,11 @@ def do_display(self, line, output=sys.stdout): print('the runs available are:') for run_name, tags in out.items(): print(' run: %s' % run_name) - print(' tags: ', end=' ') + print(' tags: ', end=' ') print(', '.join(tags)) else: print('No run detected.') - + elif args[0] == 'options': outstr = " Run Options \n" outstr += " ----------- \n" @@ -2260,8 +2260,8 @@ def do_display(self, line, output=sys.stdout): if value == default: outstr += " %25s \t:\t%s\n" % (key,value) else: - outstr += " %25s \t:\t%s (user set)\n" % (key,value) - outstr += "\n" + outstr += " %25s \t:\t%s (user set)\n" % (key,value) + outstr += "\n" outstr += " Configuration Options \n" outstr += " --------------------- \n" for key, default in self.options_configuration.items(): @@ -2275,15 +2275,15 @@ def do_display(self, line, 
output=sys.stdout): self.do_print_results(' '.join(args[1:])) else: super(MadEventCmd, self).do_display(line, output) - + def do_save(self, line, check=True, to_keep={}): - """Not in help: Save information to file""" + """Not in help: Save information to file""" args = self.split_arg(line) # Check argument validity if check: self.check_save(args) - + if args[0] == 'options': # First look at options which should be put in MG5DIR/input to_define = {} @@ -2295,7 +2295,7 @@ def do_save(self, line, check=True, to_keep={}): for key, default in self.options_madevent.items(): if self.options[key] != self.options_madevent[key]: to_define[key] = self.options[key] - + if '--all' in args: for key, default in self.options_madgraph.items(): if self.options[key] != self.options_madgraph[key]: @@ -2312,12 +2312,12 @@ def do_save(self, line, check=True, to_keep={}): filepath = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basefile = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basedir = self.me_dir - + if to_keep: to_define = to_keep self.write_configuration(filepath, basefile, basedir, to_define) - - + + def do_edit_cards(self, line): @@ -2326,80 +2326,80 @@ def do_edit_cards(self, line): # Check argument's validity mode = self.check_generate_events(args) self.ask_run_configuration(mode) - + return ############################################################################ - + ############################################################################ def do_restart_gridpack(self, line): """ syntax restart_gridpack --precision=1.0 --restart_zero collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) - + # initialize / remove lhapdf mode #self.run_card = 
banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) #self.configure_directory() - + gensym = gen_ximprove.gensym(self) - + min_precision = 1.0 resubmit_zero=False if '--precision=' in line: s = line.index('--precision=') + len('--precision=') arg=line[s:].split(1)[0] min_precision = float(arg) - + if '--restart_zero' in line: resubmit_zero = True - - + + gensym.resubmit(min_precision, resubmit_zero) self.monitor(run_type='All jobs submitted for gridpack', html=True) #will be done during the refine (more precisely in gen_ximprove) cross, error = sum_html.make_all_html_results(self) self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(gensym.run_statistics)) - + #self.exec_cmd('combine_events', postcmd=False) #self.exec_cmd('store_events', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) self.exec_cmd('create_gridpack', postcmd=False) - - - ############################################################################ + + + ############################################################################ ############################################################################ def do_generate_events(self, line): """Main Commands: launch the full chain """ - + self.banner = None self.Gdirs = None - + args = self.split_arg(line) # Check argument's validity mode = self.check_generate_events(args) switch_mode = self.ask_run_configuration(mode, args) if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir), None, 'parton') else: self.set_run_name(args[0], None, 'parton', True) args.pop(0) - + 
self.run_generate_events(switch_mode, args) self.postprocessing() @@ -2420,8 +2420,8 @@ def postprocessing(self): def rivet_postprocessing(self, rivet_config, postprocess_RIVET, postprocess_CONTUR): - # Check number of Rivet jobs to run - run_dirs = [pjoin(self.me_dir, 'Events',run_name) + # Check number of Rivet jobs to run + run_dirs = [pjoin(self.me_dir, 'Events',run_name) for run_name in self.postprocessing_dirs] nb_rivet = len(run_dirs) @@ -2550,10 +2550,10 @@ def wait_monitoring(Idle, Running, Done): wrapper = open(pjoin(self.me_dir, "Analysis", "contur", "run_contur.sh"), "w") wrapper.write(set_env) - + wrapper.write('{0}\n'.format(contur_cmd)) wrapper.close() - + misc.call(["run_contur.sh"], cwd=(pjoin(self.me_dir, "Analysis", "contur"))) logger.info("Contur outputs are stored in {0}".format(pjoin(self.me_dir, "Analysis", "contur","conturPlot"))) @@ -2572,7 +2572,7 @@ def run_generate_events(self, switch_mode, args): self.do_set('run_mode 2') self.do_set('nb_core 1') - if self.run_card['gridpack'] in self.true: + if self.run_card['gridpack'] in self.true: # Running gridpack warmup gridpack_opts=[('accuracy', 0.01), ('points', 2000), @@ -2593,7 +2593,7 @@ def run_generate_events(self, switch_mode, args): # Regular run mode logger.info('Generating %s events with run name %s' % (self.run_card['nevents'], self.run_name)) - + self.exec_cmd('survey %s %s' % (self.run_name,' '.join(args)), postcmd=False) nb_event = self.run_card['nevents'] @@ -2601,7 +2601,7 @@ def run_generate_events(self, switch_mode, args): self.exec_cmd('refine %s' % nb_event, postcmd=False) if not float(self.results.current['cross']): # Zero cross-section. Try to guess why - text = '''Survey return zero cross section. + text = '''Survey return zero cross section. Typical reasons are the following: 1) A massive s-channel particle has a width set to zero. 
2) The pdf are zero for at least one of the initial state particles @@ -2613,17 +2613,17 @@ def run_generate_events(self, switch_mode, args): raise ZeroResult('See https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/FAQ-General-14') else: bypass_run = True - + #we can bypass the following if scan and first result is zero if not bypass_run: self.exec_cmd('refine %s --treshold=%s' % (nb_event,self.run_card['second_refine_treshold']) , postcmd=False) - + self.exec_cmd('combine_events', postcmd=False,printcmd=False) self.print_results_in_shell(self.results.current) if self.run_card['use_syst']: - if self.run_card['systematics_program'] == 'auto': + if self.run_card['systematics_program'] == 'auto': scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): to_use = 'systematics' @@ -2634,26 +2634,26 @@ def run_generate_events(self, switch_mode, args): else: logger.critical('Unvalid options for systematics_program: bypass computation of systematics variations.') to_use = 'none' - + if to_use == 'systematics': if self.run_card['systematics_arguments'] != ['']: self.exec_cmd('systematics %s %s ' % (self.run_name, - ' '.join(self.run_card['systematics_arguments'])), + ' '.join(self.run_card['systematics_arguments'])), postcmd=False, printcmd=False) else: self.exec_cmd('systematics %s --from_card' % self.run_name, - postcmd=False,printcmd=False) + postcmd=False,printcmd=False) elif to_use == 'syscalc': self.run_syscalc('parton') - - - self.create_plot('parton') - self.exec_cmd('store_events', postcmd=False) + + + self.create_plot('parton') + self.exec_cmd('store_events', postcmd=False) if self.run_card['boost_event'].strip() and self.run_card['boost_event'] != 'False': self.boost_events() - - - self.exec_cmd('reweight -from_cards', postcmd=False) + + + self.exec_cmd('reweight -from_cards', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) if self.run_card['time_of_flight']>=0: self.exec_cmd("add_time_of_flight --threshold=%s" % 
self.run_card['time_of_flight'] ,postcmd=False) @@ -2664,43 +2664,43 @@ def run_generate_events(self, switch_mode, args): self.create_root_file(input , output) self.exec_cmd('madanalysis5_parton --no_default', postcmd=False, printcmd=False) - # shower launches pgs/delphes if needed + # shower launches pgs/delphes if needed self.exec_cmd('shower --no_default', postcmd=False, printcmd=False) self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) self.exec_cmd('rivet --no_default', postcmd=False, printcmd=False) self.store_result() - - if self.allow_notification_center: - misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), - '%s: %s +- %s ' % (self.results.current['run_name'], + + if self.allow_notification_center: + misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), + '%s: %s +- %s ' % (self.results.current['run_name'], self.results.current['cross'], self.results.current['error'])) - + def boost_events(self): - + if not self.run_card['boost_event']: return - + if self.run_card['boost_event'].startswith('lambda'): if not isinstance(self, cmd.CmdShell): raise Exception("boost not allowed online") filter = eval(self.run_card['boost_event']) else: raise Exception - + path = [pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')] - + for p in path: if os.path.exists(p): event_path = p break else: raise Exception("fail to find event file for the boost") - - + + lhe = lhe_parser.EventFile(event_path) with misc.TMP_directory() as tmp_dir: output = lhe_parser.EventFile(pjoin(tmp_dir, os.path.basename(event_path)), 'w') @@ -2711,28 +2711,28 @@ def boost_events(self): event.boost(filter) #write this modify event output.write(str(event)) - output.write('\n') + output.write('\n') lhe.close() - 
files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) - - - - - + files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) + + + + + def do_initMadLoop(self,line): - """Compile and run MadLoop for a certain number of PS point so as to + """Compile and run MadLoop for a certain number of PS point so as to initialize MadLoop (setup the zero helicity and loop filter.)""" - + args = line.split() # Check argument's validity options = self.check_initMadLoop(args) - + if not options['force']: self.ask_edit_cards(['MadLoopParams.dat'], mode='fixed', plot=False) self.exec_cmd('treatcards loop --no_MadLoopInit') if options['refresh']: - for filter in misc.glob('*Filter*', + for filter in misc.glob('*Filter*', pjoin(self.me_dir,'SubProcesses','MadLoop5_resources')): logger.debug("Resetting filter '%s'."%os.path.basename(filter)) os.remove(filter) @@ -2753,14 +2753,14 @@ def do_initMadLoop(self,line): def do_launch(self, line, *args, **opt): """Main Commands: exec generate_events for 2>N and calculate_width for 1>N""" - + if self.ninitial == 1: logger.info("Note that since 2.3. 
The launch for 1>N pass in event generation\n"+ " To have the previous behavior use the calculate_decay_widths function") # self.do_calculate_decay_widths(line, *args, **opt) #else: self.do_generate_events(line, *args, **opt) - + def print_results_in_shell(self, data): """Have a nice results prints in the shell, data should be of type: gen_crossxhtml.OneTagResults""" @@ -2770,7 +2770,7 @@ def print_results_in_shell(self, data): if data['run_statistics']: globalstat = sum_html.RunStatistics() - + logger.info(" " ) logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2786,13 +2786,13 @@ def print_results_in_shell(self, data): logger.warning(globalstat.get_warning_text()) logger.info(" ") - + logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) - + total_time = int(sum(_['cumulative_timing'] for _ in data['run_statistics'].values())) if total_time > 0: logger.info(" Cumulative sequential time for this run: %s"%misc.format_time(total_time)) - + if self.ninitial == 1: logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) else: @@ -2810,18 +2810,18 @@ def print_results_in_shell(self, data): if len(split)!=3: continue scale, cross, error = split - cross_sections[float(scale)] = (float(cross), float(error)) + cross_sections[float(scale)] = (float(cross), float(error)) if len(cross_sections)>0: logger.info(' Pythia8 merged cross-sections are:') for scale in sorted(cross_sections.keys()): logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ (scale,cross_sections[scale][0],cross_sections[scale][1])) - + else: if self.ninitial == 1: logger.info(" Matched width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) else: - logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) + logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) logger.info(" 
Nb of events after matching/merging : %d" % int(data['nb_event_pythia'])) if self.run_card['use_syst'] in self.true and \ (int(self.run_card['ickkw'])==1 or self.run_card['ktdurham']>0.0 @@ -2838,9 +2838,9 @@ def print_results_in_file(self, data, path, mode='w', format='full'): data should be of type: gen_crossxhtml.OneTagResults""" if not data: return - + fsock = open(path, mode) - + if data['run_statistics']: logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2851,7 +2851,7 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if format == "full": fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ (data['run_name'],data['tag'], os.path.basename(self.me_dir))) - + if self.ninitial == 1: fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) else: @@ -2861,20 +2861,20 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if self.ninitial == 1: fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) else: - fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) + fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) fsock.write(" \n" ) elif format == "short": if mode == "w": fsock.write("# run_name tag cross error Nb_event cross_after_matching nb_event_after matching\n") - + if data['cross_pythia'] and data['nb_event_pythia']: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s %(cross_pythia)s %(nb_event_pythia)s\n" else: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s\n" fsock.write(text % data) - - ############################################################################ + + ############################################################################ def do_calculate_decay_widths(self, 
line): """Main Commands: launch decay width calculation and automatic inclusion of calculated widths and BRs in the param_card.""" @@ -2887,21 +2887,21 @@ def do_calculate_decay_widths(self, line): self.Gdirs = None if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], reload_card=True) args.pop(0) self.configure_directory() - + # Running gridpack warmup opts=[('accuracy', accuracy), # default 0.01 ('points', 1000), ('iterations',9)] logger.info('Calculating decay widths with run name %s' % self.run_name) - + self.exec_cmd('survey %s %s' % \ (self.run_name, " ".join(['--' + opt + '=' + str(val) for (opt,val) \ @@ -2910,26 +2910,26 @@ def do_calculate_decay_widths(self, line): self.refine_mode = "old" # specify how to combine event self.exec_cmd('combine_events', postcmd=False) self.exec_cmd('store_events', postcmd=False) - + self.collect_decay_widths() self.print_results_in_shell(self.results.current) - self.update_status('calculate_decay_widths done', - level='parton', makehtml=False) + self.update_status('calculate_decay_widths done', + level='parton', makehtml=False) + - ############################################################################ def collect_decay_widths(self): - """ Collect the decay widths and calculate BRs for all particles, and put - in param_card form. + """ Collect the decay widths and calculate BRs for all particles, and put + in param_card form. """ - + particle_dict = {} # store the results run_name = self.run_name # Looping over the Subprocesses for P_path in SubProcesses.get_subP(self.me_dir): ids = SubProcesses.get_subP_ids(P_path) - # due to grouping we need to compute the ratio factor for the + # due to grouping we need to compute the ratio factor for the # ungroup resutls (that we need here). 
Note that initial particles # grouping are not at the same stage as final particle grouping nb_output = len(ids) / (len(set([p[0] for p in ids]))) @@ -2940,30 +2940,30 @@ def collect_decay_widths(self): particle_dict[particles[0]].append([particles[1:], result/nb_output]) except KeyError: particle_dict[particles[0]] = [[particles[1:], result/nb_output]] - + self.update_width_in_param_card(particle_dict, initial = pjoin(self.me_dir, 'Cards', 'param_card.dat'), output=pjoin(self.me_dir, 'Events', run_name, "param_card.dat")) - + @staticmethod def update_width_in_param_card(decay_info, initial=None, output=None): # Open the param_card.dat and insert the calculated decays and BRs - + if not output: output = initial - + param_card_file = open(initial) param_card = param_card_file.read().split('\n') param_card_file.close() decay_lines = [] line_number = 0 - # Read and remove all decays from the param_card + # Read and remove all decays from the param_card while line_number < len(param_card): line = param_card[line_number] if line.lower().startswith('decay'): - # Read decay if particle in decay_info - # DECAY 6 1.455100e+00 + # Read decay if particle in decay_info + # DECAY 6 1.455100e+00 line = param_card.pop(line_number) line = line.split() particle = 0 @@ -2996,7 +2996,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): break line=param_card[line_number] if particle and particle not in decay_info: - # No decays given, only total width + # No decays given, only total width decay_info[particle] = [[[], width]] else: # Not decay line_number += 1 @@ -3004,7 +3004,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): while not param_card[-1] or param_card[-1].startswith('#'): param_card.pop(-1) - # Append calculated and read decays to the param_card + # Append calculated and read decays to the param_card param_card.append("#\n#*************************") param_card.append("# Decay widths *") 
param_card.append("#*************************") @@ -3018,7 +3018,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): param_card.append("# BR NDA ID1 ID2 ...") brs = [[(val[1]/width).real, val[0]] for val in decay_info[key] if val[1]] for val in sorted(brs, reverse=True): - param_card.append(" %e %i %s # %s" % + param_card.append(" %e %i %s # %s" % (val[0].real, len(val[1]), " ".join([str(v) for v in val[1]]), val[0] * width @@ -3031,7 +3031,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): ############################################################################ def do_multi_run(self, line): - + args = self.split_arg(line) # Check argument's validity mode = self.check_multi_run(args) @@ -3047,7 +3047,7 @@ def do_multi_run(self, line): self.check_param_card(path, run=False) #store it locally to avoid relaunch param_card_iterator, self.param_card_iterator = self.param_card_iterator, [] - + crossoversig = 0 inv_sq_err = 0 nb_event = 0 @@ -3055,8 +3055,8 @@ def do_multi_run(self, line): self.nb_refine = 0 self.exec_cmd('generate_events %s_%s -f' % (main_name, i), postcmd=False) # Update collected value - nb_event += int(self.results[self.run_name][-1]['nb_event']) - self.results.add_detail('nb_event', nb_event , run=main_name) + nb_event += int(self.results[self.run_name][-1]['nb_event']) + self.results.add_detail('nb_event', nb_event , run=main_name) cross = self.results[self.run_name][-1]['cross'] error = self.results[self.run_name][-1]['error'] + 1e-99 crossoversig+=cross/error**2 @@ -3070,7 +3070,7 @@ def do_multi_run(self, line): os.mkdir(pjoin(self.me_dir,'Events', self.run_name)) except Exception: pass - os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' + os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' % {'bin': 
self.dirbin, 'event': pjoin(self.me_dir,'Events'), 'name': self.run_name}) @@ -3084,19 +3084,19 @@ def do_multi_run(self, line): self.create_root_file('%s/unweighted_events.lhe' % self.run_name, '%s/unweighted_events.root' % self.run_name) - - path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") + + path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") self.create_plot('parton', path, pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') ) - - if not os.path.exists('%s.gz' % path): + + if not os.path.exists('%s.gz' % path): misc.gzip(path) self.update_status('', level='parton') - self.print_results_in_shell(self.results.current) - + self.print_results_in_shell(self.results.current) + cpath = pjoin(self.me_dir,'Cards','param_card.dat') if param_card_iterator: @@ -3112,21 +3112,21 @@ def do_multi_run(self, line): path = pjoin(self.me_dir, 'Events','scan_%s.txt' % scan_name) logger.info("write all cross-section results in %s" % path, '$MG:BOLD') param_card_iterator.write_summary(path) - - ############################################################################ + + ############################################################################ def do_treatcards(self, line, mode=None, opt=None): """Advanced commands: create .inc files from param_card.dat/run_card.dat""" if not mode and not opt: args = self.split_arg(line) mode, opt = self.check_treatcards(args) - + # To decide whether to refresh MadLoop's helicity filters, it is necessary # to check if the model parameters where modified or not, before doing - # anything else. + # anything else. need_MadLoopFilterUpdate = False - # Just to record what triggered the reinitialization of MadLoop for a + # Just to record what triggered the reinitialization of MadLoop for a # nice debug message. 
type_of_change = '' if not opt['forbid_MadLoopInit'] and self.proc_characteristics['loop_induced'] \ @@ -3137,10 +3137,10 @@ def do_treatcards(self, line, mode=None, opt=None): (os.path.getmtime(paramDat)-os.path.getmtime(paramInc)) > 0.0: need_MadLoopFilterUpdate = True type_of_change = 'model' - + ML_in = pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat') ML_out = pjoin(self.me_dir,"SubProcesses", - "MadLoop5_resources", "MadLoopParams.dat") + "MadLoop5_resources", "MadLoopParams.dat") if (not os.path.isfile(ML_in)) or (not os.path.isfile(ML_out)) or \ (os.path.getmtime(ML_in)-os.path.getmtime(ML_out)) > 0.0: need_MadLoopFilterUpdate = True @@ -3148,7 +3148,7 @@ def do_treatcards(self, line, mode=None, opt=None): #check if no 'Auto' are present in the file self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) - + if mode in ['param', 'all']: model = self.find_model_name() tmp_model = os.path.basename(model) @@ -3160,9 +3160,9 @@ def do_treatcards(self, line, mode=None, opt=None): check_param_card.check_valid_param_card(mg5_param) opt['param_card'] = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') else: - check_param_card.check_valid_param_card(opt['param_card']) - - logger.debug('write compile file for card: %s' % opt['param_card']) + check_param_card.check_valid_param_card(opt['param_card']) + + logger.debug('write compile file for card: %s' % opt['param_card']) param_card = check_param_card.ParamCard(opt['param_card']) outfile = pjoin(opt['output_dir'], 'param_card.inc') ident_card = pjoin(self.me_dir,'Cards','ident_card.dat') @@ -3185,10 +3185,10 @@ def do_treatcards(self, line, mode=None, opt=None): devnull.close() default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat') - need_mp = self.proc_characteristics['loop_induced'] + need_mp = self.proc_characteristics['loop_induced'] param_card.write_inc_file(outfile, ident_card, default, need_mp=need_mp) - - + + if mode in ['run', 'all']: if not hasattr(self, 'run_card'): 
run_card = banner_mod.RunCard(opt['run_card'], path=pjoin(self.me_dir, 'Cards', 'run_card.dat')) @@ -3202,7 +3202,7 @@ def do_treatcards(self, line, mode=None, opt=None): run_card['lpp2'] = 0 run_card['ebeam1'] = 0 run_card['ebeam2'] = 0 - + # Ensure that the bias parameters has all the required input from the # run_card if run_card['bias_module'].lower() not in ['dummy','none']: @@ -3219,7 +3219,7 @@ def do_treatcards(self, line, mode=None, opt=None): mandatory_file,run_card['bias_module'])) misc.copytree(run_card['bias_module'], pjoin(self.me_dir,'Source','BIAS', os.path.basename(run_card['bias_module']))) - + #check expected parameters for the module. default_bias_parameters = {} start, last = False,False @@ -3244,50 +3244,50 @@ def do_treatcards(self, line, mode=None, opt=None): for pair in line.split(','): if not pair.strip(): continue - x,y =pair.split(':') + x,y =pair.split(':') x=x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y elif ':' in line: x,y = line.split(':') x = x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y for key,value in run_card['bias_parameters'].items(): if key not in default_bias_parameters: logger.warning('%s not supported by the bias module. We discard this entry.', key) else: default_bias_parameters[key] = value - run_card['bias_parameters'] = default_bias_parameters - - - # Finally write the include file + run_card['bias_parameters'] = default_bias_parameters + + + # Finally write the include file run_card.write_include_file(opt['output_dir']) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: - self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, + self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat')) # The writing out of MadLoop filter is potentially dangerous # when running in multi-core with a central disk. 
So it is turned - # off here. If these filters were not initialized then they will + # off here. If these filters were not initialized then they will # have to be re-computed at the beginning of each run. if 'WriteOutFilters' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('WriteOutFilters'): logger.info( -"""You chose to have MadLoop writing out filters. +"""You chose to have MadLoop writing out filters. Beware that this can be dangerous for local multicore runs.""") self.MadLoopparam.set('WriteOutFilters',False, changeifuserset=False) - + # The conservative settings below for 'CTModeInit' and 'ZeroThres' # help adress issues for processes like g g > h z, and g g > h g - # where there are some helicity configuration heavily suppressed - # (by several orders of magnitude) so that the helicity filter + # where there are some helicity configuration heavily suppressed + # (by several orders of magnitude) so that the helicity filter # needs high numerical accuracy to correctly handle this spread in # magnitude. Also, because one cannot use the Born as a reference - # scale, it is better to force quadruple precision *for the + # scale, it is better to force quadruple precision *for the # initialization points only*. This avoids numerical accuracy issues # when setting up the helicity filters and does not significantly # slow down the run. @@ -3298,21 +3298,21 @@ def do_treatcards(self, line, mode=None, opt=None): # It is a bit superficial to use the level 2 which tries to numerically # map matching helicities (because of CP symmetry typically) together. -# It is useless in the context of MC over helicities and it can +# It is useless in the context of MC over helicities and it can # potentially make the helicity double checking fail. self.MadLoopparam.set('HelicityFilterLevel',1, changeifuserset=False) # To be on the safe side however, we ask for 4 consecutive matching # helicity filters. 
self.MadLoopparam.set('CheckCycle',4, changeifuserset=False) - + # For now it is tricky to have each channel performing the helicity # double check. What we will end up doing is probably some kind # of new initialization round at the beginning of each launch - # command, to reset the filters. + # command, to reset the filters. self.MadLoopparam.set('DoubleCheckHelicityFilter',False, changeifuserset=False) - + # Thanks to TIR recycling, TIR is typically much faster for Loop-induced # processes when not doing MC over helicities, so that we place OPP last. if not hasattr(self, 'run_card'): @@ -3349,7 +3349,7 @@ def do_treatcards(self, line, mode=None, opt=None): logger.warning( """You chose to also use a lorentz rotation for stability tests (see parameter NRotations_[DP|QP]). Beware that, for optimization purposes, MadEvent uses manual TIR cache clearing which is not compatible - with the lorentz rotation stability test. The number of these rotations to be used will be reset to + with the lorentz rotation stability test. The number of these rotations to be used will be reset to zero by MadLoop. You can avoid this by changing the parameter 'FORCE_ML_HELICITY_SUM' int he matrix.f files to be .TRUE. so that the sum over helicity configurations is performed within MadLoop (in which case the helicity of final state particles cannot be speicfied in the LHE file.""") @@ -3363,15 +3363,15 @@ def do_treatcards(self, line, mode=None, opt=None): # self.MadLoopparam.set('NRotations_DP',0,changeifuserset=False) # Revert to the above to be slightly less robust but twice faster. 
self.MadLoopparam.set('NRotations_DP',1,changeifuserset=False) - self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) - + self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) + # Finally, the stability tests are slightly less reliable for process - # with less or equal than 4 final state particles because the + # with less or equal than 4 final state particles because the # accessible kinematic is very limited (i.e. lorentz rotations don't # shuffle invariants numerics much). In these cases, we therefore # increase the required accuracy to 10^-7. # This is important for getting g g > z z [QCD] working with a - # ptheavy cut as low as 1 GeV. + # ptheavy cut as low as 1 GeV. if self.proc_characteristics['nexternal']<=4: if ('MLStabThres' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('MLStabThres')>1.0e-7): @@ -3381,12 +3381,12 @@ def do_treatcards(self, line, mode=None, opt=None): than four external legs, so this is not recommended (especially not for g g > z z).""") self.MadLoopparam.set('MLStabThres',1.0e-7,changeifuserset=False) else: - self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) + self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) #write the output file self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses","MadLoop5_resources", "MadLoopParams.dat")) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: # Now Update MadLoop filters if necessary (if modifications were made to # the model parameters). 
@@ -3403,12 +3403,12 @@ def do_treatcards(self, line, mode=None, opt=None): elif not opt['forbid_MadLoopInit'] and \ MadLoopInitializer.need_MadLoopInit(self.me_dir): self.exec_cmd('initMadLoop -f') - - ############################################################################ + + ############################################################################ def do_survey(self, line): """Advanced commands: launch survey for the current process """ - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) @@ -3416,7 +3416,7 @@ def do_survey(self, line): if os.path.exists(pjoin(self.me_dir,'error')): os.remove(pjoin(self.me_dir,'error')) - + self.configure_directory() # Save original random number self.random_orig = self.random @@ -3435,9 +3435,9 @@ def do_survey(self, line): P_zero_result = [] # check the number of times where they are no phase-space # File for the loop (for loop induced) - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources')) and cluster.need_transfer(self.options): - tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', + tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', 'MadLoop5_resources.tar.gz'), 'w:gz', dereference=True) tf.add(pjoin(self.me_dir,'SubProcesses','MadLoop5_resources'), arcname='MadLoop5_resources') @@ -3467,7 +3467,7 @@ def do_survey(self, line): except Exception as error: logger.debug(error) pass - + jobs, P_zero_result = ajobcreator.launch() # Check if all or only some fails if P_zero_result: @@ -3481,60 +3481,60 @@ def do_survey(self, line): self.get_Gdir() for P in P_zero_result: self.Gdirs[0][pjoin(self.me_dir,'SubProcesses',P)] = [] - + self.monitor(run_type='All jobs submitted for survey', html=True) if not self.history or 'survey' in self.history[-1] or self.ninitial ==1 or \ self.run_card['gridpack']: #will be done during the refine (more precisely in gen_ximprove) cross, error = self.make_make_all_html_results() 
self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(ajobcreator.run_statistics)) self.update_status('End survey', 'parton', makehtml=False) ############################################################################ def pass_in_difficult_integration_mode(self, rate=1): """be more secure for the integration to not miss it due to strong cut""" - + # improve survey options if default if self.opts['points'] == self._survey_options['points'][1]: self.opts['points'] = (rate+2) * self._survey_options['points'][1] if self.opts['iterations'] == self._survey_options['iterations'][1]: self.opts['iterations'] = 1 + rate + self._survey_options['iterations'][1] if self.opts['accuracy'] == self._survey_options['accuracy'][1]: - self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) - + self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) + # Modify run_config.inc in order to improve the refine conf_path = pjoin(self.me_dir, 'Source','run_config.inc') files.cp(conf_path, conf_path + '.bk') # text = open(conf_path).read() - min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) - + min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) + text = re.sub('''\(min_events = \d+\)''', '(min_events = %i )' % min_evt, text) text = re.sub('''\(max_events = \d+\)''', '(max_events = %i )' % max_evt, text) fsock = open(conf_path, 'w') fsock.write(text) fsock.close() - + # Compile for name in ['../bin/internal/gen_ximprove', 'all']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) - - - ############################################################################ + + + ############################################################################ def 
do_refine(self, line): """Advanced commands: launch survey for the current process """ - devnull = open(os.devnull, 'w') + devnull = open(os.devnull, 'w') self.nb_refine += 1 args = self.split_arg(line) treshold=None - - + + for a in args: if a.startswith('--treshold='): treshold = float(a.split('=',1)[1]) @@ -3548,8 +3548,8 @@ def do_refine(self, line): break # Check argument's validity self.check_refine(args) - - refine_opt = {'err_goal': args[0], 'split_channels': True} + + refine_opt = {'err_goal': args[0], 'split_channels': True} precision = args[0] if len(args) == 2: refine_opt['max_process']= args[1] @@ -3560,15 +3560,15 @@ def do_refine(self, line): # Update random number self.update_random() self.save_random() - + if self.cluster_mode: logger.info('Creating Jobs') self.update_status('Refine results to %s' % precision, level=None) - + self.total_jobs = 0 - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + # cleanning the previous job for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() @@ -3589,14 +3589,14 @@ def do_refine(self, line): level = 5 if value.has_warning(): level = 10 - logger.log(level, + logger.log(level, value.nice_output(str('/'.join([key[0],'G%s'%key[1]]))). replace(' statistics','')) logger.debug(globalstat.nice_output('combined', no_warning=True)) - + if survey_statistics: x_improve.run_statistics = survey_statistics - + x_improve.launch() # create the ajob for the refinment. 
if not self.history or 'refine' not in self.history[-1]: cross, error = x_improve.update_html() #update html results for survey @@ -3610,9 +3610,9 @@ def do_refine(self, line): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) - + if os.path.exists(pjoin(Pdir, 'ajob1')): cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py @@ -3629,7 +3629,7 @@ def do_refine(self, line): ###self.compile(['all'], cwd=Pdir) alljobs = misc.glob('ajob*', Pdir) - + #remove associated results.dat (ensure to not mix with all data) Gre = re.compile("\s*j=(G[\d\.\w]+)") for job in alljobs: @@ -3637,49 +3637,49 @@ def do_refine(self, line): for Gdir in Gdirs: if os.path.exists(pjoin(Pdir, Gdir, 'results.dat')): os.remove(pjoin(Pdir, Gdir,'results.dat')) - - nb_tot = len(alljobs) + + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), - run_type='Refine number %s on %s (%s/%s)' % + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) - self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, html=True) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except Exception: pass - + if isinstance(x_improve, gen_ximprove.gen_ximprove_v4): # the merge of the events.lhe is handle in the x_improve class - # for splitted runs. (and partly in store_events). + # for splitted runs. (and partly in store_events). 
combine_runs.CombineRuns(self.me_dir) self.refine_mode = "old" else: self.refine_mode = "new" - + cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - self.results.add_detail('run_statistics', + self.results.add_detail('run_statistics', dict(self.results.get_detail('run_statistics'))) self.update_status('finish refine', 'parton', makehtml=False) devnull.close() - - ############################################################################ + + ############################################################################ def do_comine_iteration(self, line): """Not in help: Combine a given iteration combine_iteration Pdir Gdir S|R step - S is for survey + S is for survey R is for refine - step is the iteration number (not very critical)""" + step is the iteration number (not very critical)""" self.set_run_name("tmp") self.configure_directory(html_opening=False) @@ -3695,12 +3695,12 @@ def do_comine_iteration(self, line): gensym.combine_iteration(Pdir, Gdir, int(step)) elif mode == "R": refine = gen_ximprove.gen_ximprove_share(self) - refine.combine_iteration(Pdir, Gdir, int(step)) - - + refine.combine_iteration(Pdir, Gdir, int(step)) - - ############################################################################ + + + + ############################################################################ def do_combine_events(self, line): """Advanced commands: Launch combine events""" start=time.time() @@ -3710,11 +3710,11 @@ def do_combine_events(self, line): self.check_combine_events(args) self.update_status('Combining Events', level='parton') - + if self.run_card['gridpack'] and isinstance(self, GridPackCmd): return GridPackCmd.do_combine_events(self, line) - + # Define The Banner tag = self.run_card['run_tag'] # Update the banner with the pythia card @@ -3727,14 +3727,14 @@ def do_combine_events(self, line): self.banner.change_seed(self.random_orig) if not os.path.exists(pjoin(self.me_dir, 
'Events', self.run_name)): os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) - self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, + self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -3751,12 +3751,12 @@ def do_combine_events(self, line): os.remove(pjoin(Gdir, 'events.lhe')) continue - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec'), result.get('xerru'), result.get('axsec') ) - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.run_card['nevents']) @@ -3765,13 +3765,13 @@ def do_combine_events(self, line): AllEvent.add(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() if len(AllEvent) == 0: - nb_event = 0 + nb_event = 0 else: nb_event = AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.run_card['nevents'], @@ -3791,22 +3791,22 @@ def do_combine_events(self, line): os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) - + if self.run_card['bias_module'].lower() not in ['dummy', 'none'] and nb_event: self.correct_bias() elif 
self.run_card['custom_fcts']: self.correct_bias() logger.info("combination of events done in %s s ", time.time()-start) - + self.to_store.append('event') - - ############################################################################ + + ############################################################################ def correct_bias(self): - """check the first event and correct the weight by the bias + """check the first event and correct the weight by the bias and correct the cross-section. - If the event do not have the bias tag it means that the bias is + If the event do not have the bias tag it means that the bias is one modifying the cross-section/shape so we have nothing to do """ @@ -3834,7 +3834,7 @@ def correct_bias(self): output.write('') output.close() lhe.close() - + # MODIFY THE BANNER i.e. INIT BLOCK # ensure information compatible with normalisation choice total_cross = sum(cross[key] for key in cross) @@ -3846,8 +3846,8 @@ def correct_bias(self): elif self.run_card['event_norm'] == 'unity': total_cross = self.results.current['cross'] * total_cross / nb_event for key in cross: - cross[key] *= total_cross / nb_event - + cross[key] *= total_cross / nb_event + bannerfile = lhe_parser.EventFile(pjoin(self.me_dir, 'Events', self.run_name, '.banner.tmp.gz'),'w') banner = banner_mod.Banner(lhe.banner) banner.modify_init_cross(cross) @@ -3862,12 +3862,12 @@ def correct_bias(self): os.remove(lhe.name) os.remove(bannerfile.name) os.remove(output.name) - - + + self.results.current['cross'] = total_cross self.results.current['error'] = 0 - - ############################################################################ + + ############################################################################ def do_store_events(self, line): """Advanced commands: Launch store events""" @@ -3883,16 +3883,16 @@ def do_store_events(self, line): if not os.path.exists(pjoin(self.me_dir, 'Events', run)): os.mkdir(pjoin(self.me_dir, 'Events', run)) if not 
os.path.exists(pjoin(self.me_dir, 'HTML', run)): - os.mkdir(pjoin(self.me_dir, 'HTML', run)) - + os.mkdir(pjoin(self.me_dir, 'HTML', run)) + # 1) Store overall process information #input = pjoin(self.me_dir, 'SubProcesses', 'results.dat') #output = pjoin(self.me_dir, 'SubProcesses', '%s_results.dat' % run) - #files.cp(input, output) + #files.cp(input, output) # 2) Treat the files present in the P directory - # Ensure that the number of events is different of 0 + # Ensure that the number of events is different of 0 if self.results.current['nb_event'] == 0 and not self.run_card['gridpack']: logger.warning("No event detected. No cleaning performed! This should allow to run:\n" + " cd Subprocesses; ../bin/internal/combine_events\n"+ @@ -3910,18 +3910,18 @@ def do_store_events(self, line): # if os.path.exists(pjoin(G_path, 'results.dat')): # input = pjoin(G_path, 'results.dat') # output = pjoin(G_path, '%s_results.dat' % run) - # files.cp(input, output) + # files.cp(input, output) #except Exception: - # continue + # continue # Store log try: if os.path.exists(pjoin(G_path, 'log.txt')): input = pjoin(G_path, 'log.txt') output = pjoin(G_path, '%s_log.txt' % run) - files.mv(input, output) + files.mv(input, output) except Exception: continue - #try: + #try: # # Grid # for name in ['ftn26']: # if os.path.exists(pjoin(G_path, name)): @@ -3930,7 +3930,7 @@ def do_store_events(self, line): # input = pjoin(G_path, name) # output = pjoin(G_path, '%s_%s' % (run,name)) # files.mv(input, output) - # misc.gzip(pjoin(G_path, output), error=None) + # misc.gzip(pjoin(G_path, output), error=None) #except Exception: # continue # Delete ftn25 to ensure reproducible runs @@ -3940,11 +3940,11 @@ def do_store_events(self, line): # 3) Update the index.html self.gen_card_html() - + # 4) Move the Files present in Events directory E_path = pjoin(self.me_dir, 'Events') O_path = pjoin(self.me_dir, 'Events', run) - + # The events file for name in ['events.lhe', 'unweighted_events.lhe']: finput = 
pjoin(E_path, name) @@ -3960,30 +3960,30 @@ def do_store_events(self, line): # os.remove(pjoin(O_path, '%s.gz' % name)) # input = pjoin(E_path, name) ## output = pjoin(O_path, name) - + self.update_status('End Parton', level='parton', makehtml=False) devnull.close() - - - ############################################################################ + + + ############################################################################ def do_create_gridpack(self, line): """Advanced commands: Create gridpack from present run""" self.update_status('Creating gridpack', level='parton') # compile gen_ximprove misc.compile(['../bin/internal/gen_ximprove'], cwd=pjoin(self.me_dir, "Source")) - + Gdir = self.get_Gdir() Pdir = set([os.path.dirname(G) for G in Gdir]) - for P in Pdir: + for P in Pdir: allG = misc.glob('G*', path=P) for G in allG: if pjoin(P, G) not in Gdir: logger.debug('removing %s', pjoin(P,G)) shutil.rmtree(pjoin(P,G)) - - + + args = self.split_arg(line) self.check_combine_events(args) if not self.run_tag: self.run_tag = 'tag_1' @@ -3996,13 +3996,13 @@ def do_create_gridpack(self, line): cwd=self.me_dir) misc.call(['./bin/internal/clean'], cwd=self.me_dir) misc.call(['./bin/internal/make_gridpack'], cwd=self.me_dir) - files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), + files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), pjoin(self.me_dir, '%s_gridpack.tar.gz' % self.run_name)) os.system("sed -i.bak \"s/\s*.true.*=.*GridRun/ .false. 
= GridRun/g\" %s/Cards/grid_card.dat" \ % self.me_dir) self.update_status('gridpack created', level='gridpack') - - ############################################################################ + + ############################################################################ def do_shower(self, line): """launch the shower""" @@ -4010,7 +4010,7 @@ def do_shower(self, line): if len(args)>1 and args[0] in self._interfaced_showers: chosen_showers = [args.pop(0)] elif '--no_default' in line: - # If '--no_default' was specified in the arguments, then only one + # If '--no_default' was specified in the arguments, then only one # shower will be run, depending on which card is present. # but we each of them are called. (each of them check if the file exists) chosen_showers = list(self._interfaced_showers) @@ -4021,9 +4021,9 @@ def do_shower(self, line): shower_priority = ['pythia8','pythia'] chosen_showers = [sorted(chosen_showers,key=lambda sh: shower_priority.index(sh) if sh in shower_priority else len(shower_priority)+1)[0]] - + for shower in chosen_showers: - self.exec_cmd('%s %s'%(shower,' '.join(args)), + self.exec_cmd('%s %s'%(shower,' '.join(args)), postcmd=False, printcmd=False) def do_madanalysis5_parton(self, line): @@ -4039,11 +4039,11 @@ def do_madanalysis5_parton(self, line): def mg5amc_py8_interface_consistency_warning(options): """ Check the consistency of the mg5amc_py8_interface installed with the current MG5 and Pythia8 versions. 
""" - + # All this is only relevant is Pythia8 is interfaced to MG5 if not options['pythia8_path']: return None - + if not options['mg5amc_py8_interface_path']: return \ """ @@ -4053,7 +4053,7 @@ def mg5amc_py8_interface_consistency_warning(options): Consider installing the MG5_aMC-PY8 interface with the following command: MG5_aMC>install mg5amc_py8_interface """ - + mg5amc_py8_interface_path = options['mg5amc_py8_interface_path'] py8_path = options['pythia8_path'] # If the specified interface path is relative, make it absolut w.r.t MGDIR if @@ -4062,7 +4062,7 @@ def mg5amc_py8_interface_consistency_warning(options): mg5amc_py8_interface_path = pjoin(MG5DIR,mg5amc_py8_interface_path) py8_path = pjoin(MG5DIR,py8_path) - # Retrieve all the on-install and current versions + # Retrieve all the on-install and current versions fsock = open(pjoin(mg5amc_py8_interface_path, 'MG5AMC_VERSION_ON_INSTALL')) MG5_version_on_install = fsock.read().replace('\n','') fsock.close() @@ -4074,7 +4074,7 @@ def mg5amc_py8_interface_consistency_warning(options): MG5_curr_version =misc.get_pkg_info()['version'] try: p = subprocess.Popen(['./get_pythia8_version.py',py8_path], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mg5amc_py8_interface_path) (out, err) = p.communicate() out = out.decode(errors='ignore').replace('\n','') @@ -4084,37 +4084,37 @@ def mg5amc_py8_interface_consistency_warning(options): float(out) except: PY8_curr_version = None - + if not MG5_version_on_install is None and not MG5_curr_version is None: if MG5_version_on_install != MG5_curr_version: return \ """ The current version of MG5_aMC (v%s) is different than the one active when - installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). + installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). 
Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(MG5_curr_version, MG5_version_on_install) - + if not PY8_version_on_install is None and not PY8_curr_version is None: if PY8_version_on_install != PY8_curr_version: return \ """ The current version of Pythia8 (v%s) is different than the one active when - installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). + installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(PY8_curr_version,PY8_version_on_install) - + return None def setup_Pythia8RunAndCard(self, PY8_Card, run_type): """ Setup the Pythia8 Run environment and card. In particular all the process and run specific parameters of the card are automatically set here. This function returns the path where HEPMC events will be output, if any.""" - + HepMC_event_output = None tag = self.run_tag - + PY8_Card.subruns[0].systemSet('Beams:LHEF',"unweighted_events.lhe.gz") hepmc_format = PY8_Card['HEPMCoutput:file'].lower() @@ -4185,7 +4185,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): misc.mkfifo(fifo_path) # Use defaultSet not to overwrite the current userSet status PY8_Card.defaultSet('HEPMCoutput:file',fifo_path) - HepMC_event_output=fifo_path + HepMC_event_output=fifo_path elif hepmc_format in ['','/dev/null','None']: logger.warning('User disabled the HepMC output of Pythia8.') HepMC_event_output = None @@ -4206,7 +4206,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): # only if it is not already user_set. 
if PY8_Card['JetMatching:qCut']==-1.0: PY8_Card.MadGraphSet('JetMatching:qCut',1.5*self.run_card['xqcut'], force=True) - + if PY8_Card['JetMatching:qCut']<(1.5*self.run_card['xqcut']): logger.error( 'The MLM merging qCut parameter you chose (%f) is less than '%PY8_Card['JetMatching:qCut']+ @@ -4233,7 +4233,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): if PY8_Card['JetMatching:qCut'] not in qCutList: qCutList.append(PY8_Card['JetMatching:qCut']) PY8_Card.MadGraphSet('SysCalc:qCutList', qCutList, force=True) - + if PY8_Card['SysCalc:qCutList']!='auto': for scale in PY8_Card['SysCalc:qCutList']: @@ -4244,7 +4244,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): "'sys_matchscale' in the run_card) is less than 1.5*xqcut, where xqcut is"+ ' the run_card parameter (=%f)\n'%self.run_card['xqcut']+ 'It would be better/safer to use a larger qCut or a smaller xqcut.') - + # Specific MLM settings # PY8 should not implement the MLM veto since the driver should do it # if merging scale variation is turned on @@ -4294,18 +4294,18 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): CKKW_cut = 'ktdurham' elif self.run_card['ptlund']>0.0 and self.run_card['ktdurham']<=0.0: PY8_Card.subruns[0].MadGraphSet('Merging:doPTLundMerging',True) - CKKW_cut = 'ptlund' + CKKW_cut = 'ptlund' else: raise InvalidCmd("*Either* the 'ptlund' or 'ktdurham' cut in "+\ " the run_card must be turned on to activate CKKW(L) merging"+ " with Pythia8, but *both* cuts cannot be turned on at the same time."+ "\n ptlund=%f, ktdurham=%f."%(self.run_card['ptlund'],self.run_card['ktdurham'])) - + # Automatically set qWeed to the CKKWL cut if not defined by the user. if PY8_Card['SysCalc:qWeed']==-1.0: PY8_Card.MadGraphSet('SysCalc:qWeed',self.run_card[CKKW_cut], force=True) - + # MadGraphSet sets the corresponding value (in system mode) # only if it is not already user_set. 
if PY8_Card['Merging:TMS']==-1.0: @@ -4319,7 +4319,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): 'The CKKWl merging scale you chose (%f) is less than '%PY8_Card['Merging:TMS']+ 'the %s cut specified in the run_card parameter (=%f).\n'%(CKKW_cut,self.run_card[CKKW_cut])+ 'It is incorrect to use a smaller CKKWl scale than the generation-level %s cut!'%CKKW_cut) - + PY8_Card.MadGraphSet('TimeShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:rapidityOrder',False) @@ -4381,7 +4381,7 @@ def do_pythia8(self, line): try: import madgraph - except ImportError: + except ImportError: import internal.histograms as histograms else: import madgraph.various.histograms as histograms @@ -4400,16 +4400,16 @@ def do_pythia8(self, line): self.check_pythia8(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) - self.check_pythia8(args) + self.check_pythia8(args) # Update the banner with the pythia card if not self.banner or len(self.banner) <=1: # Here the level keyword 'pythia' must not be changed to 'pythia8'. self.banner = banner_mod.recover_banner(self.results, 'pythia') - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1], pythia_version=8, banner=self.banner) @@ -4425,7 +4425,7 @@ def do_pythia8(self, line): #"Please use 'event_norm = average' in the run_card to avoid this problem.") - + if not self.options['mg5amc_py8_interface_path'] or not \ os.path.exists(pjoin(self.options['mg5amc_py8_interface_path'], 'MG5aMC_PY8_interface')): @@ -4444,16 +4444,16 @@ def do_pythia8(self, line): # Again here 'pythia' is just a keyword for the simulation level. 
self.update_status('\033[92mRunning Pythia8 [arXiv:1410.3012]\033[0m', 'pythia8') - - tag = self.run_tag + + tag = self.run_tag # Now write Pythia8 card # Start by reading, starting from the default one so that the 'user_set' # tag are correctly set. - PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', + PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', 'pythia8_card_default.dat')) PY8_Card.read(pjoin(self.me_dir, 'Cards', 'pythia8_card.dat'), setter='user') - + run_type = 'default' merged_run_types = ['MLM','CKKW'] if int(self.run_card['ickkw'])==1: @@ -4471,7 +4471,7 @@ def do_pythia8(self, line): cmd_card = StringIO.StringIO() PY8_Card.write(cmd_card,pjoin(self.me_dir,'Cards','pythia8_card_default.dat'), direct_pythia_input=True) - + # Now setup the preamble to make sure that everything will use the locally # installed tools (if present) even if the user did not add it to its # environment variables. @@ -4486,13 +4486,13 @@ def do_pythia8(self, line): preamble = misc.get_HEPTools_location_setter( pjoin(MG5DIR,'HEPTools'),'lib') preamble += "\n unset PYTHIA8DATA\n" - + open(pythia_cmd_card,'w').write("""! ! It is possible to run this card manually with: ! %s %s ! 
"""%(preamble+pythia_main,os.path.basename(pythia_cmd_card))+cmd_card.getvalue()) - + # launch pythia8 pythia_log = pjoin(self.me_dir , 'Events', self.run_name , '%s_pythia8.log' % tag) @@ -4504,13 +4504,13 @@ def do_pythia8(self, line): shell_exe = None if os.path.exists('/usr/bin/env'): shell_exe = '/usr/bin/env %s'%shell - else: + else: shell_exe = misc.which(shell) if not shell_exe: raise self.InvalidCmd('No s hell could be found in your environment.\n'+ "Make sure that either '%s' is in your path or that the"%shell+\ " command '/usr/bin/env %s' exists and returns a valid path."%shell) - + exe_cmd = "#!%s\n%s"%(shell_exe,' '.join( [preamble+pythia_main, os.path.basename(pythia_cmd_card)])) @@ -4528,7 +4528,7 @@ def do_pythia8(self, line): ( os.path.exists(HepMC_event_output) and \ stat.S_ISFIFO(os.stat(HepMC_event_output).st_mode)) startPY8timer = time.time() - + # Information that will be extracted from this PY8 run PY8_extracted_information={ 'sigma_m':None, 'Nacc':None, 'Ntry':None, 'cross_sections':{} } @@ -4556,7 +4556,7 @@ def do_pythia8(self, line): n_cores = max(int(self.options['cluster_size']),1) elif self.options['run_mode']==2: n_cores = max(int(self.cluster.nb_core),1) - + lhe_file_name = os.path.basename(PY8_Card.subruns[0]['Beams:LHEF']) lhe_file = lhe_parser.EventFile(pjoin(self.me_dir,'Events', self.run_name,PY8_Card.subruns[0]['Beams:LHEF'])) @@ -4574,7 +4574,7 @@ def do_pythia8(self, line): if self.options['run_mode']==2: min_n_events_per_job = 100 elif self.options['run_mode']==1: - min_n_events_per_job = 1000 + min_n_events_per_job = 1000 min_n_core = n_events//min_n_events_per_job n_cores = max(min(min_n_core,n_cores),1) @@ -4584,8 +4584,8 @@ def do_pythia8(self, line): logger.info('Follow Pythia8 shower by running the '+ 'following command (in a separate terminal):\n tail -f %s'%pythia_log) - if self.options['run_mode']==2 and self.options['nb_core']>1: - ret_code = self.cluster.launch_and_wait(wrapper_path, + if 
self.options['run_mode']==2 and self.options['nb_core']>1: + ret_code = self.cluster.launch_and_wait(wrapper_path, argument= [], stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events',self.run_name)) else: @@ -4630,10 +4630,10 @@ def do_pythia8(self, line): wrapper = open(wrapper_path,'w') if self.options['cluster_temp_path'] is None: exe_cmd = \ -"""#!%s +"""#!%s ./%s PY8Card.dat >& PY8_log.txt """ - else: + else: exe_cmd = \ """#!%s ln -s ./events_$1.lhe.gz ./events.lhe.gz @@ -4663,21 +4663,21 @@ def do_pythia8(self, line): # Set it as executable st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) - + # Split the .lhe event file, create event partition partition=[n_available_events//n_cores]*n_cores for i in range(n_available_events%n_cores): partition[i] += 1 - + # Splitting according to the total number of events requested by the user # Will be used to determine the number of events to indicate in the PY8 split cards. partition_for_PY8=[n_events//n_cores]*n_cores for i in range(n_events%n_cores): partition_for_PY8[i] += 1 - - logger.info('Splitting .lhe event file for PY8 parallelization...') - n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) - + + logger.info('Splitting .lhe event file for PY8 parallelization...') + n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) + if n_splits!=len(partition): raise MadGraph5Error('Error during lhe file splitting. Expected %d files but obtained %d.' 
%(len(partition),n_splits)) @@ -4690,7 +4690,7 @@ def do_pythia8(self, line): # Add the necessary run content shutil.move(pjoin(parallelization_dir,lhe_file.name+'_%d.lhe.gz'%split_id), pjoin(parallelization_dir,split_files[-1])) - + logger.info('Submitting Pythia8 jobs...') for i, split_file in enumerate(split_files): # We must write a PY8Card tailored for each split so as to correct the normalization @@ -4706,7 +4706,7 @@ def do_pythia8(self, line): split_PY8_Card.write(pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,'PY8Card.dat'), add_missing=False) in_files = [pjoin(parallelization_dir,os.path.basename(pythia_main)), - pjoin(parallelization_dir,'PY8Card_%d.dat'%i), + pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,split_file)] if self.options['cluster_temp_path'] is None: out_files = [] @@ -4718,35 +4718,35 @@ def do_pythia8(self, line): if os.path.basename(in_file)==split_file: ln(in_file,selected_cwd,name='events.lhe.gz') elif os.path.basename(in_file).startswith('PY8Card'): - ln(in_file,selected_cwd,name='PY8Card.dat') + ln(in_file,selected_cwd,name='PY8Card.dat') else: - ln(in_file,selected_cwd) + ln(in_file,selected_cwd) in_files = [] wrapper_path = os.path.basename(wrapper_path) else: out_files = ['split_%d.tar.gz'%i] selected_cwd = parallelization_dir - self.cluster.submit2(wrapper_path, - argument=[str(i)], cwd=selected_cwd, + self.cluster.submit2(wrapper_path, + argument=[str(i)], cwd=selected_cwd, input_files=in_files, output_files=out_files, required_output=out_files) - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return logger.info('Pythia8 shower jobs: %d Idle, %d Running, %d Done [%s]'\ %(Idle, Running, Done, misc.format_time(time.time() - startPY8timer))) self.cluster.wait(parallelization_dir,wait_monitoring) - + logger.info('Merging results from the split PY8 runs...') if self.options['cluster_temp_path']: # Decompressing the output for i, split_file in 
enumerate(split_files): misc.call(['tar','-xzf','split_%d.tar.gz'%i],cwd=parallelization_dir) os.remove(pjoin(parallelization_dir,'split_%d.tar.gz'%i)) - + # Now merge logs pythia_log_file = open(pythia_log,'w') n_added = 0 @@ -4778,7 +4778,7 @@ def wait_monitoring(Idle, Running, Done): if n_added>0: PY8_extracted_information['sigma_m'] /= float(n_added) pythia_log_file.close() - + # djr plots djr_HwU = None n_added = 0 @@ -4845,7 +4845,7 @@ def wait_monitoring(Idle, Running, Done): if not os.path.isfile(hepmc_file): continue all_hepmc_files.append(hepmc_file) - + if len(all_hepmc_files)>0: hepmc_output = pjoin(self.me_dir,'Events',self.run_name,HepMC_event_output) with misc.TMP_directory() as tmp_dir: @@ -4860,8 +4860,8 @@ def wait_monitoring(Idle, Running, Done): break header.close() tail = open(pjoin(tmp_dir,'tail.hepmc'),'w') - n_tail = 0 - + n_tail = 0 + for line in misc.reverse_readline(all_hepmc_files[-1]): if line.startswith('HepMC::'): n_tail += 1 @@ -4871,7 +4871,7 @@ def wait_monitoring(Idle, Running, Done): tail.close() if n_tail>1: raise MadGraph5Error('HEPMC files should only have one trailing command.') - ###################################################################### + ###################################################################### # This is the most efficient way of putting together HEPMC's, *BUT* # # WARNING: NEED TO RENDER THE CODE BELOW SAFE TOWARDS INJECTION # ###################################################################### @@ -4888,12 +4888,12 @@ def wait_monitoring(Idle, Running, Done): elif sys.platform == 'darwin': # sed on MAC has slightly different synthax than on os.system(' '.join(['sed','-i',"''","'%s;$d'"% - (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) - else: - # other UNIX systems + (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) + else: + # other UNIX systems os.system(' '.join(['sed','-i']+["-e '%id'"%(i+1) for i in range(n_head)]+ ["-e '$d'",hepmc_file])) - + os.system(' 
'.join(['cat',pjoin(tmp_dir,'header.hepmc')]+all_hepmc_files+ [pjoin(tmp_dir,'tail.hepmc'),'>',hepmc_output])) @@ -4915,12 +4915,12 @@ def wait_monitoring(Idle, Running, Done): 'Inclusive cross section:' not in '\n'.join(open(pythia_log,'r').readlines()[-20:]): logger.warning('Fail to produce a pythia8 output. More info in \n %s'%pythia_log) return - + # Plot for Pythia8 successful = self.create_plot('Pythia8') if not successful: logger.warning('Failed to produce Pythia8 merging plots.') - + self.to_store.append('pythia8') # Study matched cross-sections @@ -4931,7 +4931,7 @@ def wait_monitoring(Idle, Running, Done): if self.options['run_mode']==0 or (self.options['run_mode']==2 and self.options['nb_core']==1): PY8_extracted_information['sigma_m'],PY8_extracted_information['Nacc'],\ PY8_extracted_information['Ntry'] = self.parse_PY8_log_file( - pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) + pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) else: logger.warning('Pythia8 cross-section could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1. 
YYYYY') @@ -4944,8 +4944,8 @@ def wait_monitoring(Idle, Running, Done): Ntry = PY8_extracted_information['Ntry'] sigma_m = PY8_extracted_information['sigma_m'] # Compute pythia error - error = self.results[self.run_name].return_tag(self.run_tag)['error'] - try: + error = self.results[self.run_name].return_tag(self.run_tag)['error'] + try: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) except ZeroDivisionError: # Cannot compute error @@ -4966,31 +4966,31 @@ def wait_monitoring(Idle, Running, Done): else: logger.warning('Pythia8 merged cross-sections could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1.XXXXX') - PY8_extracted_information['cross_sections'] = {} - + PY8_extracted_information['cross_sections'] = {} + cross_sections = PY8_extracted_information['cross_sections'] if cross_sections: - # Filter the cross_sections specified an keep only the ones + # Filter the cross_sections specified an keep only the ones # with central parameters and a different merging scale a_float_re = '[\+|-]?\d+(\.\d*)?([EeDd][\+|-]?\d+)?' 
central_merging_re = re.compile( '^\s*Weight_MERGING\s*=\s*(?P%s)\s*$'%a_float_re, - re.IGNORECASE) + re.IGNORECASE) cross_sections = dict( (float(central_merging_re.match(xsec).group('merging')),value) - for xsec, value in cross_sections.items() if not + for xsec, value in cross_sections.items() if not central_merging_re.match(xsec) is None) central_scale = PY8_Card['JetMatching:qCut'] if \ int(self.run_card['ickkw'])==1 else PY8_Card['Merging:TMS'] if central_scale in cross_sections: self.results.add_detail('cross_pythia8', cross_sections[central_scale][0]) self.results.add_detail('error_pythia8', cross_sections[central_scale][1]) - + #logger.info('Pythia8 merged cross-sections are:') #for scale in sorted(cross_sections.keys()): # logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ - # (scale,cross_sections[scale][0],cross_sections[scale][1])) - + # (scale,cross_sections[scale][0],cross_sections[scale][1])) + xsecs_file = open(pjoin(self.me_dir,'Events',self.run_name, '%s_merged_xsecs.txt'%tag),'w') if cross_sections: @@ -5003,9 +5003,9 @@ def wait_monitoring(Idle, Running, Done): xsecs_file.write('Cross-sections could not be read from the'+\ "XML node 'xsection' of the .dat file produced by Pythia8.") xsecs_file.close() - + #Update the banner - # We add directly the pythia command card because it has the full + # We add directly the pythia command card because it has the full # information self.banner.add(pythia_cmd_card) @@ -5022,13 +5022,13 @@ def wait_monitoring(Idle, Running, Done): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + def parse_PY8_log_file(self, log_file_path): """ Parse a log file to extract number of event and cross-section. 
""" pythiare = re.compile("Les Houches User Process\(es\)\s*\d+\s*\|\s*(?P\d+)\s*(?P\d+)\s*(?P\d+)\s*\|\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") pythia_xsec_re = re.compile("Inclusive cross section\s*:\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") sigma_m, Nacc, Ntry = None, None, None - for line in misc.BackRead(log_file_path): + for line in misc.BackRead(log_file_path): info = pythiare.search(line) if not info: # Also try to obtain the cross-section and error from the final xsec line of pythia8 log @@ -5058,7 +5058,7 @@ def parse_PY8_log_file(self, log_file_path): raise self.InvalidCmd("Could not find cross-section and event number information "+\ "in Pythia8 log\n '%s'."%log_file_path) - + def extract_cross_sections_from_DJR(self,djr_output): """Extract cross-sections from a djr XML output.""" import xml.dom.minidom as minidom @@ -5075,11 +5075,11 @@ def extract_cross_sections_from_DJR(self,djr_output): [float(xsec.childNodes[0].data.split()[0]), float(xsec.childNodes[0].data.split()[1])]) for xsec in xsections) - + def do_pythia(self, line): """launch pythia""" - - + + # Check argument's validity args = self.split_arg(line) if '--no_default' in args: @@ -5089,12 +5089,12 @@ def do_pythia(self, line): args.remove('--no_default') else: no_default = False - + if not self.run_name: self.check_pythia(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) self.check_pythia(args) @@ -5102,7 +5102,7 @@ def do_pythia(self, line): logger.error('pythia-pgs require event_norm to be on sum. 
Do not run pythia6') return - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1]) if self.options['automatic_html_opening']: @@ -5114,35 +5114,35 @@ def do_pythia(self, line): self.banner = banner_mod.recover_banner(self.results, 'pythia') pythia_src = pjoin(self.options['pythia-pgs_path'],'src') - + self.results.add_detail('run_mode', 'madevent') self.update_status('Running Pythia', 'pythia') try: os.remove(pjoin(self.me_dir,'Events','pythia.done')) except Exception: - pass - + pass + ## LAUNCHING PYTHIA # check that LHAPATH is define. if not re.search(r'^\s*LHAPATH=%s/PDFsets' % pythia_src, - open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), + open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), re.M): f = open(pjoin(self.me_dir,'Cards','pythia_card.dat'),'a') f.write('\n LHAPATH=%s/PDFsets' % pythia_src) f.close() tag = self.run_tag pythia_log = pjoin(self.me_dir, 'Events', self.run_name , '%s_pythia.log' % tag) - #self.cluster.launch_and_wait('../bin/internal/run_pythia', + #self.cluster.launch_and_wait('../bin/internal/run_pythia', # argument= [pythia_src], stdout= pythia_log, # stderr=subprocess.STDOUT, # cwd=pjoin(self.me_dir,'Events')) output_files = ['pythia_events.hep'] if self.run_card['use_syst']: output_files.append('syst.dat') - if self.run_card['ickkw'] == 1: + if self.run_card['ickkw'] == 1: output_files += ['beforeveto.tree', 'xsecs.tree', 'events.tree'] - + os.environ['PDG_MASS_TBL'] = pjoin(pythia_src,'mass_width_2004.mc') self.cluster.launch_and_wait(pjoin(pythia_src, 'pythia'), input_files=[pjoin(self.me_dir, "Events", "unweighted_events.lhe"), @@ -5152,23 +5152,23 @@ def do_pythia(self, line): stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events')) - + os.remove(pjoin(self.me_dir, "Events", "unweighted_events.lhe")) if not 
os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): logger.warning('Fail to produce pythia output. More info in \n %s' % pythia_log) return - + self.to_store.append('pythia') - + # Find the matched cross-section if int(self.run_card['ickkw']): # read the line from the bottom of the file - #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, + #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, # '%s_pythia.log' % tag)) - pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") - for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, + pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") + for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, '%s_pythia.log' % tag)): info = pythiare.search(line) if not info: @@ -5188,16 +5188,16 @@ def do_pythia(self, line): self.results.add_detail('nb_event_pythia', Nacc) #compute pythia error error = self.results[self.run_name].return_tag(self.run_tag)['error'] - if Nacc: + if Nacc: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) else: error_m = 10000 * sigma_m # works both for fixed number of generated events and fixed accepted events self.results.add_detail('error_pythia', error_m) - break + break #pythia_log.close() - + pydir = pjoin(self.options['pythia-pgs_path'], 'src') eradir = self.options['exrootanalysis_path'] madir = self.options['madanalysis_path'] @@ -5216,12 +5216,12 @@ def do_pythia(self, line): # Creating LHE file self.run_hep2lhe(banner_path) - + if int(self.run_card['ickkw']): misc.gzip(pjoin(self.me_dir,'Events','beforeveto.tree'), - stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + - if self.run_card['use_syst'] in self.true: # Calculate syscalc info based on 
syst.dat try: @@ -5233,7 +5233,7 @@ def do_pythia(self, line): # Store syst.dat misc.gzip(pjoin(self.me_dir,'Events', 'syst.dat'), stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_syst.dat.gz')) - + # Store syscalc.dat if os.path.exists(pjoin(self.me_dir, 'Events', 'syscalc.dat')): filename = pjoin(self.me_dir, 'Events' ,self.run_name, @@ -5253,7 +5253,7 @@ def do_pythia(self, line): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + ################################################################################ def do_remove(self, line): @@ -5263,8 +5263,8 @@ def do_remove(self, line): run, tag, mode = self.check_remove(args) if 'banner' in mode: mode.append('all') - - + + if run == 'all': # Check first if they are not a run with a name run. if os.path.exists(pjoin(self.me_dir, 'Events', 'all')): @@ -5280,7 +5280,7 @@ def do_remove(self, line): logger.info(error) pass # run already clear return - + # Check that run exists if not os.path.exists(pjoin(self.me_dir, 'Events', run)): raise self.InvalidCmd('No run \'%s\' detected' % run) @@ -5294,7 +5294,7 @@ def do_remove(self, line): # Found the file to delete - + to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) to_delete += misc.glob('*', pjoin(self.me_dir, 'HTML', run)) # forbid the banner to be removed @@ -5314,7 +5314,7 @@ def do_remove(self, line): if os.path.exists(pjoin(self.me_dir, 'Events', run, 'unweighted_events.lhe.gz')): to_delete.append('unweighted_events.lhe.gz') if os.path.exists(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')): - to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) + to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) if nb_rm != len(to_delete): logger.warning('Be carefull that partonic information are on the point to be removed.') if 'all' in mode: @@ -5327,8 +5327,8 @@ def do_remove(self, line): if 'delphes' not 
in mode: to_delete = [f for f in to_delete if 'delphes' not in f] if 'parton' not in mode: - to_delete = [f for f in to_delete if 'delphes' in f - or 'pgs' in f + to_delete = [f for f in to_delete if 'delphes' in f + or 'pgs' in f or 'pythia' in f] if not self.force and len(to_delete): question = 'Do you want to delete the following files?\n %s' % \ @@ -5336,7 +5336,7 @@ def do_remove(self, line): ans = self.ask(question, 'y', choices=['y','n']) else: ans = 'y' - + if ans == 'y': for file2rm in to_delete: if os.path.exists(pjoin(self.me_dir, 'Events', run, file2rm)): @@ -5374,7 +5374,7 @@ def do_remove(self, line): if ans == 'y': for file2rm in to_delete: os.remove(file2rm) - + if 'banner' in mode: to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) if tag: @@ -5389,8 +5389,8 @@ def do_remove(self, line): return elif any(['banner' not in os.path.basename(p) for p in to_delete]): if to_delete: - raise MadGraph5Error('''Some output still exists for this run. - Please remove those output first. Do for example: + raise MadGraph5Error('''Some output still exists for this run. + Please remove those output first. Do for example: remove %s all banner ''' % run) else: @@ -5400,7 +5400,7 @@ def do_remove(self, line): return else: logger.info('''The banner is not removed. In order to remove it run: - remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) + remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) # update database. 
self.results.clean(mode, run, tag) @@ -5420,7 +5420,7 @@ def do_plot(self, line): logger.info('plot for run %s' % self.run_name) if not self.force: self.ask_edit_cards(['plot_card.dat'], args, plot=True) - + if any([arg in ['all','parton'] for arg in args]): filename = pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe') if os.path.exists(filename+'.gz'): @@ -5438,8 +5438,8 @@ def do_plot(self, line): except Exception: pass else: - logger.info('No valid files for partonic plot') - + logger.info('No valid files for partonic plot') + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_events.lhe' % self.run_tag) @@ -5452,10 +5452,10 @@ def do_plot(self, line): stdout= "%s.gz" % filename) else: logger.info('No valid files for pythia plot') - - + + if any([arg in ['all','pgs'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) @@ -5464,15 +5464,15 @@ def do_plot(self, line): misc.gzip(filename) else: logger.info('No valid files for pgs plot') - + if any([arg in ['all','delphes'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) if os.path.exists(filename): self.create_plot('Delphes') - misc.gzip(filename) + misc.gzip(filename) else: logger.info('No valid files for delphes plot') @@ -5488,9 +5488,9 @@ def do_syscalc(self, line): if self.ninitial == 1: logger.error('SysCalc can\'t be run for decay processes') return - + logger.info('Calculating systematics for run %s' % self.run_name) - + self.ask_edit_cards(['run_card.dat'], args, plot=False) self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) 
if any([arg in ['all','parton'] for arg in args]): @@ -5504,7 +5504,7 @@ def do_syscalc(self, line): stdout="%s.gz" % filename) else: logger.info('No valid files for parton level systematics run.') - + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_syst.dat' % self.run_tag) @@ -5525,17 +5525,17 @@ def do_syscalc(self, line): else: logger.info('No valid files for pythia level') - + def store_result(self): - """ tar the pythia results. This is done when we are quite sure that + """ tar the pythia results. This is done when we are quite sure that the pythia output will not be use anymore """ if not self.run_name: return - + if not self.to_store: - return - + return + tag = self.run_card['run_tag'] self.update_status('storing files of previous run', level=None,\ error=True) @@ -5546,14 +5546,14 @@ def store_result(self): misc.gzip(pjoin(self.me_dir,'Events',self.run_name,"unweighted_events.lhe")) if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) - + if 'pythia' in self.to_store: self.update_status('Storing Pythia files of previous run', level='pythia', error=True) p = pjoin(self.me_dir,'Events') n = self.run_name t = tag self.to_store.remove('pythia') - misc.gzip(pjoin(p,'pythia_events.hep'), + misc.gzip(pjoin(p,'pythia_events.hep'), stdout=pjoin(p, str(n),'%s_pythia_events.hep' % t),forceexternal=True) if 'pythia8' in self.to_store: @@ -5581,26 +5581,26 @@ def store_result(self): os.system("mv " + file_path + hepmc_fileformat + " " + move_hepmc_path) self.update_status('Done', level='pythia',makehtml=False,error=True) - self.results.save() - + self.results.save() + self.to_store = [] - def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, + def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, run_type='', mode=None, **opt): """ """ argument = [str(arg) for arg in argument] if mode is 
None: mode = self.cluster_mode - + # ensure that exe is executable if os.path.exists(exe) and not os.access(exe, os.X_OK): os.system('chmod +x %s ' % exe) elif (cwd and os.path.exists(pjoin(cwd, exe))) and not \ os.access(pjoin(cwd, exe), os.X_OK): os.system('chmod +x %s ' % pjoin(cwd, exe)) - + if mode == 0: - self.update_status((remaining, 1, + self.update_status((remaining, 1, self.total_jobs - remaining -1, run_type), level=None, force=False) start = time.time() #os.system('cd %s; ./%s' % (cwd,exe)) @@ -5613,24 +5613,24 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, elif mode in [1,2]: exename = os.path.basename(exe) # For condor cluster, create the input/output files - if 'ajob' in exename: + if 'ajob' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat','dname.mg', pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) - + output_files = [] required_output = [] - + #Find the correct PDF input file input_files.append(self.get_pdf_input_filename()) - + #Find the correct ajob Gre = re.compile("\s*j=(G[\d\.\w]+)") origre = re.compile("grid_directory=(G[\d\.\w]+)") - try : + try : fsock = open(exe) except Exception: fsock = open(pjoin(cwd,exe)) @@ -5648,21 +5648,21 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if os.path.isdir(pjoin(cwd,G)): input_files.append(G) required_output.append('%s/results.dat' % G) - + if origre.search(text): G_grid = origre.search(text).groups()[0] input_files.append(pjoin(G_grid, 'ftn26')) - + #submitting - self.cluster.submit2(exe, stdout=stdout, cwd=cwd, + self.cluster.submit2(exe, stdout=stdout, cwd=cwd, input_files=input_files, output_files=output_files, required_output=required_output) elif 
'survey' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5671,7 +5671,7 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [] required_output = [] - + #Find the correct ajob suffix = "_%s" % int(float(argument[0])) if suffix == '_0': @@ -5685,12 +5685,12 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if '.' in argument[0]: offset = int(str(argument[0]).split('.')[1]) else: - offset = 0 - + offset = 0 + if offset ==0 or offset == int(float(argument[0])): if os.path.exists(pjoin(cwd, G, 'input_app.txt')): os.remove(pjoin(cwd, G, 'input_app.txt')) - + if os.path.exists(os.path.realpath(pjoin(cwd, G, 'ftn25'))): if offset == 0 or offset == int(float(argument[0])): os.remove(pjoin(cwd, G, 'ftn25')) @@ -5706,16 +5706,16 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, pass #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, required_output=required_output, **opt) elif "refine_splitted.sh" in exename: input_files = ['madevent','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - 
input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5725,25 +5725,25 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [argument[0]] required_output = [] for G in output_files: - required_output.append('%s/results.dat' % G) + required_output.append('%s/results.dat' % G) input_files.append(pjoin(argument[1], "input_app.txt")) input_files.append(pjoin(argument[1], "ftn26")) - + #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, - required_output=required_output, **opt) + required_output=required_output, **opt) + + - - else: self.cluster.submit(exe, argument=argument, stdout=stdout, cwd=cwd, **opt) - + ############################################################################ def find_madevent_mode(self): """Find if Madevent is in Group mode or not""" - + # The strategy is too look in the files Source/run_configs.inc # if we found: ChanPerJob=3 then it's a group mode. 
file_path = pjoin(self.me_dir, 'Source', 'run_config.inc') @@ -5752,11 +5752,11 @@ def find_madevent_mode(self): return 'group' else: return 'v4' - + ############################################################################ def monitor(self, run_type='monitor', mode=None, html=False): """ monitor the progress of running job """ - + starttime = time.time() if mode is None: @@ -5772,8 +5772,8 @@ def monitor(self, run_type='monitor', mode=None, html=False): else: update_status = lambda idle, run, finish: None update_first = None - try: - self.cluster.wait(self.me_dir, update_status, update_first=update_first) + try: + self.cluster.wait(self.me_dir, update_status, update_first=update_first) except Exception as error: logger.info(error) if not self.force: @@ -5788,24 +5788,24 @@ def monitor(self, run_type='monitor', mode=None, html=False): raise except KeyboardInterrupt as error: self.cluster.remove() - raise - - + raise + - ############################################################################ + + ############################################################################ def configure_directory(self, html_opening=True): - """ All action require before any type of run """ + """ All action require before any type of run """ # Basic check assert os.path.exists(pjoin(self.me_dir,'SubProcesses')) # environmental variables to be included in make_opts self.make_opts_var = {} - + #see when the last file was modified time_mod = max([os.path.getmtime(pjoin(self.me_dir,'Cards','run_card.dat')), os.path.getmtime(pjoin(self.me_dir,'Cards','param_card.dat'))]) - + if self.configured >= time_mod and hasattr(self, 'random') and hasattr(self, 'run_card'): #just ensure that cluster specific are correctly handled if self.cluster: @@ -5820,7 +5820,7 @@ def configure_directory(self, html_opening=True): #open only once the web page # Change current working directory self.launching_dir = os.getcwd() - + # Check if we need the MSSM special treatment model = self.find_model_name() 
if model == 'mssm' or model.startswith('mssm-'): @@ -5828,14 +5828,14 @@ def configure_directory(self, html_opening=True): mg5_param = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') check_param_card.convert_to_mg5card(param_card, mg5_param) check_param_card.check_valid_param_card(mg5_param) - + # limit the number of event to 100k self.check_nb_events() # this is in order to avoid conflicts between runs with and without # lhapdf. not needed anymore the makefile handles it automaticallu #misc.compile(['clean4pdf'], cwd = pjoin(self.me_dir, 'Source')) - + self.make_opts_var['pdlabel1'] = '' self.make_opts_var['pdlabel2'] = '' if self.run_card['pdlabel1'] in ['eva', 'iww']: @@ -5866,7 +5866,7 @@ def configure_directory(self, html_opening=True): self.copy_lep_densities(self.run_card['pdlabel'], pjoin(self.me_dir, 'Source')) self.make_opts_var['pdlabel1'] = 'ee' self.make_opts_var['pdlabel2'] = 'ee' - + # set random number if self.run_card['iseed'] != 0: self.random = int(self.run_card['iseed']) @@ -5885,18 +5885,18 @@ def configure_directory(self, html_opening=True): break else: self.random = random.randint(1, 30107) - + #set random seed for python part of the code if self.run_card['python_seed'] == -2: #-2 means same as run_card import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] elif self.run_card['python_seed'] >= 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] if self.run_card['ickkw'] == 2: logger.info('Running with CKKW matching') self.treat_ckkw_matching() @@ -5905,12 +5905,12 @@ def configure_directory(self, html_opening=True): self.update_make_opts(self.run_card) # reset 
list of Gdirectory self.Gdirs = None - + # create param_card.inc and run_card.inc self.do_treatcards('') - + logger.info("compile Source Directory") - + # Compile for name in [ 'all']:#, '../bin/internal/combine_events']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) @@ -5933,7 +5933,7 @@ def configure_directory(self, html_opening=True): os.remove(pjoin(self.me_dir, 'lib','libbias.a')) force_subproc_clean = True - + # Finally compile the bias module as well if self.run_card['bias_module'] not in ['dummy',None]: logger.debug("Compiling the bias module '%s'"%bias_name) @@ -5945,7 +5945,7 @@ def configure_directory(self, html_opening=True): 'INVALID' in str(bias_module_valid).upper(): raise InvalidCmd("The bias module '%s' cannot be used because of:\n%s"% (bias_name,bias_module_valid)) - + self.compile(arg=[], cwd=os.path.join(self.me_dir, 'Source','BIAS',bias_name)) self.proc_characteristics['bias_module']=bias_name # Update the proc_characterstics file @@ -5954,7 +5954,7 @@ def configure_directory(self, html_opening=True): if force_subproc_clean: # Make sure that madevent will be recompiled - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] for nb_proc,subdir in enumerate(subproc): Pdir = pjoin(self.me_dir, 'SubProcesses',subdir.strip()) @@ -5971,20 +5971,20 @@ def configure_directory(self, html_opening=True): ############################################################################ @staticmethod def check_dir(path, default=''): - """check if the directory exists. if so return the path otherwise the + """check if the directory exists. 
if so return the path otherwise the default""" - + if os.path.isdir(path): return path else: return default - + ############################################################################ def get_Gdir(self, Pdir=None, symfact=None): """get the list of Gdirectory if not yet saved.""" - + if hasattr(self, "Gdirs") and self.Gdirs: if self.me_dir in self.Gdirs[0]: if Pdir is None: @@ -6000,8 +6000,8 @@ def get_Gdir(self, Pdir=None, symfact=None): Pdirs = self.get_Pdir() - Gdirs = {self.me_dir:[]} - mfactors = {} + Gdirs = {self.me_dir:[]} + mfactors = {} for P in Pdirs: Gdirs[P] = [] #for the next line do not use P, since in readonly mode it might not have symfact @@ -6012,7 +6012,7 @@ def get_Gdir(self, Pdir=None, symfact=None): mfactors[pjoin(P, "G%s" % tag)] = mfactor self.Gdirs = (Gdirs, mfactors) return self.get_Gdir(Pdir, symfact=symfact) - + ############################################################################ def set_run_name(self, name, tag=None, level='parton', reload_card=False, allow_new_tag=True): @@ -6030,8 +6030,8 @@ def get_last_tag(self, level): tagRun = self.results[self.run_name][i] if tagRun.pythia or tagRun.shower or tagRun.pythia8 : return tagRun['tag'] - - + + # when are we force to change the tag new_run:previous run requiring changes upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','madanalysis5_hadron','madanalysis5_parton', 'rivet'], 'pythia': ['pythia','pgs','delphes','madanalysis5_hadron'], @@ -6044,7 +6044,7 @@ def get_last_tag(self, level): 'syscalc':[], 'rivet':['rivet']} - if name == self.run_name: + if name == self.run_name: if reload_card: run_card = pjoin(self.me_dir, 'Cards','run_card.dat') self.run_card = banner_mod.RunCard(run_card) @@ -6064,13 +6064,13 @@ def get_last_tag(self, level): break return get_last_tag(self, level) - + # save/clean previous run if self.run_name: self.store_result() # store new name self.run_name = name - + new_tag = False # First call for this run -> set the banner self.banner = 
banner_mod.recover_banner(self.results, level, name) @@ -6079,8 +6079,8 @@ def get_last_tag(self, level): else: # Read run_card run_card = pjoin(self.me_dir, 'Cards','run_card.dat') - self.run_card = banner_mod.RunCard(run_card) - + self.run_card = banner_mod.RunCard(run_card) + if tag: self.run_card['run_tag'] = tag new_tag = True @@ -6093,7 +6093,7 @@ def get_last_tag(self, level): self.results.update('add run %s' % name, 'all', makehtml=False) else: for tag in upgrade_tag[level]: - + if getattr(self.results[self.run_name][-1], tag): # LEVEL is already define in the last tag -> need to switch tag tag = self.get_available_tag() @@ -6103,8 +6103,8 @@ def get_last_tag(self, level): if not new_tag: # We can add the results to the current run tag = self.results[self.run_name][-1]['tag'] - self.run_card['run_tag'] = tag # ensure that run_tag is correct - + self.run_card['run_tag'] = tag # ensure that run_tag is correct + if allow_new_tag and (name in self.results and not new_tag): self.results.def_current(self.run_name) else: @@ -6113,15 +6113,15 @@ def get_last_tag(self, level): self.run_tag = self.run_card['run_tag'] return get_last_tag(self, level) - - - + + + ############################################################################ def check_nb_events(self): - """Find the number of event in the run_card, and check that this is not + """Find the number of event in the run_card, and check that this is not too large""" - + nb_event = int(self.run_card['nevents']) if nb_event > 1000000: logger.warning("Attempting to generate more than 1M events") @@ -6133,20 +6133,20 @@ def check_nb_events(self): return - - ############################################################################ + + ############################################################################ def update_random(self): """ change random number""" - + self.random += 3 if self.random > 30081*30081: # can't use too big random number raise MadGraph5Error('Random seed too large ' + str(self.random) + ' 
> 30081*30081') - if self.run_card['python_seed'] == -2: + if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.random) + random.seed(self.random) random.mg_seedset = self.random - + ############################################################################ def save_random(self): """save random number in appropirate file""" @@ -6155,14 +6155,14 @@ def save_random(self): fsock.writelines('r=%s\n' % self.random) def do_quit(self, *args, **opts): - + return common_run.CommonRunCmd.do_quit(self, *args, **opts) #return CmdExtended.do_quit(self, *args, **opts) - + ############################################################################ def treat_CKKW_matching(self): """check for ckkw""" - + lpp1 = self.run_card['lpp1'] lpp2 = self.run_card['lpp2'] e1 = self.run_card['ebeam1'] @@ -6170,19 +6170,19 @@ def treat_CKKW_matching(self): pd = self.run_card['pdlabel'] lha = self.run_card['lhaid'] xq = self.run_card['xqcut'] - translation = {'e1': e1, 'e2':e2, 'pd':pd, + translation = {'e1': e1, 'e2':e2, 'pd':pd, 'lha':lha, 'xq':xq} if lpp1 or lpp2: - # Remove ':s from pd + # Remove ':s from pd if pd.startswith("'"): pd = pd[1:] if pd.endswith("'"): - pd = pd[:-1] + pd = pd[:-1] if xq >2 or xq ==2: xq = 2 - + # find data file if pd == "lhapdf": issudfile = 'lib/issudgrid-%(e1)s-%(e2)s-%(pd)s-%(lha)s-%(xq)s.dat.gz' @@ -6192,9 +6192,9 @@ def treat_CKKW_matching(self): issudfile = pjoin(self.webbin, issudfile % translation) else: issudfile = pjoin(self.me_dir, issudfile % translation) - + logger.info('Sudakov grid file: %s' % issudfile) - + # check that filepath exists if os.path.exists(issudfile): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') @@ -6203,20 +6203,20 @@ def treat_CKKW_matching(self): msg = 'No sudakov grid file for parameter choice. Start to generate it. 
This might take a while' logger.info(msg) self.update_status('GENERATE SUDAKOV GRID', level='parton') - + for i in range(-2,6): - self.cluster.submit('%s/gensudgrid ' % self.dirbin, + self.cluster.submit('%s/gensudgrid ' % self.dirbin, argument = ['%d'%i], - cwd=self.me_dir, + cwd=self.me_dir, stdout=open(pjoin(self.me_dir, 'gensudgrid%s.log' % i),'w')) self.monitor() for i in range(-2,6): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') os.system('cat %s/gensudgrid%s.log >> %s' % (self.me_dir, path)) misc.gzip(path, stdout=issudfile) - + ############################################################################ - def create_root_file(self, input='unweighted_events.lhe', + def create_root_file(self, input='unweighted_events.lhe', output='unweighted_events.root' ): """create the LHE root file """ self.update_status('Creating root files', level='parton') @@ -6233,14 +6233,14 @@ def create_root_file(self, input='unweighted_events.lhe', totar = False torm = True input = input[:-3] - + try: - misc.call(['%s/ExRootLHEFConverter' % eradir, + misc.call(['%s/ExRootLHEFConverter' % eradir, input, output], cwd=pjoin(self.me_dir, 'Events')) except Exception: logger.warning('fail to produce Root output [problem with ExRootAnalysis]') - + if totar: if os.path.exists('%s.gz' % input): try: @@ -6251,13 +6251,13 @@ def create_root_file(self, input='unweighted_events.lhe', misc.gzip(input) if torm: os.remove(input) - + def run_syscalc(self, mode='parton', event_path=None, output=None): - """create the syscalc output""" + """create the syscalc output""" if self.run_card['use_syst'] not in self.true: return - + scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): return @@ -6265,12 +6265,12 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): if self.run_card['event_norm'] != 'sum': logger.critical('SysCalc works only when event_norm is on \'sum\'.') return - logger.info('running SysCalc on mode %s' % mode) - + logger.info('running 
SysCalc on mode %s' % mode) + # Restore the old default for SysCalc+PY6 if self.run_card['sys_matchscale']=='auto': self.run_card['sys_matchscale'] = "30 50" - + # Check that all pdfset are correctly installed lhaid = [self.run_card.get_lhapdf_id()] if '&&' in self.run_card['sys_pdf']: @@ -6285,20 +6285,20 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.debug(str(error)) logger.warning('Systematic computation requires lhapdf to run. Bypass SysCalc') return - + # Copy all the relevant PDF sets [self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid] - + to_syscalc={'sys_scalefact': self.run_card['sys_scalefact'], 'sys_alpsfact': self.run_card['sys_alpsfact'], 'sys_matchscale': self.run_card['sys_matchscale'], 'sys_scalecorrelation': self.run_card['sys_scalecorrelation'], 'sys_pdf': self.run_card['sys_pdf']} - - tag = self.run_card['run_tag'] + + tag = self.run_card['run_tag'] card = pjoin(self.me_dir, 'bin','internal', 'syscalc_card.dat') template = open(pjoin(self.me_dir, 'bin','internal', 'syscalc_template.dat')).read() - + if '&&' in to_syscalc['sys_pdf']: to_syscalc['sys_pdf'] = to_syscalc['sys_pdf'].split('#',1)[0].replace('&&',' \n ') else: @@ -6311,8 +6311,8 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): new.append(d) else: new[-1] += ' %s' % d - to_syscalc['sys_pdf'] = '\n'.join(new) - + to_syscalc['sys_pdf'] = '\n'.join(new) + if to_syscalc['sys_pdf'].lower() in ['', 'f', 'false', 'none', '.false.']: to_syscalc['sys_pdf'] = '' if to_syscalc['sys_alpsfact'].lower() in ['', 'f', 'false', 'none','.false.']: @@ -6320,17 +6320,17 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): - + # check if the scalecorrelation parameter is define: if not 'sys_scalecorrelation' in self.run_card: self.run_card['sys_scalecorrelation'] = -1 open(card,'w').write(template % self.run_card) - + if not os.path.exists(card): return False - - + + event_dir = pjoin(self.me_dir, 'Events') if not 
event_path: @@ -6353,19 +6353,19 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): raise SysCalcError('qcut value for sys_matchscale lower than qcut in pythia_card. Bypass syscalc') if float(value) < xqcut: raise SysCalcError('qcut value for sys_matchscale lower than xqcut in run_card. Bypass syscalc') - - + + event_path = pjoin(event_dir,'syst.dat') output = pjoin(event_dir, 'syscalc.dat') else: raise self.InvalidCmd('Invalid mode %s' % mode) - + if not os.path.exists(event_path): if os.path.exists(event_path+'.gz'): misc.gunzip(event_path+'.gz') else: raise SysCalcError('Events file %s does not exits' % event_path) - + self.update_status('Calculating systematics for %s level' % mode, level = mode.lower()) try: proc = misc.call([os.path.join(scdir, 'sys_calc'), @@ -6374,7 +6374,7 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): stderr = subprocess.STDOUT, cwd=event_dir) # Wait 5 s to make sure file is finished writing - time.sleep(5) + time.sleep(5) except OSError as error: logger.error('fail to run syscalc: %s. Please check that SysCalc is correctly installed.' % error) else: @@ -6382,11 +6382,11 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.warning('SysCalc Failed. Please read the associate log to see the reason. 
Did you install the associate PDF set?') elif mode == 'parton': files.mv(output, event_path) - + self.update_status('End syscalc for %s level' % mode, level = mode.lower(), makehtml=False) - - return True + + return True action_switcher = AskRun @@ -6399,23 +6399,23 @@ def ask_run_configuration(self, mode=None, args=[]): passing_cmd.append('reweight=ON') if '-M' in args or '--madspin' in args: passing_cmd.append('madspin=ON') - + switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, mode=mode, line_args=args, force=self.force, first_cmd=passing_cmd, return_instance=True) # - self.switch = switch # store the value of the switch for plugin purpose + self.switch = switch # store the value of the switch for plugin purpose if 'dynamical' in switch: mode = 'auto' - + # Now that we know in which mode we are check that all the card #exists (copy default if needed) - + cards = ['param_card.dat', 'run_card.dat'] if switch['shower'] == 'Pythia6': cards.append('pythia_card.dat') if switch['shower'] == 'Pythia8': - cards.append('pythia8_card.dat') + cards.append('pythia8_card.dat') if switch['detector'] in ['PGS','DELPHES+PGS']: cards.append('pgs_card.dat') if switch['detector'] in ['Delphes', 'DELPHES+PGS']: @@ -6438,29 +6438,29 @@ def ask_run_configuration(self, mode=None, args=[]): cards.append('rivet_card.dat') self.keep_cards(cards) - + first_cmd = cmd_switch.get_cardcmd() - + if os.path.isfile(pjoin(self.me_dir,'Cards','MadLoopParams.dat')): cards.append('MadLoopParams.dat') - + if self.force: self.check_param_card(pjoin(self.me_dir,'Cards','param_card.dat' )) return switch - + if 'dynamical' in switch and switch['dynamical']: self.ask_edit_cards(cards, plot=False, mode='auto', first_cmd=first_cmd) else: self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) return switch - + ############################################################################ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None): """Ask the 
question when launching pythia""" - + pythia_suffix = '' if pythia_version==6 else '%d'%pythia_version - + available_mode = ['0', '1'] if pythia_version==6: available_mode.append('2') @@ -6485,10 +6485,10 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = self.ask(question, '0', options) elif not mode: mode = 'auto' - + if mode.isdigit(): mode = name[mode] - + auto = False if mode == 'auto': auto = True @@ -6497,7 +6497,7 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = 'pgs' elif os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')): mode = 'delphes' - else: + else: mode = 'pythia%s'%pythia_suffix logger.info('Will run in mode %s' % mode) # Now that we know in which mode we are check that all the card @@ -6513,15 +6513,15 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) cards.append('delphes_trigger.dat') self.keep_cards(cards, ignore=['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat', 'plot_card.dat']) - + if self.force: return mode - + if not banner: banner = self.banner - + if auto: - self.ask_edit_cards(cards, from_banner=['param', 'run'], + self.ask_edit_cards(cards, from_banner=['param', 'run'], mode='auto', plot=(pythia_version==6), banner=banner ) else: @@ -6529,12 +6529,12 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) plot=(pythia_version==6), banner=banner) return mode - + #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmdShell(MadEventCmd, cmd.CmdShell): - """The command line processor of MadGraph""" + """The command line processor of MadGraph""" @@ -6548,11 +6548,11 @@ class SubProcesses(object): @classmethod def clean(cls): cls.name_to_pdg = {} - + @staticmethod def get_subP(me_dir): """return the list of Subprocesses""" - + out = [] for 
line in open(pjoin(me_dir,'SubProcesses', 'subproc.mg')): if not line: @@ -6560,9 +6560,9 @@ def get_subP(me_dir): name = line.strip() if os.path.exists(pjoin(me_dir, 'SubProcesses', name)): out.append(pjoin(me_dir, 'SubProcesses', name)) - + return out - + @staticmethod @@ -6623,9 +6623,9 @@ def get_subP_ids(path): particles = re.search("/([\d,-]+)/", line) all_ids.append([int(p) for p in particles.group(1).split(',')]) return all_ids - - -#=============================================================================== + + +#=============================================================================== class GridPackCmd(MadEventCmd): """The command for the gridpack --Those are not suppose to be use interactively--""" @@ -6639,7 +6639,7 @@ def __init__(self, me_dir = None, nb_event=0, seed=0, gran=-1, *completekey, **s self.random = seed self.random_orig = self.random self.granularity = gran - + self.options['automatic_html_opening'] = False #write the grid_card.dat on disk self.nb_event = int(nb_event) @@ -6680,7 +6680,7 @@ def write_RunWeb(self, me_dir): def write_gridcard(self, nb_event, seed, gran): """write the grid_card.dat file at appropriate location""" - + # first try to write grid_card within the gridpack. 
print("WRITE GRIDCARD", self.me_dir) if self.readonly: @@ -6689,35 +6689,35 @@ def write_gridcard(self, nb_event, seed, gran): fsock = open('grid_card.dat','w') else: fsock = open(pjoin(self.me_dir, 'Cards', 'grid_card.dat'),'w') - + gridpackcard = banner_mod.GridpackCard() gridpackcard['GridRun'] = True gridpackcard['gevents'] = nb_event gridpackcard['gseed'] = seed gridpackcard['ngran'] = gran - + gridpackcard.write(fsock) ############################################################################ def get_Pdir(self): """get the list of Pdirectory if not yet saved.""" - + if hasattr(self, "Pdirs"): if self.me_dir in self.Pdirs[0]: return self.Pdirs - + if not self.readonly: - self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) + self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] else: - self.Pdirs = [l.strip() - for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + self.Pdirs = [l.strip() + for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] + return self.Pdirs - + def prepare_local_dir(self): """create the P directory structure in the local directory""" - + if not self.readonly: os.chdir(self.me_dir) else: @@ -6726,7 +6726,7 @@ def prepare_local_dir(self): os.mkdir(p) files.cp(pjoin(self.me_dir,'SubProcesses',p,'symfact.dat'), pjoin(p, 'symfact.dat')) - + def launch(self, nb_event, seed): """ launch the generation for the grid """ @@ -6742,13 +6742,13 @@ def launch(self, nb_event, seed): if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(seed) + random.seed(seed) random.mg_seedset = seed elif self.run_card['python_seed'] > 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] # 2) Run the refine for the grid 
self.update_status('Generating Events', level=None) #misc.call([pjoin(self.me_dir,'bin','refine4grid'), @@ -6767,70 +6767,70 @@ def launch(self, nb_event, seed): self.exec_cmd('decay_events -from_cards', postcmd=False) elif self.run_card['use_syst'] and self.run_card['systematics_program'] == 'systematics': self.options['nb_core'] = 1 - self.exec_cmd('systematics %s --from_card' % + self.exec_cmd('systematics %s --from_card' % pjoin('Events', self.run_name, 'unweighted_events.lhe.gz'), postcmd=False,printcmd=False) - + def refine4grid(self, nb_event): """Special refine for gridpack run.""" self.nb_refine += 1 - + precision = nb_event self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) - + # initialize / remove lhapdf mode # self.configure_directory() # All this has been done before self.cluster_mode = 0 # force single machine # Store seed in randinit file, to be read by ranmar.f self.save_random() - + self.update_status('Refine results to %s' % precision, level=None) logger.info("Using random number seed offset = %s" % self.random) refine_opt = {'err_goal': nb_event, 'split_channels': False, - 'ngran':self.granularity, 'readonly': self.readonly} + 'ngran':self.granularity, 'readonly': self.readonly} x_improve = gen_ximprove.gen_ximprove_gridpack(self, refine_opt) x_improve.launch() # create the ajob for the refinment and run those! - self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack - - + self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack + + #bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) #print 'run combine!!!' 
#combine_runs.CombineRuns(self.me_dir) - + return #update html output Presults = sum_html.collect_result(self) cross, error = Presults.xsec, Presults.xerru self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + #self.update_status('finish refine', 'parton', makehtml=False) #devnull.close() - - - + + + return self.total_jobs = 0 - subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if + subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if P.startswith('P') and os.path.isdir(pjoin(self.me_dir,'SubProcesses', P))] devnull = open(os.devnull, 'w') for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) # clean previous run for match in misc.glob('*ajob*', Pdir): if os.path.basename(match)[:4] in ['ajob', 'wait', 'run.', 'done']: os.remove(pjoin(Pdir, match)) - + logfile = pjoin(Pdir, 'gen_ximprove.log') misc.call([pjoin(bindir, 'gen_ximprove')], @@ -6840,40 +6840,40 @@ def refine4grid(self, nb_event): if os.path.exists(pjoin(Pdir, 'ajob1')): alljobs = misc.glob('ajob*', Pdir) - nb_tot = len(alljobs) + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) if os.path.exists(pjoin(self.me_dir,'error')): self.monitor(html=True) raise MadEventError('Error detected in dir %s: %s' % \ (Pdir, open(pjoin(self.me_dir,'error')).read())) - self.monitor(run_type='All job submitted for refine number %s' % + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except 
Exception: pass - + bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) combine_runs.CombineRuns(self.me_dir) - + #update html output cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + self.update_status('finish refine', 'parton', makehtml=False) devnull.close() def do_combine_events(self, line): - """Advanced commands: Launch combine events""" + """Advanced commands: Launch combine events""" if self.readonly: outdir = 'Events' @@ -6895,17 +6895,17 @@ def do_combine_events(self, line): self.banner.add_generation_info(self.results.current['cross'], self.run_card['nevents']) if not hasattr(self, 'random_orig'): self.random_orig = 0 self.banner.change_seed(self.random_orig) - - + + if not os.path.exists(pjoin(outdir, self.run_name)): os.mkdir(pjoin(outdir, self.run_name)) - self.banner.write(pjoin(outdir, self.run_name, + self.banner.write(pjoin(outdir, self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -6915,7 +6915,7 @@ def do_combine_events(self, line): if os.path.exists(pjoin(Gdir, 'events.lhe')): result = sum_html.OneResult('') result.read_results(pjoin(Gdir, 'results.dat')) - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec')*gscalefact[Gdir], result.get('xerru')*gscalefact[Gdir], result.get('axsec')*gscalefact[Gdir] @@ -6924,7 +6924,7 @@ def do_combine_events(self, line): sum_xsec += result.get('xsec')*gscalefact[Gdir] sum_xerru.append(result.get('xerru')*gscalefact[Gdir]) sum_axsec += result.get('axsec')*gscalefact[Gdir] - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(outdir, 
self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.nb_event) @@ -6933,26 +6933,26 @@ def do_combine_events(self, line): AllEvent.add(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() - + self.banner.add_generation_info(sum_xsec, self.nb_event) nb_event = AllEvent.unweight(pjoin(outdir, self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.nb_event, log_level=logging.DEBUG, normalization=self.run_card['event_norm'], proc_charac=self.proc_characteristic) - - + + if partials: for i in range(partials): try: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) self.banner.add_generation_info(sum_xsec, nb_event) if self.run_card['bias_module'].lower() not in ['dummy', 'none']: @@ -6961,7 +6961,7 @@ def do_combine_events(self, line): class MadLoopInitializer(object): """ A container class for the various methods for initializing MadLoop. It is - placed in MadEventInterface because it is used by Madevent for loop-induced + placed in MadEventInterface because it is used by Madevent for loop-induced simulations. 
""" @staticmethod @@ -6974,7 +6974,7 @@ def make_and_run(dir_name,checkRam=False): if os.path.isfile(pjoin(dir_name,'check')): os.remove(pjoin(dir_name,'check')) os.remove(pjoin(dir_name,'check_sa.o')) - os.remove(pjoin(dir_name,'loop_matrix.o')) + os.remove(pjoin(dir_name,'loop_matrix.o')) # Now run make devnull = open(os.devnull, 'w') start=time.time() @@ -6996,7 +6996,7 @@ def make_and_run(dir_name,checkRam=False): stdout=devnull, stderr=devnull, close_fds=True) try: ptimer.execute() - #poll as often as possible; otherwise the subprocess might + #poll as often as possible; otherwise the subprocess might # "sneak" in some extra memory usage while you aren't looking # Accuracy of .2 seconds is enough for the timing. while ptimer.poll(): @@ -7028,7 +7028,7 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, If mu_r > 0.0, then the renormalization constant value will be hardcoded directly in check_sa.f, if is is 0 it will be set to Sqrt(s) and if it is < 0.0 the value in the param_card.dat is used. - If the split_orders target (i.e. the target squared coupling orders for + If the split_orders target (i.e. the target squared coupling orders for the computation) is != -1, it will be changed in check_sa.f via the subroutine CALL SET_COUPLINGORDERS_TARGET(split_orders).""" @@ -7043,12 +7043,12 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, file_path = pjoin(directories[0],'check_sa.f') if not os.path.isfile(file_path): raise MadGraph5Error('Could not find the location of check_sa.f'+\ - ' from the specified path %s.'%str(file_path)) + ' from the specified path %s.'%str(file_path)) file = open(file_path, 'r') check_sa = file.read() file.close() - + file = open(file_path, 'w') check_sa = re.sub(r"READPS = \S+\)","READPS = %s)"%('.TRUE.' 
if read_ps \ else '.FALSE.'), check_sa) @@ -7064,42 +7064,42 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, (("%.17e"%mu_r).replace('e','d')),check_sa) elif mu_r < 0.0: check_sa = re.sub(r"MU_R=SQRTS","",check_sa) - + if split_orders > 0: check_sa = re.sub(r"SET_COUPLINGORDERS_TARGET\(-?\d+\)", - "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) - + "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) + file.write(check_sa) file.close() - @staticmethod + @staticmethod def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ req_files = ['HelFilter.dat','LoopFilter.dat'], attempts = [4,15]): - """ Run the initialization of the process in 'run_dir' with success + """ Run the initialization of the process in 'run_dir' with success characterized by the creation of the files req_files in this directory. The directory containing the driving source code 'check_sa.f'. - The list attempt gives the successive number of PS points the + The list attempt gives the successive number of PS points the initialization should be tried with before calling it failed. Returns the number of PS points which were necessary for the init. Notice at least run_dir or SubProc_dir must be provided. A negative attempt number given in input means that quadprec will be forced for initialization.""" - + # If the user does not want detailed info, then set the dictionary # to a dummy one. 
if infos is None: infos={} - + if SubProc_dir is None and run_dir is None: raise MadGraph5Error('At least one of [SubProc_dir,run_dir] must'+\ ' be provided in run_initialization.') - + # If the user does not specify where is check_sa.f, then it is assumed # to be one levels above run_dir if SubProc_dir is None: SubProc_dir = os.path.abspath(pjoin(run_dir,os.pardir)) - + if run_dir is None: directories =[ dir for dir in misc.glob('P[0-9]*', SubProc_dir) if os.path.isdir(dir) ] @@ -7109,7 +7109,7 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find a valid running directory'+\ ' in %s.'%str(SubProc_dir)) - # Use the presence of the file born_matrix.f to decide if it is a + # Use the presence of the file born_matrix.f to decide if it is a # loop-induced process or not. It's not crucial, but just that because # of the dynamic adjustment of the ref scale used for deciding what are # the zero contributions, more points are neeeded for loop-induced. @@ -7128,9 +7128,9 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ %MLCardPath) else: - MLCard = banner_mod.MadLoopParam(MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) MLCard_orig = banner_mod.MadLoopParam(MLCard) - + # Make sure that LoopFilter really is needed. 
if not MLCard['UseLoopFilter']: try: @@ -7153,11 +7153,11 @@ def need_init(): proc_prefix+fname)) for fname in my_req_files]) or \ not os.path.isfile(pjoin(run_dir,'check')) or \ not os.access(pjoin(run_dir,'check'), os.X_OK) - + # Check if this is a process without born by checking the presence of the # file born_matrix.f is_loop_induced = os.path.exists(pjoin(run_dir,'born_matrix.f')) - + # For loop induced processes, always attempt quadruple precision if # double precision attempts fail and the user didn't specify himself # quadruple precision initializations attempts @@ -7166,11 +7166,11 @@ def need_init(): use_quad_prec = 1 curr_attempt = 1 - MLCard.set('WriteOutFilters',True) - + MLCard.set('WriteOutFilters',True) + while to_attempt!=[] and need_init(): curr_attempt = to_attempt.pop() - # if the attempt is a negative number it means we must force + # if the attempt is a negative number it means we must force # quadruple precision at initialization time if curr_attempt < 0: use_quad_prec = -1 @@ -7183,11 +7183,11 @@ def need_init(): MLCard.set('ZeroThres',1e-9) # Plus one because the filter are written on the next PS point after curr_attempt = abs(curr_attempt+1) - MLCard.set('MaxAttempts',curr_attempt) + MLCard.set('MaxAttempts',curr_attempt) MLCard.write(pjoin(SubProc_dir,'MadLoopParams.dat')) # initialization is performed. 
- MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, + MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, npoints = curr_attempt) compile_time, run_time, ram_usage = \ MadLoopInitializer.make_and_run(run_dir) @@ -7200,7 +7200,7 @@ def need_init(): infos['Process_compilation']==None: infos['Process_compilation'] = compile_time infos['Initialization'] = run_time - + MLCard_orig.write(pjoin(SubProc_dir,'MadLoopParams.dat')) if need_init(): return None @@ -7219,8 +7219,8 @@ def need_init(ML_resources_path, proc_prefix, r_files): MLCardPath = pjoin(proc_dir,'SubProcesses','MadLoopParams.dat') if not os.path.isfile(MLCardPath): raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ - %MLCardPath) - MLCard = banner_mod.MadLoopParam(MLCardPath) + %MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) req_files = ['HelFilter.dat','LoopFilter.dat'] # Make sure that LoopFilter really is needed. @@ -7234,9 +7234,9 @@ def need_init(ML_resources_path, proc_prefix, r_files): req_files.remove('HelFilter.dat') except ValueError: pass - + for v_folder in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)): + '%s*'%subproc_prefix)): # Make sure it is a valid MadLoop directory if not os.path.isdir(v_folder) or not os.path.isfile(\ pjoin(v_folder,'loop_matrix.f')): @@ -7247,7 +7247,7 @@ def need_init(ML_resources_path, proc_prefix, r_files): if need_init(pjoin(proc_dir,'SubProcesses','MadLoop5_resources'), proc_prefix, req_files): return True - + return False @staticmethod @@ -7265,7 +7265,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['treatCardsLoopNoInit'], cwd=pjoin(proc_dir,'Source')) else: interface.do_treatcards('all --no_MadLoopInit') - + # First make sure that IREGI and CUTTOOLS are compiled if needed if os.path.exists(pjoin(proc_dir,'Source','CutTools')): misc.compile(arg=['libcuttools'],cwd=pjoin(proc_dir,'Source')) @@ -7273,8 +7273,8 @@ def init_MadLoop(proc_dir, 
n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['libiregi'],cwd=pjoin(proc_dir,'Source')) # Then make sure DHELAS and MODEL are compiled misc.compile(arg=['libmodel'],cwd=pjoin(proc_dir,'Source')) - misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) - + misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) + # Now initialize the MadLoop outputs logger.info('Initializing MadLoop loop-induced matrix elements '+\ '(this can take some time)...') @@ -7283,7 +7283,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, if MG_options: if interface and hasattr(interface, 'cluster') and isinstance(interface.cluster, cluster.MultiCore): mcore = interface.cluster - else: + else: mcore = cluster.MultiCore(**MG_options) else: mcore = cluster.onecore @@ -7294,10 +7294,10 @@ def run_initialization_wrapper(run_dir, infos, attempts): run_dir=run_dir, infos=infos) else: n_PS = MadLoopInitializer.run_initialization( - run_dir=run_dir, infos=infos, attempts=attempts) + run_dir=run_dir, infos=infos, attempts=attempts) infos['nPS'] = n_PS return 0 - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return @@ -7307,21 +7307,21 @@ def wait_monitoring(Idle, Running, Done): init_info = {} # List all virtual folders while making sure they are valid MadLoop folders VirtualFolders = [f for f in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)) if (os.path.isdir(f) or + '%s*'%subproc_prefix)) if (os.path.isdir(f) or os.path.isfile(pjoin(f,'loop_matrix.f')))] logger.debug("Now Initializing MadLoop matrix element in %d folder%s:"%\ (len(VirtualFolders),'s' if len(VirtualFolders)>1 else '')) - logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in + logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in VirtualFolders)) for v_folder in VirtualFolders: init_info[v_folder] = {} - + # We try all multiples of n_PS from 1 to max_mult, first in DP and then # in QP before 
giving up, or use default values if n_PS is None. max_mult = 3 if n_PS is None: # Then use the default list of number of PS points to try - mcore.submit(run_initialization_wrapper, + mcore.submit(run_initialization_wrapper, [pjoin(v_folder), init_info[v_folder], None]) else: # Use specific set of PS points @@ -7348,8 +7348,8 @@ def wait_monitoring(Idle, Running, Done): '%d PS points (%s), in %.3g(compil.) + %.3g(init.) secs.'%( abs(init['nPS']),'DP' if init['nPS']>0 else 'QP', init['Process_compilation'],init['Initialization'])) - - logger.info('MadLoop initialization finished.') + + logger.info('MadLoop initialization finished.') AskforEditCard = common_run.AskforEditCard @@ -7364,16 +7364,16 @@ def wait_monitoring(Idle, Running, Done): import os import optparse - # Get the directory of the script real path (bin) - # and add it to the current PYTHONPATH + # Get the directory of the script real path (bin) + # and add it to the current PYTHONPATH #root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))) sys.path.insert(0, root_path) - class MyOptParser(optparse.OptionParser): + class MyOptParser(optparse.OptionParser): class InvalidOption(Exception): pass def error(self, msg=''): raise MyOptParser.InvalidOption(msg) - # Write out nice usage message if called with -h or --help + # Write out nice usage message if called with -h or --help usage = "usage: %prog [options] [FILE] " parser = MyOptParser(usage=usage) parser.add_option("-l", "--logging", default='INFO', @@ -7384,7 +7384,7 @@ def error(self, msg=''): help='force to launch debug mode') parser_error = '' done = False - + for i in range(len(sys.argv)-1): try: (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i]) @@ -7394,7 +7394,7 @@ def error(self, msg=''): else: args += sys.argv[len(sys.argv)-i:] if not done: - # raise correct error: + # raise correct error: try: (options, args) = parser.parse_args() except MyOptParser.InvalidOption as error: @@ -7407,8 +7407,8 @@ 
def error(self, msg=''): import subprocess import logging import logging.config - # Set logging level according to the logging level given by options - #logging.basicConfig(level=vars(logging)[options.logging]) + # Set logging level according to the logging level given by options + #logging.basicConfig(level=vars(logging)[options.logging]) import internal import internal.coloring_logging # internal.file = XXX/bin/internal/__init__.py @@ -7431,13 +7431,13 @@ def error(self, msg=''): raise pass - # Call the cmd interface main loop + # Call the cmd interface main loop try: if args: # a single command is provided if '--web' in args: - i = args.index('--web') - args.pop(i) + i = args.index('--web') + args.pop(i) cmd_line = MadEventCmd(me_dir, force_run=True) else: cmd_line = MadEventCmdShell(me_dir, force_run=True) @@ -7457,13 +7457,13 @@ def error(self, msg=''): pass - - - - - - - - + + + + + + + + diff --git a/epochX/cudacpp/gg_tt.mad/src/cudacpp_src.mk b/epochX/cudacpp/gg_tt.mad/src/cudacpp_src.mk index d4cc628aec..b4e446bc45 100644 --- a/epochX/cudacpp/gg_tt.mad/src/cudacpp_src.mk +++ b/epochX/cudacpp/gg_tt.mad/src/cudacpp_src.mk @@ -1,12 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) is used in the Subprocess and src directories - -THISMK = $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
#------------------------------------------------------------------------------- @@ -16,165 +11,24 @@ SHELL := /bin/bash #------------------------------------------------------------------------------- -#=== Configure common compiler flags for CUDA and C++ - -INCFLAGS = -I. -OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here - -#------------------------------------------------------------------------------- - #=== Configure the C++ compiler -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) $(USE_NVTX) -fPIC -Wall -Wshadow -Wextra +include ../Source/make_opts + +MG_CXXFLAGS += -fPIC -I. $(USE_NVTX) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS+= -ffast-math # see issue #117 +MG_CXXFLAGS += -ffast-math # see issue #117 endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY # Note: AR, CXX and FC are implicitly defined if not set externally # See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html ###RANLIB = ranlib -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -LDFLAGS = -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -LDFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler (note: NVCC is already exported including ccache) - -###$(info NVCC=$(NVCC)) - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ builds (note: NVCC is already exported including ccache) - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif - 
-#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for CUDA and C++ - -# Assuming uname is available, detect if architecture is PowerPC -UNAME_P := $(shell uname -p) - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # BUILD ERROR IF THIS ADDED IN SRC?! -else - ###AR=gcc-ar # needed by -flto - ###RANLIB=gcc-ranlib # needed by -flto - ###CXXFLAGS+= -flto # NB: build error from src/Makefile unless gcc-ar and gcc-ranlib are used - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -#------------------------------------------------------------------------------- - #=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the build flags appropriate to OMPFLAGS ###$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword 
registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -###$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -###$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -###$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -###$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - CXXFLAGS += -DMGONGPU_HAS_NO_CURAND -else ifneq ($(RNDGEN),hasCurand) - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- @@ -182,28 +36,18 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -###$(info Current directory is $(shell pwd)) -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIRREL = ../lib/$(BUILDDIR) - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR=1 is set)) -else - override BUILDDIR = . - override LIBDIRREL = ../lib - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) -endif -######$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) +# Build directory: +BUILDDIR := build.$(DIRTAG) +LIBDIRREL := ../lib/$(BUILDDIR) # Workaround for Mac #375 (I did not manage to fix rpath with @executable_path): use absolute paths for LIBDIR # (NB: this is quite ugly because it creates the directory if it does not exist - to avoid removing src by mistake) -UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Darwin) override LIBDIR = $(shell mkdir -p $(LIBDIRREL); cd $(LIBDIRREL); pwd) ifeq ($(wildcard $(LIBDIR)),) @@ -223,55 +67,35 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -# Target (and build options): debug -debug: OPTFLAGS = -g -O0 -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! 
-name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ #------------------------------------------------------------------------------- cxx_objects=$(addprefix $(BUILDDIR)/, Parameters_sm.o read_slha.o) -ifneq ($(NVCC),) +ifeq ($(AVX),cuda) +COMPILER=$(NVCC) cu_objects=$(addprefix $(BUILDDIR)/, Parameters_sm_cu.o) +else +COMPILER=$(CXX) +cu_objects= endif # Target (and build rules): common (src) library -ifneq ($(NVCC),) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) $(cu_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(NVCC) -shared -o $@ $(cxx_objects) $(cu_objects) $(LDFLAGS) -else -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(CXX) -shared -o $@ $(cxx_objects) $(LDFLAGS) -endif + mkdir -p $(LIBDIR) + $(COMPILER) -shared -o $@ $(cxx_objects) $(cu_objects) $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- @@ -279,19 +103,7 @@ endif .PHONY: clean clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) -else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe -endif - -cleanall: - @echo - $(MAKE) clean -f $(THISMK) - @echo - rm -rf $(LIBDIR)/build.* - rm -rf build.* + $(RM) -f ../lib/build.*/*.so + $(RM) -rf build.* #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_tt.mad/src/mgOnGpuCxtypes.h b/epochX/cudacpp/gg_tt.mad/src/mgOnGpuCxtypes.h index ca9a9f00c0..3290d314d6 100644 --- a/epochX/cudacpp/gg_tt.mad/src/mgOnGpuCxtypes.h +++ b/epochX/cudacpp/gg_tt.mad/src/mgOnGpuCxtypes.h @@ -21,10 +21,14 @@ // Complex type in cuda: thrust or cucomplex or cxsmpl #ifdef __CUDACC__ #if 
defined MGONGPU_CUCXTYPE_THRUST +#ifdef __CLANG__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" // for icpx2021/clang13 (https://stackoverflow.com/a/15864661) +#endif #include +#ifdef __CLANG__ #pragma clang diagnostic pop +#endif #elif defined MGONGPU_CUCXTYPE_CUCOMPLEX #include #elif not defined MGONGPU_CUCXTYPE_CXSMPL diff --git a/epochX/cudacpp/gg_tt.sa/src/cudacpp_src.mk b/epochX/cudacpp/gg_tt.sa/src/cudacpp_src.mk index d4cc628aec..c757875347 100644 --- a/epochX/cudacpp/gg_tt.sa/src/cudacpp_src.mk +++ b/epochX/cudacpp/gg_tt.sa/src/cudacpp_src.mk @@ -1,7 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. +# Further modified by: J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. #=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) #=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) 
is used in the Subprocess and src directories @@ -95,50 +95,52 @@ CXXFLAGS += $(OMPFLAGS) # Set the build flags appropriate to each AVX choice (example: "make AVX=none") # [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] # [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) +ifeq ($(NVCC),) + $(info AVX=$(AVX)) + ifeq ($(UNAME_P),ppc64le) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) + endif + else ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' 
are supported on ARM for the moment) + endif + else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + ifeq ($(AVX),none) + override AVXFLAGS = -march=x86-64 # no SIMD (see #588) + else ifeq ($(AVX),sse4) + override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # 
AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif endif + # For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? + CXXFLAGS+= $(AVXFLAGS) endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? -CXXFLAGS+= $(AVXFLAGS) # Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") ###$(info FPTYPE=$(FPTYPE)) @@ -182,11 +184,19 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +ifneq ($(NVCC),) + override DIRTAG = cuda_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +else + override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +endif # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +ifneq ($(NVCC),) + override TAG = cuda_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +else + override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +endif # Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 ###$(info Current directory is $(shell pwd)) @@ -223,35 +233,21 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so # Target (and build options): debug debug: OPTFLAGS = -g -O0 
debug: all.$(TAG) -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) - #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ @@ -278,20 +274,61 @@ endif # Target: clean the builds .PHONY: clean +BUILD_DIRS := $(wildcard build.*) +NUM_BUILD_DIRS := $(words $(BUILD_DIRS)) + clean: ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) +ifeq ($(NUM_BUILD_DIRS),1) + $(info USEBUILDDIR=1, only one src build directory found.) + rm -rf ../lib/$(BUILD_DIRS) + rm -rf $(BUILD_DIRS) +else ifeq ($(NUM_BUILD_DIRS),0) + $(error USEBUILDDIR=1, but no src build directories are found.) else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe + $(error Multiple src BUILDDIR's found! Use 'cleannone', 'cleansse4', 'cleanavx2', 'clean512y','clean512z', 'cleancuda' or 'cleanall'.) +endif +else + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + rm -f $(BUILDDIR)/*.o $(BUILDDIR)/*.exe endif cleanall: @echo - $(MAKE) clean -f $(THISMK) + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + rm -f $(BUILDDIR)/*.o $(BUILDDIR)/*.exe @echo - rm -rf $(LIBDIR)/build.* + rm -rf ../lib/build.* rm -rf build.* +# Target: clean different builds + +cleannone: + rm -rf ../lib/build.none_* + rm -rf build.none_* + +cleansse4: + rm -rf ../lib/build.sse4_* + rm -rf build.sse4_* + +cleanavx2: + rm -rf ../lib/build.avx2_* + rm -rf build.avx2_* + +clean512y: + rm -rf ../lib/build.512y_* + rm -rf build.512y_* + +clean512z: + rm -rf ../lib/build.512z_* + rm -rf build.512z_* + +cleancuda: + rm -rf ../lib/build.cuda_* + rm -rf build.cuda_* + +cleandir: + rm -f ./*.o ./*.exe + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_tt01g.mad/CODEGEN_mad_gg_tt01g_log.txt b/epochX/cudacpp/gg_tt01g.mad/CODEGEN_mad_gg_tt01g_log.txt index acacaf4036..0753dd223b 100644 --- a/epochX/cudacpp/gg_tt01g.mad/CODEGEN_mad_gg_tt01g_log.txt +++ 
b/epochX/cudacpp/gg_tt01g.mad/CODEGEN_mad_gg_tt01g_log.txt @@ -52,7 +52,7 @@ Note that you can still compile and run aMC@NLO with the built-in PDFs Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt import /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt01g.mg The import format was not given, so we guess it as command set stdout_level DEBUG @@ -62,7 +62,7 @@ generate g g > t t~ No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.0055658817291259766  +DEBUG: model prefixing takes 0.0053136348724365234  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -185,7 +185,7 @@ INFO: Generating Helas calls for process: g g > t t~ WEIGHTED<=2 @1 INFO: Processing color information for process: g g > t t~ @1 INFO: Creating files in directory P2_gg_ttxg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -202,7 +202,7 @@ INFO: Generating Feynman diagrams for Process: g g > t t~ g WEIGHTED<=3 @2 INFO: Finding symmetric diagrams for subprocess group gg_ttxg INFO: Creating files in directory P1_gg_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -217,15 +217,15 @@ INFO: Created files CPPProcess.h and CPPProcess.cc in directory ./. DEBUG: vector, subproc_group,self.opt['vector_size'] =  32 True 32 [export_v4.py at line 1872]  INFO: Generating Feynman diagrams for Process: g g > t t~ WEIGHTED<=2 @1 INFO: Finding symmetric diagrams for subprocess group gg_ttx -Generated helas calls for 2 subprocesses (19 diagrams) in 0.042 s -Wrote files for 46 helas calls in 0.245 s +Generated helas calls for 2 subprocesses (19 diagrams) in 0.043 s +Wrote files for 46 helas calls in 0.239 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 set of routines with options: P0 ALOHA: aloha creates VVVV3 set of routines with options: P0 ALOHA: aloha creates VVVV4 set of routines with options: P0 -ALOHA: aloha creates 5 routines in 0.321 s +ALOHA: aloha creates 5 routines in 0.328 s DEBUG: Entering PLUGIN_ProcessExporter.convert_model (create the model) [output.py at line 202]  ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines @@ -233,7 +233,7 @@ ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 set of routines with options: P0 ALOHA: aloha creates VVVV3 set of routines with options: P0 ALOHA: aloha creates VVVV4 set of routines with options: P0 -ALOHA: aloha creates 10 routines in 0.304 s +ALOHA: aloha creates 10 routines in 0.310 s VVV1 VVV1 FFV1 @@ -257,12 +257,14 @@ save configuration file to /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CO INFO: Use Fortran compiler gfortran INFO: Use c++ compiler g++ INFO: Generate web pages +DEBUG: standardise /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt01g/Source/make_opts (fix f2py3 and sort make_opts_variables) before applying patch.common DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt01g; patch -p4 -i 
/data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common patching file Source/genps.inc +patching file Source/make_opts patching file Source/makefile patching file SubProcesses/makefile +patching file bin/internal/banner.py patching file bin/internal/gen_ximprove.py -Hunk #1 succeeded at 391 (offset 6 lines). patching file bin/internal/madevent_interface.py DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt01g/SubProcesses/P1_gg_ttx; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f @@ -283,9 +285,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m2.697s -user 0m2.018s -sys 0m0.247s +real 0m2.288s +user 0m2.041s +sys 0m0.253s ************************************************************ * * * W E L C O M E to * @@ -311,7 +313,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt01g/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt treatcards run quit INFO: @@ -341,7 +343,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_tt01g/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". 
Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt treatcards param quit INFO: diff --git a/epochX/cudacpp/gg_tt01g.mad/Source/make_opts b/epochX/cudacpp/gg_tt01g.mad/Source/make_opts index e4b87ee6ad..435bed0dc7 100644 --- a/epochX/cudacpp/gg_tt01g.mad/Source/make_opts +++ b/epochX/cudacpp/gg_tt01g.mad/Source/make_opts @@ -1,7 +1,7 @@ DEFAULT_CPP_COMPILER=g++ DEFAULT_F2PY_COMPILER=f2py3 DEFAULT_F_COMPILER=gfortran -GLOBAL_FLAG=-O3 -ffast-math -fbounds-check +GLOBAL_FLAG=-O3 -ffast-math MACFLAG= MG5AMC_VERSION=SpecifiedByMG5aMCAtRunTime PYTHIA8_PATH=NotInstalled @@ -13,31 +13,53 @@ BIASLIBDIR=../../../lib/ BIASLIBRARY=libbias.$(libext) # Rest of the makefile -ifeq ($(origin FFLAGS),undefined) -FFLAGS= -w -fPIC -#FFLAGS+= -g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none -endif -FFLAGS += $(GLOBAL_FLAG) +#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) + +# Detect O/S kernel (Linux, Darwin...) +UNAME_S := $(shell uname -s) + +# Detect architecture (x86_64, ppc64le...) +UNAME_P := $(shell uname -p) + +#------------------------------------------------------------------------------- # REMOVE MACFLAG IF NOT ON MAC OR FOR F2PY -UNAME := $(shell uname -s) ifdef f2pymode MACFLAG= else -ifneq ($(UNAME), Darwin) +ifneq ($(UNAME_S), Darwin) MACFLAG= endif endif +############################################################ +# Default compiler flags +# To change optimisation level, override these as follows: +# make CXXFLAGS="-O0 -g" +# or export them as environment variables +# For debugging Fortran, one could e.g. 
use: +# FCFLAGS="-g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none" +############################################################ +FCFLAGS ?= $(GLOBAL_FLAG) -fbounds-check +CXXFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG +NVCCFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG -use_fast_math -lineinfo +LDFLAGS ?= $(STDLIB) -ifeq ($(origin CXXFLAGS),undefined) -CXXFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +ifneq ($(FFLAGS),) +# Madgraph used to use FFLAGS, so the user probably tries to change the flags specifically for madgraph: +FCFLAGS = $(FFLAGS) endif -ifeq ($(origin CFLAGS),undefined) -CFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +# Madgraph-specific flags: +WARNFLAGS = -Wall -Wshadow -Wextra +ifeq (,$(findstring -std=,$(CXXFLAGS))) +CXXSTANDARD= -std=c++17 endif +MG_FCFLAGS += -fPIC -w +MG_CXXFLAGS += -fPIC $(CXXSTANDARD) $(WARNFLAGS) $(MACFLAG) +MG_NVCCFLAGS += -fPIC $(CXXSTANDARD) --forward-unknown-to-host-compiler $(WARNFLAGS) +MG_LDFLAGS += $(MACFLAG) # Set FC unless it's defined by an environment variable ifeq ($(origin FC),default) @@ -49,45 +71,40 @@ endif # Increase the number of allowed charcters in a Fortran line ifeq ($(FC), ftn) -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler else VERS="$(shell $(FC) --version | grep ifort -i)" ifeq ($(VERS), "") -FFLAGS+= -ffixed-line-length-132 +MG_FCFLAGS += -ffixed-line-length-132 else -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler endif endif -UNAME := $(shell uname -s) -ifeq ($(origin LDFLAGS), undefined) -LDFLAGS=$(STDLIB) $(MACFLAG) -endif - # Options: dynamic, lhapdf # Option dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) dylibext=dylib else dylibext=so endif ifdef dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) libext=dylib -FFLAGS+= -fno-common -LDFLAGS += -bundle +MG_FCFLAGS += -fno-common +MG_LDFLAGS += -bundle define CREATELIB $(FC) -dynamiclib 
-undefined dynamic_lookup -o $(1) $(2) endef else libext=so -FFLAGS+= -fPIC -LDFLAGS += -shared +MG_FCFLAGS += -fPIC +MG_LDFLAGS += -shared define CREATELIB -$(FC) $(FFLAGS) $(LDFLAGS) -o $(1) $(2) +$(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MG_LDFLAGS) $(LDFLAGS) -o $(1) $(2) endef endif else @@ -101,17 +118,9 @@ endif # Option lhapdf ifneq ($(lhapdf),) -CXXFLAGS += $(shell $(lhapdf) --cppflags) +MG_CXXFLAGS += $(shell $(lhapdf) --cppflags) alfas_functions=alfas_functions_lhapdf llhapdf+= $(shell $(lhapdf) --cflags --libs) -lLHAPDF -# check if we need to activate c++11 (for lhapdf6.2) -ifeq ($(origin CXX),default) -ifeq ($lhapdfversion$lhapdfsubversion,62) -CXX=$(DEFAULT_CPP_COMPILER) -std=c++11 -else -CXX=$(DEFAULT_CPP_COMPILER) -endif -endif else alfas_functions=alfas_functions llhapdf= @@ -120,4 +129,207 @@ endif # Helper function to check MG5 version define CHECK_MG5AMC_VERSION python -c 'import re; from distutils.version import StrictVersion; print StrictVersion("$(MG5AMC_VERSION)") >= StrictVersion("$(1)") if re.match("^[\d\.]+$$","$(MG5AMC_VERSION)") else True;' -endef \ No newline at end of file +endef + +#------------------------------------------------------------------------------- + +# Set special cases for non-gcc/clang builds +# AVX below gets overridden from outside in architecture-specific builds +AVX ?= none +# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] +# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] +$(info AVX=$(AVX)) +ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + endif +else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for 
clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif +endif + +# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? +MG_CXXFLAGS+= $(AVXFLAGS) + +#------------------------------------------------------------------------------- + +#=== Configure the CUDA compiler if available + +# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) +# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below +ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside + $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") + override CUDA_HOME=disabled +endif + +# If CUDA_HOME is not set, try to set it from the location of nvcc +ifndef CUDA_HOME + CUDA_HOME = $(patsubst %bin/nvcc,%,$(shell which nvcc 2>/dev/null)) + $(info CUDA_HOME="$(CUDA_HOME)") +endif + +# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists +ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) + NVCC = $(CUDA_HOME)/bin/nvcc + USE_NVTX ?=-DUSE_NVTX + # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html + # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ + # Default: use compute capability 70 (Volta architecture), and embed PTX to support later architectures, too. + # Set MADGRAPH_CUDA_ARCHITECTURE to the desired value to change the default. + # Build for multiple architectures using a space-separated list, e.g. 
MADGRAPH_CUDA_ARCHITECTURE="70 80" + MADGRAPH_CUDA_ARCHITECTURE ?= 70 + # Generate PTX for the first architecture: + CUARCHFLAGS := --generate-code arch=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)),code=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)) + # Generate device code for all architectures: + CUARCHFLAGS += $(foreach arch,$(MADGRAPH_CUDA_ARCHITECTURE), --generate-code arch=compute_$(arch),code=sm_$(arch)) + + CUINC = -I$(CUDA_HOME)/include/ + CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! + MG_LDFLAGS += $(CURANDLIBFLAGS) + MG_NVCCFLAGS += $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) + +else ifeq ($(AVX),cuda) + $(error nvcc is not visible in PATH. Either add it to PATH or export CUDA_HOME to compile with cuda) + ifeq ($(AVX),cuda) + $(error Cannot compile for cuda without NVCC) + endif +endif + +# Set the host C++ compiler for nvcc via "-ccbin " +# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." is not supported) +MG_NVCCFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) + +# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) +ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) +MG_NVCCFLAGS += -allow-unsupported-compiler +endif + +#------------------------------------------------------------------------------- + +#=== Configure ccache for C++ and CUDA builds + +# Enable ccache if USECCACHE=1 +ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) + override CXX:=ccache $(CXX) +endif + +ifneq ($(NVCC),) + ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) + override NVCC:=ccache $(NVCC) + endif +endif + +#------------------------------------------------------------------------------- + +#=== Configure PowerPC-specific compiler flags for C++ and CUDA + +# PowerPC-specific CXX / CUDA compiler flags (being reviewed) +ifeq ($(UNAME_P),ppc64le) + MG_CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none 
and sse4 + MG_NVCCFLAGS+= -Xcompiler -mno-float128 + + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + endif +endif + +#------------------------------------------------------------------------------- +#=== Apple-specific compiler/linker options + +# Add -std=c++17 explicitly to avoid build errors on macOS +# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" +ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) +MG_CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 +endif + +ifeq ($(UNAME_S),Darwin) +STDLIB = -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) +MG_LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" +else +MG_LDFLAGS += -Xlinker --no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +endif + +#------------------------------------------------------------------------------- + +#=== C++/CUDA-specific flags for floating-point types and random generators to use + +# Set the default FPTYPE (floating point type) choice +FPTYPE ?= m + +# Set the default HELINL (inline helicities?) choice +HELINL ?= 0 + +# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice +HRDCOD ?= 0 + +# Set the default RNDGEN (random number generator) choice +ifeq ($(NVCC),) + RNDGEN ?= hasNoCurand +else + RNDGEN ?= hasCurand +endif + +# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so sub-makes don't go back to the defaults +export AVX +export AVXFLAGS +export FPTYPE +export HELINL +export HRDCOD +export RNDGEN + +#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN + +# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") +# $(info FPTYPE=$(FPTYPE)) +ifeq ($(FPTYPE),d) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE +else ifeq ($(FPTYPE),f) + COMMONFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT +else ifeq ($(FPTYPE),m) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT +else + $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) +endif + +# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") +# $(info HELINL=$(HELINL)) +ifeq ($(HELINL),1) + COMMONFLAGS += -DMGONGPU_INLINE_HELAMPS +else ifneq ($(HELINL),0) + $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") +# $(info HRDCOD=$(HRDCOD)) +ifeq ($(HRDCOD),1) + COMMONFLAGS += -DMGONGPU_HARDCODE_PARAM +else ifneq ($(HRDCOD),0) + $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") +$(info RNDGEN=$(RNDGEN)) +ifeq ($(RNDGEN),hasNoCurand) + override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND + override CURANDLIBFLAGS = +else ifeq ($(RNDGEN),hasCurand) + CXXFLAGSCURAND = $(CUINC) +else + $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) +endif + +MG_CXXFLAGS += $(COMMONFLAGS) +MG_NVCCFLAGS += $(COMMONFLAGS) + 
+#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_tt01g.mad/Source/makefile b/epochX/cudacpp/gg_tt01g.mad/Source/makefile index 00c73099a0..407b1b753e 100644 --- a/epochX/cudacpp/gg_tt01g.mad/Source/makefile +++ b/epochX/cudacpp/gg_tt01g.mad/Source/makefile @@ -10,8 +10,8 @@ include make_opts # Source files -PROCESS= hfill.o matrix.o myamp.o -DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o +PROCESS = hfill.o matrix.o myamp.o +DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o HBOOK = hfill.o hcurve.o hbook1.o hbook2.o GENERIC = $(alfas_functions).o transpole.o invarients.o hfill.o pawgraphs.o ran1.o \ rw_events.o rw_routines.o kin_functions.o open_file.o basecode.o setrun.o \ @@ -22,7 +22,7 @@ GENSUDGRID = gensudgrid.o is-sud.o setrun_gen.o rw_routines.o open_file.o # Locally compiled libraries -LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) +LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) # Binaries @@ -32,6 +32,9 @@ BINARIES = $(BINDIR)gen_ximprove $(BINDIR)gensudgrid $(BINDIR)combine_runs all: $(LIBRARIES) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(LIBDIR)libbias.$(libext) +%.o: %.f *.inc + $(FC) -I. $(MG_FCFLAGS) $(FCFLAGS) -c $< -o $@ + # Libraries $(LIBDIR)libdsample.$(libext): $(DSAMPLE) @@ -39,36 +42,35 @@ $(LIBDIR)libdsample.$(libext): $(DSAMPLE) $(LIBDIR)libgeneric.$(libext): $(GENERIC) $(call CREATELIB, $@, $^) $(LIBDIR)libdhelas.$(libext): DHELAS - cd DHELAS; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libpdf.$(libext): PDF make_opts - cd PDF; make; cd .. 
+ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" ifneq (,$(filter edff chff, $(pdlabel1) $(pdlabel2))) $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make ; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" else $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make -f makefile_dummy; cd ../../ -endif + $(MAKE) -C $< -f makefile_dummy FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" +endif $(LIBDIR)libcernlib.$(libext): CERNLIB - cd CERNLIB; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" # The bias library is here the dummy by default; compilation of other ones specified in the run_card will be done by MG5aMC directly. $(LIBDIR)libbias.$(libext): BIAS/dummy - cd BIAS/dummy; make; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libmodel.$(libext): MODEL param_card.inc - cd MODEL; make + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" param_card.inc: ../Cards/param_card.dat ../bin/madevent treatcards param + touch $@ # madevent doesn't update the time stamp if there's nothing to do -$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o - $(FC) $(LDFLAGS) -o $@ $^ -#$(BINDIR)combine_events: $(COMBINE) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) run_card.inc $(LIBDIR)libbias.$(libext) -# $(FC) -o $@ $(COMBINE) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC $(llhapdf) $(LDFLAGS) -lbias +$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o + $(FC) $(MG_LDFLAGS) $(LDFLAGS) -o $@ $^ $(BINDIR)gensudgrid: $(GENSUDGRID) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libcernlib.$(libext) - $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(LDFLAGS) + $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) 
-lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(MG_LDFLAGS) $(LDFLAGS) # Dependencies @@ -85,6 +87,7 @@ rw_events.o: rw_events.f run_config.inc run_card.inc: ../Cards/run_card.dat ../bin/madevent treatcards run + touch $@ # madevent doesn't update the time stamp if there's nothing to do clean4pdf: rm -f ../lib/libpdf.$(libext) @@ -120,7 +123,7 @@ $(LIBDIR)libiregi.a: $(IREGIDIR) cd $(IREGIDIR); make ln -sf ../Source/$(IREGIDIR)libiregi.a $(LIBDIR)libiregi.a -cleanSource: +clean: $(RM) *.o $(LIBRARIES) $(BINARIES) cd PDF; make clean; cd .. cd PDF/gammaUPC; make clean; cd ../../ @@ -132,11 +135,3 @@ cleanSource: cd BIAS/ptj_bias; make clean; cd ../.. if [ -d $(CUTTOOLSDIR) ]; then cd $(CUTTOOLSDIR); make clean; cd ..; fi if [ -d $(IREGIDIR) ]; then cd $(IREGIDIR); make clean; cd ..; fi - -clean: cleanSource - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make clean; cd -; done; - -cleanavx: - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; -cleanall: cleanSource # THIS IS THE ONE - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/Bridge.h b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/Bridge.h index bf8b5e024d..c263f39a62 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/Bridge.h +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/Bridge.h @@ -236,7 +236,7 @@ namespace mg5amcCpu #ifdef __CUDACC__ if( ( m_nevt < s_gputhreadsmin ) || ( m_nevt % s_gputhreadsmin != 0 ) ) throw std::runtime_error( "Bridge constructor: nevt should be a multiple of " + std::to_string( s_gputhreadsmin ) ); - while( m_nevt != m_gpublocks * m_gputhreads ) + while( m_nevt != static_cast( m_gpublocks * m_gputhreads ) ) { m_gputhreads /= 2; if( m_gputhreads < s_gputhreadsmin ) @@ -266,7 +266,7 @@ namespace mg5amcCpu template void Bridge::set_gpugrid( const int gpublocks, const int gputhreads ) { - if( m_nevt != gpublocks * gputhreads ) + if( m_nevt != static_cast( gpublocks * gputhreads ) ) 
throw std::runtime_error( "Bridge: gpublocks*gputhreads must equal m_nevt in set_gpugrid" ); m_gpublocks = gpublocks; m_gputhreads = gputhreads; diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/MadgraphTest.h b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/MadgraphTest.h index ef40624c88..b0f2250c25 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/MadgraphTest.h +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/MadgraphTest.h @@ -199,10 +199,6 @@ class MadgraphTest : public testing::TestWithParam } }; -// Since we link both the CPU-only and GPU tests into the same executable, we prevent -// a multiply defined symbol by only compiling this in the non-CUDA phase: -#ifndef __CUDACC__ - /// Compare momenta and matrix elements. /// This uses an implementation of TestDriverBase to run a madgraph workflow, /// and compares momenta and matrix elements with a reference file. @@ -307,6 +303,4 @@ TEST_P( MadgraphTest, CompareMomentaAndME ) } } -#endif // __CUDACC__ - #endif /* MADGRAPHTEST_H_ */ diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/MatrixElementKernels.cc b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/MatrixElementKernels.cc index 74b5239ebf..2d6f27cd5d 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/MatrixElementKernels.cc +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/MatrixElementKernels.cc @@ -196,6 +196,9 @@ namespace mg5amcGpu void MatrixElementKernelDevice::setGrid( const int gpublocks, const int gputhreads ) { + m_gpublocks = gpublocks; + m_gputhreads = gputhreads; + if( m_gpublocks == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gpublocks must be > 0 in setGrid" ); if( m_gputhreads == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gputhreads must be > 0 in setGrid" ); if( this->nevt() != m_gpublocks * m_gputhreads ) throw std::runtime_error( "MatrixElementKernelDevice: nevt mismatch in setGrid" ); diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P1_gg_ttx/check_sa.cc 
b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P1_gg_ttx/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P1_gg_ttx/check_sa.cc +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P1_gg_ttx/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P1_gg_ttx/counters.cc b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P1_gg_ttx/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P1_gg_ttx/counters.cc +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P1_gg_ttx/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P2_gg_ttxg/check_sa.cc b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P2_gg_ttxg/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P2_gg_ttxg/check_sa.cc +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P2_gg_ttxg/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P2_gg_ttxg/counters.cc b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P2_gg_ttxg/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P2_gg_ttxg/counters.cc +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/P2_gg_ttxg/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk 
b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk index 509307506b..a522ddb335 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk @@ -1,56 +1,41 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: use ':=' to ensure that the value of CUDACPP_MAKEFILE is not modified further down after including make_opts -#=== NB: use 'override' to ensure that the value can not be modified from the outside -override CUDACPP_MAKEFILE := $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) -###$(info CUDACPP_MAKEFILE='$(CUDACPP_MAKEFILE)') - -#=== NB: different names (e.g. cudacpp.mk and cudacpp_src.mk) are used in the Subprocess and src directories -override CUDACPP_SRC_MAKEFILE = cudacpp_src.mk - -#------------------------------------------------------------------------------- - -#=== Use bash in the Makefile (https://www.gnu.org/software/make/manual/html_node/Choosing-the-Shell.html) - -SHELL := /bin/bash - -#------------------------------------------------------------------------------- - -#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) - -# Detect O/S kernel (Linux, Darwin...) -UNAME_S := $(shell uname -s) -###$(info UNAME_S='$(UNAME_S)') - -# Detect architecture (x86_64, ppc64le...) 
-UNAME_P := $(shell uname -p) -###$(info UNAME_P='$(UNAME_P)') - -#------------------------------------------------------------------------------- - -#=== Include the common MG5aMC Makefile options - -# OM: this is crucial for MG5aMC flag consistency/documentation -# AV: temporarely comment this out because it breaks cudacpp builds -ifneq ($(wildcard ../../Source/make_opts),) -include ../../Source/make_opts -endif +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. + +# This makefile extends the Fortran makefile called "makefile" + +CUDACPP_SRC_MAKEFILE = cudacpp_src.mk + +# Self-invocation with adapted flags: +cppnative: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=native AVXFLAGS="-march=native" cppbuild +cppnone: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=none AVXFLAGS= cppbuild +cppsse4: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=sse4 AVXFLAGS=-march=nehalem cppbuild +cppavx2: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=avx2 AVXFLAGS=-march=haswell cppbuild +cppavx512y: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512y AVXFLAGS="-march=skylake-avx512 -mprefer-vector-width=256" cppbuild +cppavx512z: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512z AVXFLAGS="-march=skylake-avx512 -DMGONGPU_PVW512" cppbuild +cuda: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=cuda cudabuild #------------------------------------------------------------------------------- #=== Configure common compiler flags for C++ and CUDA +# NB: The base flags are defined in the fortran "makefile" + +# Include directories +INCFLAGS = -I. -I../../src -INCFLAGS = -I. 
-OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here +MG_CXXFLAGS += $(INCFLAGS) +MG_NVCCFLAGS += $(INCFLAGS) # Dependency on src directory -MG5AMC_COMMONLIB = mg5amc_common -LIBFLAGS = -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -INCFLAGS += -I../../src +MG5AMC_COMMONLIB = mg5amc_common # Compiler-specific googletest build directory (#125 and #738) ifneq ($(shell $(CXX) --version | grep '^Intel(R) oneAPI DPC++/C++ Compiler'),) @@ -99,356 +84,42 @@ endif #------------------------------------------------------------------------------- -#=== Configure the C++ compiler - -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) -Wall -Wshadow -Wextra -ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS += -ffast-math # see issue #117 -endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY - -# Optionally add debug flags to display the full list of flags (eg on Darwin) -###CXXFLAGS+= -v - -# Note: AR, CXX and FC are implicitly defined if not set externally -# See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html - -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler - -# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) -# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below -ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside - $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") - override CUDA_HOME=disabled -endif - -# If CUDA_HOME is not set, try to set it from the location of nvcc -ifndef CUDA_HOME - CUDA_HOME = $(patsubst 
%bin/nvcc,%,$(shell which nvcc 2>/dev/null)) - $(warning CUDA_HOME was not set: using "$(CUDA_HOME)") -endif - -# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists -ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) - NVCC = $(CUDA_HOME)/bin/nvcc - USE_NVTX ?=-DUSE_NVTX - # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html - # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ - # Default: use compute capability 70 for V100 (CERN lxbatch, CERN itscrd, Juwels Cluster). - # Embed device code for 70, and PTX for 70+. - # Export MADGRAPH_CUDA_ARCHITECTURE (comma-separated list) to use another value or list of values (see #533). - # Examples: use 60 for P100 (Piz Daint), 80 for A100 (Juwels Booster, NVidia raplab/Curiosity). - MADGRAPH_CUDA_ARCHITECTURE ?= 70 - ###CUARCHFLAGS = -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=compute_$(MADGRAPH_CUDA_ARCHITECTURE) -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=sm_$(MADGRAPH_CUDA_ARCHITECTURE) # Older implementation (AV): go back to this one for multi-GPU support #533 - ###CUARCHFLAGS = --gpu-architecture=compute_$(MADGRAPH_CUDA_ARCHITECTURE) --gpu-code=sm_$(MADGRAPH_CUDA_ARCHITECTURE),compute_$(MADGRAPH_CUDA_ARCHITECTURE) # Newer implementation (SH): cannot use this as-is for multi-GPU support #533 - comma:=, - CUARCHFLAGS = $(foreach arch,$(subst $(comma), ,$(MADGRAPH_CUDA_ARCHITECTURE)),-gencode arch=compute_$(arch),code=compute_$(arch) -gencode arch=compute_$(arch),code=sm_$(arch)) - CUINC = -I$(CUDA_HOME)/include/ - ifeq ($(RNDGEN),hasNoCurand) - CURANDLIBFLAGS= - else - CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! 
- endif - CUOPTFLAGS = -lineinfo - CUFLAGS = $(foreach opt, $(OPTFLAGS), -Xcompiler $(opt)) $(CUOPTFLAGS) $(INCFLAGS) $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) -use_fast_math - ###CUFLAGS += -Xcompiler -Wall -Xcompiler -Wextra -Xcompiler -Wshadow - ###NVCC_VERSION = $(shell $(NVCC) --version | grep 'Cuda compilation tools' | cut -d' ' -f5 | cut -d, -f1) - CUFLAGS += -std=c++17 # need CUDA >= 11.2 (see #333): this is enforced in mgOnGpuConfig.h - # Without -maxrregcount: baseline throughput: 6.5E8 (16384 32 12) up to 7.3E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 160 # improves throughput: 6.9E8 (16384 32 12) up to 7.7E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 128 # improves throughput: 7.3E8 (16384 32 12) up to 7.6E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 96 # degrades throughput: 4.1E8 (16384 32 12) up to 4.5E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 64 # degrades throughput: 1.7E8 (16384 32 12) flat at 1.7E8 (65536 128 12) -else ifneq ($(origin REQUIRE_CUDA),undefined) - # If REQUIRE_CUDA is set but no cuda is found, stop here (e.g. for CI tests on GPU #443) - $(error No cuda installation found (set CUDA_HOME or make nvcc visible in PATH)) -else - # No cuda. Switch cuda compilation off and go to common random numbers in C++ - $(warning CUDA_HOME is not set or is invalid: export CUDA_HOME to compile with cuda) - override NVCC= - override USE_NVTX= - override CUINC= - override CURANDLIBFLAGS= -endif -export NVCC -export CUFLAGS - -# Set the host C++ compiler for nvcc via "-ccbin " -# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." 
is not supported) -CUFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) - -# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) -ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) -CUFLAGS += -allow-unsupported-compiler -endif - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ and CUDA builds - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif -ifneq ($(NVCC),) - ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) - override NVCC:=ccache $(NVCC) - endif -endif - -#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for C++ and CUDA - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # would increase to none=4.08-4.12E6, sse4=4.99-5.03E6! -else - ###CXXFLAGS+= -flto # also on Intel this would increase throughputs by a factor 2 to 4... - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -# PowerPC-specific CUDA compiler flags (to be reviewed!) 
-ifeq ($(UNAME_P),ppc64le) - CUFLAGS+= -Xcompiler -mno-float128 -endif - -#------------------------------------------------------------------------------- - #=== Configure defaults and check if user-defined choices exist for OMPFLAGS, AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the default OMPFLAGS choice -ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on Intel (was ok without nvcc but not ok with nvcc before #578) -else ifneq ($(shell $(CXX) --version | egrep '^(clang)'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on clang (was not ok without or with nvcc before #578) -###else ifneq ($(shell $(CXX) --version | egrep '^(Apple clang)'),) # AV for Mac (Apple clang compiler) -else ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) +OMPFLAGS ?= -fopenmp +ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) override OMPFLAGS = # AV disable OpenMP MT on Apple clang (builds fail in the CI #578) -###override OMPFLAGS = -fopenmp # OM reenable OpenMP MT on Apple clang? 
(AV Oct 2023: this still fails in the CI) -else -override OMPFLAGS = -fopenmp # enable OpenMP MT by default on all other platforms -###override OMPFLAGS = # disable OpenMP MT on all other platforms (default before #575) -endif - -# Set the default AVX (vectorization) choice -ifeq ($(AVX),) - ifeq ($(UNAME_P),ppc64le) - ###override AVX = none - override AVX = sse4 - else ifeq ($(UNAME_P),arm) - ###override AVX = none - override AVX = sse4 - else ifeq ($(wildcard /proc/cpuinfo),) - override AVX = none - $(warning Using AVX='$(AVX)' because host SIMD features cannot be read from /proc/cpuinfo) - else ifeq ($(shell grep -m1 -c avx512vl /proc/cpuinfo)$(shell $(CXX) --version | grep ^clang),1) - override AVX = 512y - ###$(info Using AVX='$(AVX)' as no user input exists) - else - override AVX = avx2 - ifneq ($(shell grep -m1 -c avx512vl /proc/cpuinfo),1) - $(warning Using AVX='$(AVX)' because host does not support avx512vl) - else - $(warning Using AVX='$(AVX)' because this is faster than avx512vl for clang) - endif - endif -else - ###$(info Using AVX='$(AVX)' according to user input) -endif - -# Set the default FPTYPE (floating point type) choice -ifeq ($(FPTYPE),) - override FPTYPE = d -endif - -# Set the default HELINL (inline helicities?) choice -ifeq ($(HELINL),) - override HELINL = 0 -endif - -# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice -ifeq ($(HRDCOD),) - override HRDCOD = 0 -endif - -# Set the default RNDGEN (random number generator) choice -ifeq ($(RNDGEN),) - ifeq ($(NVCC),) - override RNDGEN = hasNoCurand - else ifeq ($(RNDGEN),) - override RNDGEN = hasCurand - endif endif -# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so that it is not necessary to pass them to the src Makefile too -export AVX -export FPTYPE -export HELINL -export HRDCOD -export RNDGEN +# Export here, so sub makes don't fall back to the defaults: export OMPFLAGS -#------------------------------------------------------------------------------- - -#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN - -# Set the build flags appropriate to OMPFLAGS -$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override 
AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS - CUFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM - CUFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND -else ifeq ($(RNDGEN),hasCurand) - override CXXFLAGSCURAND = -else - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- #=== Configure build directories and build lockfiles === -# Build directory "short" tag (defines target and path to the optional build directory) -# (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) - -# Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) -# (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) - -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIR = ../../lib/$(BUILDDIR) - override LIBDIRRPATH = '$$ORIGIN/../$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is set = 1)) -else - override BUILDDIR = . - override LIBDIR = ../../lib - override LIBDIRRPATH = '$$ORIGIN/$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) +# Build directory "short" tag (defines target and path to the build directory) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +CUDACPP_BUILDDIR = build.$(DIRTAG) +CUDACPP_LIBDIR := ../../lib/$(CUDACPP_BUILDDIR) +LIBDIRRPATH := '$$ORIGIN:$$ORIGIN/../$(CUDACPP_LIBDIR)' +ifneq ($(AVX),) + $(info Building CUDACPP in CUDACPP_BUILDDIR=$(CUDACPP_BUILDDIR). 
Libs in $(CUDACPP_LIBDIR)) endif -###override INCDIR = ../../include -###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) -# On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH +# On Linux, set rpath to CUDACPP_LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables or shared libraries ($ORIGIN on Linux) -# On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary +# On Darwin, building libraries with absolute paths in CUDACPP_LIBDIR makes this unnecessary ifeq ($(UNAME_S),Darwin) override CXXLIBFLAGSRPATH = override CULIBFLAGSRPATH = - override CXXLIBFLAGSRPATH2 = - override CULIBFLAGSRPATH2 = else # RPATH to cuda/cpp libs when linking executables override CXXLIBFLAGSRPATH = -Wl,-rpath,$(LIBDIRRPATH) override CULIBFLAGSRPATH = -Xlinker -rpath,$(LIBDIRRPATH) - # RPATH to common lib when linking cuda/cpp libs - override CXXLIBFLAGSRPATH2 = -Wl,-rpath,'$$ORIGIN' - override CULIBFLAGSRPATH2 = -Xlinker -rpath,'$$ORIGIN' endif # Setting LD_LIBRARY_PATH or DYLD_LIBRARY_PATH in the RUNTIME is no longer necessary (neither on Linux nor on Mac) @@ -458,107 +129,68 @@ override RUNTIME = #=== Makefile TARGETS and build rules below #=============================================================================== -cxx_main=$(BUILDDIR)/check.exe -fcxx_main=$(BUILDDIR)/fcheck.exe +cxx_main=$(CUDACPP_BUILDDIR)/check.exe +fcxx_main=$(CUDACPP_BUILDDIR)/fcheck.exe -ifneq ($(NVCC),) -cu_main=$(BUILDDIR)/gcheck.exe -fcu_main=$(BUILDDIR)/fgcheck.exe -else -cu_main= -fcu_main= -endif - -testmain=$(BUILDDIR)/runTest.exe +cu_main=$(CUDACPP_BUILDDIR)/gcheck.exe +fcu_main=$(CUDACPP_BUILDDIR)/fgcheck.exe ifneq ($(GTESTLIBS),) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) $(testmain) -else -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) 
+testmain=$(CUDACPP_BUILDDIR)/runTest.exe +cutestmain=$(CUDACPP_BUILDDIR)/runTest_cuda.exe endif -# Target (and build options): debug -MAKEDEBUG= -debug: OPTFLAGS = -g -O0 -debug: CUOPTFLAGS = -G -debug: MAKEDEBUG := debug -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -$(BUILDDIR)/.build.$(TAG): - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @if [ "$(oldtagsb)" != "" ]; then echo "Cannot build for tag=$(TAG) as old builds exist for other tags:"; echo " $(oldtagsb)"; echo "Please run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @touch $(BUILDDIR)/.build.$(TAG) +cppbuild: $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(cxx_main) $(fcxx_main) $(testmain) +cudabuild: $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(cu_main) $(fcu_main) $(cutestmain) # Generic target and build rules: objects from CUDA compilation -ifneq ($(NVCC),) -$(BUILDDIR)/%.o : %.cu *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cu *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c $< -o $@ -$(BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ -endif +$(CUDACPP_BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ # Generic target and build rules: objects from C++ compilation # (NB do not include CUINC here! 
add it only for NVTX or curand #679) -$(BUILDDIR)/%.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Apply special build flags only to CrossSectionKernel.cc and gCrossSectionKernel.cu (no fast math, see #117 and #516) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS := $(filter-out -ffast-math,$(CXXFLAGS)) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math +$(CUDACPP_BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math ifneq ($(NVCC),) -$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -fno-fast-math +$(CUDACPP_BUILDDIR)/gCrossSectionKernels.o: NVCCFLAGS += -Xcompiler -fno-fast-math endif endif # Apply special build flags only to check_sa.o and gcheck_sa.o (NVTX in timermap.h, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) -$(BUILDDIR)/gcheck_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) # Apply special build flags only to check_sa and CurandRandomNumberKernel (curand headers, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gcheck_sa.o: CUFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gCurandRandomNumberKernel.o: CUFLAGS += $(CXXFLAGSCURAND) -ifeq ($(RNDGEN),hasCurand) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CUINC) -endif +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) 
+$(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) + # Avoid "warning: builtin __has_trivial_... is deprecated; use __is_trivially_... instead" in nvcc with icx2023 (#592) ifneq ($(shell $(CXX) --version | egrep '^(Intel)'),) ifneq ($(NVCC),) -CUFLAGS += -Xcompiler -Wno-deprecated-builtins +MG_NVCCFLAGS += -Xcompiler -Wno-deprecated-builtins endif endif -# Avoid clang warning "overriding '-ffp-contract=fast' option with '-ffp-contract=on'" (#516) -# This patch does remove the warning, but I prefer to keep it disabled for the moment... -###ifneq ($(shell $(CXX) --version | egrep '^(clang|Apple clang|Intel)'),) -###$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -Wno-overriding-t-option -###ifneq ($(NVCC),) -###$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -Wno-overriding-t-option -###endif -###endif - #### Apply special build flags only to CPPProcess.cc (-flto) ###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += -flto -#### Apply special build flags only to CPPProcess.cc (AVXFLAGS) -###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += $(AVXFLAGS) - #------------------------------------------------------------------------------- -# Target (and build rules): common (src) library -commonlib : $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc $(BUILDDIR)/.build.$(TAG) - $(MAKE) -C ../../src $(MAKEDEBUG) -f $(CUDACPP_SRC_MAKEFILE) +$(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc + $(MAKE) AVX=$(AVX) AVXFLAGS="$(AVXFLAGS)" -C ../../src -f $(CUDACPP_SRC_MAKEFILE) #------------------------------------------------------------------------------- @@ -566,162 +198,123 @@ processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') ###$(info processid_short=$(processid_short)) MG5AMC_CXXLIB = mg5amc_$(processid_short)_cpp -cxx_objects_lib=$(BUILDDIR)/CPPProcess.o $(BUILDDIR)/MatrixElementKernels.o $(BUILDDIR)/BridgeKernels.o $(BUILDDIR)/CrossSectionKernels.o 
-cxx_objects_exe=$(BUILDDIR)/CommonRandomNumberKernel.o $(BUILDDIR)/RamboSamplingKernels.o +cxx_objects_lib=$(CUDACPP_BUILDDIR)/CPPProcess.o $(CUDACPP_BUILDDIR)/MatrixElementKernels.o $(CUDACPP_BUILDDIR)/BridgeKernels.o $(CUDACPP_BUILDDIR)/CrossSectionKernels.o +cxx_objects_exe=$(CUDACPP_BUILDDIR)/CommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/RamboSamplingKernels.o -ifneq ($(NVCC),) MG5AMC_CULIB = mg5amc_$(processid_short)_cuda -cu_objects_lib=$(BUILDDIR)/gCPPProcess.o $(BUILDDIR)/gMatrixElementKernels.o $(BUILDDIR)/gBridgeKernels.o $(BUILDDIR)/gCrossSectionKernels.o -cu_objects_exe=$(BUILDDIR)/gCommonRandomNumberKernel.o $(BUILDDIR)/gRamboSamplingKernels.o -endif +cu_objects_lib=$(CUDACPP_BUILDDIR)/gCPPProcess.o $(CUDACPP_BUILDDIR)/gMatrixElementKernels.o $(CUDACPP_BUILDDIR)/gBridgeKernels.o $(CUDACPP_BUILDDIR)/gCrossSectionKernels.o +cu_objects_exe=$(CUDACPP_BUILDDIR)/gCommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/gRamboSamplingKernels.o # Target (and build rules): C++ and CUDA shared libraries -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) - $(CXX) -shared -o $@ $(cxx_objects_lib) $(CXXLIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) - -ifneq ($(NVCC),) -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) - $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -endif +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) + $(CXX) -shared -o $@ 
$(cxx_objects_lib) $(CXXLIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) $(MG_LDFLAGS) $(LDFLAGS) -#------------------------------------------------------------------------------- - -# Target (and build rules): Fortran include files -###$(INCDIR)/%.inc : ../%.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### \cp $< $@ +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) + $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) #------------------------------------------------------------------------------- # Target (and build rules): C++ and CUDA standalone executables -$(cxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cxx_main): $(BUILDDIR)/check_sa.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o - $(CXX) -o $@ $(BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o $(CURANDLIBFLAGS) -ifneq ($(NVCC),) +$(cxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(cxx_main): $(CUDACPP_BUILDDIR)/check_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) + ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(cu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(cu_main): LIBFLAGS 
+= -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(cu_main): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(cu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cu_main): $(BUILDDIR)/gcheck_sa.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o - $(NVCC) -o $@ $(BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o $(CURANDLIBFLAGS) +$(cu_main): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif +$(cu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(cu_main): $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- - -# Generic target and build rules: objects from Fortran compilation -$(BUILDDIR)/%.o : %.f *.inc - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(FC) -I. -c $< -o $@ - -# Generic target and build rules: objects from Fortran compilation -###$(BUILDDIR)/%.o : %.f *.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi -### $(FC) -I. -I$(INCDIR) -c $< -o $@ - -# Target (and build rules): Fortran standalone executables -###$(BUILDDIR)/fcheck_sa.o : $(INCDIR)/fbridge.inc +# Check executables: ifeq ($(UNAME_S),Darwin) -$(fcxx_main): LIBFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 +$(fcxx_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif -$(fcxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcxx_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) - $(CXX) -o $@ $(BUILDDIR)/fcheck_sa.o $(OMPFLAGS) $(BUILDDIR)/fsampler.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) +$(fcxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(fcxx_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(cxx_objects_exe) $(OMPFLAGS) $(CUDACPP_BUILDDIR)/fsampler.o -lgfortran -L$(CUDACPP_LIBDIR) $(MG_LDFLAGS) $(LDFLAGS) -ifneq ($(NVCC),) ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(fcu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(fcu_main): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(fcu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(fcu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') endif ifeq ($(UNAME_S),Darwin) -$(fcu_main): LIBFLAGS += -L$(shell dirname 
$(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 -endif -$(fcu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcu_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) - $(NVCC) -o $@ $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) +$(fcu_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif +$(fcu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(fcu_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(cu_objects_exe) -lgfortran $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- # Target (and build rules): test objects and test executable -$(BUILDDIR)/testxxx.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx_cu.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx_cu.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -endif +$(testmain) $(cutestmain): $(GTESTLIBS) +$(testmain) $(cutestmain): INCFLAGS += $(GTESTINC) +$(testmain) $(cutestmain): MG_LDFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main -$(BUILDDIR)/testmisc.o: $(GTESTLIBS) 
-$(BUILDDIR)/testmisc.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(CUDACPP_BUILDDIR)/testxxx.o $(CUDACPP_BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) testxxx_cc_ref.txt +$(testmain): $(CUDACPP_BUILDDIR)/testxxx.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions +$(cutestmain): $(CUDACPP_BUILDDIR)/testxxx_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testmisc_cu.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests -endif -$(BUILDDIR)/runTest.o: $(GTESTLIBS) -$(BUILDDIR)/runTest.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/runTest.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/runTest.o +$(CUDACPP_BUILDDIR)/testmisc.o $(CUDACPP_BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/testmisc.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(cutestmain): $(CUDACPP_BUILDDIR)/testmisc_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests + + +$(CUDACPP_BUILDDIR)/runTest.o $(CUDACPP_BUILDDIR)/runTest_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/runTest.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/runTest.o +$(cutestmain): $(CUDACPP_BUILDDIR)/runTest_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/runTest_cu.o + -ifneq ($(NVCC),) -$(BUILDDIR)/runTest_cu.o: $(GTESTLIBS) -$(BUILDDIR)/runTest_cu.o: INCFLAGS += $(GTESTINC) ifneq ($(shell $(CXX) --version | grep ^Intel),) 
-$(testmain): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(testmain): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cutestmain): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cutestmain): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(testmain): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(testmain): $(BUILDDIR)/runTest_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/runTest_cu.o +$(cutestmain): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif -$(testmain): $(GTESTLIBS) -$(testmain): INCFLAGS += $(GTESTINC) -$(testmain): LIBFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main ifneq ($(OMPFLAGS),) ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -$(testmain): LIBFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) +$(testmain): MG_LDFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -$(testmain): LIBFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 +$(testmain): MG_LDFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 ###else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) ###$(testmain): LIBFLAGS += ???? 
# OMP is not supported yet by cudacpp for Apple clang (see #578 and #604) else -$(testmain): LIBFLAGS += -lgomp +$(testmain): MG_LDFLAGS += -lgomp endif endif -ifeq ($(NVCC),) # link only runTest.o -$(testmain): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) - $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -ldl -pthread $(LIBFLAGS) -else # link both runTest.o and runTest_cu.o -$(testmain): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) - $(NVCC) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) -ldl $(LIBFLAGS) -lcuda -endif +$(testmain): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(testmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) + $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -pthread $(MG_LDFLAGS) $(LDFLAGS) + +$(cutestmain): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cutestmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) + $(NVCC) -o $@ $(cu_objects_lib) $(cu_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -lcuda $(MG_LDFLAGS) $(LDFLAGS) # Use target gtestlibs to build only googletest ifneq ($(GTESTLIBS),) @@ -731,72 +324,15 @@ endif # Use flock (Linux only, no Mac) to allow 'make -j' if googletest has not yet been downloaded https://stackoverflow.com/a/32666215 $(GTESTLIBS): ifneq ($(shell which flock 2>/dev/null),) - @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - flock $(BUILDDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) + flock $(TESTDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) else if [ -d $(TESTDIR) ]; then $(MAKE) -C $(TESTDIR); fi endif #------------------------------------------------------------------------------- -# Target: build all targets in all AVX modes (each AVX mode in a separate build directory) -# Split the avxall target into five separate targets to allow parallel 'make -j avxall' builds -# (Hack: add a fbridge.inc dependency to avxall, to ensure it is only copied once for all AVX modes) -avxnone: - @echo - $(MAKE) USEBUILDDIR=1 AVX=none -f $(CUDACPP_MAKEFILE) - -avxsse4: - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 -f $(CUDACPP_MAKEFILE) - -avxavx2: - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 -f $(CUDACPP_MAKEFILE) - -avx512y: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y -f $(CUDACPP_MAKEFILE) - -avx512z: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z -f $(CUDACPP_MAKEFILE) - -ifeq ($(UNAME_P),ppc64le) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else ifeq ($(UNAME_P),arm) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 avxavx2 avx512y avx512z -avxall: avxnone avxsse4 avxavx2 avx512y avx512z -endif - -#------------------------------------------------------------------------------- - -# Target: clean the builds -.PHONY: clean - -clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(BUILDDIR) -else - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe - rm -f $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(LIBDIR)/lib$(MG5AMC_CULIB).so -endif - $(MAKE) -C ../../src clean -f $(CUDACPP_SRC_MAKEFILE) -### rm -rf $(INCDIR) - -cleanall: - @echo - $(MAKE) USEBUILDDIR=0 clean -f $(CUDACPP_MAKEFILE) - @echo - $(MAKE) USEBUILDDIR=0 -C ../../src cleanall -f $(CUDACPP_SRC_MAKEFILE) - rm -rf build.* - # Target: clean the builds as well as the gtest installation(s) 
-distclean: cleanall +distclean: clean cleansrc ifneq ($(wildcard $(TESTDIRCOMMON)),) $(MAKE) -C $(TESTDIRCOMMON) clean endif @@ -848,50 +384,55 @@ endif #------------------------------------------------------------------------------- -# Target: check (run the C++ test executable) +# Target: check/gcheck (run the C++ test executable) # [NB THIS IS WHAT IS USED IN THE GITHUB CI!] -ifneq ($(NVCC),) -check: runTest cmpFcheck cmpFGcheck -else check: runTest cmpFcheck -endif +gcheck: + $(MAKE) AVX=cuda runTest cmpFGcheck # Target: runTest (run the C++ test executable runTest.exe) -runTest: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/runTest.exe +ifneq ($(AVX),cuda) +runTest: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest.exe +else +runTest: cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest_cuda.exe +endif + # Target: runCheck (run the C++ standalone executable check.exe, with a small number of events) -runCheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/check.exe -p 2 32 2 +runCheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe -p 2 32 2 # Target: runGcheck (run the CUDA standalone executable gcheck.exe, with a small number of events) -runGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/gcheck.exe -p 2 32 2 +runGcheck: AVX=cuda +runGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe -p 2 32 2 # Target: runFcheck (run the Fortran standalone executable - with C++ MEs - fcheck.exe, with a small number of events) -runFcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 +runFcheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 # Target: runFGcheck (run the Fortran standalone executable - with CUDA MEs - fgcheck.exe, with a small number of events) -runFGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 +runFGcheck: AVX=cuda +runFGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 # Target: cmpFcheck (compare ME results from the C++ and Fortran with C++ MEs standalone executables, with 
a small number of events) -cmpFcheck: all.$(TAG) +cmpFcheck: cppbuild @echo - @echo "$(BUILDDIR)/check.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi # Target: cmpFGcheck (compare ME results from the CUDA and Fortran with CUDA MEs standalone executables, with a small number of events) -cmpFGcheck: all.$(TAG) +cmpFGcheck: AVX=cuda +cmpFGcheck: + $(MAKE) AVX=cuda cudabuild @echo - @echo "$(BUILDDIR)/gcheck.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fgcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi -# Target: memcheck (run the CUDA standalone executable gcheck.exe with a small number of events through cuda-memcheck) -memcheck: all.$(TAG) - $(RUNTIME) $(CUDA_HOME)/bin/cuda-memcheck --check-api-memory-access yes --check-deprecated-instr yes --check-device-heap yes --demangle full --language c --leak-check full --racecheck-report all --report-api-errors all --show-backtrace yes --tool memcheck --track-unused-memory yes $(BUILDDIR)/gcheck.exe -p 2 32 2 - -#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/makefile b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/makefile index d572486c2e..b69917ee1f 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/makefile +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/makefile @@ -1,27 +1,30 @@ SHELL := /bin/bash -include ../../Source/make_opts -FFLAGS+= -w +# Include general setup +OPTIONS_MAKEFILE := ../../Source/make_opts +include $(OPTIONS_MAKEFILE) # Enable the C preprocessor https://gcc.gnu.org/onlinedocs/gfortran/Preprocessing-Options.html -FFLAGS+= -cpp +MG_FCFLAGS += -cpp +MG_CXXFLAGS += -I. -# Compile counters with -O3 as in the cudacpp makefile (avoid being "unfair" to Fortran #740) -CXXFLAGS = -O3 -Wall -Wshadow -Wextra +all: help cppnative + +# Target if user does not specify target +help: + $(info No target specified.) 
+ $(info Viable targets are 'cppnative' (default), 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z' and 'cuda') + $(info Or 'cppall' for all C++ targets) + $(info Or 'ALL' for all C++ and cuda targets) -# Add -std=c++17 explicitly to avoid build errors on macOS -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 -endif -# Enable ccache if USECCACHE=1 +# Enable ccache for C++ if USECCACHE=1 (do not enable it for Fortran since it is not supported for Fortran) ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) override CXX:=ccache $(CXX) endif -ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) - override FC:=ccache $(FC) -endif +###ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) +### override FC:=ccache $(FC) +###endif # Load additional dependencies of the bias module, if present ifeq (,$(wildcard ../bias_dependencies)) @@ -46,34 +49,25 @@ else MADLOOP_LIB = endif -LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias - -processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') -CUDACPP_MAKEFILE=cudacpp.mk -# NB1 Using ":=" below instead of "=" is much faster (it only runs the subprocess once instead of many times) -# NB2 Use '|&' in CUDACPP_BUILDDIR to avoid confusing errors about googletest #507 -# NB3 Do not add a comment inlined "CUDACPP_BUILDDIR=$(shell ...) # comment" as otherwise a trailing space is included... -# NB4 The variables relevant to the cudacpp Makefile must be explicitly passed to $(shell...) 
-CUDACPP_MAKEENV:=$(shell echo '$(.VARIABLES)' | tr " " "\n" | egrep "(USEBUILDDIR|AVX|FPTYPE|HELINL|HRDCOD)") -###$(info CUDACPP_MAKEENV=$(CUDACPP_MAKEENV)) -###$(info $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))")) -CUDACPP_BUILDDIR:=$(shell $(MAKE) $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))") -f $(CUDACPP_MAKEFILE) -pn 2>&1 | awk '/Building/{print $$3}' | sed s/BUILDDIR=//) -ifeq ($(CUDACPP_BUILDDIR),) -$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) -else -$(info CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)') -endif -CUDACPP_COMMONLIB=mg5amc_common -CUDACPP_CXXLIB=mg5amc_$(processid_short)_cpp -CUDACPP_CULIB=mg5amc_$(processid_short)_cuda - +LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias LIBS = $(LIBDIR)libbias.$(libext) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(MADLOOP_LIB) $(LOOP_LIBS) ifneq ("$(wildcard ../../Source/RUNNING)","") LINKLIBS += -lrunning - LIBS += $(LIBDIR)librunning.$(libext) + LIBS += $(LIBDIR)librunning.$(libext) endif +SOURCEDIR_GUARD:=../../Source/.timestamp_guard +# We use $(SOURCEDIR_GUARD) to figure out if Source is out of date. The Source makefile doesn't correctly +# update all files, so we need a proxy that is updated every time we run "$(MAKE) -C ../../Source". 
+$(SOURCEDIR_GUARD) ../../Source/discretesampler.mod &: ../../Source/*.f ../../Cards/param_card.dat ../../Cards/run_card.dat +ifneq ($(shell which flock 2>/dev/null),) + flock ../../Source/.lock -c "$(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD)" +else + $(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD) +endif + +$(LIBS): $(SOURCEDIR_GUARD) # Source files @@ -91,82 +85,83 @@ PROCESS= myamp.o genps.o unwgt.o setcuts.o get_color.o \ DSIG=driver.o $(patsubst %.f, %.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) DSIG_cudacpp=driver_cudacpp.o $(patsubst %.f, %_cudacpp.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) -SYMMETRY = symmetry.o idenparts.o +SYMMETRY = symmetry.o idenparts.o -# Binaries +# cudacpp targets: +CUDACPP_MAKEFILE := cudacpp.mk +ifneq (,$(wildcard $(CUDACPP_MAKEFILE))) +include $(CUDACPP_MAKEFILE) +endif -ifeq ($(UNAME),Darwin) -LDFLAGS += -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) -LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" -else -LDFLAGS += -Wl,--no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +ifeq ($(CUDACPP_BUILDDIR),) +$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) 
endif +CUDACPP_COMMONLIB=mg5amc_common +CUDACPP_CXXLIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so +CUDACPP_CULIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so -all: $(PROG)_fortran $(CUDACPP_BUILDDIR)/$(PROG)_cpp # also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (#503) +# Set up OpenMP if supported +OMPFLAGS ?= -fopenmp ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp LINKLIBS += -liomp5 # see #578 LINKLIBS += -lintlc # undefined reference to `_intel_fast_memcpy' else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -override OMPFLAGS = -fopenmp $(CUDACPP_BUILDDIR)/$(PROG)_cpp: LINKLIBS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -override OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang -else -override OMPFLAGS = -fopenmp +OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang endif -$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o - $(FC) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) - -$(LIBS): .libs -.libs: ../../Cards/param_card.dat ../../Cards/run_card.dat - cd ../../Source; make - touch $@ +# Binaries -$(CUDACPP_BUILDDIR)/.cudacpplibs: - $(MAKE) -f $(CUDACPP_MAKEFILE) - touch $@ +$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) # On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables ($ORIGIN on Linux) # On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary -ifeq ($(UNAME_S),Darwin) - override LIBFLAGSRPATH = -else ifeq ($(USEBUILDDIR),1) 
- override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' -else - override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/$(LIBDIR)' +ifneq ($(UNAME_S),Darwin) + LIBFLAGSRPATH := -Wl,-rpath,'$$ORIGIN:$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' endif -.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link +.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link madevent_cppnone_link madevent_cppsse4_link madevent_cppavx2_link madevent_cpp512y_link madevent_cpp512z_link clean cleanall cleansrc madevent_fortran_link: $(PROG)_fortran rm -f $(PROG) ln -s $(PROG)_fortran $(PROG) -madevent_cpp_link: $(CUDACPP_BUILDDIR)/$(PROG)_cpp - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) +madevent_cppnone_link: AVX=none +madevent_cppnone_link: cppnone + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -madevent_cuda_link: $(CUDACPP_BUILDDIR)/$(PROG)_cuda - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) +madevent_cppavx2_link: AVX=avx2 +madevent_cppavx2_link: cppavx2 + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512y_link: AVX=512y +madevent_cpp512y_link: cppavx512y + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512z_link: AVX=512z +madevent_cpp512z_link: cppavx512z + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -# Building $(PROG)_cpp also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (improved patch for cpp-only builds #503) -$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o $(CUDACPP_BUILDDIR)/.cudacpplibs - $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CXXLIB) $(LIBFLAGSRPATH) $(LDFLAGS) - if [ -f $(LIBDIR)/$(CUDACPP_BUILDDIR)/lib$(CUDACPP_CULIB).* ]; then $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROCESS) $(DSIG_cudacpp) 
auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CULIB) $(LIBFLAGSRPATH) $(LDFLAGS); fi +madevent_cuda_link: AVX=cuda +madevent_cuda_link: cuda + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) -$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(CUDACPP_BUILDDIR)/$(PROG)_cpp +$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(LIBS) $(CUDACPP_CXXLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) + +$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(LIBS) $(CUDACPP_CULIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) counters.o: counters.cc timer.h - $(CXX) $(CXXFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ ompnumthreads.o: ompnumthreads.cc ompnumthreads.h - $(CXX) -I. $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) $(OMPFLAGS) @@ -174,27 +169,14 @@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) gensym: $(SYMMETRY) configs.inc $(LIBS) $(FC) -o gensym $(SYMMETRY) -L$(LIBDIR) $(LINKLIBS) $(LDFLAGS) -###ifeq (,$(wildcard fbridge.inc)) # Pointless: fbridge.inc always exists as this is the cudacpp-modified makefile! 
-###$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat -### cd ../../Source/MODEL; make -### -###$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat -### cd ../../Source; make -### -###$(LIBDIR)libpdf.$(libext): -### cd ../../Source/PDF; make -### -###$(LIBDIR)libgammaUPC.$(libext): -### cd ../../Source/PDF/gammaUPC; make -###endif # Add source so that the compiler finds the DiscreteSampler module. $(MATRIX): %.o: %.f - $(FC) $(FFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC -%.o: %.f - $(FC) $(FFLAGS) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC + $(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC +%.o $(CUDACPP_BUILDDIR)/%.o: %.f + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -I../../Source/ -I../../Source/PDF/gammaUPC -c $< -o $@ %_cudacpp.o: %.f - $(FC) $(FFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ # Dependencies @@ -215,60 +197,42 @@ unwgt.o: genps.inc nexternal.inc symswap.inc cluster.inc run.inc message.inc \ initcluster.o: message.inc # Extra dependencies on discretesampler.mod +../../Source/discretesampler.mod: ../../Source/DiscreteSampler.f -auto_dsig.o: .libs -driver.o: .libs -driver_cudacpp.o: .libs -$(MATRIX): .libs -genps.o: .libs +auto_dsig.o: ../../Source/discretesampler.mod +driver.o: ../../Source/discretesampler.mod +driver_cudacpp.o: ../../Source/discretesampler.mod +$(MATRIX): ../../Source/discretesampler.mod +genps.o: ../../Source/discretesampler.mod # Cudacpp avxall targets -UNAME_P := $(shell uname -p) ifeq ($(UNAME_P),ppc64le) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else ifeq ($(UNAME_P),arm) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else -avxall: avxnone avxsse4 avxavx2 avx512y avx512z +cppall: cppnative cppnone cppsse4 cppavx2 cppavx512y cppavx512z endif -avxnone: $(PROG)_fortran $(DSIG_cudacpp) - @echo 
- $(MAKE) USEBUILDDIR=1 AVX=none - -avxsse4: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 - -avxavx2: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 - -avx512y: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y - -avx512z: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z - -###endif - -# Clean (NB: 'make clean' in Source calls 'make clean' in all P*) +ALL: cppall cuda -clean: # Clean builds: fortran in this Pn; cudacpp executables for one AVX in this Pn - $(RM) *.o gensym $(PROG) $(PROG)_fortran $(PROG)_forhel $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(CUDACPP_BUILDDIR)/$(PROG)_cuda +# Clean all architecture-specific builds: +clean: + $(RM) *.o gensym $(PROG) $(PROG)_* + $(RM) -rf build.*/*{.o,.so,.exe,.dylib,madevent_*} + @for dir in build.*; do if [ -z "$$(ls -A $${dir})" ]; then rm -r $${dir}; else echo "Not cleaning $${dir}; not empty"; fi; done -cleanavxs: clean # Clean builds: fortran in this Pn; cudacpp for all AVX in this Pn and in src - $(MAKE) -f $(CUDACPP_MAKEFILE) cleanall - rm -f $(CUDACPP_BUILDDIR)/.cudacpplibs - rm -f .libs +cleanall: cleansrc + for PROCESS in ../P[0-9]*; do $(MAKE) -C $${PROCESS} clean; done -cleanall: # Clean builds: fortran in all P* and in Source; cudacpp for all AVX in all P* and in src - make -C ../../Source cleanall - rm -rf $(LIBDIR)libbias.$(libext) - rm -f ../../Source/*.mod ../../Source/*/*.mod +# Clean one architecture-specific build +clean%: + $(RM) -r build.$*_* -distclean: cleanall # Clean all fortran and cudacpp builds as well as the googletest installation - $(MAKE) -f $(CUDACPP_MAKEFILE) distclean +# Clean common source directories (interferes with other P*) +cleansrc: + make -C ../../Source clean + $(RM) -f $(SOURCEDIR_GUARD) ../../Source/{*.mod,.lock} ../../Source/*/*.mod + $(RM) -r $(LIBDIR)libbias.$(libext) + if [ -d ../../src ]; then $(MAKE) -C ../../src -f cudacpp_src.mk clean; fi diff --git 
a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/runTest.cc b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/runTest.cc index d4a760a71b..6c77775fb2 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/runTest.cc +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/runTest.cc @@ -243,18 +243,20 @@ struct CUDATest : public CUDA_CPU_TestBase // Use two levels of macros to force stringification at the right level // (see https://gcc.gnu.org/onlinedocs/gcc-3.0.1/cpp_3.html#SEC17 and https://stackoverflow.com/a/3419392) // Google macro is in https://github.com/google/googletest/blob/master/googletest/include/gtest/gtest-param-test.h +/* clang-format off */ #define TESTID_CPU( s ) s##_CPU #define XTESTID_CPU( s ) TESTID_CPU( s ) #define MG_INSTANTIATE_TEST_SUITE_CPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); #define TESTID_GPU( s ) s##_GPU #define XTESTID_GPU( s ) TESTID_GPU( s ) #define MG_INSTANTIATE_TEST_SUITE_GPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); +/* clang-format on */ #ifdef __CUDACC__ MG_INSTANTIATE_TEST_SUITE_GPU( XTESTID_GPU( MG_EPOCH_PROCESS_ID ), MadgraphTest ); diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/testxxx.cc b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/testxxx.cc index 3361fe5aa9..1d315f6d75 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/testxxx.cc +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/testxxx.cc @@ -40,7 +40,7 @@ namespace mg5amcCpu { std::string FPEhandlerMessage = "unknown"; int FPEhandlerIevt = -1; - inline void FPEhandler( int sig ) + inline void FPEhandler( int 
) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU): '" << FPEhandlerMessage << "' ievt=" << FPEhandlerIevt << std::endl; @@ -71,11 +71,10 @@ TEST( XTESTID( MG_EPOCH_PROCESS_ID ), testxxx ) constexpr bool testEvents = !dumpEvents; // run the test? constexpr fptype toleranceXXXs = std::is_same::value ? 1.E-15 : 1.E-5; // Constant parameters - constexpr int neppM = MemoryAccessMomenta::neppM; // AOSOA layout constexpr int np4 = CPPProcess::np4; - const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') - assert( nevt % neppM == 0 ); // nevt must be a multiple of neppM - assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV + const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') + assert( nevt % MemoryAccessMomenta::neppM == 0 ); // nevt must be a multiple of neppM + assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV // Fill in the input momenta #ifdef __CUDACC__ mg5amcGpu::PinnedHostBufferMomenta hstMomenta( nevt ); // AOSOA[npagM][npar=4][np4=4][neppM] diff --git a/epochX/cudacpp/gg_tt01g.mad/bin/internal/banner.py b/epochX/cudacpp/gg_tt01g.mad/bin/internal/banner.py index bd1517985f..b408679c2f 100755 --- a/epochX/cudacpp/gg_tt01g.mad/bin/internal/banner.py +++ b/epochX/cudacpp/gg_tt01g.mad/bin/internal/banner.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. 
# # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,7 +53,7 @@ MADEVENT = False import madgraph.various.misc as misc import madgraph.iolibs.file_writers as file_writers - import madgraph.iolibs.files as files + import madgraph.iolibs.files as files import models.check_param_card as param_card_reader from madgraph import MG5DIR, MadGraph5Error, InvalidCmd @@ -80,36 +80,36 @@ class Banner(dict): 'mgproccard': 'MGProcCard', 'mgruncard': 'MGRunCard', 'ma5card_parton' : 'MA5Card_parton', - 'ma5card_hadron' : 'MA5Card_hadron', + 'ma5card_hadron' : 'MA5Card_hadron', 'mggenerationinfo': 'MGGenerationInfo', 'mgpythiacard': 'MGPythiaCard', 'mgpgscard': 'MGPGSCard', 'mgdelphescard': 'MGDelphesCard', 'mgdelphestrigger': 'MGDelphesTrigger', 'mgshowercard': 'MGShowerCard' } - + forbid_cdata = ['initrwgt'] - + def __init__(self, banner_path=None): """ """ if isinstance(banner_path, Banner): dict.__init__(self, banner_path) self.lhe_version = banner_path.lhe_version - return + return else: dict.__init__(self) - + #Look at the version if MADEVENT: self['mgversion'] = '#%s\n' % open(pjoin(MEDIR, 'MGMEVersion.txt')).read() else: info = misc.get_pkg_info() self['mgversion'] = info['version']+'\n' - + self.lhe_version = None - + if banner_path: self.read_banner(banner_path) @@ -123,7 +123,7 @@ def __init__(self, banner_path=None): 'mgruncard':'run_card.dat', 'mgpythiacard':'pythia_card.dat', 'mgpgscard' : 'pgs_card.dat', - 'mgdelphescard':'delphes_card.dat', + 'mgdelphescard':'delphes_card.dat', 'mgdelphestrigger':'delphes_trigger.dat', 'mg5proccard':'proc_card_mg5.dat', 'mgproccard': 'proc_card.dat', @@ -137,10 +137,10 @@ def __init__(self, banner_path=None): 'mgshowercard':'shower_card.dat', 'pythia8':'pythia8_card.dat', 'ma5card_parton':'madanalysis5_parton_card.dat', - 'ma5card_hadron':'madanalysis5_hadron_card.dat', + 'ma5card_hadron':'madanalysis5_hadron_card.dat', 'run_settings':'' } - + def read_banner(self, input_path): """read a banner""" 
@@ -151,7 +151,7 @@ def read_banner(self, input_path): def split_iter(string): return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL)) input_path = split_iter(input_path) - + text = '' store = False for line in input_path: @@ -170,13 +170,13 @@ def split_iter(string): text += line else: text += '%s%s' % (line, '\n') - - #reaching end of the banner in a event file avoid to read full file + + #reaching end of the banner in a event file avoid to read full file if "
" in line: break elif "" in line: break - + def __getattribute__(self, attr): """allow auto-build for the run_card/param_card/... """ try: @@ -187,23 +187,23 @@ def __getattribute__(self, attr): return self.charge_card(attr) - + def change_lhe_version(self, version): """change the lhe version associate to the banner""" - + version = float(version) if version < 3: version = 1 elif version > 3: raise Exception("Not Supported version") self.lhe_version = version - + def get_cross(self, witherror=False): """return the cross-section of the file""" if "init" not in self: raise Exception - + text = self["init"].split('\n') cross = 0 error = 0 @@ -217,13 +217,13 @@ def get_cross(self, witherror=False): return cross else: return cross, math.sqrt(error) - + def scale_init_cross(self, ratio): """modify the init information with the associate scale""" assert "init" in self - + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -231,29 +231,29 @@ def scale_init_cross(self, ratio): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break pid = int(pid) - + line = " %+13.7e %+13.7e %+13.7e %i" % \ (ratio*float(xsec), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + def get_pdg_beam(self): """return the pdg of each beam""" - + assert "init" in self - + all_lines = self["init"].split('\n') pdg1,pdg2,_ = all_lines[0].split(None, 2) return int(pdg1), int(pdg2) - + def load_basic(self, medir): """ Load the proc_card /param_card and run_card """ - + self.add(pjoin(medir,'Cards', 'param_card.dat')) self.add(pjoin(medir,'Cards', 'run_card.dat')) if os.path.exists(pjoin(medir, 'SubProcesses', 'procdef_mg5.dat')): @@ -261,29 +261,29 @@ def load_basic(self, medir): self.add(pjoin(medir,'Cards', 'proc_card_mg5.dat')) else: self.add(pjoin(medir,'Cards', 'proc_card.dat')) - + def change_seed(self, 
seed): """Change the seed value in the banner""" # 0 = iseed p = re.compile(r'''^\s*\d+\s*=\s*iseed''', re.M) new_seed_str = " %s = iseed" % seed self['mgruncard'] = p.sub(new_seed_str, self['mgruncard']) - + def add_generation_info(self, cross, nb_event): """add info on MGGeneration""" - + text = """ # Number of Events : %s # Integrated weight (pb) : %s """ % (nb_event, cross) self['MGGenerationInfo'] = text - + ############################################################################ # SPLIT BANNER ############################################################################ def split(self, me_dir, proc_card=True): """write the banner in the Cards directory. - proc_card argument is present to avoid the overwrite of proc_card + proc_card argument is present to avoid the overwrite of proc_card information""" for tag, text in self.items(): @@ -305,37 +305,37 @@ def check_pid(self, pid2label): """special routine removing width/mass of particles not present in the model This is usefull in case of loop model card, when we want to use the non loop model.""" - + if not hasattr(self, 'param_card'): self.charge_card('slha') - + for tag in ['mass', 'decay']: block = self.param_card.get(tag) for data in block: pid = data.lhacode[0] - if pid not in list(pid2label.keys()): + if pid not in list(pid2label.keys()): block.remove((pid,)) def get_lha_strategy(self): """get the lha_strategy: how the weight have to be handle by the shower""" - + if not self["init"]: raise Exception("No init block define") - + data = self["init"].split('\n')[0].split() if len(data) != 10: misc.sprint(len(data), self['init']) raise Exception("init block has a wrong format") return int(float(data[-2])) - + def set_lha_strategy(self, value): """set the lha_strategy: how the weight have to be handle by the shower""" - + if not (-4 <= int(value) <= 4): six.reraise(Exception, "wrong value for lha_strategy", value) if not self["init"]: raise Exception("No init block define") - + all_lines = 
self["init"].split('\n') data = all_lines[0].split() if len(data) != 10: @@ -351,13 +351,13 @@ def modify_init_cross(self, cross, allow_zero=False): assert isinstance(cross, dict) # assert "all" in cross assert "init" in self - + cross = dict(cross) for key in cross.keys(): if isinstance(key, str) and key.isdigit() and int(key) not in cross: cross[int(key)] = cross[key] - - + + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -365,7 +365,7 @@ def modify_init_cross(self, cross, allow_zero=False): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break @@ -383,23 +383,23 @@ def modify_init_cross(self, cross, allow_zero=False): (float(cross[pid]), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + ############################################################################ # WRITE BANNER ############################################################################ def write(self, output_path, close_tag=True, exclude=[]): """write the banner""" - + if isinstance(output_path, str): ff = open(output_path, 'w') else: ff = output_path - + if MADEVENT: header = open(pjoin(MEDIR, 'Source', 'banner_header.txt')).read() else: header = open(pjoin(MG5DIR,'Template', 'LO', 'Source', 'banner_header.txt')).read() - + if not self.lhe_version: self.lhe_version = self.get('run_card', 'lhe_version', default=1.0) if float(self.lhe_version) < 3: @@ -412,7 +412,7 @@ def write(self, output_path, close_tag=True, exclude=[]): for tag in [t for t in self.ordered_items if t in list(self.keys())]+ \ [t for t in self.keys() if t not in self.ordered_items]: - if tag in ['init'] or tag in exclude: + if tag in ['init'] or tag in exclude: continue capitalized_tag = self.capitalized_items[tag] if tag in self.capitalized_items else tag start_data, stop_data = '', '' @@ -422,19 +422,19 @@ def write(self, 
output_path, close_tag=True, exclude=[]): stop_data = ']]>\n' out = '<%(tag)s>%(start_data)s\n%(text)s\n%(stop_data)s\n' % \ {'tag':capitalized_tag, 'text':self[tag].strip(), - 'start_data': start_data, 'stop_data':stop_data} + 'start_data': start_data, 'stop_data':stop_data} try: ff.write(out) except: ff.write(out.encode('utf-8')) - - + + if not '/header' in exclude: out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) if 'init' in self and not 'init' in exclude: text = self['init'] @@ -444,22 +444,22 @@ def write(self, output_path, close_tag=True, exclude=[]): ff.write(out) except: ff.write(out.encode('utf-8')) - + if close_tag: - out = '\n' + out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) return ff - - + + ############################################################################ # BANNER ############################################################################ def add(self, path, tag=None): """Add the content of the file to the banner""" - + if not tag: card_name = os.path.basename(path) if 'param_card' in card_name: @@ -505,33 +505,33 @@ def add_text(self, tag, text): if tag == 'param_card': tag = 'slha' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' - + self[tag.lower()] = text - - + + def charge_card(self, tag): """Build the python object associated to the card""" - + if tag in ['param_card', 'param']: tag = 'slha' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag - + if tag == 
'slha': param_card = self[tag].split('\n') self.param_card = param_card_reader.ParamCard(param_card) @@ -544,56 +544,56 @@ def charge_card(self, tag): self.proc_card = ProcCard(proc_card) return self.proc_card elif tag =='mgshowercard': - shower_content = self[tag] + shower_content = self[tag] if MADEVENT: import internal.shower_card as shower_card else: import madgraph.various.shower_card as shower_card self.shower_card = shower_card.ShowerCard(shower_content, True) - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.shower_card.testing = False return self.shower_card elif tag =='foanalyse': - analyse_content = self[tag] + analyse_content = self[tag] if MADEVENT: import internal.FO_analyse_card as FO_analyse_card else: import madgraph.various.FO_analyse_card as FO_analyse_card - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True) self.FOanalyse_card.testing = False return self.FOanalyse_card - + def get_detail(self, tag, *arg, **opt): """return a specific """ - + if tag in ['param_card', 'param']: tag = 'slha' attr_tag = 'param_card' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], '%s not recognized' % tag - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) + 
self.charge_card(attr_tag) card = getattr(self, attr_tag) if len(arg) == 0: @@ -613,7 +613,7 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 2 and tag == 'slha': try: return card[arg[0]].get(arg[1:]) @@ -621,15 +621,15 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 0: return card else: raise Exception("Unknow command") - + #convenient alias get = get_detail - + def set(self, tag, *args): """modify one of the cards""" @@ -637,27 +637,27 @@ def set(self, tag, *args): tag = 'slha' attr_tag = 'param_card' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized' - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) - + self.charge_card(attr_tag) + card = getattr(self, attr_tag) if len(args) ==2: if tag == 'mg5proccard': @@ -666,20 +666,20 @@ def set(self, tag, *args): card[args[0]] = args[1] else: card[args[:-1]] = args[-1] - - + + @misc.multiple_try() def add_to_file(self, path, seed=None, out=None): """Add the banner to a file and change the associate seed in the banner""" if seed is not None: self.set("run_card", "iseed", seed) - + if not out: path_out = "%s.tmp" % path else: path_out = out - + ff = self.write(path_out, close_tag=False, exclude=['MGGenerationInfo', '/header', 'init']) ff.write("## END BANNER##\n") @@ -698,44 +698,44 @@ def add_to_file(self, path, seed=None, out=None): files.mv(path_out, path) - + def 
split_banner(banner_path, me_dir, proc_card=True): """a simple way to split a banner""" - + banner = Banner(banner_path) banner.split(me_dir, proc_card) - + def recover_banner(results_object, level, run=None, tag=None): """as input we receive a gen_crossxhtml.AllResults object. This define the current banner and load it """ - + if not run: - try: - _run = results_object.current['run_name'] - _tag = results_object.current['tag'] + try: + _run = results_object.current['run_name'] + _tag = results_object.current['tag'] except Exception: return Banner() else: _run = run if not tag: - try: - _tag = results_object[run].tags[-1] + try: + _tag = results_object[run].tags[-1] except Exception as error: if os.path.exists( pjoin(results_object.path,'Events','%s_banner.txt' % (run))): tag = None else: - return Banner() + return Banner() else: _tag = tag - - path = results_object.path - if tag: + + path = results_object.path + if tag: banner_path = pjoin(path,'Events',run,'%s_%s_banner.txt' % (run, tag)) else: banner_path = pjoin(results_object.path,'Events','%s_banner.txt' % (run)) - + if not os.path.exists(banner_path): if level != "parton" and tag != _tag: return recover_banner(results_object, level, _run, results_object[_run].tags[0]) @@ -754,12 +754,12 @@ def recover_banner(results_object, level, run=None, tag=None): return Banner(lhe.banner) # security if the banner was remove (or program canceled before created it) - return Banner() - + return Banner() + banner = Banner(banner_path) - - - + + + if level == 'pythia': if 'mgpythiacard' in banner: del banner['mgpythiacard'] @@ -768,13 +768,13 @@ def recover_banner(results_object, level, run=None, tag=None): if tag in banner: del banner[tag] return banner - + class InvalidRunCard(InvalidCmd): pass class ProcCard(list): """Basic Proccard object""" - + history_header = \ '#************************************************************\n' + \ '#* MadGraph5_aMC@NLO *\n' + \ @@ -798,10 +798,10 @@ class ProcCard(list): '#* run as 
./bin/mg5_aMC filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - - - - + + + + def __init__(self, init=None): """ initialize a basic proc_card""" self.info = {'model': 'sm', 'generate':None, @@ -810,13 +810,13 @@ def __init__(self, init=None): if init: self.read(init) - + def read(self, init): """read the proc_card and save the information""" - + if isinstance(init, str): #path to file init = open(init, 'r') - + store_line = '' for line in init: line = line.rstrip() @@ -828,28 +828,28 @@ def read(self, init): store_line = "" if store_line: raise Exception("WRONG CARD FORMAT") - - + + def move_to_last(self, cmd): """move an element to the last history.""" for line in self[:]: if line.startswith(cmd): self.remove(line) list.append(self, line) - + def append(self, line): """"add a line in the proc_card perform automatically cleaning""" - + line = line.strip() cmds = line.split() if len(cmds) == 0: return - + list.append(self, line) - + # command type: cmd = cmds[0] - + if cmd == 'output': # Remove previous outputs from history self.clean(allow_for_removal = ['output'], keep_switch=True, @@ -875,7 +875,7 @@ def append(self, line): elif cmds[1] == 'proc_v4': #full cleaning self[:] = [] - + def clean(self, to_keep=['set','add','load'], remove_bef_last=None, @@ -884,13 +884,13 @@ def clean(self, to_keep=['set','add','load'], keep_switch=False): """Remove command in arguments from history. All command before the last occurrence of 'remove_bef_last' - (including it) will be removed (but if another options tells the opposite). + (including it) will be removed (but if another options tells the opposite). 'to_keep' is a set of line to always keep. - 'to_remove' is a set of line to always remove (don't care about remove_bef_ + 'to_remove' is a set of line to always remove (don't care about remove_bef_ status but keep_switch acts.). 
- if 'allow_for_removal' is define only the command in that list can be + if 'allow_for_removal' is define only the command in that list can be remove of the history for older command that remove_bef_lb1. all parameter - present in to_remove are always remove even if they are not part of this + present in to_remove are always remove even if they are not part of this list. keep_switch force to keep the statement remove_bef_??? which changes starts the removal mode. @@ -900,8 +900,8 @@ def clean(self, to_keep=['set','add','load'], if __debug__ and allow_for_removal: for arg in to_keep: assert arg not in allow_for_removal - - + + nline = -1 removal = False #looping backward @@ -912,7 +912,7 @@ def clean(self, to_keep=['set','add','load'], if not removal and remove_bef_last: if self[nline].startswith(remove_bef_last): removal = True - switch = True + switch = True # if this is the switch and is protected pass to the next element if switch and keep_switch: @@ -923,12 +923,12 @@ def clean(self, to_keep=['set','add','load'], if any([self[nline].startswith(arg) for arg in to_remove]): self.pop(nline) continue - + # Only if removal mode is active! 
if removal: if allow_for_removal: # Only a subset of command can be removed - if any([self[nline].startswith(arg) + if any([self[nline].startswith(arg) for arg in allow_for_removal]): self.pop(nline) continue @@ -936,10 +936,10 @@ def clean(self, to_keep=['set','add','load'], # All command have to be remove but protected self.pop(nline) continue - + # update the counter to pass to the next element nline -= 1 - + def get(self, tag, default=None): if isinstance(tag, int): list.__getattr__(self, tag) @@ -954,32 +954,32 @@ def get(self, tag, default=None): except ValueError: name, content = line[7:].split(None,1) out.append((name, content)) - return out + return out else: return self.info[tag] - + def write(self, path): """write the proc_card to a given path""" - + fsock = open(path, 'w') fsock.write(self.history_header) for line in self: while len(line) > 70: - sub, line = line[:70]+"\\" , line[70:] + sub, line = line[:70]+"\\" , line[70:] fsock.write(sub+"\n") else: fsock.write(line+"\n") - -class InvalidCardEdition(InvalidCmd): pass - + +class InvalidCardEdition(InvalidCmd): pass + class ConfigFile(dict): """ a class for storing/dealing with input file. - """ + """ def __init__(self, finput=None, **opt): """initialize a new instance. input can be an instance of MadLoopParam, - a file, a path to a file, or simply Nothing""" - + a file, a path to a file, or simply Nothing""" + if isinstance(finput, self.__class__): dict.__init__(self) for key in finput.__dict__: @@ -989,7 +989,7 @@ def __init__(self, finput=None, **opt): return else: dict.__init__(self) - + # Initialize it with all the default value self.user_set = set() self.auto_set = set() @@ -1000,15 +1000,15 @@ def __init__(self, finput=None, **opt): self.comments = {} # comment associated to parameters. can be display via help message # store the valid options for a given parameter. 
self.allowed_value = {} - + self.default_setup() self.plugin_input(finput) - + # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, **opt) - + @@ -1028,7 +1028,7 @@ def __add__(self, other): base = self.__class__(self) #base = copy.copy(self) base.update((key.lower(),value) for key, value in other.items()) - + return base def __radd__(self, other): @@ -1036,26 +1036,26 @@ def __radd__(self, other): new = copy.copy(other) new.update((key, value) for key, value in self.items()) return new - + def __contains__(self, key): return dict.__contains__(self, key.lower()) def __iter__(self): - + for name in super(ConfigFile, self).__iter__(): yield self.lower_to_case[name.lower()] - - + + #iter = super(ConfigFile, self).__iter__() #misc.sprint(iter) #return (self.lower_to_case[name] for name in iter) - + def keys(self): return [name for name in self] - + def items(self): return [(name,self[name]) for name in self] - + @staticmethod def warn(text, level, raiseerror=False): """convenient proxy to raiseerror/print warning""" @@ -1071,11 +1071,11 @@ def warn(text, level, raiseerror=False): log = lambda t: logger.log(level, t) elif level: log = level - + return log(text) def post_set(self, name, value, change_userdefine, raiseerror): - + if value is None: value = self[name] @@ -1087,25 +1087,25 @@ def post_set(self, name, value, change_userdefine, raiseerror): return getattr(self, 'post_set_%s' % name)(value, change_userdefine, raiseerror) else: raise - + def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): """set the attribute and set correctly the type if the value is a string. change_userdefine on True if we have to add the parameter in user_set """ - + if not len(self): #Should never happen but when deepcopy/pickle self.__init__() - + name = name.strip() - lower_name = name.lower() - + lower_name = name.lower() + # 0. 
check if this parameter is a system only one if change_userdefine and lower_name in self.system_only: text='%s is a private entry which can not be modify by the user. Keep value at %s' % (name,self[name]) self.warn(text, 'critical', raiseerror) return - + #1. check if the parameter is set to auto -> pass it to special if lower_name in self: targettype = type(dict.__getitem__(self, lower_name)) @@ -1115,22 +1115,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): self.user_set.remove(lower_name) #keep old value. self.post_set(lower_name, 'auto', change_userdefine, raiseerror) - return + return elif lower_name in self.auto_set: self.auto_set.remove(lower_name) - + # 2. Find the type of the attribute that we want if lower_name in self.list_parameter: targettype = self.list_parameter[lower_name] - - - + + + if isinstance(value, str): # split for each comma/space value = value.strip() if value.startswith('[') and value.endswith(']'): value = value[1:-1] - #do not perform split within a " or ' block + #do not perform split within a " or ' block data = re.split(r"((? bad input dropped.append(val) - + if not new_values: text= "value '%s' for entry '%s' is not valid. Preserving previous value: '%s'.\n" \ % (value, name, self[lower_name]) text += "allowed values are any list composed of the following entries: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) - return self.warn(text, 'warning', raiseerror) - elif dropped: + return self.warn(text, 'warning', raiseerror) + elif dropped: text = "some value for entry '%s' are not valid. 
Invalid items are: '%s'.\n" \ % (name, dropped) text += "value will be set to %s" % new_values - text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) + text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) self.warn(text, 'warning') values = new_values # make the assignment - dict.__setitem__(self, lower_name, values) + dict.__setitem__(self, lower_name, values) if change_userdefine: self.user_set.add(lower_name) #check for specific action - return self.post_set(lower_name, None, change_userdefine, raiseerror) + return self.post_set(lower_name, None, change_userdefine, raiseerror) elif lower_name in self.dict_parameter: - targettype = self.dict_parameter[lower_name] + targettype = self.dict_parameter[lower_name] full_reset = True #check if we just update the current dict or not - + if isinstance(value, str): value = value.strip() # allowed entry: @@ -1209,7 +1209,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): # name , value => just add the entry # name value => just add the entry # {name1:value1, name2:value2} => full reset - + # split for each comma/space if value.startswith('{') and value.endswith('}'): new_value = {} @@ -1219,23 +1219,23 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): x, y = pair.split(':') x, y = x.strip(), y.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] new_value[x] = y value = new_value elif ',' in value: x,y = value.split(',') value = {x.strip():y.strip()} full_reset = False - + elif ':' in value: x,y = value.split(':') value = {x.strip():y.strip()} - full_reset = False + full_reset = False else: x,y = value.split() value = {x:y} - full_reset = False - + full_reset = False + if isinstance(value, dict): for key in value: value[key] = self.format_variable(value[key], targettype, name=name) @@ -1248,7 +1248,7 @@ def __setitem__(self, 
name, value, change_userdefine=False,raiseerror=False): if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - elif name in self: + elif name in self: targettype = type(self[name]) else: logger.debug('Trying to add argument %s in %s. ' % (name, self.__class__.__name__) +\ @@ -1256,22 +1256,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): suggestions = [k for k in self.keys() if k.startswith(name[0].lower())] if len(suggestions)>0: logger.debug("Did you mean one of the following: %s"%suggestions) - self.add_param(lower_name, self.format_variable(UnknownType(value), + self.add_param(lower_name, self.format_variable(UnknownType(value), UnknownType, name)) self.lower_to_case[lower_name] = name if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - + value = self.format_variable(value, targettype, name=name) #check that the value is allowed: if lower_name in self.allowed_value and '*' not in self.allowed_value[lower_name]: valid = False allowed = self.allowed_value[lower_name] - + # check if the current value is allowed or not (set valid to True) if value in allowed: - valid=True + valid=True elif isinstance(value, str): value = value.lower().strip() allowed = [str(v).lower() for v in allowed] @@ -1279,7 +1279,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): i = allowed.index(value) value = self.allowed_value[lower_name][i] valid=True - + if not valid: # act if not valid: text = "value '%s' for entry '%s' is not valid. 
Preserving previous value: '%s'.\n" \ @@ -1303,7 +1303,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, if __debug__: if lower_name in self: raise Exception("Duplicate case for %s in %s" % (name,self.__class__)) - + dict.__setitem__(self, lower_name, value) self.lower_to_case[lower_name] = name if isinstance(value, list): @@ -1318,12 +1318,12 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, elif isinstance(value, dict): allvalues = list(value.values()) if any([type(allvalues[0]) != type(v) for v in allvalues]): - raise Exception("All entry should have the same type") - self.dict_parameter[lower_name] = type(allvalues[0]) + raise Exception("All entry should have the same type") + self.dict_parameter[lower_name] = type(allvalues[0]) if '__type__' in value: del value['__type__'] dict.__setitem__(self, lower_name, value) - + if allowed and allowed != ['*']: self.allowed_value[lower_name] = allowed if lower_name in self.list_parameter: @@ -1333,8 +1333,8 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, assert value in allowed or '*' in allowed #elif isinstance(value, bool) and allowed != ['*']: # self.allowed_value[name] = [True, False] - - + + if system: self.system_only.add(lower_name) if comment: @@ -1342,7 +1342,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, def do_help(self, name): """return a minimal help for the parameter""" - + out = "## Information on parameter %s from class %s\n" % (name, self.__class__.__name__) if name.lower() in self: out += "## current value: %s (parameter should be of type %s)\n" % (self[name], type(self[name])) @@ -1351,7 +1351,7 @@ def do_help(self, name): else: out += "## Unknown for this class\n" if name.lower() in self.user_set: - out += "## This value is considered as being set by the user\n" + out += "## This value is considered as being set by the user\n" else: out += "## This value is considered as 
being set by the system\n" if name.lower() in self.allowed_value: @@ -1359,17 +1359,17 @@ def do_help(self, name): out += "Allowed value are: %s\n" % ','.join([str(p) for p in self.allowed_value[name.lower()]]) else: out += "Suggested value are : %s\n " % ','.join([str(p) for p in self.allowed_value[name.lower()] if p!='*']) - + logger.info(out) return out @staticmethod def guess_type_from_value(value): "try to guess the type of the string --do not use eval as it might not be safe" - + if not isinstance(value, str): return str(value.__class__.__name__) - + #use ast.literal_eval to be safe since value is untrusted # add a timeout to mitigate infinite loop, memory stack attack with misc.stdchannel_redirected(sys.stdout, os.devnull): @@ -1388,7 +1388,7 @@ def guess_type_from_value(value): @staticmethod def format_variable(value, targettype, name="unknown"): """assign the value to the attribute for the given format""" - + if isinstance(targettype, str): if targettype in ['str', 'int', 'float', 'bool']: targettype = eval(targettype) @@ -1412,7 +1412,7 @@ def format_variable(value, targettype, name="unknown"): (name, type(value), targettype, value)) else: raise InvalidCmd("Wrong input type for %s found %s and expecting %s for value %s" %\ - (name, type(value), targettype, value)) + (name, type(value), targettype, value)) else: if targettype != UnknownType: value = value.strip() @@ -1441,8 +1441,8 @@ def format_variable(value, targettype, name="unknown"): value = int(value) elif value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} - value =int(value[:-1]) * convert[value[-1]] - elif '/' in value or '*' in value: + value =int(value[:-1]) * convert[value[-1]] + elif '/' in value or '*' in value: try: split = re.split('(\*|/)',value) v = float(split[0]) @@ -1461,7 +1461,7 @@ def format_variable(value, targettype, name="unknown"): try: value = float(value.replace('d','e')) except ValueError: - raise InvalidCmd("%s can not be mapped to an 
integer" % value) + raise InvalidCmd("%s can not be mapped to an integer" % value) try: new_value = int(value) except ValueError: @@ -1471,7 +1471,7 @@ def format_variable(value, targettype, name="unknown"): value = new_value else: raise InvalidCmd("incorect input: %s need an integer for %s" % (value,name)) - + elif targettype == float: if value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} @@ -1496,33 +1496,33 @@ def format_variable(value, targettype, name="unknown"): value = v else: raise InvalidCmd("type %s is not handle by the card" % targettype) - + return value - - + + def __getitem__(self, name): - + lower_name = name.lower() if __debug__: if lower_name not in self: if lower_name in [key.lower() for key in self] : raise Exception("Some key are not lower case %s. Invalid use of the class!"\ % [key for key in self if key.lower() != key]) - + if lower_name in self.auto_set: return 'auto' - + return dict.__getitem__(self, name.lower()) - + get = __getitem__ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): """convenient way to change attribute. changeifuserset=False means that the value is NOT change is the value is not on default. - user=True, means that the value will be marked as modified by the user - (potentially preventing future change to the value) + user=True, means that the value will be marked as modified by the user + (potentially preventing future change to the value) """ # changeifuserset=False -> we need to check if the user force a value. 
@@ -1530,8 +1530,8 @@ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): if name.lower() in self.user_set: #value modified by the user -> do nothing return - self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) - + self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) + class RivetCard(ConfigFile): @@ -1706,7 +1706,7 @@ def setRelevantParamCard(self, f_params, f_relparams): yexec_dict = {} yexec_line = exec_line + "yaxis_relvar = " + self['yaxis_relvar'] exec(yexec_line, locals(), yexec_dict) - if self['yaxis_label'] == "": + if self['yaxis_label'] == "": self['yaxis_label'] = "yaxis_relvar" f_relparams.write("{0} = {1}\n".format(self['yaxis_label'], yexec_dict['yaxis_relvar'])) else: @@ -1715,11 +1715,11 @@ def setRelevantParamCard(self, f_params, f_relparams): class ProcCharacteristic(ConfigFile): """A class to handle information which are passed from MadGraph to the madevent - interface.""" - + interface.""" + def default_setup(self): """initialize the directory to the default value""" - + self.add_param('loop_induced', False) self.add_param('has_isr', False) self.add_param('has_fsr', False) @@ -1735,16 +1735,16 @@ def default_setup(self): self.add_param('pdg_initial1', [0]) self.add_param('pdg_initial2', [0]) self.add_param('splitting_types',[], typelist=str) - self.add_param('perturbation_order', [], typelist=str) - self.add_param('limitations', [], typelist=str) - self.add_param('hel_recycling', False) + self.add_param('perturbation_order', [], typelist=str) + self.add_param('limitations', [], typelist=str) + self.add_param('hel_recycling', False) self.add_param('single_color', True) - self.add_param('nlo_mixed_expansion', True) + self.add_param('nlo_mixed_expansion', True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if 
isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1752,49 +1752,49 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: if '#' in line: line = line.split('#',1)[0] if not line: continue - + if '=' in line: key, value = line.split('=',1) self[key.strip()] = value - + def write(self, outputpath): """write the file""" template ="# Information about the process #\n" template +="#########################################\n" - + fsock = open(outputpath, 'w') fsock.write(template) - + for key, value in self.items(): fsock.write(" %s = %s \n" % (key, value)) - - fsock.close() - + + fsock.close() + class GridpackCard(ConfigFile): """an object for the GridpackCard""" - + def default_setup(self): """default value for the GridpackCard""" - + self.add_param("GridRun", True) self.add_param("gevents", 2500) self.add_param("gseed", 1) - self.add_param("ngran", -1) - + self.add_param("ngran", -1) + def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1802,7 +1802,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -1812,19 +1812,19 @@ def read(self, finput): self[line[1].strip()] = line[0].replace('\'','').strip() def write(self, output_file, template=None): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'grid_card_default.dat') else: template = pjoin(MEDIR, 'Cards', 'grid_card_default.dat') - + text = 
"" - for line in open(template,'r'): + for line in open(template,'r'): nline = line.split('#')[0] nline = nline.split('!')[0] comment = line[len(nline):] @@ -1832,19 +1832,19 @@ def write(self, output_file, template=None): if len(nline) != 2: text += line elif nline[1].strip() in self: - text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) + text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) else: logger.info('Adding missing parameter %s to current run_card (with default value)' % nline[1].strip()) - text += line - + text += line + if isinstance(output_file, str): fsock = open(output_file,'w') else: fsock = output_file - + fsock.write(text) fsock.close() - + class PY8Card(ConfigFile): """ Implements the Pythia8 card.""" @@ -1868,7 +1868,7 @@ def add_default_subruns(self, type): def default_setup(self): """ Sets up the list of available PY8 parameters.""" - + # Visible parameters # ================== self.add_param("Main:numberOfEvents", -1) @@ -1877,11 +1877,11 @@ def default_setup(self): self.add_param("JetMatching:qCut", -1.0, always_write_to_card=False) self.add_param("JetMatching:doShowerKt",False,always_write_to_card=False) # -1 means that it is automatically set. - self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) + self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) # for CKKWL merging self.add_param("Merging:TMS", -1.0, always_write_to_card=False) self.add_param("Merging:Process", '', always_write_to_card=False) - # -1 means that it is automatically set. + # -1 means that it is automatically set. self.add_param("Merging:nJetMax", -1, always_write_to_card=False) # for both merging, chose whether to also consider different merging # scale values for the extra weights related to scale and PDF variations. 
@@ -1918,10 +1918,10 @@ def default_setup(self): comment='This allows to turn on/off hadronization alltogether.') self.add_param("partonlevel:mpi", True, hidden=True, always_write_to_card=False, comment='This allows to turn on/off MPI alltogether.') - self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, + self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, always_write_to_card=False, comment='This parameter is automatically set to True by MG5aMC when doing MLM merging with PY8.') - + # for MLM merging self.add_param("JetMatching:merge", False, hidden=True, always_write_to_card=False, comment='Specifiy if we are merging sample of different multiplicity.') @@ -1931,9 +1931,9 @@ def default_setup(self): comment='Value of the merging scale below which one does not even write the HepMC event.') self.add_param("JetMatching:doVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. in SysCalc).') - self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) + self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) self.add_param("JetMatching:setMad", False, hidden=True, always_write_to_card=False, - comment='Specify one must read inputs from the MadGraph banner.') + comment='Specify one must read inputs from the MadGraph banner.') self.add_param("JetMatching:coneRadius", 1.0, hidden=True, always_write_to_card=False) self.add_param("JetMatching:nQmatch",4,hidden=True, always_write_to_card=False) # for CKKWL merging (common with UMEPS, UNLOPS) @@ -1946,7 +1946,7 @@ def default_setup(self): self.add_param("Merging:applyVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. 
in SysCalc).') self.add_param("Merging:includeWeightInXsection", True, hidden=True, always_write_to_card=False, - comment='If turned off, then the option belows forces PY8 to keep the original weight.') + comment='If turned off, then the option belows forces PY8 to keep the original weight.') self.add_param("Merging:muRen", 91.188, hidden=True, always_write_to_card=False, comment='Set renormalization scales of the 2->2 process.') self.add_param("Merging:muFacInME", 91.188, hidden=True, always_write_to_card=False, @@ -1958,7 +1958,7 @@ def default_setup(self): # To be added in subruns for CKKWL self.add_param("Merging:mayRemoveDecayProducts", False, hidden=True, always_write_to_card=False) self.add_param("Merging:doKTMerging", False, hidden=True, always_write_to_card=False) - self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) + self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) self.add_param("Merging:doPTLundMerging", False, hidden=True, always_write_to_card=False) # Special Pythia8 paremeters useful to simplify the shower. @@ -1975,33 +1975,33 @@ def default_setup(self): # Add parameters controlling the subruns execution flow. # These parameters should not be part of PY8SubRun daughter. self.add_default_subruns('parameters') - + def __init__(self, *args, **opts): - # Parameters which are not printed in the card unless they are - # 'user_set' or 'system_set' or part of the + # Parameters which are not printed in the card unless they are + # 'user_set' or 'system_set' or part of the # self.hidden_params_to_always_print set. self.hidden_param = [] self.hidden_params_to_always_write = set() self.visible_params_to_always_write = set() # List of parameters that should never be written out given the current context. self.params_to_never_write = set() - + # Parameters which have been set by the system (i.e. 
MG5 itself during # the regular course of the shower interface) self.system_set = set() - + # Add attributes controlling the subruns execution flow. # These attributes should not be part of PY8SubRun daughter. self.add_default_subruns('attributes') - - # Parameters which have been set by the + + # Parameters which have been set by the super(PY8Card, self).__init__(*args, **opts) - def add_param(self, name, value, hidden=False, always_write_to_card=True, + def add_param(self, name, value, hidden=False, always_write_to_card=True, comment=None): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. The option 'hidden' decides whether the parameter should be visible to the user. The option 'always_write_to_card' decides whether it should @@ -2017,7 +2017,7 @@ def add_param(self, name, value, hidden=False, always_write_to_card=True, self.hidden_params_to_always_write.add(name) else: if always_write_to_card: - self.visible_params_to_always_write.add(name) + self.visible_params_to_always_write.add(name) if not comment is None: if not isinstance(comment, str): raise MadGraph5Error("Option 'comment' must be a string, not"+\ @@ -2036,7 +2036,7 @@ def add_subrun(self, py8_subrun): self.subruns[py8_subrun['Main:subrun']] = py8_subrun if not 'LHEFInputs:nSubruns' in self.user_set: self['LHEFInputs:nSubruns'] = max(self.subruns.keys()) - + def userSet(self, name, value, **opts): """Set an attribute of this card, following a user_request""" self.__setitem__(name, value, change_userdefine=True, **opts) @@ -2044,10 +2044,10 @@ def userSet(self, name, value, **opts): self.system_set.remove(name.lower()) def vetoParamWriteOut(self, name): - """ Forbid the writeout of a specific parameter of this card when the + """ Forbid the writeout of a specific parameter of this card when the "write" function will be invoked.""" 
self.params_to_never_write.add(name.lower()) - + def systemSet(self, name, value, **opts): """Set an attribute of this card, independently of a specific user request and only if not already user_set.""" @@ -2058,7 +2058,7 @@ def systemSet(self, name, value, **opts): if force or name.lower() not in self.user_set: self.__setitem__(name, value, change_userdefine=False, **opts) self.system_set.add(name.lower()) - + def MadGraphSet(self, name, value, **opts): """ Sets a card attribute, but only if it is absent or not already user_set.""" @@ -2068,18 +2068,18 @@ def MadGraphSet(self, name, value, **opts): force = False if name.lower() not in self or (force or name.lower() not in self.user_set): self.__setitem__(name, value, change_userdefine=False, **opts) - self.system_set.add(name.lower()) - + self.system_set.add(name.lower()) + def defaultSet(self, name, value, **opts): self.__setitem__(name, value, change_userdefine=False, **opts) - + @staticmethod def pythia8_formatting(value, formatv=None): """format the variable into pythia8 card convention. 
The type is detected by default""" if not formatv: if isinstance(value,UnknownType): - formatv = 'unknown' + formatv = 'unknown' elif isinstance(value, bool): formatv = 'bool' elif isinstance(value, int): @@ -2095,7 +2095,7 @@ def pythia8_formatting(value, formatv=None): formatv = 'str' else: assert formatv - + if formatv == 'unknown': # No formatting then return str(value) @@ -2116,7 +2116,7 @@ def pythia8_formatting(value, formatv=None): elif formatv == 'float': return '%.10e' % float(value) elif formatv == 'shortfloat': - return '%.3f' % float(value) + return '%.3f' % float(value) elif formatv == 'str': return "%s" % value elif formatv == 'list': @@ -2124,9 +2124,9 @@ def pythia8_formatting(value, formatv=None): return ','.join([PY8Card.pythia8_formatting(arg, 'shortfloat') for arg in value]) else: return ','.join([PY8Card.pythia8_formatting(arg) for arg in value]) - - def write(self, output_file, template, read_subrun=False, + + def write(self, output_file, template, read_subrun=False, print_only_visible=False, direct_pythia_input=False, add_missing=True): """ Write the card to output_file using a specific template. 
> 'print_only_visible' specifies whether or not the hidden parameters @@ -2143,28 +2143,28 @@ def write(self, output_file, template, read_subrun=False, or p.lower() in self.user_set] # Filter against list of parameters vetoed for write-out visible_param = [p for p in visible_param if p.lower() not in self.params_to_never_write] - + # Now the hidden param which must be written out if print_only_visible: hidden_output_param = [] else: hidden_output_param = [p for p in self if p.lower() in self.hidden_param and not p.lower() in self.user_set and - (p.lower() in self.hidden_params_to_always_write or + (p.lower() in self.hidden_params_to_always_write or p.lower() in self.system_set)] # Filter against list of parameters vetoed for write-out hidden_output_param = [p for p in hidden_output_param if p not in self.params_to_never_write] - + if print_only_visible: subruns = [] else: if not read_subrun: subruns = sorted(self.subruns.keys()) - + # Store the subruns to write in a dictionary, with its ID in key # and the corresponding stringstream in value subruns_to_write = {} - + # Sort these parameters nicely so as to put together parameters # belonging to the same group (i.e. prefix before the ':' in their name). def group_params(params): @@ -2191,7 +2191,7 @@ def group_params(params): # First dump in a temporary_output (might need to have a second pass # at the very end to update 'LHEFInputs:nSubruns') output = StringIO.StringIO() - + # Setup template from which to read if isinstance(template, str): if os.path.isfile(template): @@ -2199,7 +2199,7 @@ def group_params(params): elif '\n' in template: tmpl = StringIO.StringIO(template) else: - raise Exception("File input '%s' not found." % file_input) + raise Exception("File input '%s' not found." 
% file_input) elif template is None: # Then use a dummy empty StringIO, hence skipping the reading tmpl = StringIO.StringIO() @@ -2257,8 +2257,8 @@ def group_params(params): # Remove all of its variables (so that nothing is overwritten) DummySubrun.clear() DummySubrun.write(subruns_to_write[int(value)], - tmpl, read_subrun=True, - print_only_visible=print_only_visible, + tmpl, read_subrun=True, + print_only_visible=print_only_visible, direct_pythia_input=direct_pythia_input) logger.info('Adding new unknown subrun with ID %d.'% @@ -2267,7 +2267,7 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - + # Change parameters which must be output if param in visible_param: new_value = PY8Card.pythia8_formatting(self[param]) @@ -2286,10 +2286,10 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - - # Substitute the value. + + # Substitute the value. # If it is directly the pytia input, then don't write the param if it - # is not in the list of visible_params_to_always_write and was + # is not in the list of visible_params_to_always_write and was # not user_set or system_set if ((not direct_pythia_input) or (param.lower() in self.visible_params_to_always_write) or @@ -2304,16 +2304,16 @@ def group_params(params): output.write(template%(param_entry, value_entry.replace(value,new_value))) - + # Proceed to next line last_pos = tmpl.tell() line = tmpl.readline() - + # If add_missing is False, make sure to empty the list of remaining parameters if not add_missing: visible_param = [] hidden_output_param = [] - + # Now output the missing parameters. Warn about visible ones. if len(visible_param)>0 and not template is None: output.write( @@ -2343,12 +2343,12 @@ def group_params(params): """%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else '')) for param in hidden_output_param: if param.lower() in self.comments: - comment = '\n'.join('! %s'%c for c in + comment = '\n'.join('! 
%s'%c for c in self.comments[param.lower()].split('\n')) output.write(comment+'\n') output.write('%s=%s\n'%(param,PY8Card.pythia8_formatting(self[param]))) - - # Don't close the file if we were reading a subrun, but simply write + + # Don't close the file if we were reading a subrun, but simply write # output and return now if read_subrun: output_file.write(output.getvalue()) @@ -2382,12 +2382,12 @@ def group_params(params): out.close() else: output_file.write(output.getvalue()) - + def read(self, file_input, read_subrun=False, setter='default'): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file. - The setter option choses the authority that sets potential - modified/new parameters. It can be either: + The setter option choses the authority that sets potential + modified/new parameters. It can be either: 'default' or 'user' or 'system'""" if isinstance(file_input, str): if "\n" in file_input: @@ -2423,8 +2423,8 @@ def read(self, file_input, read_subrun=False, setter='default'): raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%\ line) if '!' in value: - value,_ = value.split('!',1) - + value,_ = value.split('!',1) + # Read a subrun if detected: if param=='Main:subrun': if read_subrun: @@ -2451,7 +2451,7 @@ def read(self, file_input, read_subrun=False, setter='default'): last_pos = finput.tell() line = finput.readline() continue - + # Read parameter. The case of a parameter not defined in the card is # handled directly in ConfigFile. @@ -2478,7 +2478,7 @@ def add_default_subruns(self, type): def __init__(self, *args, **opts): """ Initialize a subrun """ - + # Force user to set it manually. 
subrunID = -1 if 'subrun_id' in opts: @@ -2489,7 +2489,7 @@ def __init__(self, *args, **opts): def default_setup(self): """Sets up the list of available PY8SubRun parameters.""" - + # Add all default PY8Card parameters super(PY8SubRun, self).default_setup() # Make sure they are all hidden @@ -2501,33 +2501,33 @@ def default_setup(self): self.add_param("Main:subrun", -1) self.add_param("Beams:LHEF", "events.lhe.gz") - + class RunBlock(object): """ Class for a series of parameter in the run_card that can be either visible or hidden. - name: allow to set in the default run_card $name to set where that + name: allow to set in the default run_card $name to set where that block need to be inserted template_on: information to include is block is active template_off: information to include is block is not active on_fields/off_fields: paramater associated to the block - can be specify but are otherwise automatically but + can be specify but are otherwise automatically but otherwise determined from the template. - + function: status(self,run_card) -> return which template need to be used check_validity(self, runcard) -> sanity check - create_default_for_process(self, run_card, proc_characteristic, - history, proc_def) + create_default_for_process(self, run_card, proc_characteristic, + history, proc_def) post_set_XXXX(card, value, change_userdefine, raiseerror) -> fct called when XXXXX is set post_set(card, value, change_userdefine, raiseerror, **opt) -> fct called when a parameter is changed - -> no access to parameter name + -> no access to parameter name -> not called if post_set_XXXX is defined """ - - + + def __init__(self, name, template_on, template_off, on_fields=False, off_fields=False): self.name = name @@ -2550,7 +2550,7 @@ def fields(self): def find_fields_from_template(template): """ return the list of fields from a template. 
checking line like %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ - + return re.findall(r"^\s*%\((.*)\)s\s*=\s*\1", template, re.M) def get_template(self, card): @@ -2565,7 +2565,7 @@ def get_unused_template(self, card): if self.status(card): return self.template_off else: - return self.template_on + return self.template_on def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -2594,20 +2594,20 @@ def manage_parameters(self, card, written, to_write): written.add(name) if name in to_write: to_write.remove(name) - + def check_validity(self, runcard): """run self consistency check here --avoid to use runcard[''] = xxx here since it can trigger post_set function""" return def create_default_for_process(self, run_card, proc_characteristic, history, proc_def): - return + return # @staticmethod # def post_set(card, value, change_userdefine, raiseerror, **opt): # """default action to run when a parameter of the block is defined. # Here we do not know which parameter is modified. if this is needed. 
# then one need to define post_set_XXXXX(card, value, change_userdefine, raiseerror) -# and then only that function is used +# and then only that function is used # """ # # if 'pdlabel' in card.user_set: @@ -2621,7 +2621,7 @@ class RunCard(ConfigFile): blocks = [] parameter_in_block = {} - allowed_lep_densities = {} + allowed_lep_densities = {} default_include_file = 'run_card.inc' default_autodef_file = 'run.inc' donewarning = [] @@ -2637,7 +2637,7 @@ def plugin_input(self, finput): curr_dir = os.path.dirname(os.path.dirname(finput.name)) elif isinstance(finput, str): curr_dir = os.path.dirname(os.path.dirname(finput)) - + if curr_dir: if os.path.exists(pjoin(curr_dir, 'bin', 'internal', 'plugin_run_card')): # expected format {} passing everything as optional argument @@ -2646,7 +2646,7 @@ def plugin_input(self, finput): continue opts = dict(eval(line)) self.add_param(**opts) - + @classmethod def fill_post_set_from_blocks(cls): """set the post_set function for any parameter defined in a run_block""" @@ -2659,8 +2659,8 @@ def fill_post_set_from_blocks(cls): elif hasattr(block, 'post_set'): setattr(cls, 'post_set_%s' % parameter, block.post_set) cls.parameter_in_block[parameter] = block - - + + def __new__(cls, finput=None, **opt): cls.fill_post_set_from_blocks() @@ -2718,9 +2718,9 @@ def __new__(cls, finput=None, **opt): return super(RunCard, cls).__new__(cls, finput, **opt) def __init__(self, *args, **opts): - + # The following parameter are updated in the defaultsetup stage. - + #parameter for which no warning should be raised if not define self.hidden_param = [] # in which include file the parameer should be written @@ -2739,11 +2739,11 @@ def __init__(self, *args, **opts): self.cuts_parameter = {} # parameter added where legacy requires an older value. 
self.system_default = {} - + self.display_block = [] # set some block to be displayed self.fct_mod = {} # {param: (fct_pointer, *argument, **opts)} - self.cut_class = {} + self.cut_class = {} self.warned=False @@ -2776,11 +2776,11 @@ def get_lepton_densities(cls): else: cls.allowed_lep_densities[identity].append(name) - def add_param(self, name, value, fortran_name=None, include=True, + def add_param(self, name, value, fortran_name=None, include=True, hidden=False, legacy=False, cut=False, system=False, sys_default=None, autodef=False, fct_mod=None, **opts): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. fortran_name: defines what is the associate name in the f77 code include: defines if we have to put the value in the include file @@ -2795,7 +2795,7 @@ def add_param(self, name, value, fortran_name=None, include=True, fct_mod: defines a function to run if the parameter is modify in the include file options of **opts: - allowed: list of valid options. '*' means anything else should be allowed. - empty list means anything possible as well. + empty list means anything possible as well. 
- comment: add comment for writing/help - typelist: type of the list if default is empty """ @@ -2823,9 +2823,9 @@ def add_param(self, name, value, fortran_name=None, include=True, self.fct_mod[name] = fct_mod def read(self, finput, consistency=True, unknown_warning=True, **opt): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -2836,7 +2836,7 @@ def read(self, finput, consistency=True, unknown_warning=True, **opt): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -2864,8 +2864,8 @@ def add_unknown_entry(self, name, value, unknow_warning): This is based on the guess_entry_fromname for the various syntax providing input. This then call add_param accordingly. - This function does not returns anything. - """ + This function does not returns anything. + """ if name == "dsqrt_q2fact1" and not self.LO: raise InvalidRunCard("Looks like you passed a LO run_card for a NLO run. Please correct") @@ -2903,7 +2903,7 @@ def add_unknown_entry(self, name, value, unknow_warning): " The type was assigned to %s. 
\n"+\ " The definition of that variable will %sbe automatically added to fortran file %s\n"+\ " The value of that variable will %sbe passed to the fortran code via fortran file %s",\ - name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, + name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, "" if opts.get('autodef', False) else "not", "" if opts.get('autodef', False) in [True,False] else opts.get('autodef'), "" if opts.get('include', True) else "not", "" if opts.get('include', True) in [True,False] else opts.get('include')) RunCard.donewarning.append(name) @@ -2923,19 +2923,19 @@ def valid_line(self, line, tmp): return False elif line.strip().startswith('%'): parameter = line[line.find('(')+1:line.find(')')] - + try: cond = self.cuts_parameter[parameter] except KeyError: return True - - + + if template_options.get(cond, default) or cond is True: return True else: - return False + return False else: - return True + return True def reset_simd(self, old_value, new_value, name, *args, **opts): @@ -2946,28 +2946,28 @@ def make_clean(self,old_value, new_value, name, dir): raise Exception('pass make clean for ', dir) def make_Ptouch(self,old_value, new_value, name, reset): - raise Exception('pass Ptouch for ', reset) - + raise Exception('pass Ptouch for ', reset) + def write(self, output_file, template=None, python_template=False, write_hidden=False, template_options=None, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" - to_write = set(self.user_set) + to_write = set(self.user_set) written = set() if not template: raise Exception if not template_options: template_options = collections.defaultdict(str) - + if python_template: text = open(template,'r').read() - text = text.split('\n') + text = text.split('\n') # remove if templating - text = [l if not l.startswith('#IF') else 
l[l.find(')# ')+2:] + text = [l if not l.startswith('#IF') else l[l.find(')# ')+2:] for l in text if self.valid_line(l, template_options)] text ='\n'.join(text) - + if python_template and not to_write: import string if self.blocks: @@ -2981,14 +2981,14 @@ def write(self, output_file, template=None, python_template=False, if not self.list_parameter: text = text % self else: - data = dict((key.lower(),value) for key, value in self.items()) + data = dict((key.lower(),value) for key, value in self.items()) for name in self.list_parameter: if self.list_parameter[name] != str: data[name] = ', '.join(str(v) for v in data[name]) else: data[name] = "['%s']" % "', '".join(str(v) for v in data[name]) text = text % data - else: + else: text = "" for line in open(template,'r'): nline = line.split('#')[0] @@ -3005,11 +3005,11 @@ def write(self, output_file, template=None, python_template=False, this_group = this_group[0] text += this_group.get_template(self) % self this_group.manage_parameters(self, written, to_write) - + elif len(nline) != 2: text += line elif nline[1].strip() in self: - + name = nline[1].strip().lower() value = self[name] if name in self.list_parameter: @@ -3026,15 +3026,15 @@ def write(self, output_file, template=None, python_template=False, else: endline = '' text += ' %s\t= %s %s%s' % (value, name, comment, endline) - written.add(name) + written.add(name) if name in to_write: to_write.remove(name) else: logger.info('Adding missing parameter %s to current %s (with default value)', (name, self.filename)) - written.add(name) - text += line + written.add(name) + text += line for b in self.blocks: if b.status(self): @@ -3057,7 +3057,7 @@ def write(self, output_file, template=None, python_template=False, else: #partial writting -> add only what is needed to_add = [] - for line in b.get_template(self).split('\n'): + for line in b.get_template(self).split('\n'): nline = line.split('#')[0] nline = nline.split('!')[0] nline = nline.split('=') @@ -3072,8 +3072,8 @@ 
def write(self, output_file, template=None, python_template=False, continue #already include before else: to_add.append(line % {nline[1].strip():value, name:value}) - written.add(name) - + written.add(name) + if name in to_write: to_write.remove(name) else: @@ -3095,13 +3095,13 @@ def write(self, output_file, template=None, python_template=False, text += '\n'.join(to_add) if to_write or write_hidden: - text+="""#********************************************************************* + text+="""#********************************************************************* # Additional hidden parameters #********************************************************************* -""" +""" if write_hidden: # - # do not write hidden parameter not hidden for this template + # do not write hidden parameter not hidden for this template # if python_template: written = written.union(set(re.findall('\%\((\w*)\)s', open(template,'r').read(), re.M))) @@ -3129,7 +3129,7 @@ def get_last_value_include(self, output_dir): if inc file does not exist we will return the current value (i.e. set has no change) """ - #remember that + #remember that # default_include_file is a class variable # self.includepath is on the form include_path : [list of param ] out = {} @@ -3165,7 +3165,7 @@ def get_value_from_include(self, path, list_of_params, output_dir): with open(pjoin(output_dir,path), 'r') as fsock: text = fsock.read() - + for name in list_of_params: misc.sprint(name, name in self.fortran_name) misc.sprint(self.fortran_name[name] if name in self.fortran_name[name] else name) @@ -3191,11 +3191,11 @@ def get_value_from_include(self, path, list_of_params, output_dir): misc.sprint(self.fortran_name) misc.sprint(text) raise Exception - return out + return out def get_default(self, name, default=None, log_level=None): - """return self[name] if exist otherwise default. log control if we + """return self[name] if exist otherwise default. 
log control if we put a warning or not if we use the default value""" lower_name = name.lower() @@ -3216,13 +3216,13 @@ def get_default(self, name, default=None, log_level=None): log_level = 20 if not default: default = dict.__getitem__(self, name.lower()) - + logger.log(log_level, '%s missed argument %s. Takes default: %s' % (self.filename, name, default)) self[name] = default return default else: - return self[name] + return self[name] def mod_inc_pdlabel(self, value): """flag pdlabel has 'dressed' if one of the special lepton PDF with beamstralung. @@ -3237,16 +3237,16 @@ def edit_dummy_fct_from_file(self, filelist, outdir): filelist is a list of input files (given by the user) containing a series of function to be placed in replacement of standard (typically dummy) functions of the code. - This use LO/NLO class attribute that defines which function name need to - be placed in which file. + This use LO/NLO class attribute that defines which function name need to + be placed in which file. First time this is used, a backup of the original file is done in order to - recover if the user remove some of those files. + recover if the user remove some of those files. The function present in the file are determined automatically via regular expression. and only that function is replaced in the associated file. 
- function in the filelist starting with user_ will also be include within the + function in the filelist starting with user_ will also be include within the dummy_fct.f file """ @@ -3269,7 +3269,7 @@ def edit_dummy_fct_from_file(self, filelist, outdir): fsock = file_writers.FortranWriter(tmp,'w') function_text = fsock.remove_routine(text, fct) fsock.close() - test = open(tmp,'r').read() + test = open(tmp,'r').read() if fct not in self.dummy_fct_file: if fct.startswith('user_'): self.dummy_fct_file[fct] = self.dummy_fct_file['user_'] @@ -3315,22 +3315,22 @@ def guess_entry_fromname(self, name, value): - vartype: type of the variable - name: name of the variable (stripped from metadata) - options: additional options for the add_param - rules: - - if name starts with str_, int_, float_, bool_, list_, dict_ then + rules: + - if name starts with str_, int_, float_, bool_, list_, dict_ then - vartype is set accordingly - name is strip accordingly - otherwise guessed from value (which is string) - if name contains min/max - vartype is set to float - options has an added {'cut':True} - - suffixes like + - suffixes like - will be removed from named - will be added in options (for add_param) as {'cut':True} see add_param documentation for the list of supported options - if include is on False set autodef to False (i.e. 
enforce it False for future change) """ - # local function + # local function def update_typelist(value, name, opts): """convert a string to a list and update opts to keep track of the type """ value = value.strip() @@ -3358,7 +3358,7 @@ def update_typelist(value, name, opts): opts[key] = val name = name.replace("<%s=%s>" %(key,val), '') - # get vartype + # get vartype # first check that name does not force it supported_type = ["str", "float", "int", "bool", "list", "dict"] if "_" in name and name.split("_")[0].lower() in supported_type: @@ -3406,13 +3406,13 @@ def f77_formatting(value, formatv=None): value = str(value).lower() else: assert formatv - + if formatv == 'bool': if str(value) in ['1','T','.true.','True']: return '.true.' else: return '.false.' - + elif formatv == 'int': try: return str(int(value)) @@ -3422,12 +3422,12 @@ def f77_formatting(value, formatv=None): return str(int(fl)) else: raise - + elif formatv == 'float': if isinstance(value, str): value = value.replace('d','e') return ('%.10e' % float(value)).replace('e','d') - + elif formatv == 'str': # Check if it is a list if value.strip().startswith('[') and value.strip().endswith(']'): @@ -3437,20 +3437,20 @@ def f77_formatting(value, formatv=None): enumerate(elements)] else: return "'%s'" % value - - + + def check_validity(self, log_level=30): """check that parameter missing in the card are set to the expected value""" for name, value in self.system_default.items(): self.set(name, value, changeifuserset=False) - + for name in self.includepath[False]: to_bypass = self.hidden_param + list(self.legacy_parameter.keys()) if name not in to_bypass: - self.get_default(name, log_level=log_level) + self.get_default(name, log_level=log_level) for name in self.legacy_parameter: if self[name] != self.legacy_parameter[name]: @@ -3458,28 +3458,28 @@ def check_validity(self, log_level=30): for block in self.blocks: block.check_validity(self) - + def update_system_parameter_for_include(self): - """update hidden 
system only parameter for the correct writtin in the + """update hidden system only parameter for the correct writtin in the include""" return - + def write_include_file(self, output_dir, output_file=None): """Write the various include file in output_dir. The entry True of self.includepath will be written in run_card.inc The entry False will not be written anywhere output_file allows testing by providing stream. - This also call the function to add variable definition for the - variable with autodef=True (handle by write_autodef function) + This also call the function to add variable definition for the + variable with autodef=True (handle by write_autodef function) """ - + # ensure that all parameter are coherent and fix those if needed self.check_validity() - + #ensusre that system only parameter are correctly set self.update_system_parameter_for_include() @@ -3490,10 +3490,10 @@ def write_include_file(self, output_dir, output_file=None): self.write_autodef(output_dir, output_file=None) # check/fix status of customised functions self.edit_dummy_fct_from_file(self["custom_fcts"], os.path.dirname(output_dir)) - + for incname in self.includepath: self.write_one_include_file(output_dir, incname, output_file) - + for name,value in value_in_old_include.items(): if value != self[name]: self.fct_mod[name][0](value, self[name], name, *self.fct_mod[name][1],**self.fct_mod[name][2]) @@ -3515,13 +3515,13 @@ def write_one_include_file(self, output_dir, incname, output_file=None): fsock = file_writers.FortranWriter(pjoin(output_dir,pathinc+'.tmp')) - for key in self.includepath[incname]: + for key in self.includepath[incname]: #define the fortran name if key in self.fortran_name: fortran_name = self.fortran_name[key] else: fortran_name = key - + if incname in self.include_as_parameter: fsock.writelines('INTEGER %s\n' % fortran_name) #get the value with warning if the user didn't set it @@ -3534,7 +3534,7 @@ def write_one_include_file(self, output_dir, incname, 
output_file=None): # in case of a list, add the length of the list as 0th # element in fortran. Only in case of integer or float # list (not for bool nor string) - targettype = self.list_parameter[key] + targettype = self.list_parameter[key] if targettype is bool: pass elif targettype is int: @@ -3550,7 +3550,7 @@ def write_one_include_file(self, output_dir, incname, output_file=None): elif isinstance(value, dict): for fortran_name, onevalue in value.items(): line = '%s = %s \n' % (fortran_name, self.f77_formatting(onevalue)) - fsock.writelines(line) + fsock.writelines(line) elif isinstance(incname,str) and 'compile' in incname: if incname in self.include_as_parameter: line = 'PARAMETER (%s=%s)' %( fortran_name, value) @@ -3585,7 +3585,7 @@ def write_autodef(self, output_dir, output_file=None): filetocheck = dict(self.definition_path) if True not in self.definition_path: filetocheck[True] = [] - + for incname in filetocheck: if incname is True: @@ -3598,7 +3598,7 @@ def write_autodef(self, output_dir, output_file=None): if output_file: fsock = output_file input = fsock.getvalue() - + else: input = open(pjoin(output_dir,pathinc),'r').read() # do not define fsock here since we might not need to overwrite it @@ -3608,7 +3608,7 @@ def write_autodef(self, output_dir, output_file=None): previous = re.findall(re_pat, input, re.M) # now check which one needed to be added (and remove those identicaly defined) to_add = [] - for key in filetocheck[incname]: + for key in filetocheck[incname]: curr_type = self[key].__class__.__name__ length = "" if curr_type in [list, "list"]: @@ -3640,10 +3640,10 @@ def write_autodef(self, output_dir, output_file=None): fsock.truncate(0) fsock.seek(0) - # remove outdated lines + # remove outdated lines lines = input.split('\n') if previous: - out = [line for line in lines if not re.search(re_pat, line, re.M) or + out = [line for line in lines if not re.search(re_pat, line, re.M) or re.search(re_pat, line, re.M).groups() not in previous] else: 
out = lines @@ -3662,7 +3662,7 @@ def write_autodef(self, output_dir, output_file=None): stop = out.index('C STOP USER COMMON BLOCK') out = out[:start]+ out[stop+1:] #add new common-block - if self.definition_path[incname]: + if self.definition_path[incname]: out.append("C START USER COMMON BLOCK") if isinstance(pathinc , str): filename = os.path.basename(pathinc).split('.',1)[0] @@ -3675,10 +3675,10 @@ def write_autodef(self, output_dir, output_file=None): filename = filename.upper() out.append(" COMMON/USER_CUSTOM_%s/%s" %(filename,','.join( self.definition_path[incname]))) out.append('C STOP USER COMMON BLOCK') - + if not output_file: fsock.writelines(out) - fsock.close() + fsock.close() else: # for iotest out = ["%s\n" %l for l in out] @@ -3702,7 +3702,7 @@ def get_idbmup(lpp): def get_banner_init_information(self): """return a dictionary with the information needed to write the first line of the block of the lhe file.""" - + output = {} output["idbmup1"] = self.get_idbmup(self['lpp1']) output["idbmup2"] = self.get_idbmup(self['lpp2']) @@ -3713,7 +3713,7 @@ def get_banner_init_information(self): output["pdfsup1"] = self.get_pdf_id(self["pdlabel"]) output["pdfsup2"] = self.get_pdf_id(self["pdlabel"]) return output - + def get_pdf_id(self, pdf): if pdf == "lhapdf": lhaid = self["lhaid"] @@ -3721,19 +3721,19 @@ def get_pdf_id(self, pdf): return lhaid[0] else: return lhaid - else: + else: try: return {'none': 0, 'iww': 0, 'eva':0, 'edff':0, 'chff':0, 'cteq6_m':10000,'cteq6_l':10041,'cteq6l1':10042, 'nn23lo':246800,'nn23lo1':247000,'nn23nlo':244800 - }[pdf] + }[pdf] except: - return 0 - + return 0 + def get_lhapdf_id(self): return self.get_pdf_id(self['pdlabel']) - def remove_all_cut(self): + def remove_all_cut(self): """remove all the cut""" for name in self.cuts_parameter: @@ -3749,7 +3749,7 @@ def remove_all_cut(self): elif 'eta' in name: self[name] = -1 else: - self[name] = 0 + self[name] = 0 
################################################################################################ ### Define various template subpart for the LO Run_card @@ -3767,11 +3767,11 @@ def remove_all_cut(self): %(nb_proton1)s = nb_proton1 # number of proton for the first beam %(nb_neutron1)s = nb_neutron1 # number of neutron for the first beam %(mass_ion1)s = mass_ion1 # mass of the heavy ion (first beam) -# Note that seting differently the two beams only work if you use +# Note that seting differently the two beams only work if you use # group_subprocess=False when generating your matrix-element %(nb_proton2)s = nb_proton2 # number of proton for the second beam %(nb_neutron2)s = nb_neutron2 # number of neutron for the second beam - %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) + %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ template_off = "# To see heavy ion options: type \"update ion_pdf\"" @@ -3834,11 +3834,11 @@ def remove_all_cut(self): # Frame for polarization ------------------------------------------------------------------------------------ template_on = \ """#********************************************************************* -# Frame where to evaluate the matrix-element (not the cut!) for polarization +# Frame where to evaluate the matrix-element (not the cut!) for polarization #********************************************************************* %(me_frame)s = me_frame ! list of particles to sum-up to define the rest-frame ! in which to evaluate the matrix-element - ! [1,2] means the partonic center of mass + ! 
[1,2] means the partonic center of mass """ template_off = "" frame_block = RunBlock('frame', template_on=template_on, template_off=template_off) @@ -3891,7 +3891,7 @@ def remove_all_cut(self): # CONTROL The extra running scale (not QCD) * # Such running is NOT include in systematics computation * #*********************************************************************** - %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale + %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale %(mue_ref_fixed)s = mue_ref_fixed ! scale to use if fixed scale mode %(mue_over_ref)s = mue_over_ref ! ratio to mur if dynamical scale """ @@ -3908,10 +3908,10 @@ def remove_all_cut(self): %(tmin_for_channel)s = tmin_for_channel ! limit the non-singular reach of --some-- channel of integration related to T-channel diagram (value between -1 and 0), -1 is no impact %(survey_splitting)s = survey_splitting ! for loop-induced control how many core are used at survey for the computation of a single iteration. %(survey_nchannel_per_job)s = survey_nchannel_per_job ! control how many Channel are integrated inside a single job on cluster/multicore - %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) + %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) +#********************************************************************* +# Compilation flag. #********************************************************************* -# Compilation flag. -#********************************************************************* %(global_flag)s = global_flag ! fortran optimization flag use for the all code. %(aloha_flag)s = aloha_flag ! fortran optimization flag for aloha function. Suggestions: '-ffast-math' %(matrix_flag)s = matrix_flag ! fortran optimization flag for matrix.f function. 
Suggestions: '-O3' @@ -3948,7 +3948,7 @@ def check_validity(self, card): if card['pdlabel'] != card['pdlabel1']: dict.__setitem__(card, 'pdlabel', card['pdlabel1']) elif card['pdlabel1'] in sum(card.allowed_lep_densities.values(),[]): - raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") + raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel2'] in sum(card.allowed_lep_densities.values(),[]): raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel1'] == 'none': @@ -3962,7 +3962,7 @@ def check_validity(self, card): dict.__setitem__(card, 'pdlabel2', card['pdlabel']) if abs(card['lpp1']) == 1 == abs(card['lpp2']) and card['pdlabel1'] != card['pdlabel2']: - raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") + raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -4028,7 +4028,7 @@ def post_set(card, value, change_userdefine, raiseerror, name='unknown', **opt): if name == 'fixed_fac_scale2' and 'fixed_fac_scale1' not in card.user_set: dict.__setitem__(card, 'fixed_fac_scale1', card['fixed_fac_scale']) if name == 'fixed_fac_scale1' and 'fixed_fac_scale2' not in card.user_set: - dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) + dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) def status(self, card): @@ -4061,32 +4061,32 @@ def status(self, card): class RunCardLO(RunCard): """an object to handle in a nice way the run_card information""" - + blocks = [heavy_ion_block, beam_pol_block, syscalc_block, ecut_block, frame_block, eva_scale_block, mlm_block, ckkw_block, psoptim_block, pdlabel_block, fixedfacscale, running_block] dummy_fct_file = {"dummy_cuts": 
pjoin("SubProcesses","dummy_fct.f"), "get_dummy_x1": pjoin("SubProcesses","dummy_fct.f"), - "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), + "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), "dummy_boostframe": pjoin("SubProcesses","dummy_fct.f"), "user_dynamical_scale": pjoin("SubProcesses","dummy_fct.f"), "bias_wgt_custom": pjoin("SubProcesses","dummy_fct.f"), "user_": pjoin("SubProcesses","dummy_fct.f") # all function starting by user will be added to that file } - + include_as_parameter = ['vector.inc'] if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_lo.dat") - + def default_setup(self): """default value for the run_card.dat""" - + self.add_param("run_tag", "tag_1", include=False) self.add_param("gridpack", False) self.add_param("time_of_flight", -1.0, include=False) - self.add_param("nevents", 10000) + self.add_param("nevents", 10000) self.add_param("iseed", 0) self.add_param("python_seed", -2, include=False, hidden=True, comment="controlling python seed [handling in particular the final unweighting].\n -1 means use default from random module.\n -2 means set to same value as iseed") self.add_param("lpp1", 1, fortran_name="lpp(1)", allowed=[-1,1,0,2,3,9,-2,-3,4,-4], @@ -4106,7 +4106,7 @@ def default_setup(self): self.add_param('nb_neutron1', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(1)", comment='For heavy ion physics nb of neutron in the ion (for both beam but if group_subprocess was False)') self.add_param('nb_neutron2', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(2)", - comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') + comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') self.add_param('mass_ion1', -1.0, hidden=True, fortran_name="mass_ion(1)", allowed=[-1,0, 0.938, 207.9766521*0.938, 0.000511, 0.105, '*'], comment='For heavy ion physics mass in GeV of the ion (of beam 1)') 
@@ -4133,11 +4133,11 @@ def default_setup(self): self.add_param("mue_over_ref", 1.0, hidden=True, comment='ratio mu_other/mu for dynamical scale') self.add_param("ievo_eva",0,hidden=True, allowed=[0,1],fortran_name="ievo_eva", comment='eva: 0 for EW pdf muf evolution by q^2; 1 for evo by pT^2') - + # Bias module options self.add_param("bias_module", 'None', include=False, hidden=True) self.add_param('bias_parameters', {'__type__':1.0}, include='BIAS/bias.inc', hidden=True) - + #matching self.add_param("scalefact", 1.0) self.add_param("ickkw", 0, allowed=[0,1], hidden=True, comment="\'0\' for standard fixed order computation.\n\'1\' for MLM merging activates alphas and pdf re-weighting according to a kt clustering of the QCD radiation.") @@ -4221,7 +4221,7 @@ def default_setup(self): self.add_param("mmaa", 0.0, cut='aa') self.add_param("mmll", 0.0, cut='ll') self.add_param("mmjjmax", -1.0, cut='jj') - self.add_param("mmbbmax", -1.0, cut='bb') + self.add_param("mmbbmax", -1.0, cut='bb') self.add_param("mmaamax", -1.0, cut='aa') self.add_param("mmllmax", -1.0, cut='ll') self.add_param("mmnl", 0.0, cut='LL') @@ -4231,9 +4231,9 @@ def default_setup(self): self.add_param("ptllmax", -1.0, cut='ll') self.add_param("xptj", 0.0, cut='jj') self.add_param("xptb", 0.0, cut='bb') - self.add_param("xpta", 0.0, cut='aa') + self.add_param("xpta", 0.0, cut='aa') self.add_param("xptl", 0.0, cut='ll') - # ordered pt jet + # ordered pt jet self.add_param("ptj1min", 0.0, cut='jj') self.add_param("ptj1max", -1.0, cut='jj') self.add_param("ptj2min", 0.0, cut='jj') @@ -4241,7 +4241,7 @@ def default_setup(self): self.add_param("ptj3min", 0.0, cut='jjj') self.add_param("ptj3max", -1.0, cut='jjj') self.add_param("ptj4min", 0.0, cut='j'*4) - self.add_param("ptj4max", -1.0, cut='j'*4) + self.add_param("ptj4max", -1.0, cut='j'*4) self.add_param("cutuse", 0, cut='jj') # ordered pt lepton self.add_param("ptl1min", 0.0, cut='l'*2) @@ -4249,7 +4249,7 @@ def default_setup(self): 
self.add_param("ptl2min", 0.0, cut='l'*2) self.add_param("ptl2max", -1.0, cut='l'*2) self.add_param("ptl3min", 0.0, cut='l'*3) - self.add_param("ptl3max", -1.0, cut='l'*3) + self.add_param("ptl3max", -1.0, cut='l'*3) self.add_param("ptl4min", 0.0, cut='l'*4) self.add_param("ptl4max", -1.0, cut='l'*4) # Ht sum of jets @@ -4257,7 +4257,7 @@ def default_setup(self): self.add_param("htjmax", -1.0, cut='j'*2) self.add_param("ihtmin", 0.0, cut='J'*2) self.add_param("ihtmax", -1.0, cut='J'*2) - self.add_param("ht2min", 0.0, cut='J'*3) + self.add_param("ht2min", 0.0, cut='J'*3) self.add_param("ht3min", 0.0, cut='J'*3) self.add_param("ht4min", 0.0, cut='J'*4) self.add_param("ht2max", -1.0, cut='J'*3) @@ -4267,7 +4267,7 @@ def default_setup(self): self.add_param("ptgmin", 0.0, cut='aj') self.add_param("r0gamma", 0.4, hidden=True) self.add_param("xn", 1.0, hidden=True) - self.add_param("epsgamma", 1.0, hidden=True) + self.add_param("epsgamma", 1.0, hidden=True) self.add_param("isoem", True, hidden=True) self.add_param("xetamin", 0.0, cut='jj') self.add_param("deltaeta", 0.0, cut='j'*2) @@ -4280,7 +4280,7 @@ def default_setup(self): self.add_param("use_syst", True) self.add_param('systematics_program', 'systematics', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics, syscalc') self.add_param('systematics_arguments', ['--mur=0.5,1,2', '--muf=0.5,1,2', '--pdf=errorset'], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. 
Look at the help of the systematics function for more details.') - + self.add_param("sys_scalefact", "0.5 1 2", include=False, hidden=True) self.add_param("sys_alpsfact", "None", include=False, hidden=True) self.add_param("sys_matchscale", "auto", include=False, hidden=True) @@ -4315,8 +4315,8 @@ def default_setup(self): self.add_param('aloha_flag', '', include=False, hidden=True, comment='global fortran compilation flag, suggestion: -ffast-math', fct_mod=(self.make_clean, ('Source/DHELAS'),{})) self.add_param('matrix_flag', '', include=False, hidden=True, comment='fortran compilation flag for the matrix-element files, suggestion -O3', - fct_mod=(self.make_Ptouch, ('matrix'),{})) - self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', + fct_mod=(self.make_Ptouch, ('matrix'),{})) + self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', fortran_name='VECSIZE_MEMMAX', fct_mod=(self.reset_simd,(),{})) # parameter allowing to define simple cut via the pdg @@ -4329,24 +4329,24 @@ def default_setup(self): self.add_param('eta_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False) - + self.add_param('pdg_cut',[0], system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], system=True) # store pt min self.add_param('ptmax4pdg',[-1.], system=True) self.add_param('Emin4pdg',[0.], system=True) # store pt min - self.add_param('Emax4pdg',[-1.], system=True) + self.add_param('Emax4pdg',[-1.], system=True) self.add_param('etamin4pdg',[0.], system=True) # store pt min - self.add_param('etamax4pdg',[-1.], system=True) + self.add_param('etamax4pdg',[-1.], system=True) self.add_param('mxxmin4pdg',[-1.], system=True) self.add_param('mxxpart_antipart', [False], system=True) - - - + + + def check_validity(self): """ """ 
- + super(RunCardLO, self).check_validity() - + #Make sure that nhel is only either 0 (i.e. no MC over hel) or #1 (MC over hel with importance sampling). In particular, it can #no longer be > 1. @@ -4357,12 +4357,12 @@ def check_validity(self): "not %s." % self['nhel']) if int(self['maxjetflavor']) > 6: raise InvalidRunCard('maxjetflavor should be lower than 5! (6 is partly supported)') - + if len(self['pdgs_for_merging_cut']) > 1000: raise InvalidRunCard("The number of elements in "+\ "'pdgs_for_merging_cut' should not exceed 1000.") - + # some cut need to be deactivated in presence of isolation if self['ptgmin'] > 0: if self['pta'] > 0: @@ -4370,18 +4370,18 @@ def check_validity(self): self['pta'] = 0.0 if self['draj'] > 0: logger.warning('draj cut discarded since photon isolation is used') - self['draj'] = 0.0 - - # special treatment for gridpack use the gseed instead of the iseed + self['draj'] = 0.0 + + # special treatment for gridpack use the gseed instead of the iseed if self['gridrun']: self['iseed'] = self['gseed'] - + #Some parameter need to be fixed when using syscalc #if self['use_syst']: # if self['scalefact'] != 1.0: # logger.warning('Since use_syst=T, changing the value of \'scalefact\' to 1') # self['scalefact'] = 1.0 - + # CKKW Treatment if self['ickkw'] > 0: if self['ickkw'] != 1: @@ -4399,7 +4399,7 @@ def check_validity(self): raise InvalidRunCard('maxjetflavor at 6 is NOT supported for matching!') if self['ickkw'] == 2: # add warning if ckkw selected but the associate parameter are empty - self.get_default('highestmult', log_level=20) + self.get_default('highestmult', log_level=20) self.get_default('issgridfile', 'issudgrid.dat', log_level=20) if self['xqcut'] > 0: if self['ickkw'] == 0: @@ -4412,13 +4412,13 @@ def check_validity(self): if self['drjl'] != 0: if 'drjl' in self.user_set: logger.warning('Since icckw>0, changing the value of \'drjl\' to 0') - self['drjl'] = 0 - if not self['auto_ptj_mjj']: + self['drjl'] = 0 + if not 
self['auto_ptj_mjj']: if self['mmjj'] > self['xqcut']: logger.warning('mmjj > xqcut (and auto_ptj_mjj = F). MMJJ set to 0') - self['mmjj'] = 0.0 - - # check validity of the pdf set + self['mmjj'] = 0.0 + + # check validity of the pdf set # note that pdlabel is automatically set to lhapdf if pdlabel1 or pdlabel2 is set to lhapdf if self['pdlabel'] == 'lhapdf': #add warning if lhaid not define @@ -4426,7 +4426,7 @@ def check_validity(self): mod = False for i in [1,2]: - lpp = 'lpp%i' %i + lpp = 'lpp%i' %i pdlabelX = 'pdlabel%i' % i if self[lpp] == 0: # nopdf if self[pdlabelX] != 'none': @@ -4459,12 +4459,12 @@ def check_validity(self): raise InvalidRunCard( "Heavy ion mode is only supported for lpp1=1/2") if self['lpp2'] not in [1,2]: if self['nb_proton2'] !=1 or self['nb_neutron2'] !=0: - raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") + raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") # check that fixed_fac_scale(1/2) is setting as expected # if lpp=2/3/4 -> default is that beam in fixed scale - # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are + # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are # check that both fixed_fac_scale1/2 are defined together # ensure that fixed_fac_scale1 and fixed_fac_scale2 are setup as needed if 'fixed_fac_scale1' in self.user_set: @@ -4475,13 +4475,13 @@ def check_validity(self): elif 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale1 are defined but not fixed_fac_scale2. The value of fixed_fac_scale2 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale2'] = self['fixed_fac_scale'] - elif self['lpp2'] !=0: + elif self['lpp2'] !=0: raise Exception('fixed_fac_scale2 not defined while fixed_fac_scale1 is. Please fix your run_card.') elif 'fixed_fac_scale2' in self.user_set: if 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale2 are defined but not fixed_fac_scale1. 
The value of fixed_fac_scale1 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale1'] = self['fixed_fac_scale'] - elif self['lpp1'] !=0: + elif self['lpp1'] !=0: raise Exception('fixed_fac_scale1 not defined while fixed_fac_scale2 is. Please fix your run_card.') else: if 'fixed_fac_scale' in self.user_set: @@ -4500,12 +4500,12 @@ def check_validity(self): logger.warning('fixed_fac_scale1 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale1']) logger.warning('fixed_fac_scale2 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale2']) - # check if lpp = + # check if lpp = if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]): for i in [1,2]: if abs(self['lpp%s' % i ]) in [3,4] and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: logger.warning("Vector boson from lepton PDF is using fixed scale value of muf [dsqrt_q2fact%s]. Looks like you kept the default value (Mz). Is this really the cut-off that you want to use?" % i) - + if abs(self['lpp%s' % i ]) == 2 and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: if self['pdlabel'] in ['edff','chff']: logger.warning("Since 3.5.0 exclusive photon-photon processes in ultraperipheral proton and nuclear collisions from gamma-UPC (arXiv:2207.03012) will ignore the factorisation scale.") @@ -4515,10 +4515,10 @@ def check_validity(self): if six.PY2 and self['hel_recycling']: self['hel_recycling'] = False - logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. + logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. In general this optimization speeds up the computation by a factor of two.""") - + # check that ebeam is bigger than the associated mass. 
for i in [1,2]: if self['lpp%s' % i ] not in [1,2]: @@ -4529,13 +4529,13 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") - elif self['ebeam%i' % i] < self['mass_ion%i' % i]: + raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + elif self['ebeam%i' % i] < self['mass_ion%i' % i]: if self['ebeam%i' %i] == 0: logger.warning("At rest ion mode set: Energy beam set to %s" % self['mass_ion%i' % i]) self.set('ebeam%i' %i, self['mass_ion%i' % i]) - - + + # check the tmin_for_channel is negative if self['tmin_for_channel'] == 0: raise InvalidRunCard('tmin_for_channel can not be set to 0.') @@ -4543,15 +4543,15 @@ def check_validity(self): logger.warning('tmin_for_channel should be negative. Will be using -%f instead' % self['tmin_for_channel']) self.set('tmin_for_channel', -self['tmin_for_channel']) - + def update_system_parameter_for_include(self): """system parameter need to be setupe""" - + # polarization self['frame_id'] = sum(2**(n) for n in self['me_frame']) - + # set the pdg_for_cut fortran parameter - pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + + pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + list(self['e_min_pdg'].keys()) +list(self['e_max_pdg'].keys()) + list(self['eta_min_pdg'].keys()) +list(self['eta_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys()) + list(self['mxx_only_part_antipart'].keys())) @@ -4559,15 +4559,15 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different pdgs are allowed for pdg specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. 
Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative pdg code') - - + + if any(pdg in pdg_to_cut for pdg in [1,2,3,4,5,21,22,11,13,15]): raise Exception("Can not use PDG related cut for light quark/b quark/lepton/gluon/photon") - + if pdg_to_cut: self['pdg_cut'] = list(pdg_to_cut) self['ptmin4pdg'] = [] @@ -4595,7 +4595,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -4605,11 +4605,11 @@ def update_system_parameter_for_include(self): self['ptmax4pdg'] = [-1.] self['Emax4pdg'] = [-1.] self['etamax4pdg'] =[-1.] - self['mxxmin4pdg'] =[0.] + self['mxxmin4pdg'] =[0.] self['mxxpart_antipart'] = [False] - - - + + + def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules process 1->N all cut set on off. 
@@ -4626,7 +4626,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if proc_characteristic['loop_induced']: self['nhel'] = 1 self['pdgs_for_merging_cut'] = proc_characteristic['colored_pdgs'] - + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() @@ -4636,7 +4636,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): # check for beam_id beam_id = set() beam_id_split = [set(), set()] - for proc in proc_def: + for proc in proc_def: for oneproc in proc: for i,leg in enumerate(oneproc['legs']): if not leg['state']: @@ -4654,20 +4654,20 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): maxjetflavor = max([4]+[abs(i) for i in beam_id if -7< i < 7]) self['maxjetflavor'] = maxjetflavor self['asrwgtflavor'] = maxjetflavor - + if any(i in beam_id for i in [1,-1,2,-2,3,-3,4,-4,5,-5,21,22]): # check for e p collision if any(id in beam_id for id in [11,-11,13,-13]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [11,-11,13,-13]): - self['lpp1'] = 0 - self['lpp2'] = 1 - self['ebeam1'] = '1k' - self['ebeam2'] = '6500' + self['lpp1'] = 0 + self['lpp2'] = 1 + self['ebeam1'] = '1k' + self['ebeam2'] = '6500' else: - self['lpp1'] = 1 - self['lpp2'] = 0 - self['ebeam1'] = '6500' + self['lpp1'] = 1 + self['lpp2'] = 0 + self['ebeam1'] = '6500' self['ebeam2'] = '1k' # UPC for p p collision @@ -4677,7 +4677,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam1'] = '6500' self['ebeam2'] = '6500' self['pdlabel'] = 'edff' - + elif any(id in beam_id for id in [11,-11,13,-13]): self['lpp1'] = 0 self['lpp2'] = 0 @@ -4688,7 +4688,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('ecut') self.display_block.append('beam_pol') - + # check for possibility of eva eva_in_b1 = any(i in beam_id_split[0] for i in [23,24,-24]) #,12,-12,14,-14]) @@ -4701,10 
+4701,10 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['nhel'] = 1 self['pdlabel'] = 'eva' self['fixed_fac_scale'] = True - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') elif eva_in_b1: - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') self['pdlabel1'] = 'eva' self['fixed_fac_scale1'] = True self['nhel'] = 1 @@ -4724,7 +4724,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['pdlabel2'] = 'eva' self['fixed_fac_scale2'] = True self['nhel'] = 1 - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') for i in beam_id_split[0]: if abs(i) == 11: self['lpp1'] = math.copysign(3,i) @@ -4740,34 +4740,34 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if any(i in beam_id for i in [22,23,24,-24,12,-12,14,-14]): self.display_block.append('eva_scale') - # automatic polarisation of the beam if neutrino beam + # automatic polarisation of the beam if neutrino beam if any(id in beam_id for id in [12,-12,14,-14,16,-16]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [12,14,16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = -100 if not all(id in [12,14,16] for id in beam_id_split[0]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1]. %s') elif any(id in beam_id_split[0] for id in [-12,-14,-16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[0]): - logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1].') + logger.warning('Issue with default beam setup of neutrino in the run_card. 
Please check it up [polbeam1].') if any(id in beam_id_split[1] for id in [12,14,16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = -100 if not all(id in [12,14,16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') elif any(id in beam_id_split[1] for id in [-12,-14,-16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') - + # Check if need matching min_particle = 99 max_particle = 0 @@ -4798,12 +4798,12 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - + break + if matching: self['ickkw'] = 1 self['xqcut'] = 30 - #self['use_syst'] = False + #self['use_syst'] = False self['drjj'] = 0 self['drjl'] = 0 self['sys_alpsfact'] = "0.5 1 2" @@ -4811,8 +4811,8 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('mlm') self.display_block.append('ckkw') self['dynamical_scale_choice'] = -1 - - + + # For interference module, the systematics are wrong. 
# automatically set use_syst=F and set systematics_program=none no_systematics = False @@ -4826,14 +4826,14 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): continue break - + if interference or no_systematics: self['use_syst'] = False self['systematics_program'] = 'none' if interference: self['dynamical_scale_choice'] = 3 self['sde_strategy'] = 2 - + # set default integration strategy # interference case is already handle above # here pick strategy 2 if only one QCD color flow @@ -4852,7 +4852,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if pure_lepton and proton_initial: self['sde_strategy'] = 1 else: - # check if multi-jet j + # check if multi-jet j is_multijet = True for proc in proc_def: if any(abs(j.get('id')) not in jet_id for j in proc[0]['legs']): @@ -4860,7 +4860,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): break if is_multijet: self['sde_strategy'] = 2 - + # if polarization is used, set the choice of the frame in the run_card # But only if polarization is used for massive particles for plist in proc_def: @@ -4870,7 +4870,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): model = proc.get('model') particle = model.get_particle(l.get('id')) if particle.get('mass').lower() != 'zero': - self.display_block.append('frame') + self.display_block.append('frame') break else: continue @@ -4894,15 +4894,15 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): proc = proc_list[0] if proc['forbidden_onsh_s_channels']: self['sde_strategy'] = 1 - + if 'fix_scale' in proc_characteristic['limitations']: self['fixed_ren_scale'] = 1 self['fixed_fac_scale'] = 1 if self['ickkw'] == 1: logger.critical("MLM matching/merging not compatible with the model! 
You need to use another method to remove the double counting!") self['ickkw'] = 0 - - # define class of particles present to hide all the cuts associated to + + # define class of particles present to hide all the cuts associated to # not present class cut_class = collections.defaultdict(int) for proc in proc_def: @@ -4925,41 +4925,41 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): one_proc_cut['L'] += 1 elif abs(pdg) in [12,14,16]: one_proc_cut['n'] += 1 - one_proc_cut['L'] += 1 + one_proc_cut['L'] += 1 elif str(oneproc.get('model').get_particle(pdg)['mass']) != 'ZERO': one_proc_cut['H'] += 1 - + for key, nb in one_proc_cut.items(): cut_class[key] = max(cut_class[key], nb) self.cut_class = dict(cut_class) self.cut_class[''] = True #avoid empty - + # If model has running functionality add the additional parameter model = proc_def[0][0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Read file input/default_run_card_lo.dat # This has to be LAST !! 
if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'run_card.dat') python_template = True else: template = pjoin(MEDIR, 'Cards', 'run_card_default.dat') python_template = False - + hid_lines = {'default':True}#collections.defaultdict(itertools.repeat(True).next) if isinstance(output_file, str): @@ -4975,9 +4975,9 @@ def write(self, output_file, template=None, python_template=False, hid_lines[k1+k2] = True super(RunCardLO, self).write(output_file, template=template, - python_template=python_template, + python_template=python_template, template_options=hid_lines, - **opt) + **opt) class InvalidMadAnalysis5Card(InvalidCmd): @@ -4986,19 +4986,19 @@ class InvalidMadAnalysis5Card(InvalidCmd): class MadAnalysis5Card(dict): """ A class to store a MadAnalysis5 card. 
Very basic since it is basically free format.""" - + _MG5aMC_escape_tag = '@MG5aMC' - + _default_hadron_inputs = ['*.hepmc', '*.hep', '*.stdhep', '*.lhco','*.root'] _default_parton_inputs = ['*.lhe'] _skip_analysis = False - + @classmethod def events_can_be_reconstructed(cls, file_path): """ Checks from the type of an event file whether it can be reconstructed or not.""" return not (file_path.endswith('.lhco') or file_path.endswith('.lhco.gz') or \ file_path.endswith('.root') or file_path.endswith('.root.gz')) - + @classmethod def empty_analysis(cls): """ A method returning the structure of an empty analysis """ @@ -5012,7 +5012,7 @@ def empty_reconstruction(cls): 'reco_output':'lhe'} def default_setup(self): - """define the default value""" + """define the default value""" self['mode'] = 'parton' self['inputs'] = [] # None is the default stdout level, it will be set automatically by MG5aMC @@ -5025,8 +5025,8 @@ def default_setup(self): # of this class and some other property could be added to this dictionary # in the future. self['analyses'] = {} - # The recasting structure contains on set of commands and one set of - # card lines. + # The recasting structure contains on set of commands and one set of + # card lines. 
self['recasting'] = {'commands':[],'card':[]} # Add the default trivial reconstruction to use an lhco input # This is just for the structure @@ -5035,7 +5035,7 @@ def default_setup(self): 'root_input': MadAnalysis5Card.empty_reconstruction()} self['reconstruction']['lhco_input']['reco_output']='lhco' - self['reconstruction']['root_input']['reco_output']='root' + self['reconstruction']['root_input']['reco_output']='root' # Specify in which order the analysis/recasting were specified self['order'] = [] @@ -5049,7 +5049,7 @@ def __init__(self, finput=None,mode=None): return else: dict.__init__(self) - + # Initialize it with all the default value self.default_setup() if not mode is None: @@ -5058,15 +5058,15 @@ def __init__(self, finput=None,mode=None): # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, mode=mode) - + def read(self, input, mode=None): """ Read an MA5 card""" - + if mode not in [None,'parton','hadron']: raise MadGraph5Error('A MadAnalysis5Card can be read online the modes'+ "'parton' or 'hadron'") card_mode = mode - + if isinstance(input, (file, StringIO.StringIO)): input_stream = input elif isinstance(input, str): @@ -5099,10 +5099,10 @@ def read(self, input, mode=None): except ValueError: option = line[len(self._MG5aMC_escape_tag):] option = option.strip() - + if option=='inputs': self['inputs'].extend([v.strip() for v in value.split(',')]) - + elif option == 'skip_analysis': self._skip_analysis = True @@ -5118,7 +5118,7 @@ def read(self, input, mode=None): except: raise InvalidMadAnalysis5Card( "MA5 output level specification '%s' is incorrect."%str(value)) - + elif option=='analysis_name': current_type = 'analyses' current_name = value @@ -5127,7 +5127,7 @@ def read(self, input, mode=None): "Analysis '%s' already defined in MadAnalysis5 card"%current_name) else: self[current_type][current_name] = MadAnalysis5Card.empty_analysis() - + elif option=='set_reconstructions': try: reconstructions = 
eval(value) @@ -5142,7 +5142,7 @@ def read(self, input, mode=None): "analysis in a MadAnalysis5 card.") self[current_type][current_name]['reconstructions']=reconstructions continue - + elif option=='reconstruction_name': current_type = 'reconstruction' current_name = value @@ -5161,7 +5161,7 @@ def read(self, input, mode=None): raise InvalidMadAnalysis5Card( "Option '%s' can only take the values 'lhe' or 'root'"%option) self['reconstruction'][current_name]['reco_output'] = value.lower() - + elif option.startswith('recasting'): current_type = 'recasting' try: @@ -5171,11 +5171,11 @@ def read(self, input, mode=None): if len(self['recasting'][current_name])>0: raise InvalidMadAnalysis5Card( "Only one recasting can be defined in MadAnalysis5 hadron card") - + else: raise InvalidMadAnalysis5Card( "Unreckognized MG5aMC instruction in MadAnalysis5 card: '%s'"%option) - + if option in ['analysis_name','reconstruction_name'] or \ option.startswith('recasting'): self['order'].append((current_type,current_name)) @@ -5209,7 +5209,7 @@ def read(self, input, mode=None): self['inputs'] = self._default_hadron_inputs else: self['inputs'] = self._default_parton_inputs - + # Make sure at least one reconstruction is specified for each hadron # level analysis and that it exists. if self['mode']=='hadron': @@ -5221,7 +5221,7 @@ def read(self, input, mode=None): analysis['reconstructions']): raise InvalidMadAnalysis5Card('A reconstructions specified in'+\ " analysis '%s' is not defined."%analysis_name) - + def write(self, output): """ Write an MA5 card.""" @@ -5232,7 +5232,7 @@ def write(self, output): else: raise MadGraph5Error('Incorrect input for the write function of'+\ ' the MadAnalysis5Card card. 
Received argument type is: %s'%str(type(output))) - + output_lines = [] if self._skip_analysis: output_lines.append('%s skip_analysis'%self._MG5aMC_escape_tag) @@ -5240,11 +5240,11 @@ def write(self, output): if not self['stdout_lvl'] is None: output_lines.append('%s stdout_lvl=%s'%(self._MG5aMC_escape_tag,self['stdout_lvl'])) for definition_type, name in self['order']: - + if definition_type=='analyses': output_lines.append('%s analysis_name = %s'%(self._MG5aMC_escape_tag,name)) output_lines.append('%s set_reconstructions = %s'%(self._MG5aMC_escape_tag, - str(self['analyses'][name]['reconstructions']))) + str(self['analyses'][name]['reconstructions']))) elif definition_type=='reconstruction': output_lines.append('%s reconstruction_name = %s'%(self._MG5aMC_escape_tag,name)) elif definition_type=='recasting': @@ -5254,23 +5254,23 @@ def write(self, output): output_lines.extend(self[definition_type][name]) elif definition_type in ['reconstruction']: output_lines.append('%s reco_output = %s'%(self._MG5aMC_escape_tag, - self[definition_type][name]['reco_output'])) + self[definition_type][name]['reco_output'])) output_lines.extend(self[definition_type][name]['commands']) elif definition_type in ['analyses']: - output_lines.extend(self[definition_type][name]['commands']) - + output_lines.extend(self[definition_type][name]['commands']) + output_stream.write('\n'.join(output_lines)) - + return - - def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, + + def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, UFO_model_path=None, run_tag=''): - """ Returns a list of tuples ('AnalysisTag',['commands']) specifying - the commands of the MadAnalysis runs required from this card. - At parton-level, the number of such commands is the number of analysis + """ Returns a list of tuples ('AnalysisTag',['commands']) specifying + the commands of the MadAnalysis runs required from this card. 
+ At parton-level, the number of such commands is the number of analysis asked for. In the future, the idea is that the entire card can be processed in one go from MA5 directly.""" - + if isinstance(inputs_arg, list): inputs = inputs_arg elif isinstance(inputs_arg, str): @@ -5278,21 +5278,21 @@ def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, else: raise MadGraph5Error("The function 'get_MA5_cmds' can only take "+\ " a string or a list for the argument 'inputs_arg'") - + if len(inputs)==0: raise MadGraph5Error("The function 'get_MA5_cmds' must have "+\ " at least one input specified'") - + if run_dir_path is None: run_dir_path = os.path.dirname(inputs_arg) - + cmds_list = [] - + UFO_load = [] # first import the UFO if provided if UFO_model_path: UFO_load.append('import %s'%UFO_model_path) - + def get_import(input, type=None): """ Generates the MA5 import commands for that event file. """ dataset_name = os.path.basename(input).split('.')[0] @@ -5304,7 +5304,7 @@ def get_import(input, type=None): if not type is None: res.append('set %s.type = %s'%(dataset_name, type)) return res - + fifo_status = {'warned_fifo':False,'fifo_used_up':False} def warn_fifo(input): if not input.endswith('.fifo'): @@ -5317,7 +5317,7 @@ def warn_fifo(input): logger.warning('Only the first MA5 analysis/reconstructions can be run on a fifo. Subsequent runs will skip fifo inputs.') fifo_status['warned_fifo'] = True return True - + # Then the event file(s) input(s) inputs_load = [] for input in inputs: @@ -5325,16 +5325,16 @@ def warn_fifo(input): if len(inputs) > 1: inputs_load.append('set main.stacking_method = superimpose') - + submit_command = 'submit %s'%submit_folder+'_%s' - + # Keep track of the reconstruction outpus in the MA5 workflow # Keys are reconstruction names and values are .lhe.gz reco file paths. 
# We put by default already the lhco/root ones present reconstruction_outputs = { - 'lhco_input':[f for f in inputs if + 'lhco_input':[f for f in inputs if f.endswith('.lhco') or f.endswith('.lhco.gz')], - 'root_input':[f for f in inputs if + 'root_input':[f for f in inputs if f.endswith('.root') or f.endswith('.root.gz')]} # If a recasting card has to be written out, chose here its path @@ -5343,7 +5343,7 @@ def warn_fifo(input): # Make sure to only run over one analysis over each fifo. for definition_type, name in self['order']: - if definition_type == 'reconstruction': + if definition_type == 'reconstruction': analysis_cmds = list(self['reconstruction'][name]['commands']) reco_outputs = [] for i_input, input in enumerate(inputs): @@ -5365,8 +5365,8 @@ def warn_fifo(input): analysis_cmds.append( submit_command%('reco_%s_%d'%(name,i_input+1))) analysis_cmds.append('remove reco_events') - - reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) + + reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) for rec_out in reco_outputs] if len(reco_outputs)>0: cmds_list.append(('_reco_%s'%name,analysis_cmds)) @@ -5386,7 +5386,7 @@ def warn_fifo(input): analysis_cmds = ['set main.mode = parton'] else: analysis_cmds = [] - analysis_cmds.extend(sum([get_import(rec_out) for + analysis_cmds.extend(sum([get_import(rec_out) for rec_out in reconstruction_outputs[reco]],[])) analysis_cmds.extend(self['analyses'][name]['commands']) analysis_cmds.append(submit_command%('%s_%s'%(name,reco))) @@ -5427,12 +5427,12 @@ def warn_fifo(input): %(mue_ref_fixed)s = mue_ref_fixed ! 
scale to use if fixed scale mode """ running_block_nlo = RunBlock('RUNNING', template_on=template_on, template_off="") - + class RunCardNLO(RunCard): """A class object for the run_card for a (aMC@)NLO pocess""" - + LO = False - + blocks = [running_block_nlo] dummy_fct_file = {"dummy_cuts": pjoin("SubProcesses","dummy_fct.f"), @@ -5443,11 +5443,11 @@ class RunCardNLO(RunCard): if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_nlo.dat") - - + + def default_setup(self): """define the default value""" - + self.add_param('run_tag', 'tag_1', include=False) self.add_param('nevents', 10000) self.add_param('req_acc', -1.0, include=False) @@ -5455,27 +5455,27 @@ def default_setup(self): self.add_param("time_of_flight", -1.0, include=False) self.add_param('event_norm', 'average') #FO parameter - self.add_param('req_acc_fo', 0.01, include=False) + self.add_param('req_acc_fo', 0.01, include=False) self.add_param('npoints_fo_grid', 5000, include=False) self.add_param('niters_fo_grid', 4, include=False) - self.add_param('npoints_fo', 10000, include=False) + self.add_param('npoints_fo', 10000, include=False) self.add_param('niters_fo', 6, include=False) #seed and collider self.add_param('iseed', 0) - self.add_param('lpp1', 1, fortran_name='lpp(1)') - self.add_param('lpp2', 1, fortran_name='lpp(2)') + self.add_param('lpp1', 1, fortran_name='lpp(1)') + self.add_param('lpp2', 1, fortran_name='lpp(2)') self.add_param('ebeam1', 6500.0, fortran_name='ebeam(1)') - self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') + self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') self.add_param('pdlabel', 'nn23nlo', allowed=['lhapdf', 'emela', 'cteq6_m','cteq6_d','cteq6_l','cteq6l1', 'nn23lo','nn23lo1','nn23nlo','ct14q00','ct14q07','ct14q14','ct14q21'] +\ - sum(self.allowed_lep_densities.values(),[]) ) + sum(self.allowed_lep_densities.values(),[]) ) self.add_param('lhaid', [244600],fortran_name='lhaPDFid') self.add_param('pdfscheme', 0) # whether to include or not 
photon-initiated processes in lepton collisions self.add_param('photons_from_lepton', True) self.add_param('lhapdfsetname', ['internal_use_only'], system=True) - # stuff for lepton collisions - # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set - # whether the current PDF set has or not beamstrahlung + # stuff for lepton collisions + # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set + # whether the current PDF set has or not beamstrahlung self.add_param('has_bstrahl', False, system=True) # renormalisation scheme of alpha self.add_param('alphascheme', 0, system=True) @@ -5486,31 +5486,31 @@ def default_setup(self): # w contribution included or not in the running of alpha self.add_param('w_run', 1, system=True) #shower and scale - self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') + self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') self.add_param('shower_scale_factor',1.0) self.add_param('mcatnlo_delta', False) self.add_param('fixed_ren_scale', False) self.add_param('fixed_fac_scale', False) self.add_param('fixed_extra_scale', True, hidden=True, system=True) # set system since running from Ellis-Sexton scale not implemented - self.add_param('mur_ref_fixed', 91.118) + self.add_param('mur_ref_fixed', 91.118) self.add_param('muf1_ref_fixed', -1.0, hidden=True) - self.add_param('muf_ref_fixed', 91.118) + self.add_param('muf_ref_fixed', 91.118) self.add_param('muf2_ref_fixed', -1.0, hidden=True) - self.add_param('mue_ref_fixed', 91.118, hidden=True) - self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', + self.add_param('mue_ref_fixed', 91.118, hidden=True) + self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', allowed = [-2,-1,0,1,2,3,10], comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n 
'3' is HT/2, '0' allows to use the user_hook definition (need to be defined via custom_fct entry) ") self.add_param('fixed_qes_scale', False, hidden=True) self.add_param('qes_ref_fixed', -1.0, hidden=True) self.add_param('mur_over_ref', 1.0) - self.add_param('muf_over_ref', 1.0) - self.add_param('muf1_over_ref', -1.0, hidden=True) + self.add_param('muf_over_ref', 1.0) + self.add_param('muf1_over_ref', -1.0, hidden=True) self.add_param('muf2_over_ref', -1.0, hidden=True) self.add_param('mue_over_ref', 1.0, hidden=True, system=True) # forbid the user to modigy due to incorrect handling of the Ellis-Sexton scale self.add_param('qes_over_ref', -1.0, hidden=True) self.add_param('reweight_scale', [True], fortran_name='lscalevar') - self.add_param('rw_rscale_down', -1.0, hidden=True) + self.add_param('rw_rscale_down', -1.0, hidden=True) self.add_param('rw_rscale_up', -1.0, hidden=True) - self.add_param('rw_fscale_down', -1.0, hidden=True) + self.add_param('rw_fscale_down', -1.0, hidden=True) self.add_param('rw_fscale_up', -1.0, hidden=True) self.add_param('rw_rscale', [1.0,2.0,0.5], fortran_name='scalevarR') self.add_param('rw_fscale', [1.0,2.0,0.5], fortran_name='scalevarF') @@ -5523,60 +5523,60 @@ def default_setup(self): #technical self.add_param('folding', [1,1,1], include=False) - + #merging self.add_param('ickkw', 0, allowed=[-1,0,3,4], comment=" - 0: No merging\n - 3: FxFx Merging : http://amcatnlo.cern.ch/FxFx_merging.htm\n - 4: UNLOPS merging (No interface within MG5aMC)\n - -1: NNLL+NLO jet-veto computation. 
See arxiv:1412.8408 [hep-ph]") self.add_param('bwcutoff', 15.0) - #cuts + #cuts self.add_param('jetalgo', 1.0) - self.add_param('jetradius', 0.7) + self.add_param('jetradius', 0.7) self.add_param('ptj', 10.0 , cut=True) - self.add_param('etaj', -1.0, cut=True) - self.add_param('gamma_is_j', True) + self.add_param('etaj', -1.0, cut=True) + self.add_param('gamma_is_j', True) self.add_param('ptl', 0.0, cut=True) - self.add_param('etal', -1.0, cut=True) + self.add_param('etal', -1.0, cut=True) self.add_param('drll', 0.0, cut=True) - self.add_param('drll_sf', 0.0, cut=True) + self.add_param('drll_sf', 0.0, cut=True) self.add_param('mll', 0.0, cut=True) - self.add_param('mll_sf', 30.0, cut=True) - self.add_param('rphreco', 0.1) - self.add_param('etaphreco', -1.0) - self.add_param('lepphreco', True) - self.add_param('quarkphreco', True) + self.add_param('mll_sf', 30.0, cut=True) + self.add_param('rphreco', 0.1) + self.add_param('etaphreco', -1.0) + self.add_param('lepphreco', True) + self.add_param('quarkphreco', True) self.add_param('ptgmin', 20.0, cut=True) - self.add_param('etagamma', -1.0) + self.add_param('etagamma', -1.0) self.add_param('r0gamma', 0.4) - self.add_param('xn', 1.0) + self.add_param('xn', 1.0) self.add_param('epsgamma', 1.0) - self.add_param('isoem', True) + self.add_param('isoem', True) self.add_param('maxjetflavor', 4, hidden=True) - self.add_param('pineappl', False) + self.add_param('pineappl', False) self.add_param('lhe_version', 3, hidden=True, include=False) - + # customization self.add_param("custom_fcts",[],typelist="str", include=False, comment="list of files containing function that overwritte dummy function of the code (like adding cuts/...)") #internal variable related to FO_analyse_card self.add_param('FO_LHE_weight_ratio',1e-3, hidden=True, system=True) - self.add_param('FO_LHE_postprocessing',['grouping','random'], + self.add_param('FO_LHE_postprocessing',['grouping','random'], hidden=True, system=True, include=False) - + # parameter 
allowing to define simple cut via the pdg self.add_param('pt_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('pt_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True) - + #hidden parameter that are transfer to the fortran code self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min self.add_param('ptmax4pdg',[-1.], hidden=True, system=True) self.add_param('mxxmin4pdg',[0.], hidden=True, system=True) self.add_param('mxxpart_antipart', [False], hidden=True, system=True) - + def check_validity(self): """check the validity of the various input""" - + super(RunCardNLO, self).check_validity() # for lepton-lepton collisions, ignore 'pdlabel' and 'lhaid' @@ -5588,12 +5588,12 @@ def check_validity(self): # for dressed lepton collisions, check that the lhaid is a valid one if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]) + ['emela']: raise InvalidRunCard('pdlabel %s not allowed for dressed-lepton collisions' % self['pdlabel']) - + elif self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' self['reweight_pdf']=[False] logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') - + if self['lpp1'] == 0 == self['lpp2']: if self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' @@ -5601,8 +5601,8 @@ def check_validity(self): logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') # For FxFx merging, make sure that the following parameters are set correctly: - if self['ickkw'] == 3: - # 1. Renormalization and factorization (and ellis-sexton scales) are not fixed + if self['ickkw'] == 3: + # 1. 
Renormalization and factorization (and ellis-sexton scales) are not fixed scales=['fixed_ren_scale','fixed_fac_scale','fixed_QES_scale'] for scale in scales: if self[scale]: @@ -5615,7 +5615,7 @@ def check_validity(self): self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency in FxFx merging, dynamical_scale_choice has been set to -1 (default)''' ,'$MG:BOLD') - + # 2. Use kT algorithm for jets with pseudo-code size R=1.0 jetparams=['jetradius','jetalgo'] for jetparam in jetparams: @@ -5628,8 +5628,8 @@ def check_validity(self): self["dynamical_scale_choice"] = [-1] self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency with the jet veto, the scale which will be used is ptj. dynamical_scale_choice will be set at -1.''' - ,'$MG:BOLD') - + ,'$MG:BOLD') + # For interface to PINEAPPL, need to use LHAPDF and reweighting to get scale uncertainties if self['pineappl'] and self['pdlabel'].lower() != 'lhapdf': raise InvalidRunCard('PineAPPL generation only possible with the use of LHAPDF') @@ -5661,7 +5661,7 @@ def check_validity(self): if (self['rw_fscale_down'] != -1.0 and ['rw_fscale_down'] not in self['rw_fscale']) or\ (self['rw_fscale_up'] != -1.0 and ['rw_fscale_up'] not in self['rw_fscale']): self['rw_fscale']=[1.0,self['rw_fscale_up'],self['rw_fscale_down']] - + # PDF reweighting check if any(self['reweight_pdf']): # check that we use lhapdf if reweighting is ON @@ -5672,7 +5672,7 @@ def check_validity(self): if self['pdlabel'] != "lhapdf": self['reweight_pdf']=[self['reweight_pdf'][0]] self['lhaid']=[self['lhaid'][0]] - + # make sure set have reweight_scale and dyn_scale_choice of length 1 when fixed scales: if self['fixed_ren_scale'] and self['fixed_fac_scale']: self['reweight_scale']=[self['reweight_scale'][0]] @@ -5685,7 +5685,7 @@ def check_validity(self): self['reweight_pdf']=self['reweight_pdf']*len(self['lhaid']) logger.warning("Setting 'reweight_pdf' for all 'lhaid' to %s" % self['reweight_pdf'][0]) 
if len(self['reweight_scale']) == 1 and len(self['dynamical_scale_choice']) != 1: - self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) + self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) logger.warning("Setting 'reweight_scale' for all 'dynamical_scale_choice' to %s" % self['reweight_pdf'][0]) # Check that there are no identical elements in lhaid or dynamical_scale_choice @@ -5693,7 +5693,7 @@ def check_validity(self): raise InvalidRunCard("'lhaid' has two or more identical entries. They have to be all different for the code to work correctly.") if len(self['dynamical_scale_choice']) != len(set(self['dynamical_scale_choice'])): raise InvalidRunCard("'dynamical_scale_choice' has two or more identical entries. They have to be all different for the code to work correctly.") - + # Check that lenght of lists are consistent if len(self['reweight_pdf']) != len(self['lhaid']): raise InvalidRunCard("'reweight_pdf' and 'lhaid' lists should have the same length") @@ -5730,7 +5730,7 @@ def check_validity(self): if len(self['folding']) != 3: raise InvalidRunCard("'folding' should contain exactly three integers") for ifold in self['folding']: - if ifold not in [1,2,4,8]: + if ifold not in [1,2,4,8]: raise InvalidRunCard("The three 'folding' parameters should be equal to 1, 2, 4, or 8.") # Check MC@NLO-Delta if self['mcatnlo_delta'] and not self['parton_shower'].lower() == 'pythia8': @@ -5746,11 +5746,11 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938 GeV") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + raise InvalidRunCard("Energy for beam %i lower than proton mass. 
Please fix this") def update_system_parameter_for_include(self): - + # set the pdg_for_cut fortran parameter pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys())+ list(self['mxx_only_part_antipart'].keys())) @@ -5758,12 +5758,12 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different PDGs are allowed for PDG specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative PDG codes') - - + + if any(pdg in pdg_to_cut for pdg in [21,22,11,13,15]+ list(range(self['maxjetflavor']+1))): # Note that this will double check in the fortran code raise Exception("Can not use PDG related cuts for massless SM particles/leptons") @@ -5790,7 +5790,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -5800,12 +5800,12 @@ def update_system_parameter_for_include(self): self['mxxpart_antipart'] = [False] def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', 'run_card.dat') python_template = True else: @@ -5818,7 +5818,7 @@ def 
write(self, output_file, template=None, python_template=False, **opt): def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules - e+ e- beam -> lpp:0 ebeam:500 + e+ e- beam -> lpp:0 ebeam:500 p p beam -> set maxjetflavor automatically process with tagged photons -> gamma_is_j = false process without QED splittings -> gamma_is_j = false, recombination = false @@ -5844,19 +5844,19 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam2'] = 500 else: self['lpp1'] = 0 - self['lpp2'] = 0 - + self['lpp2'] = 0 + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() # check for tagged photons tagged_particles = set() - + # If model has running functionality add the additional parameter model = proc_def[0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Check if need matching min_particle = 99 @@ -5885,7 +5885,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: idsmin = [l['id'] for l in procmin['legs']] break - + for procmax in proc_def: if len(procmax['legs']) != max_particle: continue @@ -5901,9 +5901,9 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - - if matching: + break + + if matching: self['ickkw'] = 3 self['fixed_ren_scale'] = False self["fixed_fac_scale"] = False @@ -5911,17 +5911,17 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self["jetalgo"] = 1 self["jetradius"] = 1 self["parton_shower"] = "PYTHIA8" - + # Read file input/default_run_card_nlo.dat # This has to be LAST !! if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + class MadLoopParam(ConfigFile): """ a class for storing/dealing with the file MadLoopParam.dat contains a parser to read it, facilities to write a new file,... 
""" - + _ID_reduction_tool_map = {1:'CutTools', 2:'PJFry++', 3:'IREGI', @@ -5929,10 +5929,10 @@ class MadLoopParam(ConfigFile): 5:'Samurai', 6:'Ninja', 7:'COLLIER'} - + def default_setup(self): """initialize the directory to the default value""" - + self.add_param("MLReductionLib", "6|7|1") self.add_param("IREGIMODE", 2) self.add_param("IREGIRECY", True) @@ -5954,7 +5954,7 @@ def default_setup(self): self.add_param("HelicityFilterLevel", 2) self.add_param("LoopInitStartOver", False) self.add_param("HelInitStartOver", False) - self.add_param("UseQPIntegrandForNinja", True) + self.add_param("UseQPIntegrandForNinja", True) self.add_param("UseQPIntegrandForCutTools", True) self.add_param("COLLIERMode", 1) self.add_param("COLLIERComputeUVpoles", True) @@ -5966,9 +5966,9 @@ def default_setup(self): self.add_param("COLLIERUseInternalStabilityTest",True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -5976,7 +5976,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % input) - + previous_line= '' for line in finput: if previous_line.startswith('#'): @@ -5985,20 +5985,20 @@ def read(self, finput): if len(value) and value[0] not in ['#', '!']: self.__setitem__(name, value, change_userdefine=True) previous_line = line - - + + def write(self, outputpath, template=None,commentdefault=False): - + if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', + template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', 'Cards', 'MadLoopParams.dat') else: template = pjoin(MEDIR, 'Cards', 'MadLoopParams_default.dat') fsock = open(template, 'r') template = fsock.readlines() fsock.close() - + if isinstance(outputpath, str): output = open(outputpath, 'w') else: @@ -6019,7 
+6019,7 @@ def f77format(value): return value else: raise Exception("Can not format input %s" % type(value)) - + name = '' done = set() for line in template: @@ -6034,12 +6034,12 @@ def f77format(value): elif line.startswith('#'): name = line[1:].split()[0] output.write(line) - - - - - -class eMELA_info(ConfigFile): + + + + + +class eMELA_info(ConfigFile): """ a class for eMELA (LHAPDF-like) info files """ path = '' @@ -6053,7 +6053,7 @@ def __init__(self, finput, me_dir): def read(self, finput): - if isinstance(finput, file): + if isinstance(finput, file): lines = finput.open().read().split('\n') self.path = finput.name else: @@ -6066,7 +6066,7 @@ def read(self, finput): k, v = l.split(':', 1) # ignore further occurrences of : try: self[k.strip()] = eval(v) - except (NameError, SyntaxError): + except (NameError, SyntaxError): self[k.strip()] = v def default_setup(self): @@ -6091,7 +6091,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): +"powers of alpha should be reweighted a posteriori") - logger.info('Updating variables according to %s' % self.path) + logger.info('Updating variables according to %s' % self.path) # Flavours in the running of alpha nd, nu, nl = self['eMELA_ActiveFlavoursAlpha'] self.log_and_update(banner, 'run_card', 'ndnq_run', nd) @@ -6130,8 +6130,8 @@ def update_epdf_emela_variables(self, banner, uvscheme): logger.warning('Cannot treat the following renormalisation schemes for ME and PDFs: %d, %d' \ % (uvscheme, uvscheme_pdf)) - # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref - # also check that the com energy is equal to qref, otherwise print a + # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref + # also check that the com energy is equal to qref, otherwise print a # warning if uvscheme_pdf == 1: qref = self['eMELA_AlphaQref'] @@ -6144,23 +6144,23 @@ def update_epdf_emela_variables(self, banner, uvscheme): # LL / NLL PDF (0/1) pdforder = self['eMELA_PerturbativeOrder'] - # pdfscheme = 
0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) + # pdfscheme = 0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) # 4->mixed (leptonic); 5-> nobeta (leptonic); 6->delta (leptonic) # if LL, use nobeta scheme unless LEGACYLLPDF > 0 if pdforder == 0: if 'eMELA_LEGACYLLPDF' not in self.keys() or self['eMELA_LEGACYLLPDF'] in [-1, 0]: self.log_and_update(banner, 'run_card', 'pdfscheme', 5) - elif self['eMELA_LEGACYLLPDF'] == 1: + elif self['eMELA_LEGACYLLPDF'] == 1: # mixed self.log_and_update(banner, 'run_card', 'pdfscheme', 4) - elif self['eMELA_LEGACYLLPDF'] == 2: + elif self['eMELA_LEGACYLLPDF'] == 2: # eta self.log_and_update(banner, 'run_card', 'pdfscheme', 2) - elif self['eMELA_LEGACYLLPDF'] == 3: + elif self['eMELA_LEGACYLLPDF'] == 3: # beta self.log_and_update(banner, 'run_card', 'pdfscheme', 3) elif pdforder == 1: - # for NLL, use eMELA_FactorisationSchemeInt = 0/1 + # for NLL, use eMELA_FactorisationSchemeInt = 0/1 # for delta/MSbar if self['eMELA_FactorisationSchemeInt'] == 0: # MSbar @@ -6177,7 +6177,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): - + def log_and_update(self, banner, card, par, v): """update the card parameter par to value v diff --git a/epochX/cudacpp/gg_tt01g.mad/bin/internal/gen_ximprove.py b/epochX/cudacpp/gg_tt01g.mad/bin/internal/gen_ximprove.py index 5fd170d18d..cc842aa50f 100755 --- a/epochX/cudacpp/gg_tt01g.mad/bin/internal/gen_ximprove.py +++ b/epochX/cudacpp/gg_tt01g.mad/bin/internal/gen_ximprove.py @@ -2,18 +2,18 @@ # # Copyright (c) 2014 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. 
# -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch # ################################################################################ """ A python file to replace the fortran script gen_ximprove. - This script analyses the result of the survey/ previous refine and + This script analyses the result of the survey/ previous refine and creates the jobs for the following script. """ from __future__ import division @@ -66,77 +66,77 @@ class gensym(object): """a class to call the fortran gensym executable and handle it's output in order to create the various job that are needed for the survey""" - + #convenient shortcut for the formatting of variable @ staticmethod def format_variable(*args): return bannermod.ConfigFile.format_variable(*args) - + combining_job = 2 # number of channel by ajob - splitted_grid = False + splitted_grid = False min_iterations = 3 mode= "survey" - + def __init__(self, cmd, opt=None): - + try: super(gensym, self).__init__(cmd, opt) except TypeError: pass - - # Run statistics, a dictionary of RunStatistics(), with + + # Run statistics, a dictionary of RunStatistics(), with self.run_statistics = {} - + self.cmd = cmd self.run_card = cmd.run_card self.me_dir = cmd.me_dir - - + + # dictionary to keep track of the precision when combining iteration self.cross = collections.defaultdict(int) self.abscross = collections.defaultdict(int) self.sigma = collections.defaultdict(int) self.chi2 = collections.defaultdict(int) - + self.splitted_grid = False if self.cmd.proc_characteristics['loop_induced']: nexternal = self.cmd.proc_characteristics['nexternal'] self.splitted_grid = max(2, (nexternal-2)**2) if hasattr(self.cmd, "opts") and self.cmd.opts['accuracy'] == 0.1: self.cmd.opts['accuracy'] = 0.02 - + if isinstance(cmd.cluster, cluster.MultiCore) and 
self.splitted_grid > 1: self.splitted_grid = int(cmd.cluster.nb_core**0.5) if self.splitted_grid == 1 and cmd.cluster.nb_core >1: self.splitted_grid = 2 - + #if the user defines it in the run_card: if self.run_card['survey_splitting'] != -1: self.splitted_grid = self.run_card['survey_splitting'] if self.run_card['survey_nchannel_per_job'] != 1 and 'survey_nchannel_per_job' in self.run_card.user_set: - self.combining_job = self.run_card['survey_nchannel_per_job'] + self.combining_job = self.run_card['survey_nchannel_per_job'] elif self.run_card['hard_survey'] > 1: self.combining_job = 1 - - + + self.splitted_Pdir = {} self.splitted_for_dir = lambda x,y: self.splitted_grid self.combining_job_for_Pdir = lambda x: self.combining_job self.lastoffset = {} - + done_warning_zero_coupling = False def get_helicity(self, to_submit=True, clean=True): """launch a single call to madevent to get the list of non zero helicity""" - - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc P_zero_result = [] nb_tot_proc = len(subproc) - job_list = {} - - + job_list = {} + + for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.' 
% \ (nb_proc+1,nb_tot_proc), level=None) @@ -154,7 +154,7 @@ def get_helicity(self, to_submit=True, clean=True): p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts - + (stdout, _) = p.communicate(''.encode()) stdout = stdout.decode('ascii',errors='ignore') if stdout: @@ -166,11 +166,11 @@ def get_helicity(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir, 'error')): os.remove(pjoin(self.me_dir, 'error')) continue # bypass bad process - + self.cmd.compile(['madevent_forhel'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'madevent_forhel')): - raise Exception('Error make madevent_forhel not successful') - + raise Exception('Error make madevent_forhel not successful') + if not os.path.exists(pjoin(Pdir, 'Hel')): os.mkdir(pjoin(Pdir, 'Hel')) ff = open(pjoin(Pdir, 'Hel', 'input_app.txt'),'w') @@ -180,15 +180,15 @@ def get_helicity(self, to_submit=True, clean=True): try: os.remove(pjoin(Pdir, 'Hel','results.dat')) except Exception: - pass + pass # Launch gensym - p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, + p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=pjoin(Pdir,'Hel'), shell=True) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(" ".encode()) stdout = stdout.decode('ascii',errors='ignore') if os.path.exists(pjoin(self.me_dir, 'error')): - raise Exception(pjoin(self.me_dir,'error')) + raise Exception(pjoin(self.me_dir,'error')) # note a continue is not enough here, we have in top to link # the matrixX_optim.f to matrixX_orig.f to let the code to work # after this error. 
@@ -203,7 +203,7 @@ def get_helicity(self, to_submit=True, clean=True): zero_gc = list() all_zampperhel = set() all_bad_amps_perhel = set() - + for line in stdout.splitlines(): if "=" not in line and ":" not in line: continue @@ -229,22 +229,22 @@ def get_helicity(self, to_submit=True, clean=True): "%s\n" % (' '.join(zero_gc)) +\ "This will slow down the computation. Please consider using restricted model:\n" +\ "https://answers.launchpad.net/mg5amcnlo/+faq/2312") - - + + all_good_hels = collections.defaultdict(list) for me_index, hel in all_hel: - all_good_hels[me_index].append(int(hel)) - + all_good_hels[me_index].append(int(hel)) + #print(all_hel) if self.run_card['hel_zeroamp']: all_bad_amps = collections.defaultdict(list) for me_index, amp in all_zamp: all_bad_amps[me_index].append(int(amp)) - + all_bad_amps_perhel = collections.defaultdict(list) for me_index, hel, amp in all_zampperhel: - all_bad_amps_perhel[me_index].append((int(hel),int(amp))) - + all_bad_amps_perhel[me_index].append((int(hel),int(amp))) + elif all_zamp: nb_zero = sum(int(a[1]) for a in all_zamp) if zero_gc: @@ -254,7 +254,7 @@ def get_helicity(self, to_submit=True, clean=True): else: logger.warning("The optimization detected that you have %i zero matrix-element for this SubProcess: %s.\n" % nb_zero +\ "This part can optimize if you set the flag hel_zeroamp to True in the run_card.") - + #check if we need to do something and write associate information" data = [all_hel, all_zamp, all_bad_amps_perhel] if not self.run_card['hel_zeroamp']: @@ -266,14 +266,14 @@ def get_helicity(self, to_submit=True, clean=True): old_data = open(pjoin(Pdir,'Hel','selection')).read() if old_data == data: continue - - + + with open(pjoin(Pdir,'Hel','selection'),'w') as fsock: - fsock.write(data) - - + fsock.write(data) + + for matrix_file in misc.glob('matrix*orig.f', Pdir): - + split_file = matrix_file.split('/') me_index = split_file[-1][len('matrix'):-len('_orig.f')] @@ -289,11 +289,11 @@ def 
get_helicity(self, to_submit=True, clean=True): #good_hels = sorted(list(good_hels)) good_hels = [str(x) for x in sorted(all_good_hels[me_index])] if self.run_card['hel_zeroamp']: - + bad_amps = [str(x) for x in sorted(all_bad_amps[me_index])] bad_amps_perhel = [x for x in sorted(all_bad_amps_perhel[me_index])] else: - bad_amps = [] + bad_amps = [] bad_amps_perhel = [] if __debug__: mtext = open(matrix_file).read() @@ -310,7 +310,7 @@ def get_helicity(self, to_submit=True, clean=True): recycler.set_input(matrix_file) recycler.set_output(out_file) - recycler.set_template(templ_file) + recycler.set_template(templ_file) recycler.generate_output_file() del recycler @@ -321,19 +321,19 @@ def get_helicity(self, to_submit=True, clean=True): return {}, P_zero_result - + def launch(self, to_submit=True, clean=True): """ """ if not hasattr(self, 'subproc'): - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc - + P_zero_result = [] # check the number of times where they are no phase-space - + nb_tot_proc = len(subproc) - job_list = {} + job_list = {} for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.
(previous processes already running)' % \ (nb_proc+1,nb_tot_proc), level=None) @@ -341,7 +341,7 @@ def launch(self, to_submit=True, clean=True): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) logger.info(' %s ' % subdir) - + # clean previous run if clean: for match in misc.glob('*ajob*', Pdir): @@ -349,17 +349,17 @@ def launch(self, to_submit=True, clean=True): os.remove(match) for match in misc.glob('G*', Pdir): if os.path.exists(pjoin(match,'results.dat')): - os.remove(pjoin(match, 'results.dat')) + os.remove(pjoin(match, 'results.dat')) if os.path.exists(pjoin(match, 'ftn25')): - os.remove(pjoin(match, 'ftn25')) - + os.remove(pjoin(match, 'ftn25')) + #compile gensym self.cmd.compile(['gensym'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'gensym')): - raise Exception('Error make gensym not successful') - + raise Exception('Error make gensym not successful') + # Launch gensym - p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, + p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(''.encode()) @@ -367,8 +367,8 @@ def launch(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir,'error')): files.mv(pjoin(self.me_dir,'error'), pjoin(Pdir,'ajob.no_ps.log')) P_zero_result.append(subdir) - continue - + continue + jobs = stdout.split() job_list[Pdir] = jobs try: @@ -386,8 +386,8 @@ def launch(self, to_submit=True, clean=True): continue else: if done: - raise Exception('Parsing error in gensym: %s' % stdout) - job_list[Pdir] = l.split() + raise Exception('Parsing error in gensym: %s' % stdout) + job_list[Pdir] = l.split() done = True if not done: raise Exception('Parsing error in gensym: %s' % stdout) @@ -408,16 +408,16 @@ def launch(self, to_submit=True, clean=True): if to_submit: self.submit_to_cluster(job_list) job_list = {} - + return job_list, P_zero_result - + def resubmit(self, 
min_precision=1.0, resubmit_zero=False): """collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - + job_list, P_zero_result = self.launch(to_submit=False, clean=False) - + for P , jobs in dict(job_list).items(): misc.sprint(jobs) to_resub = [] @@ -434,7 +434,7 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): elif max(one_result.xerru, one_result.xerrc)/one_result.xsec > min_precision: to_resub.append(job) else: - to_resub.append(job) + to_resub.append(job) if to_resub: for G in to_resub: try: @@ -442,19 +442,19 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): except Exception as error: misc.sprint(error) pass - misc.sprint(to_resub) + misc.sprint(to_resub) self.submit_to_cluster({P: to_resub}) - - - - - - - - - - - + + + + + + + + + + + def submit_to_cluster(self, job_list): """ """ @@ -467,7 +467,7 @@ def submit_to_cluster(self, job_list): nexternal = self.cmd.proc_characteristics['nexternal'] current = open(pjoin(path, "nexternal.inc")).read() ext = re.search(r"PARAMETER \(NEXTERNAL=(\d+)\)", current).group(1) - + if self.run_card['job_strategy'] == 2: self.splitted_grid = 2 if nexternal == int(ext): @@ -498,18 +498,18 @@ def submit_to_cluster(self, job_list): return self.submit_to_cluster_no_splitting(job_list) else: return self.submit_to_cluster_splitted(job_list) - - + + def submit_to_cluster_no_splitting(self, job_list): """submit the survey without the parralelization. 
This is the old mode which is still usefull in single core""" - - # write the template file for the parameter file + + # write the template file for the parameter file self.write_parameter(parralelization=False, Pdirs=list(job_list.keys())) - - + + # launch the job with the appropriate grouping - for Pdir, jobs in job_list.items(): + for Pdir, jobs in job_list.items(): jobs = list(jobs) i=0 while jobs: @@ -518,16 +518,16 @@ def submit_to_cluster_no_splitting(self, job_list): for _ in range(self.combining_job_for_Pdir(Pdir)): if jobs: to_submit.append(jobs.pop(0)) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=to_submit, cwd=pjoin(self.me_dir,'SubProcesses' , Pdir)) - + def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): """prepare the input_file for submitting the channel""" - + if 'SubProcesses' not in Pdir: Pdir = pjoin(self.me_dir, 'SubProcesses', Pdir) @@ -535,8 +535,8 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): self.splitted_Pdir[(Pdir, G)] = int(nb_job) - # 1. write the new input_app.txt - run_card = self.cmd.run_card + # 1. write the new input_app.txt + run_card = self.cmd.run_card options = {'event' : submit_ps, 'maxiter': 1, 'miniter': 1, @@ -545,29 +545,29 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): else run_card['nhel'], 'gridmode': -2, 'channel' : G - } - + } + Gdir = pjoin(Pdir, 'G%s' % G) - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + # 2. check that ftn25 exists. - assert os.path.exists(pjoin(Gdir, "ftn25")) - - + assert os.path.exists(pjoin(Gdir, "ftn25")) + + # 3. 
Submit the new jobs #call back function - packet = cluster.Packet((Pdir, G, step+1), + packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, (Pdir, G, step+1)) - + if step ==0: - self.lastoffset[(Pdir, G)] = 0 - - # resubmit the new jobs + self.lastoffset[(Pdir, G)] = 0 + + # resubmit the new jobs for i in range(int(nb_job)): name = "G%s_%s" % (G,i+1) self.lastoffset[(Pdir, G)] += 1 - offset = self.lastoffset[(Pdir, G)] + offset = self.lastoffset[(Pdir, G)] self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'refine_splitted.sh'), argument=[name, 'G%s'%G, offset], cwd= Pdir, @@ -575,9 +575,9 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): def submit_to_cluster_splitted(self, job_list): - """ submit the version of the survey with splitted grid creation - """ - + """ submit the version of the survey with splitted grid creation + """ + #if self.splitted_grid <= 1: # return self.submit_to_cluster_no_splitting(job_list) @@ -592,7 +592,7 @@ def submit_to_cluster_splitted(self, job_list): for job in jobs: packet = cluster.Packet((Pdir, job, 1), self.combine_iteration, (Pdir, job, 1)) - for i in range(self.splitted_for_dir(Pdir, job)): + for i in range(self.splitted_for_dir(Pdir, job)): self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[i+1, job], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), @@ -601,15 +601,15 @@ def submit_to_cluster_splitted(self, job_list): def combine_iteration(self, Pdir, G, step): grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - - # Compute the number of events used for this run. + + # Compute the number of events used for this run. nb_events = grid_calculator.target_evt Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) - + # 4. 
make the submission of the next iteration # Three cases - less than 3 iteration -> continue # - more than 3 and less than 5 -> check error @@ -627,15 +627,15 @@ def combine_iteration(self, Pdir, G, step): need_submit = False else: need_submit = True - + elif step >= self.cmd.opts['iterations']: need_submit = False elif self.cmd.opts['accuracy'] < 0: #check for luminosity raise Exception("Not Implemented") elif self.abscross[(Pdir,G)] == 0: - need_submit = False - else: + need_submit = False + else: across = self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) tot_across = self.get_current_axsec() if across == 0: @@ -646,20 +646,20 @@ def combine_iteration(self, Pdir, G, step): need_submit = True else: need_submit = False - - + + if cross: grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_events,mode=self.mode, conservative_factor=5.0) - - xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) - if float(cross)!=0.0 and float(error)!=0.0 else 8) + + xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) + if float(cross)!=0.0 and float(error)!=0.0 else 8) if need_submit: message = "%%s/G%%s is at %%%s +- %%.3g pb. 
Now submitting iteration #%s."%(xsec_format, step+1) logger.info(message%\ - (os.path.basename(Pdir), G, float(cross), + (os.path.basename(Pdir), G, float(cross), float(error)*float(cross))) self.resubmit_survey(Pdir,G, Gdirs, step) elif cross: @@ -670,26 +670,26 @@ def combine_iteration(self, Pdir, G, step): newGpath = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(newGpath): os.mkdir(newGpath) - + # copy the new grid: - files.cp(pjoin(Gdirs[0], 'ftn25'), + files.cp(pjoin(Gdirs[0], 'ftn25'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, 'ftn26')) - + # copy the events fsock = open(pjoin(newGpath, 'events.lhe'), 'w') for Gdir in Gdirs: - fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) - + fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) + # copy one log - files.cp(pjoin(Gdirs[0], 'log.txt'), + files.cp(pjoin(Gdirs[0], 'log.txt'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G)) - - + + # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) else: logger.info("Survey finished for %s/G%s [0 cross]", os.path.basename(Pdir),G) - + Gdir = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(Gdir): os.mkdir(Gdir) @@ -697,21 +697,21 @@ def combine_iteration(self, Pdir, G, step): files.cp(pjoin(Gdirs[0], 'log.txt'), Gdir) # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) - + return 0 def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): """ exclude_sub_jobs is to remove some of the subjobs if a numerical issue is detected in one of them. Warning is issue when this occurs. """ - + # 1. 
create an object to combine the grid information and fill it grid_calculator = combine_grid.grid_information(self.run_card['nhel']) - + for i in range(self.splitted_for_dir(Pdir, G)): if i in exclude_sub_jobs: continue - path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) fsock = misc.mult_try_open(pjoin(path, 'results.dat')) one_result = grid_calculator.add_results_information(fsock) fsock.close() @@ -723,9 +723,9 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): fsock.close() os.remove(pjoin(path, 'results.dat')) #os.remove(pjoin(path, 'grid_information')) - - - + + + #2. combine the information about the total crossection / error # start by keep the interation in memory cross, across, sigma = grid_calculator.get_cross_section() @@ -736,12 +736,12 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): if maxwgt: nunwgt = grid_calculator.get_nunwgt(maxwgt) # Make sure not to apply the security below during the first step of the - # survey. Also, disregard channels with a contribution relative to the + # survey. Also, disregard channels with a contribution relative to the # total cross-section smaller than 1e-8 since in this case it is unlikely # that this channel will need more than 1 event anyway. 
apply_instability_security = False rel_contrib = 0.0 - if (self.__class__ != gensym or step > 1): + if (self.__class__ != gensym or step > 1): Pdir_across = 0.0 Gdir_across = 0.0 for (mPdir,mG) in self.abscross.keys(): @@ -750,7 +750,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): (self.sigma[(mPdir,mG)]+1e-99)) if mG == G: Gdir_across += (self.abscross[(mPdir,mG)]/ - (self.sigma[(mPdir,mG)]+1e-99)) + (self.sigma[(mPdir,mG)]+1e-99)) rel_contrib = abs(Gdir_across/(Pdir_across+1e-99)) if rel_contrib > (1.0e-8) and \ nunwgt < 2 and len(grid_calculator.results) > 1: @@ -770,14 +770,14 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): exclude_sub_jobs = list(exclude_sub_jobs) exclude_sub_jobs.append(th_maxwgt[-1][1]) grid_calculator.results.run_statistics['skipped_subchannel'] += 1 - + # Add some monitoring of the problematic events - gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) + gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) if os.path.isfile(pjoin(gPath,'events.lhe')): lhe_file = lhe_parser.EventFile(pjoin(gPath,'events.lhe')) discardedPath = pjoin(Pdir,'DiscardedUnstableEvents') if not os.path.exists(discardedPath): - os.mkdir(discardedPath) + os.mkdir(discardedPath) if os.path.isdir(discardedPath): # Keep only the event with a maximum weight, as it surely # is the problematic one. 
@@ -790,10 +790,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): lhe_file.close() evtRecord.write(pjoin(gPath,'events.lhe').read()) evtRecord.close() - + return self.combine_grid(Pdir, G, step, exclude_sub_jobs) - + if across !=0: if sigma != 0: self.cross[(Pdir,G)] += cross**3/sigma**2 @@ -814,10 +814,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): self.chi2[(Pdir,G)] = 0 cross = self.cross[(Pdir,G)] error = 0 - + else: error = 0 - + grid_calculator.results.compute_values(update_statistics=True) if (str(os.path.basename(Pdir)), G) in self.run_statistics: self.run_statistics[(str(os.path.basename(Pdir)), G)]\ @@ -825,8 +825,8 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): else: self.run_statistics[(str(os.path.basename(Pdir)), G)] = \ grid_calculator.results.run_statistics - - self.warnings_from_statistics(G, grid_calculator.results.run_statistics) + + self.warnings_from_statistics(G, grid_calculator.results.run_statistics) stats_msg = grid_calculator.results.run_statistics.nice_output( '/'.join([os.path.basename(Pdir),'G%s'%G])) @@ -836,7 +836,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): # Clean up grid_information to avoid border effects in case of a crash for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) - try: + try: os.remove(pjoin(path, 'grid_information')) except OSError as oneerror: if oneerror.errno != 2: @@ -850,7 +850,7 @@ def warnings_from_statistics(self,G,stats): return EPS_fraction = float(stats['exceptional_points'])/stats['n_madloop_calls'] - + msg = "Channel %s has encountered a fraction of %.3g\n"+ \ "of numerically unstable loop matrix element computations\n"+\ "(which could not be rescued using quadruple precision).\n"+\ @@ -861,16 +861,16 @@ def warnings_from_statistics(self,G,stats): elif EPS_fraction > 0.01: logger.critical((msg%(G,EPS_fraction)).replace('might', 'can')) raise Exception((msg%(G,EPS_fraction)).replace('might', 'can')) 
- + def get_current_axsec(self): - + across = 0 for (Pdir,G) in self.abscross: across += self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) return across - + def write_results(self, grid_calculator, cross, error, Pdir, G, step): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -888,7 +888,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step): maxwgt = grid_calculator.get_max_wgt() nunwgt = grid_calculator.get_nunwgt() luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -897,20 +897,20 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s %s 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross), fstr(maxwgt)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - + def resubmit_survey(self, Pdir, G, Gdirs, step): """submit the next iteration of the survey""" # 1. write the new input_app.txt to double the number of points - run_card = self.cmd.run_card + run_card = self.cmd.run_card options = {'event' : 2**(step) * self.cmd.opts['points'] / self.splitted_grid, 'maxiter': 1, 'miniter': 1, @@ -919,18 +919,18 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): else run_card['nhel'], 'gridmode': -2, 'channel' : '' - } - + } + if int(options['helicity']) == 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + for Gdir in Gdirs: - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + + #2. 
resubmit the new jobs packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, \ - (Pdir, G, step+1)) + (Pdir, G, step+1)) nb_step = len(Gdirs) * (step+1) for i,subdir in enumerate(Gdirs): subdir = subdir.rsplit('_',1)[1] @@ -938,34 +938,34 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): offset = nb_step+i+1 offset=str(offset) tag = "%s.%s" % (subdir, offset) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[tag, G], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), packet_member=packet) - + def write_parameter_file(self, path, options): """ """ - + template =""" %(event)s %(maxiter)s %(miniter)s !Number of events and max and min iterations %(accuracy)s !Accuracy %(gridmode)s !Grid Adjustment 0=none, 2=adjust 1 !Suppress Amplitude 1=yes %(helicity)s !Helicity Sum/event 0=exact - %(channel)s """ + %(channel)s """ options['event'] = int(options['event']) open(path, 'w').write(template % options) - - + + def write_parameter(self, parralelization, Pdirs=None): """Write the parameter of the survey run""" run_card = self.cmd.run_card - + options = {'event' : self.cmd.opts['points'], 'maxiter': self.cmd.opts['iterations'], 'miniter': self.min_iterations, @@ -975,36 +975,36 @@ def write_parameter(self, parralelization, Pdirs=None): 'gridmode': 2, 'channel': '' } - + if int(options['helicity'])== 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + if parralelization: options['gridmode'] = -2 options['maxiter'] = 1 #this is automatic in dsample anyway options['miniter'] = 1 #this is automatic in dsample anyway options['event'] /= self.splitted_grid - + if not Pdirs: Pdirs = self.subproc - + for Pdir in Pdirs: - path =pjoin(Pdir, 'input_app.txt') + path =pjoin(Pdir, 'input_app.txt') self.write_parameter_file(path, options) - - -class gen_ximprove(object): - - + + +class gen_ximprove(object): + + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the 
number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job @@ -1022,7 +1022,7 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_gridpack) elif cls.force_class == 'loop_induced': return super(gen_ximprove, cls).__new__(gen_ximprove_share) - + if cmd.proc_characteristics['loop_induced']: return super(gen_ximprove, cls).__new__(gen_ximprove_share) elif gen_ximprove.format_variable(cmd.run_card['gridpack'], bool): @@ -1031,31 +1031,31 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_share) else: return super(gen_ximprove, cls).__new__(gen_ximprove_v4) - - + + def __init__(self, cmd, opt=None): - + try: super(gen_ximprove, self).__init__(cmd, opt) except TypeError: pass - + self.run_statistics = {} self.cmd = cmd self.run_card = cmd.run_card run_card = self.run_card self.me_dir = cmd.me_dir - + #extract from the run_card the information that we need. 
self.gridpack = run_card['gridpack'] self.nhel = run_card['nhel'] if "nhel_refine" in run_card: self.nhel = run_card["nhel_refine"] - + if self.run_card['refine_evt_by_job'] != -1: self.max_request_event = run_card['refine_evt_by_job'] - - + + # Default option for the run self.gen_events = True self.parralel = False @@ -1066,7 +1066,7 @@ def __init__(self, cmd, opt=None): # parameter for the gridpack run self.nreq = 2000 self.iseed = 4321 - + # placeholder for information self.results = 0 #updated in launch/update_html @@ -1074,16 +1074,16 @@ def __init__(self, cmd, opt=None): self.configure(opt) elif isinstance(opt, bannermod.GridpackCard): self.configure_gridpack(opt) - + def __call__(self): return self.launch() - + def launch(self): - """running """ - + """running """ + #start the run self.handle_seed() - self.results = sum_html.collect_result(self.cmd, + self.results = sum_html.collect_result(self.cmd, main_dir=pjoin(self.cmd.me_dir,'SubProcesses')) #main_dir is for gridpack readonly mode if self.gen_events: # We run to provide a given number of events @@ -1095,15 +1095,15 @@ def launch(self): def configure(self, opt): """Defines some parameter of the run""" - + for key, value in opt.items(): if key in self.__dict__: targettype = type(getattr(self, key)) setattr(self, key, self.format_variable(value, targettype, key)) else: raise Exception('%s not define' % key) - - + + # special treatment always do outside the loop to avoid side effect if 'err_goal' in opt: if self.err_goal < 1: @@ -1113,24 +1113,24 @@ def configure(self, opt): logger.info("Generating %s unweighted events." 
% self.err_goal) self.gen_events = True self.err_goal = self.err_goal * self.gen_events_security # security - + def handle_seed(self): """not needed but for gridpack --which is not handle here for the moment""" return - - + + def find_job_for_event(self): """return the list of channel that need to be improved""" - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) - - goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 + + goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) - all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) - + all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) + to_refine = [] for C in all_channels: if C.get('axsec') == 0: @@ -1141,61 +1141,61 @@ def find_job_for_event(self): elif C.get('xerr') > max(C.get('axsec'), (1/(100*math.sqrt(self.err_goal)))*all_channels[-1].get('axsec')): to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 
'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru + - class gen_ximprove_v4(gen_ximprove): - + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + super(gen_ximprove_v4, self).__init__(cmd, opt) - + if cmd.opts['accuracy'] < cmd._survey_options['accuracy'][1]: self.increase_precision(cmd._survey_options['accuracy'][1]/cmd.opts['accuracy']) @@ -1203,7 +1203,7 @@ def reset_multijob(self): for path in misc.glob(pjoin('*', '*','multijob.dat'), pjoin(self.me_dir, 'SubProcesses')): open(path,'w').write('0\n') - + def write_multijob(self, Channel, nb_split): """ """ if nb_split <=1: @@ -1211,7 +1211,7 @@ def write_multijob(self, Channel, nb_split): f = open(pjoin(self.me_dir, 'SubProcesses', Channel.get('name'), 'multijob.dat'), 'w') f.write('%i\n' % nb_split) f.close() - + def increase_precision(self, rate=3): #misc.sprint(rate) if rate < 3: @@ -1222,25 +1222,25 @@ def increase_precision(self, rate=3): rate = rate -2 self.max_event_in_iter = int((rate+1) * 10000) self.min_events = int(rate+2) * 2500 - self.gen_events_security = 1 + 0.1 * (rate+2) - + 
self.gen_events_security = 1 + 0.1 * (rate+2) + if int(self.nhel) == 1: self.min_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//3) self.max_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//2) - - + + alphabet = "abcdefghijklmnopqrstuvwxyz" def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() #reset the potential multijob of previous run self.reset_multijob() - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. @@ -1257,17 +1257,17 @@ def get_job_for_event(self): else: for i in range(len(to_refine) //3): new_order.append(to_refine[i]) - new_order.append(to_refine[-2*i-1]) + new_order.append(to_refine[-2*i-1]) new_order.append(to_refine[-2*i-2]) if len(to_refine) % 3 == 1: - new_order.append(to_refine[i+1]) + new_order.append(to_refine[i+1]) elif len(to_refine) % 3 == 2: - new_order.append(to_refine[i+2]) + new_order.append(to_refine[i+2]) #ensure that the reordering is done nicely assert set([id(C) for C in to_refine]) == set([id(C) for C in new_order]) - to_refine = new_order - - + to_refine = new_order + + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target @@ -1279,7 +1279,7 @@ def get_job_for_event(self): nb_split = self.max_splitting nb_split=max(1, nb_split) - + #2. estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1296,21 +1296,21 @@ def get_job_for_event(self): nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. 
Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + # write the multi-job information self.write_multijob(C, nb_split) - + packet = cluster.Packet((C.parent_name, C.name), combine_runs.CombineRuns, (pjoin(self.me_dir, 'SubProcesses', C.parent_name)), {"subproc": C.name, "nb_split":nb_split}) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1321,7 +1321,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': packet, + 'packet': packet, } if nb_split == 1: @@ -1334,19 +1334,19 @@ def get_job_for_event(self): if self.keep_grid_for_refine: new_info['base_directory'] = info['directory'] jobs.append(new_info) - - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def create_ajob(self, template, jobs, write_dir=None): """create the ajob""" - + if not jobs: return if not write_dir: write_dir = pjoin(self.me_dir, 'SubProcesses') - + #filter the job according to their SubProcess directory # no mix submition P2job= collections.defaultdict(list) for j in jobs: @@ -1355,11 +1355,11 @@ def create_ajob(self, template, jobs, write_dir=None): for P in P2job.values(): self.create_ajob(template, P, write_dir) return - - + + #Here we can assume that all job are for the same directory. 
path = pjoin(write_dir, jobs[0]['P_dir']) - + template_text = open(template, 'r').read() # special treatment if needed to combine the script # computes how many submition miss one job @@ -1384,8 +1384,8 @@ def create_ajob(self, template, jobs, write_dir=None): skip1=0 combining_job =1 nb_sub = len(jobs) - - + + nb_use = 0 for i in range(nb_sub): script_number = i+1 @@ -1404,14 +1404,14 @@ def create_ajob(self, template, jobs, write_dir=None): info["base_directory"] = "./" fsock.write(template_text % info) nb_use += nb_job - + fsock.close() return script_number def get_job_for_precision(self): """create the ajob to achieve a give precision on the total cross-section""" - + assert self.err_goal <=1 xtot = abs(self.results.xsec) logger.info("Working on precision: %s %%" %(100*self.err_goal)) @@ -1428,46 +1428,46 @@ def get_job_for_precision(self): rerr *=rerr if not len(to_refine): return - - # change limit since most don't contribute + + # change limit since most don't contribute limit = math.sqrt((self.err_goal * xtot)**2 - rerr/math.sqrt(len(to_refine))) for C in to_refine[:]: cerr = C.mfactor*(C.xerru + len(to_refine)*C.xerrc) if cerr < limit: to_refine.remove(C) - + # all the channel are now selected. create the channel information logger.info('need to improve %s channels' % len(to_refine)) - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. # loop over the channel to refine for C in to_refine: - + #1. 
Determine how many events we need in each iteration yerr = C.mfactor*(C.xerru+len(to_refine)*C.xerrc) nevents = 0.2*C.nevents*(yerr/limit)**2 - + nb_split = int((nevents*(C.nunwgt/C.nevents)/self.max_request_event/ (2**self.min_iter-1))**(2/3)) nb_split = max(nb_split, 1) - # **(2/3) to slow down the increase in number of jobs + # **(2/3) to slow down the increase in number of jobs if nb_split > self.max_splitting: nb_split = self.max_splitting - + if nb_split >1: nevents = nevents / nb_split self.write_multijob(C, nb_split) # forbid too low/too large value nevents = min(self.min_event_in_iter, max(self.max_event_in_iter, nevents)) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1487,38 +1487,38 @@ def get_job_for_precision(self): new_info['offset'] = i+1 new_info['directory'] += self.alphabet[i % 26] + str((i+1)//26) jobs.append(new_info) - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 
'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru @@ -1528,27 +1528,27 @@ class gen_ximprove_v4_nogridupdate(gen_ximprove_v4): # some hardcoded value which impact the generation gen_events_security = 1.1 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 400 # split jobs if a channel if it needs more than that + max_request_event = 400 # split jobs if a channel if it needs more than that max_event_in_iter = 500 min_event_in_iter = 250 - max_splitting = 260 # maximum duplication of a given channel - min_iter = 2 + max_splitting = 260 # maximum duplication of a given channel + min_iter = 2 max_iter = 6 keep_grid_for_refine = True - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + gen_ximprove.__init__(cmd, opt) - + if cmd.proc_characteristics['loopinduced'] and \ cmd.proc_characteristics['nexternal'] > 2: self.increase_parralelization(cmd.proc_characteristics['nexternal']) - + def increase_parralelization(self, nexternal): - self.max_splitting = 1000 - + self.max_splitting = 1000 + if self.run_card['refine_evt_by_job'] != -1: pass elif nexternal == 3: @@ -1563,27 +1563,27 @@ def increase_parralelization(self, nexternal): class gen_ximprove_share(gen_ximprove, gensym): """Doing the refine in multicore. Each core handle a couple of PS point.""" - nb_ps_by_job = 2000 + nb_ps_by_job = 2000 mode = "refine" gen_events_security = 1.15 # Note the real security is lower since we stop the jobs if they are at 96% # of this target. 
def __init__(self, *args, **opts): - + super(gen_ximprove_share, self).__init__(*args, **opts) self.generated_events = {} self.splitted_for_dir = lambda x,y : self.splitted_Pdir[(x,y)] - + def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - + goal_lum, to_refine = self.find_job_for_event() self.goal_lum = goal_lum - + # loop over the channel to refine to find the number of PS point to launch total_ps_points = 0 channel_to_ps_point = [] @@ -1593,7 +1593,7 @@ def get_job_for_event(self): os.remove(pjoin(self.me_dir, "SubProcesses",C.parent_name, C.name, "events.lhe")) except: pass - + #1. Compute the number of points are needed to reach target needed_event = goal_lum*C.get('axsec') if needed_event == 0: @@ -1609,18 +1609,18 @@ def get_job_for_event(self): nb_split = 1 if nb_split > self.max_splitting: nb_split = self.max_splitting - nevents = self.max_event_in_iter * self.max_splitting + nevents = self.max_event_in_iter * self.max_splitting else: nevents = self.max_event_in_iter * nb_split if nevents > self.max_splitting*self.max_event_in_iter: logger.warning("Channel %s/%s has a very low efficiency of unweighting. 
Might not be possible to reach target" % \ (C.name, C.parent_name)) - nevents = self.max_event_in_iter * self.max_splitting - - total_ps_points += nevents - channel_to_ps_point.append((C, nevents)) - + nevents = self.max_event_in_iter * self.max_splitting + + total_ps_points += nevents + channel_to_ps_point.append((C, nevents)) + if self.cmd.options["run_mode"] == 1: if self.cmd.options["cluster_size"]: nb_ps_by_job = total_ps_points /int(self.cmd.options["cluster_size"]) @@ -1634,7 +1634,7 @@ def get_job_for_event(self): nb_ps_by_job = total_ps_points / self.cmd.options["nb_core"] else: nb_ps_by_job = self.nb_ps_by_job - + nb_ps_by_job = int(max(nb_ps_by_job, 500)) for C, nevents in channel_to_ps_point: @@ -1648,20 +1648,20 @@ def get_job_for_event(self): self.create_resubmit_one_iter(C.parent_name, C.name[1:], submit_ps, nb_job, step=0) needed_event = goal_lum*C.get('xsec') logger.debug("%s/%s : need %s event. Need %s split job of %s points", C.parent_name, C.name, needed_event, nb_job, submit_ps) - - + + def combine_iteration(self, Pdir, G, step): - + grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - + # collect all the generated_event Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) assert len(grid_calculator.results) == len(Gdirs) == self.splitted_for_dir(Pdir, G) - - + + # Check how many events are going to be kept after un-weighting. needed_event = cross * self.goal_lum if needed_event == 0: @@ -1671,19 +1671,19 @@ def combine_iteration(self, Pdir, G, step): if self.err_goal >=1: if needed_event > self.gen_events_security * self.err_goal: needed_event = int(self.gen_events_security * self.err_goal) - + if (Pdir, G) in self.generated_events: old_nunwgt, old_maxwgt = self.generated_events[(Pdir, G)] else: old_nunwgt, old_maxwgt = 0, 0 - + if old_nunwgt == 0 and os.path.exists(pjoin(Pdir,"G%s" % G, "events.lhe")): # possible for second refine. 
lhe = lhe_parser.EventFile(pjoin(Pdir,"G%s" % G, "events.lhe")) old_nunwgt = lhe.unweight(None, trunc_error=0.005, log_level=0) old_maxwgt = lhe.max_wgt - - + + maxwgt = max(grid_calculator.get_max_wgt(), old_maxwgt) new_evt = grid_calculator.get_nunwgt(maxwgt) @@ -1695,35 +1695,35 @@ def combine_iteration(self, Pdir, G, step): one_iter_nb_event = max(grid_calculator.get_nunwgt(),1) drop_previous_iteration = False # compare the number of events to generate if we discard the previous iteration - n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) n_target_combined = (needed_event-nunwgt) / efficiency if n_target_one_iter < n_target_combined: # the last iteration alone has more event that the combine iteration. - # it is therefore interesting to drop previous iteration. + # it is therefore interesting to drop previous iteration. drop_previous_iteration = True nunwgt = one_iter_nb_event maxwgt = grid_calculator.get_max_wgt() new_evt = nunwgt - efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) - + efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + try: if drop_previous_iteration: raise IOError output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'a') except IOError: output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'w') - + misc.call(["cat"] + [pjoin(d, "events.lhe") for d in Gdirs], stdout=output_file) output_file.close() # For large number of iteration. check the number of event by doing the # real unweighting. 
- if nunwgt < 0.6 * needed_event and step > self.min_iter: + if nunwgt < 0.6 * needed_event and step > self.min_iter: lhe = lhe_parser.EventFile(output_file.name) old_nunwgt =nunwgt nunwgt = lhe.unweight(None, trunc_error=0.01, log_level=0) - - + + self.generated_events[(Pdir, G)] = (nunwgt, maxwgt) # misc.sprint("Adding %s event to %s. Currently at %s" % (new_evt, G, nunwgt)) @@ -1742,21 +1742,21 @@ def combine_iteration(self, Pdir, G, step): nevents = grid_calculator.results[0].nevents if nevents == 0: # possible if some integral returns 0 nevents = max(g.nevents for g in grid_calculator.results) - + need_ps_point = (needed_event - nunwgt)/(efficiency+1e-99) - need_job = need_ps_point // nevents + 1 - + need_job = need_ps_point // nevents + 1 + if step < self.min_iter: # This is normal but check if we are on the good track - job_at_first_iter = nb_split_before/2**(step-1) + job_at_first_iter = nb_split_before/2**(step-1) expected_total_job = job_at_first_iter * (2**self.min_iter-1) done_job = job_at_first_iter * (2**step-1) expected_remaining_job = expected_total_job - done_job - logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) + logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) # increase if needed but not too much need_job = min(need_job, expected_remaining_job*1.25) - + nb_job = (need_job-0.5)//(2**(self.min_iter-step)-1) + 1 nb_job = max(1, nb_job) grid_calculator.write_grid_for_submission(Pdir,G, @@ -1768,7 +1768,7 @@ def combine_iteration(self, Pdir, G, step): nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) #self.create_job(Pdir, G, nb_job, nevents, step) - + elif step < self.max_iter: if step + 1 == self.max_iter: need_job = 1.20 * need_job # avoid to have just too few event. 
@@ -1777,21 +1777,21 @@ def combine_iteration(self, Pdir, G, step): grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_job*nevents ,mode=self.mode, conservative_factor=self.max_iter) - - + + logger.info("%s/G%s is at %i/%i ('%.2g%%') event. Resubmit %i job at iteration %i." \ % (os.path.basename(Pdir), G, int(nunwgt),int(needed_event)+1, (float(nunwgt)/needed_event)*100.0 if needed_event>0.0 else 0.0, nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) - - + + return 0 - - + + def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -1807,7 +1807,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency nevents = nunwgt # make the unweighting to compute the number of events: luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -1816,23 +1816,23 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s 0.0 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - - - + + + class gen_ximprove_gridpack(gen_ximprove_v4): - - min_iter = 1 + + min_iter = 1 max_iter = 13 - max_request_event = 1e12 # split jobs if a channel if it needs more than that + max_request_event = 1e12 # split jobs if a channel if it needs more than that max_event_in_iter = 4000 min_event_in_iter = 500 combining_job = sys.maxsize @@ -1844,7 +1844,7 @@ def __new__(cls, *args, **opts): return super(gen_ximprove_gridpack, cls).__new__(cls, *args, **opts) def __init__(self, *args, **opts): - + self.ngran = -1 self.gscalefact = {} self.readonly = False 
@@ -1855,23 +1855,23 @@ def __init__(self, *args, **opts): self.readonly = opts['readonly'] super(gen_ximprove_gridpack,self).__init__(*args, **opts) if self.ngran == -1: - self.ngran = 1 - + self.ngran = 1 + def find_job_for_event(self): """return the list of channel that need to be improved""" import random - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) self.gscalefact = {} - + xtot = self.results.axsec - goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 + goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 # logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) all_channels.sort(key=lambda x : x.get('luminosity'), reverse=True) - + to_refine = [] for C in all_channels: tag = C.get('name') @@ -1885,27 +1885,27 @@ def find_job_for_event(self): #need to generate events logger.debug('request events for ', C.get('name'), 'cross=', C.get('axsec'), 'needed events = ', goal_lum * C.get('axsec')) - to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + to_refine.append(C) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. - + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target needed_event = max(goal_lum*C.get('axsec'), self.ngran) nb_split = 1 - + #2. 
estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1920,13 +1920,13 @@ def get_job_for_event(self): # forbid too low/too large value nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': os.path.basename(C.parent_name), + 'P_dir': os.path.basename(C.parent_name), 'offset': 1, # need to be change for splitted job 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'nevents': nevents, #int(nevents*self.gen_events_security)+1, @@ -1938,7 +1938,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': None, + 'packet': None, } if self.readonly: @@ -1946,11 +1946,11 @@ def get_job_for_event(self): info['base_directory'] = basedir jobs.append(info) - - write_dir = '.' if self.readonly else None - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) - + + write_dir = '.' if self.readonly else None + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) + done = [] for j in jobs: if j['P_dir'] in done: @@ -1967,22 +1967,22 @@ def get_job_for_event(self): write_dir = '.' 
if self.readonly else pjoin(self.me_dir, 'SubProcesses') self.check_events(goal_lum, to_refine, jobs, write_dir) - + def check_events(self, goal_lum, to_refine, jobs, Sdir): """check that we get the number of requested events if not resubmit.""" - + new_jobs = [] - + for C, job_info in zip(to_refine, jobs): - P = job_info['P_dir'] + P = job_info['P_dir'] G = job_info['channel'] axsec = C.get('axsec') - requested_events= job_info['requested_event'] - + requested_events= job_info['requested_event'] + new_results = sum_html.OneResult((P,G)) new_results.read_results(pjoin(Sdir,P, 'G%s'%G, 'results.dat')) - + # need to resubmit? if new_results.get('nunwgt') < requested_events: pwd = pjoin(os.getcwd(),job_info['P_dir'],'G%s'%G) if self.readonly else \ @@ -1992,10 +1992,10 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): job_info['offset'] += 1 new_jobs.append(job_info) files.mv(pjoin(pwd, 'events.lhe'), pjoin(pwd, 'events.lhe.previous')) - + if new_jobs: - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) + done = [] for j in new_jobs: if j['P_dir'] in done: @@ -2015,9 +2015,9 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): files.put_at_end(pjoin(pwd, 'events.lhe'),pjoin(pwd, 'events.lhe.previous')) return self.check_events(goal_lum, to_refine, new_jobs, Sdir) - - - - + + + + diff --git a/epochX/cudacpp/gg_tt01g.mad/bin/internal/madevent_interface.py b/epochX/cudacpp/gg_tt01g.mad/bin/internal/madevent_interface.py index cb6bf4ca57..8abba3f33f 100755 --- a/epochX/cudacpp/gg_tt01g.mad/bin/internal/madevent_interface.py +++ b/epochX/cudacpp/gg_tt01g.mad/bin/internal/madevent_interface.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application 
which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,10 +53,10 @@ # Special logger for the Cmd Interface logger = logging.getLogger('madevent.stdout') # -> stdout logger_stderr = logging.getLogger('madevent.stderr') # ->stderr - + try: import madgraph -except ImportError as error: +except ImportError as error: # import from madevent directory MADEVENT = True import internal.extended_cmd as cmd @@ -92,7 +92,7 @@ import madgraph.various.lhe_parser as lhe_parser # import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code import models.check_param_card as check_param_card - from madgraph.iolibs.files import ln + from madgraph.iolibs.files import ln from madgraph import InvalidCmd, MadGraph5Error, MG5DIR, ReadWrite @@ -113,10 +113,10 @@ class CmdExtended(common_run.CommonRunCmd): next_possibility = { 'start': [], } - + debug_output = 'ME5_debug' error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n' - error_debug += 'More information is found in \'%(debug)s\'.\n' + error_debug += 'More information is found in \'%(debug)s\'.\n' error_debug += 'Please attach this file to your report.' 
config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n' @@ -124,18 +124,18 @@ class CmdExtended(common_run.CommonRunCmd): keyboard_stop_msg = """stopping all operation in order to quit MadGraph5_aMC@NLO please enter exit""" - + # Define the Error InvalidCmd = InvalidCmd ConfigurationError = MadGraph5Error def __init__(self, me_dir, options, *arg, **opt): """Init history and line continuation""" - + # Tag allowing/forbiding question self.force = False - - # If possible, build an info line with current version number + + # If possible, build an info line with current version number # and date, from the VERSION text file info = misc.get_pkg_info() info_line = "" @@ -150,7 +150,7 @@ def __init__(self, me_dir, options, *arg, **opt): else: version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip() info_line = "#* VERSION %s %s *\n" % \ - (version, (24 - len(version)) * ' ') + (version, (24 - len(version)) * ' ') # Create a header for the history file. # Remember to fill in time at writeout time! 
@@ -177,7 +177,7 @@ def __init__(self, me_dir, options, *arg, **opt): '#* run as ./bin/madevent.py filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - + if info_line: info_line = info_line[1:] @@ -203,11 +203,11 @@ def __init__(self, me_dir, options, *arg, **opt): "* *\n" + \ "************************************************************") super(CmdExtended, self).__init__(me_dir, options, *arg, **opt) - + def get_history_header(self): - """return the history header""" + """return the history header""" return self.history_header % misc.get_time_info() - + def stop_on_keyboard_stop(self): """action to perform to close nicely on a keyboard interupt""" try: @@ -219,20 +219,20 @@ def stop_on_keyboard_stop(self): self.add_error_log_in_html(KeyboardInterrupt) except: pass - + def postcmd(self, stop, line): """ Update the status of the run for finishing interactive command """ - - stop = super(CmdExtended, self).postcmd(stop, line) + + stop = super(CmdExtended, self).postcmd(stop, line) # relaxing the tag forbidding question self.force = False - + if not self.use_rawinput: return stop - + if self.results and not self.results.current: return stop - + arg = line.split() if len(arg) == 0: return stop @@ -240,41 +240,41 @@ def postcmd(self, stop, line): return stop if isinstance(self.results.status, str) and self.results.status == 'Stop by the user': self.update_status('%s Stop by the user' % arg[0], level=None, error=True) - return stop + return stop elif not self.results.status: return stop elif str(arg[0]) in ['exit','quit','EOF']: return stop - + try: - self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], + self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], level=None, error=True) except Exception: misc.sprint('update_status fails') pass - - + + def nice_user_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() - return cmd.Cmd.nice_user_error(self, error, line) - + return cmd.Cmd.nice_user_error(self, error, line) + def nice_config_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() stop = cmd.Cmd.nice_config_error(self, error, line) - - + + try: debug_file = open(self.debug_output, 'a') debug_file.write(open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat'))) debug_file.close() except: - pass + pass return stop - + def nice_error_handling(self, error, line): """If a ME run is currently running add a link in the html output""" @@ -294,7 +294,7 @@ def nice_error_handling(self, error, line): proc_card = pjoin(self.me_dir,'Cards','proc_card_mg5.dat') if os.path.exists(proc_card): self.banner.add(proc_card) - + out_dir = pjoin(self.me_dir, 'Events', self.run_name) if not os.path.isdir(out_dir): os.mkdir(out_dir) @@ -307,7 +307,7 @@ def nice_error_handling(self, error, line): else: pass else: - self.add_error_log_in_html() + self.add_error_log_in_html() stop = cmd.Cmd.nice_error_handling(self, error, line) try: debug_file = open(self.debug_output, 'a') @@ -316,14 +316,14 @@ def nice_error_handling(self, error, line): except: pass return stop - - + + #=============================================================================== # HelpToCmd #=============================================================================== class HelpToCmd(object): """ The Series of help routine for the MadEventCmd""" - + def help_pythia(self): logger.info("syntax: pythia [RUN] [--run_options]") logger.info("-- run pythia on RUN (current one by default)") @@ -352,29 +352,29 @@ def help_banner_run(self): logger.info(" Path should be the path of a valid banner.") 
logger.info(" RUN should be the name of a run of the current directory") self.run_options_help([('-f','answer all question by default'), - ('--name=X', 'Define the name associated with the new run')]) - + ('--name=X', 'Define the name associated with the new run')]) + def help_open(self): logger.info("syntax: open FILE ") logger.info("-- open a file with the appropriate editor.") logger.info(' If FILE belongs to index.html, param_card.dat, run_card.dat') logger.info(' the path to the last created/used directory is used') logger.info(' The program used to open those files can be chosen in the') - logger.info(' configuration file ./input/mg5_configuration.txt') - - + logger.info(' configuration file ./input/mg5_configuration.txt') + + def run_options_help(self, data): if data: logger.info('-- local options:') for name, info in data: logger.info(' %s : %s' % (name, info)) - + logger.info("-- session options:") - logger.info(" Note that those options will be kept for the current session") + logger.info(" Note that those options will be kept for the current session") logger.info(" --cluster : Submit to the cluster. 
Current cluster: %s" % self.options['cluster_type']) logger.info(" --multicore : Run in multi-core configuration") logger.info(" --nb_core=X : limit the number of core to use to X.") - + def help_generate_events(self): logger.info("syntax: generate_events [run_name] [options]",) @@ -398,16 +398,16 @@ def help_initMadLoop(self): logger.info(" -f : Bypass the edition of MadLoopParams.dat.",'$MG:color:BLUE') logger.info(" -r : Refresh of the existing filters (erasing them if already present).",'$MG:color:BLUE') logger.info(" --nPS= : Specify how many phase-space points should be tried to set up the filters.",'$MG:color:BLUE') - + def help_calculate_decay_widths(self): - + if self.ninitial != 1: logger.warning("This command is only valid for processes of type A > B C.") logger.warning("This command can not be run in current context.") logger.warning("") - + logger.info("syntax: calculate_decay_widths [run_name] [options])") logger.info("-- Calculate decay widths and enter widths and BRs in param_card") logger.info(" for a series of processes of type A > B C ...") @@ -428,8 +428,8 @@ def help_survey(self): logger.info("-- evaluate the different channel associate to the process") self.run_options_help([("--" + key,value[-1]) for (key,value) in \ self._survey_options.items()]) - - + + def help_restart_gridpack(self): logger.info("syntax: restart_gridpack --precision= --restart_zero") @@ -439,14 +439,14 @@ def help_launch(self): logger.info("syntax: launch [run_name] [options])") logger.info(" --alias for either generate_events/calculate_decay_widths") logger.info(" depending of the number of particles in the initial state.") - + if self.ninitial == 1: logger.info("For this directory this is equivalent to calculate_decay_widths") self.help_calculate_decay_widths() else: logger.info("For this directory this is equivalent to $generate_events") self.help_generate_events() - + def help_refine(self): logger.info("syntax: refine require_precision [max_channel] [--run_options]") 
logger.info("-- refine the LAST run to achieve a given precision.") @@ -454,14 +454,14 @@ def help_refine(self): logger.info(' or the required relative error') logger.info(' max_channel:[5] maximal number of channel per job') self.run_options_help([]) - + def help_combine_events(self): """ """ logger.info("syntax: combine_events [run_name] [--tag=tag_name] [--run_options]") logger.info("-- Combine the last run in order to write the number of events") logger.info(" asked in the run_card.") self.run_options_help([]) - + def help_store_events(self): """ """ logger.info("syntax: store_events [--run_options]") @@ -481,7 +481,7 @@ def help_import(self): logger.info("syntax: import command PATH") logger.info("-- Execute the command present in the file") self.run_options_help([]) - + def help_syscalc(self): logger.info("syntax: syscalc [RUN] [%s] [-f | --tag=]" % '|'.join(self._plot_mode)) logger.info("-- calculate systematics information for the RUN (current run by default)") @@ -506,18 +506,18 @@ class AskRun(cmd.ControlSwitch): ('madspin', 'Decay onshell particles'), ('reweight', 'Add weights to events for new hypp.') ] - + def __init__(self, question, line_args=[], mode=None, force=False, *args, **opt): - + self.check_available_module(opt['mother_interface'].options) self.me_dir = opt['mother_interface'].me_dir super(AskRun,self).__init__(self.to_control, opt['mother_interface'], *args, **opt) - - + + def check_available_module(self, options): - + self.available_module = set() if options['pythia-pgs_path']: self.available_module.add('PY6') @@ -540,32 +540,32 @@ def check_available_module(self, options): self.available_module.add('Rivet') else: logger.warning("Rivet program installed but no parton shower with hepmc output detected.\n Please install pythia8") - + if not MADEVENT or ('mg5_path' in options and options['mg5_path']): self.available_module.add('MadSpin') if misc.has_f2py() or options['f2py_compiler']: self.available_module.add('reweight') -# old mode to 
activate the shower +# old mode to activate the shower def ans_parton(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if value is None: self.set_all_off() else: logger.warning('Invalid command: parton=%s' % value) - - + + # -# HANDLING SHOWER +# HANDLING SHOWER # def get_allowed_shower(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_shower'): return self.allowed_shower - + self.allowed_shower = [] if 'PY6' in self.available_module: self.allowed_shower.append('Pythia6') @@ -574,9 +574,9 @@ def get_allowed_shower(self): if self.allowed_shower: self.allowed_shower.append('OFF') return self.allowed_shower - + def set_default_shower(self): - + if 'PY6' in self.available_module and\ os.path.exists(pjoin(self.me_dir,'Cards','pythia_card.dat')): self.switch['shower'] = 'Pythia6' @@ -590,10 +590,10 @@ def set_default_shower(self): def check_value_shower(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_shower(): return True - + value =value.lower() if value in ['py6','p6','pythia_6'] and 'PY6' in self.available_module: return 'Pythia6' @@ -601,13 +601,13 @@ def check_value_shower(self, value): return 'Pythia8' else: return False - - -# old mode to activate the shower + + +# old mode to activate the shower def ans_pythia(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if 'PY6' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return @@ -621,13 +621,13 @@ def ans_pythia(self, value=None): self.set_switch('shower', 'OFF') else: logger.warning('Invalid command: pythia=%s' % value) - - + + def consistency_shower_detector(self, vshower, vdetector): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower == 'OFF': @@ -635,35 +635,35 @@ def consistency_shower_detector(self, vshower, vdetector): return 'OFF' if vshower == 'Pythia8' and vdetector == 'PGS': return 'OFF' - + return None - + # # HANDLING DETECTOR # def get_allowed_detector(self): """return valid entry for the switch""" - + if hasattr(self, 'allowed_detector'): - return self.allowed_detector - + return self.allowed_detector + self.allowed_detector = [] if 'PGS' in self.available_module: self.allowed_detector.append('PGS') if 'Delphes' in self.available_module: self.allowed_detector.append('Delphes') - + if self.allowed_detector: self.allowed_detector.append('OFF') - return self.allowed_detector + return self.allowed_detector def set_default_detector(self): - + self.set_default_shower() #ensure that this one is called first! - + if 'PGS' in self.available_module and self.switch['shower'] == 'Pythia6'\ and os.path.exists(pjoin(self.me_dir,'Cards','pgs_card.dat')): self.switch['detector'] = 'PGS' @@ -674,16 +674,16 @@ def set_default_detector(self): self.switch['detector'] = 'OFF' else: self.switch['detector'] = 'Not Avail.' - -# old mode to activate pgs + +# old mode to activate pgs def ans_pgs(self, value=None): """None: means that the user type 'pgs' - value: means that the user type pgs=value""" - + value: means that the user type pgs=value""" + if 'PGS' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return - + if value is None: self.set_all_off() self.switch['shower'] = 'Pythia6' @@ -696,16 +696,16 @@ def ans_pgs(self, value=None): else: logger.warning('Invalid command: pgs=%s' % value) - + # old mode to activate Delphes def ans_delphes(self, value=None): """None: means that the user type 'delphes' - value: means that the user type delphes=value""" - + value: means that the user type delphes=value""" + if 'Delphes' not in self.available_module: logger.warning('Delphes not available. Ignore commmand') return - + if value is None: self.set_all_off() if 'PY6' in self.available_module: @@ -718,15 +718,15 @@ def ans_delphes(self, value=None): elif value == 'off': self.set_switch('detector', 'OFF') else: - logger.warning('Invalid command: pgs=%s' % value) + logger.warning('Invalid command: pgs=%s' % value) def consistency_detector_shower(self,vdetector, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ - + if vdetector == 'PGS' and vshower != 'Pythia6': return 'Pythia6' if vdetector == 'Delphes' and vshower not in ['Pythia6', 'Pythia8']: @@ -744,28 +744,28 @@ def consistency_detector_shower(self,vdetector, vshower): # def get_allowed_analysis(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_analysis'): return self.allowed_analysis - + self.allowed_analysis = [] if 'ExRoot' in self.available_module: self.allowed_analysis.append('ExRoot') if 'MA4' in self.available_module: self.allowed_analysis.append('MadAnalysis4') if 'MA5' in self.available_module: - self.allowed_analysis.append('MadAnalysis5') + self.allowed_analysis.append('MadAnalysis5') if 'Rivet' in self.available_module: - self.allowed_analysis.append('Rivet') - + self.allowed_analysis.append('Rivet') + if self.allowed_analysis: self.allowed_analysis.append('OFF') - + return 
self.allowed_analysis - + def check_analysis(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_analysis(): return True if value.lower() in ['ma4', 'madanalysis4', 'madanalysis_4','4']: @@ -786,30 +786,30 @@ def consistency_shower_analysis(self, vshower, vanalysis): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'OFF' #new value for analysis - + return None - + def consistency_analysis_shower(self, vanalysis, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'Pythia8' #new value for analysis - + return None def set_default_analysis(self): """initialise the switch for analysis""" - + if 'MA4' in self.available_module and \ os.path.exists(pjoin(self.me_dir,'Cards','plot_card.dat')): self.switch['analysis'] = 'MadAnalysis4' @@ -818,46 +818,46 @@ def set_default_analysis(self): or os.path.exists(pjoin(self.me_dir,'Cards', 'madanalysis5_hadron_card.dat'))): self.switch['analysis'] = 'MadAnalysis5' elif 'ExRoot' in self.available_module: - self.switch['analysis'] = 'ExRoot' - elif self.get_allowed_analysis(): + self.switch['analysis'] = 'ExRoot' + elif self.get_allowed_analysis(): self.switch['analysis'] = 'OFF' else: self.switch['analysis'] = 'Not Avail.' 
- + # # MADSPIN handling # def get_allowed_madspin(self): """ ON|OFF|onshell """ - + if hasattr(self, 'allowed_madspin'): return self.allowed_madspin - + self.allowed_madspin = [] if 'MadSpin' in self.available_module: self.allowed_madspin = ['OFF',"ON",'onshell',"full"] return self.allowed_madspin - + def check_value_madspin(self, value): """handle alias and valid option not present in get_allowed_madspin""" - + if value.upper() in self.get_allowed_madspin(): return True elif value.lower() in self.get_allowed_madspin(): return True - + if 'MadSpin' not in self.available_module: return False - + if value.lower() in ['madspin', 'full']: return 'full' elif value.lower() in ['none']: return 'none' - - + + def set_default_madspin(self): """initialise the switch for madspin""" - + if 'MadSpin' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): self.switch['madspin'] = 'ON' @@ -865,10 +865,10 @@ def set_default_madspin(self): self.switch['madspin'] = 'OFF' else: self.switch['madspin'] = 'Not Avail.' 
- + def get_cardcmd_for_madspin(self, value): """set some command to run before allowing the user to modify the cards.""" - + if value == 'onshell': return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"] elif value in ['full', 'madspin']: @@ -877,36 +877,36 @@ def get_cardcmd_for_madspin(self, value): return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"] else: return [] - + # # ReWeight handling # def get_allowed_reweight(self): """ return the list of valid option for reweight=XXX """ - + if hasattr(self, 'allowed_reweight'): return getattr(self, 'allowed_reweight') - + if 'reweight' not in self.available_module: self.allowed_reweight = [] return self.allowed_reweight = ['OFF', 'ON'] - + # check for plugin mode plugin_path = self.mother_interface.plugin_path opts = misc.from_plugin_import(plugin_path, 'new_reweight', warning=False) self.allowed_reweight += opts - + def set_default_reweight(self): """initialise the switch for reweight""" - + if 'reweight' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')): self.switch['reweight'] = 'ON' else: self.switch['reweight'] = 'OFF' else: - self.switch['reweight'] = 'Not Avail.' + self.switch['reweight'] = 'Not Avail.' 
#=============================================================================== # CheckValidForCmd @@ -916,14 +916,14 @@ class CheckValidForCmd(object): def check_banner_run(self, args): """check the validity of line""" - + if len(args) == 0: self.help_banner_run() raise self.InvalidCmd('banner_run requires at least one argument.') - + tag = [a[6:] for a in args if a.startswith('--tag=')] - - + + if os.path.exists(args[0]): type ='banner' format = self.detect_card_type(args[0]) @@ -931,7 +931,7 @@ def check_banner_run(self, args): raise self.InvalidCmd('The file is not a valid banner.') elif tag: args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) + (args[0], tag)) if not os.path.exists(args[0]): raise self.InvalidCmd('No banner associates to this name and tag.') else: @@ -939,7 +939,7 @@ def check_banner_run(self, args): type = 'run' banners = misc.glob('*_banner.txt', pjoin(self.me_dir,'Events', args[0])) if not banners: - raise self.InvalidCmd('No banner associates to this name.') + raise self.InvalidCmd('No banner associates to this name.') elif len(banners) == 1: args[0] = banners[0] else: @@ -947,8 +947,8 @@ def check_banner_run(self, args): tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners] tag = self.ask('which tag do you want to use?', tags[0], tags) args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) - + (args[0], tag)) + run_name = [arg[7:] for arg in args if arg.startswith('--name=')] if run_name: try: @@ -970,14 +970,14 @@ def check_banner_run(self, args): except Exception: pass self.set_run_name(name) - + def check_history(self, args): """check the validity of line""" - + if len(args) > 1: self.help_history() raise self.InvalidCmd('\"history\" command takes at most one argument') - + if not len(args): return elif args[0] != 'clean': @@ -985,16 +985,16 @@ def check_history(self, args): if dirpath and not os.path.exists(dirpath) or \ os.path.isdir(args[0]): raise 
self.InvalidCmd("invalid path %s " % dirpath) - + def check_save(self, args): """ check the validity of the line""" - + if len(args) == 0: args.append('options') if args[0] not in self._save_opts: raise self.InvalidCmd('wrong \"save\" format') - + if args[0] != 'options' and len(args) != 2: self.help_save() raise self.InvalidCmd('wrong \"save\" format') @@ -1003,7 +1003,7 @@ def check_save(self, args): if not os.path.exists(basename): raise self.InvalidCmd('%s is not a valid path, please retry' % \ args[1]) - + if args[0] == 'options': has_path = None for arg in args[1:]: @@ -1024,9 +1024,9 @@ def check_save(self, args): has_path = True if not has_path: if '--auto' in arg and self.options['mg5_path']: - args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) + args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) else: - args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) + args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) def check_set(self, args): """ check the validity of the line""" @@ -1039,20 +1039,20 @@ def check_set(self, args): self.help_set() raise self.InvalidCmd('Possible options for set are %s' % \ self._set_options) - + if args[0] in ['stdout_level']: if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \ and not args[1].isdigit(): raise self.InvalidCmd('output_level needs ' + \ - 'a valid level') - + 'a valid level') + if args[0] in ['timeout']: if not args[1].isdigit(): - raise self.InvalidCmd('timeout values should be a integer') - + raise self.InvalidCmd('timeout values should be a integer') + def check_open(self, args): """ check the validity of the line """ - + if len(args) != 1: self.help_open() raise self.InvalidCmd('OPEN command requires exactly one argument') @@ -1069,7 +1069,7 @@ def check_open(self, args): raise self.InvalidCmd('No MadEvent path defined. 
Unable to associate this name to a file') else: return True - + path = self.me_dir if os.path.isfile(os.path.join(path,args[0])): args[0] = os.path.join(path,args[0]) @@ -1078,7 +1078,7 @@ def check_open(self, args): elif os.path.isfile(os.path.join(path,'HTML',args[0])): args[0] = os.path.join(path,'HTML',args[0]) # special for card with _default define: copy the default and open it - elif '_card.dat' in args[0]: + elif '_card.dat' in args[0]: name = args[0].replace('_card.dat','_card_default.dat') if os.path.isfile(os.path.join(path,'Cards', name)): files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0])) @@ -1086,13 +1086,13 @@ def check_open(self, args): else: raise self.InvalidCmd('No default path for this file') elif not os.path.isfile(args[0]): - raise self.InvalidCmd('No default path for this file') - + raise self.InvalidCmd('No default path for this file') + def check_initMadLoop(self, args): """ check initMadLoop command arguments are valid.""" - + opt = {'refresh': False, 'nPS': None, 'force': False} - + for arg in args: if arg in ['-r','--refresh']: opt['refresh'] = True @@ -1105,14 +1105,14 @@ def check_initMadLoop(self, args): except ValueError: raise InvalidCmd("The number of attempts specified "+ "'%s' is not a valid integer."%n_attempts) - + return opt - + def check_treatcards(self, args): """check that treatcards arguments are valid [param|run|all] [--output_dir=] [--param_card=] [--run_card=] """ - + opt = {'output_dir':pjoin(self.me_dir,'Source'), 'param_card':pjoin(self.me_dir,'Cards','param_card.dat'), 'run_card':pjoin(self.me_dir,'Cards','run_card.dat'), @@ -1129,14 +1129,14 @@ def check_treatcards(self, args): if os.path.isfile(value): card_name = self.detect_card_type(value) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' % (card_name, key)) opt[key] = value elif 
os.path.isfile(pjoin(self.me_dir,value)): card_name = self.detect_card_type(pjoin(self.me_dir,value)) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' - % (card_name, key)) + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + % (card_name, key)) opt[key] = value else: raise self.InvalidCmd('No such file: %s ' % value) @@ -1154,14 +1154,14 @@ def check_treatcards(self, args): else: self.help_treatcards() raise self.InvalidCmd('Unvalid argument %s' % arg) - - return mode, opt - - + + return mode, opt + + def check_survey(self, args, cmd='survey'): """check that the argument for survey are valid""" - - + + self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) @@ -1183,41 +1183,41 @@ def check_survey(self, args, cmd='survey'): self.help_survey() raise self.InvalidCmd('Too many argument for %s command' % cmd) elif not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], None,'parton', True) args.pop(0) - + return True def check_generate_events(self, args): """check that the argument for generate_events are valid""" - + run = None if args and args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['auto','parton', 'pythia', 'pgs', 'delphes']: self.help_generate_events() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. 
Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + #if len(args) > 1: # self.help_generate_events() # raise self.InvalidCmd('Too many argument for generate_events command: %s' % cmd) - + return run def check_calculate_decay_widths(self, args): """check that the argument for calculate_decay_widths are valid""" - + if self.ninitial != 1: raise self.InvalidCmd('Can only calculate decay widths for decay processes A > B C ...') @@ -1232,7 +1232,7 @@ def check_calculate_decay_widths(self, args): if len(args) > 1: self.help_calculate_decay_widths() raise self.InvalidCmd('Too many argument for calculate_decay_widths command: %s' % cmd) - + return accuracy @@ -1241,25 +1241,25 @@ def check_multi_run(self, args): """check that the argument for survey are valid""" run = None - + if not len(args): self.help_multi_run() raise self.InvalidCmd("""multi_run command requires at least one argument for the number of times that it call generate_events command""") - + if args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['parton', 'pythia', 'pgs', 'delphes']: self.help_multi_run() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. 
To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + elif not args[0].isdigit(): self.help_multi_run() @@ -1267,7 +1267,7 @@ def check_multi_run(self, args): #pass nb run to an integer nb_run = args.pop(0) args.insert(0, int(nb_run)) - + return run @@ -1284,7 +1284,7 @@ def check_refine(self, args): self.help_refine() raise self.InvalidCmd('require_precision argument is require for refine cmd') - + if not self.run_name: if self.results.lastrun: self.set_run_name(self.results.lastrun) @@ -1296,17 +1296,17 @@ def check_refine(self, args): else: try: [float(arg) for arg in args] - except ValueError: - self.help_refine() + except ValueError: + self.help_refine() raise self.InvalidCmd('refine arguments are suppose to be number') - + return True - + def check_combine_events(self, arg): """ Check the argument for the combine events command """ - + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] elif not self.run_tag: @@ -1314,53 +1314,53 @@ def check_combine_events(self, arg): else: tag = self.run_tag self.run_tag = tag - + if len(arg) > 1: self.help_combine_events() raise self.InvalidCmd('Too many argument for combine_events command') - + if len(arg) == 1: self.set_run_name(arg[0], self.run_tag, 'parton', True) - + if not self.run_name: if not self.results.lastrun: raise self.InvalidCmd('No run_name currently define. 
Unable to run combine') else: self.set_run_name(self.results.lastrun) - + return True - + def check_pythia(self, args): """Check the argument for pythia command - syntax: pythia [NAME] + syntax: pythia [NAME] Note that other option are already removed at this point """ - + mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia', 'pgs', 'delphes']: self.help_pythia() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') - + if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' raise self.InvalidCmd(error_msg) - - - + + + tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1368,8 +1368,8 @@ def check_pythia(self, args): if self.results.lastrun: args.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): @@ -1388,21 +1388,21 @@ def check_pythia(self, args): files.ln(input_file, os.path.dirname(output_file)) else: misc.gunzip(input_file, keep=True, stdout=output_file) - + args.append(mode) - + def check_pythia8(self, args): """Check the argument for pythia command - syntax: pythia8 [NAME] + syntax: pythia8 [NAME] Note that other option are already removed at this point - """ + """ mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia','pythia8','delphes']: self.help_pythia8() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') @@ -1410,7 +1410,7 @@ def check_pythia8(self, args): if not self.options['pythia8_path']: logger.info('Retry reading configuration file to find pythia8 path') self.set_configuration() - + if not self.options['pythia8_path'] or not \ os.path.exists(pjoin(self.options['pythia8_path'],'bin','pythia8-config')): error_msg = 'No valid pythia8 path set.\n' @@ -1421,7 +1421,7 @@ def check_pythia8(self, args): raise self.InvalidCmd(error_msg) tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1430,11 +1430,11 @@ def check_pythia8(self, args): args.insert(0, self.results.lastrun) else: raise self.InvalidCmd('No run name currently define. '+ - 'Please add this information.') - + 'Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ - not os.path.exists(pjoin(self.me_dir,'Events',args[0], + not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): raise self.InvalidCmd('No events file corresponding to %s run. 
' % args[0]) @@ -1451,9 +1451,9 @@ def check_pythia8(self, args): else: raise self.InvalidCmd('No event file corresponding to %s run. ' % self.run_name) - + args.append(mode) - + def check_remove(self, args): """Check that the remove command is valid""" @@ -1484,33 +1484,33 @@ def check_plot(self, args): madir = self.options['madanalysis_path'] td = self.options['td_path'] - + if not madir or not td: logger.info('Retry to read configuration file to find madanalysis/td') self.set_configuration() madir = self.options['madanalysis_path'] - td = self.options['td_path'] - + td = self.options['td_path'] + if not madir: error_msg = 'No valid MadAnalysis path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) + raise self.InvalidCmd(error_msg) if not td: error_msg = 'No valid td path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') + raise self.InvalidCmd('No run name currently define. Please add this information.') args.append('all') return - + if args[0] not in self._plot_mode: self.set_run_name(args[0], level='plot') del args[0] @@ -1518,45 +1518,45 @@ def check_plot(self, args): args.append('all') elif not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + for arg in args: if arg not in self._plot_mode and arg != self.run_name: self.help_plot() - raise self.InvalidCmd('unknown options %s' % arg) - + raise self.InvalidCmd('unknown options %s' % arg) + def check_syscalc(self, args): """Check the argument for the syscalc command syscalc run_name modes""" scdir = self.options['syscalc_path'] - + if not scdir: logger.info('Retry to read configuration file to find SysCalc') self.set_configuration() scdir = self.options['syscalc_path'] - + if not scdir: error_msg = 'No valid SysCalc path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' error_msg += 'Please note that you need to compile SysCalc first.' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. Please add this information.') args.append('all') return #deal options tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] - + if args[0] not in self._syscalc_mode: self.set_run_name(args[0], tag=tag, level='syscalc') del args[0] @@ -1564,61 +1564,61 @@ def check_syscalc(self, args): args.append('all') elif not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. 
Please add this information.') elif tag and tag != self.run_tag: self.set_run_name(self.run_name, tag=tag, level='syscalc') - + for arg in args: if arg not in self._syscalc_mode and arg != self.run_name: self.help_syscalc() - raise self.InvalidCmd('unknown options %s' % arg) + raise self.InvalidCmd('unknown options %s' % arg) if self.run_card['use_syst'] not in self.true: raise self.InvalidCmd('Run %s does not include ' % self.run_name + \ 'systematics information needed for syscalc.') - - + + def check_pgs(self, arg, no_default=False): """Check the argument for pythia command - syntax is "pgs [NAME]" + syntax is "pgs [NAME]" Note that other option are already remove at this point """ - + # If not pythia-pgs path if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] - - + + if len(arg) == 0 and not self.run_name: if self.results.lastrun: arg.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(arg) == 1 and self.run_name == arg[0]: arg.pop(0) - + if not len(arg) and \ not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): if not no_default: self.help_pgs() raise self.InvalidCmd('''No file file pythia_events.hep currently available Please specify a valid run_name''') - - lock = None + + lock = None if len(arg) == 1: prev_tag = self.set_run_name(arg[0], tag, 'pgs') if not os.path.exists(pjoin(self.me_dir,'Events',self.run_name,'%s_pythia_events.hep.gz' % prev_tag)): @@ -1626,25 +1626,25 @@ def check_pgs(self, arg, no_default=False): else: input_file = pjoin(self.me_dir,'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag) output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep') - lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), + lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), argument=['-c', input_file]) else: - if tag: + if tag: self.run_card['run_tag'] = tag self.set_run_name(self.run_name, tag, 'pgs') - - return lock + + return lock def check_display(self, args): """check the validity of line syntax is "display XXXXX" """ - + if len(args) < 1 or args[0] not in self._display_opts: self.help_display() raise self.InvalidCmd - + if args[0] == 'variable' and len(args) !=2: raise self.InvalidCmd('variable need a variable name') @@ -1654,39 +1654,39 @@ def check_display(self, args): def check_import(self, args): """check the validity of line""" - + if not args: self.help_import() raise self.InvalidCmd('wrong \"import\" format') - + if args[0] != 'command': args.insert(0,'command') - - + + if not len(args) == 2 or not os.path.exists(args[1]): raise self.InvalidCmd('PATH is mandatory for import command\n') - + #=============================================================================== # CompleteForCmd #=============================================================================== class CompleteForCmd(CheckValidForCmd): """ The Series of help 
routine for the MadGraphCmd""" - - + + def complete_banner_run(self, text, line, begidx, endidx, formatting=True): "Complete the banner run command" try: - - + + args = self.split_arg(line[0:begidx], error=False) - + if args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args \ - if a.endswith(os.path.sep)])) - - + if a.endswith(os.path.sep)])) + + if len(args) > 1: # only options are possible tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events' , args[1])) @@ -1697,9 +1697,9 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): else: return self.list_completion(text, tags) return self.list_completion(text, tags +['--name=','-f'], line) - + # First argument - possibilites = {} + possibilites = {} comp = self.path_completion(text, os.path.join('.',*[a for a in args \ if a.endswith(os.path.sep)])) @@ -1711,10 +1711,10 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): run_list = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) run_list = [n.rsplit('/',2)[1] for n in run_list] possibilites['RUN Name'] = self.list_completion(text, run_list) - + return self.deal_multiple_categories(possibilites, formatting) - - + + except Exception as error: print(error) @@ -1732,12 +1732,12 @@ def complete_history(self, text, line, begidx, endidx): if len(args) == 1: return self.path_completion(text) - - def complete_open(self, text, line, begidx, endidx): + + def complete_open(self, text, line, begidx, endidx): """ complete the open command """ args = self.split_arg(line[0:begidx]) - + # Directory continuation if os.path.sep in args[-1] + text: return self.path_completion(text, @@ -1751,10 +1751,10 @@ def complete_open(self, text, line, begidx, endidx): if os.path.isfile(os.path.join(path,'README')): possibility.append('README') if os.path.isdir(os.path.join(path,'Cards')): - possibility += [f for f in os.listdir(os.path.join(path,'Cards')) + 
possibility += [f for f in os.listdir(os.path.join(path,'Cards')) if f.endswith('.dat')] if os.path.isdir(os.path.join(path,'HTML')): - possibility += [f for f in os.listdir(os.path.join(path,'HTML')) + possibility += [f for f in os.listdir(os.path.join(path,'HTML')) if f.endswith('.html') and 'default' not in f] else: possibility.extend(['./','../']) @@ -1763,7 +1763,7 @@ def complete_open(self, text, line, begidx, endidx): if os.path.exists('MG5_debug'): possibility.append('MG5_debug') return self.list_completion(text, possibility) - + def complete_set(self, text, line, begidx, endidx): "Complete the set command" @@ -1784,27 +1784,27 @@ def complete_set(self, text, line, begidx, endidx): elif len(args) >2 and args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args if a.endswith(os.path.sep)]), - only_dirs = True) - + only_dirs = True) + def complete_survey(self, text, line, begidx, endidx): """ Complete the survey command """ - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + return self.list_completion(text, self._run_options, line) - + complete_refine = complete_survey complete_combine_events = complete_survey complite_store = complete_survey complete_generate_events = complete_survey complete_create_gridpack = complete_survey - + def complete_generate_events(self, text, line, begidx, endidx): """ Complete the generate events""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() @@ -1813,17 +1813,17 @@ def complete_generate_events(self, text, line, begidx, endidx): return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) def 
complete_initMadLoop(self, text, line, begidx, endidx): "Complete the initMadLoop command" - + numbers = [str(i) for i in range(10)] opts = ['-f','-r','--nPS='] - + args = self.split_arg(line[0:begidx], error=False) if len(line) >=6 and line[begidx-6:begidx]=='--nPS=': return self.list_completion(text, numbers, line) @@ -1840,18 +1840,18 @@ def complete_launch(self, *args, **opts): def complete_calculate_decay_widths(self, text, line, begidx, endidx): """ Complete the calculate_decay_widths command""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + opts = self._run_options + self._calculate_decay_options return self.list_completion(text, opts, line) - + def complete_display(self, text, line, begidx, endidx): - """ Complete the display command""" - + """ Complete the display command""" + args = self.split_arg(line[0:begidx], error=False) if len(args) >= 2 and args[1] =='results': start = line.find('results') @@ -1860,44 +1860,44 @@ def complete_display(self, text, line, begidx, endidx): def complete_multi_run(self, text, line, begidx, endidx): """complete multi run command""" - + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: data = [str(i) for i in range(0,20)] return self.list_completion(text, data, line) - + if line.endswith('run=') and not text: return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - - - + + + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - + def complete_plot(self, text, line, begidx, endidx): """ Complete 
the plot command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1: return self.list_completion(text, self._plot_mode) else: return self.list_completion(text, self._plot_mode + list(self.results.keys())) - + def complete_syscalc(self, text, line, begidx, endidx, formatting=True): """ Complete the syscalc command """ - + output = {} args = self.split_arg(line[0:begidx], error=False) - + if len(args) <=1: output['RUN_NAME'] = self.list_completion(list(self.results.keys())) output['MODE'] = self.list_completion(text, self._syscalc_mode) @@ -1907,12 +1907,12 @@ def complete_syscalc(self, text, line, begidx, endidx, formatting=True): if run in self.results: tags = ['--tag=%s' % tag['tag'] for tag in self.results[run]] output['options'] += tags - + return self.deal_multiple_categories(output, formatting) - + def complete_remove(self, text, line, begidx, endidx): """Complete the remove command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1 and (text.startswith('--t')): run = args[1] @@ -1932,8 +1932,8 @@ def complete_remove(self, text, line, begidx, endidx): data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1] for n in data] return self.list_completion(text, ['all'] + data) - - + + def complete_shower(self,text, line, begidx, endidx): "Complete the shower command" args = self.split_arg(line[0:begidx], error=False) @@ -1941,7 +1941,7 @@ def complete_shower(self,text, line, begidx, endidx): return self.list_completion(text, self._interfaced_showers) elif len(args)>1 and args[1] in self._interfaced_showers: return getattr(self, 'complete_%s' % text)\ - (text, args[1],line.replace(args[0]+' ',''), + (text, args[1],line.replace(args[0]+' ',''), begidx-len(args[0])-1, endidx-len(args[0])-1) def complete_pythia8(self,text, line, begidx, endidx): @@ -1955,11 +1955,11 @@ def complete_pythia8(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = 
self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_madanalysis5_parton(self,text, line, begidx, endidx): @@ -1978,19 +1978,19 @@ def complete_madanalysis5_parton(self,text, line, begidx, endidx): else: tmp2 = self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) - return tmp1 + tmp2 + return tmp1 + tmp2 elif '--MA5_stdout_lvl=' in line and not any(arg.startswith( '--MA5_stdout_lvl=') for arg in args): - return self.list_completion(text, - ['--MA5_stdout_lvl=%s'%opt for opt in + return self.list_completion(text, + ['--MA5_stdout_lvl=%s'%opt for opt in ['logging.INFO','logging.DEBUG','logging.WARNING', 'logging.CRITICAL','90']], line) else: - return self.list_completion(text, ['-f', + return self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) def complete_pythia(self,text, line, begidx, endidx): - "Complete the pythia command" + "Complete the pythia command" args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: @@ -2001,16 +2001,16 @@ def complete_pythia(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_pgs(self,text, line, begidx, endidx): "Complete the pythia command" - args = self.split_arg(line[0:begidx], error=False) + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: 
#return valid run_name data = misc.glob(pjoin('*', '*_pythia_events.hep.gz'), pjoin(self.me_dir, 'Events')) @@ -2019,23 +2019,23 @@ def complete_pgs(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--tag=' ,'--no_default'], line) - return tmp1 + tmp2 + return tmp1 + tmp2 else: - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--tag=','--no_default'], line) - complete_delphes = complete_pgs - complete_rivet = complete_pgs + complete_delphes = complete_pgs + complete_rivet = complete_pgs #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCmd): - """The command line processor of Mad Graph""" - + """The command line processor of Mad Graph""" + LO = True # Truth values @@ -2063,7 +2063,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm cluster_mode = 0 queue = 'madgraph' nb_core = None - + next_possibility = { 'start': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]', 'calculate_decay_widths [OPTIONS]', @@ -2080,9 +2080,9 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm 'pgs': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'], 'delphes' : ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'] } - + asking_for_run = AskRun - + ############################################################################ def __init__(self, me_dir = None, options={}, *completekey, **stdin): """ add information to the cmd """ @@ -2095,16 +2095,16 @@ def __init__(self, me_dir = None, options={}, *completekey, **stdin): if self.web: os.system('touch %s' % pjoin(self.me_dir,'Online')) - 
self.load_results_db() + self.load_results_db() self.results.def_web_mode(self.web) self.Gdirs = None - + self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) self.configured = 0 # time for reading the card self._options = {} # for compatibility with extended_cmd - - + + def pass_in_web_mode(self): """configure web data""" self.web = True @@ -2113,22 +2113,22 @@ def pass_in_web_mode(self): if os.environ['MADGRAPH_BASE']: self.options['mg5_path'] = pjoin(os.environ['MADGRAPH_BASE'],'MG5') - ############################################################################ + ############################################################################ def check_output_type(self, path): """ Check that the output path is a valid madevent directory """ - + bin_path = os.path.join(path,'bin') if os.path.isfile(os.path.join(bin_path,'generate_events')): return True - else: + else: return False ############################################################################ def set_configuration(self, amcatnlo=False, final=True, **opt): - """assign all configuration variable from file + """assign all configuration variable from file loop over the different config file if config_file not define """ - - super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, + + super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, final=final, **opt) if not final: @@ -2171,24 +2171,24 @@ def set_configuration(self, amcatnlo=False, final=True, **opt): if not os.path.exists(pjoin(path, 'sys_calc')): logger.info("No valid SysCalc path found") continue - # No else since the next line reinitialize the option to the + # No else since the next line reinitialize the option to the #previous value anyway self.options[key] = os.path.realpath(path) continue else: self.options[key] = None - - + + return self.options ############################################################################ - def do_banner_run(self, line): + def do_banner_run(self, line): """Make a run from the banner file""" - + 
args = self.split_arg(line) #check the validity of the arguments - self.check_banner_run(args) - + self.check_banner_run(args) + # Remove previous cards for name in ['delphes_trigger.dat', 'delphes_card.dat', 'pgs_card.dat', 'pythia_card.dat', 'madspin_card.dat', @@ -2197,20 +2197,20 @@ def do_banner_run(self, line): os.remove(pjoin(self.me_dir, 'Cards', name)) except Exception: pass - + banner_mod.split_banner(args[0], self.me_dir, proc_card=False) - + # Check if we want to modify the run if not self.force: ans = self.ask('Do you want to modify the Cards?', 'n', ['y','n']) if ans == 'n': self.force = True - + # Call Generate events self.exec_cmd('generate_events %s %s' % (self.run_name, self.force and '-f' or '')) - - - + + + ############################################################################ def do_display(self, line, output=sys.stdout): """Display current internal status""" @@ -2223,7 +2223,7 @@ def do_display(self, line, output=sys.stdout): #return valid run_name data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1:] for n in data] - + if data: out = {} for name, tag in data: @@ -2235,11 +2235,11 @@ def do_display(self, line, output=sys.stdout): print('the runs available are:') for run_name, tags in out.items(): print(' run: %s' % run_name) - print(' tags: ', end=' ') + print(' tags: ', end=' ') print(', '.join(tags)) else: print('No run detected.') - + elif args[0] == 'options': outstr = " Run Options \n" outstr += " ----------- \n" @@ -2260,8 +2260,8 @@ def do_display(self, line, output=sys.stdout): if value == default: outstr += " %25s \t:\t%s\n" % (key,value) else: - outstr += " %25s \t:\t%s (user set)\n" % (key,value) - outstr += "\n" + outstr += " %25s \t:\t%s (user set)\n" % (key,value) + outstr += "\n" outstr += " Configuration Options \n" outstr += " --------------------- \n" for key, default in self.options_configuration.items(): @@ -2275,15 +2275,15 @@ def do_display(self, line, 
output=sys.stdout): self.do_print_results(' '.join(args[1:])) else: super(MadEventCmd, self).do_display(line, output) - + def do_save(self, line, check=True, to_keep={}): - """Not in help: Save information to file""" + """Not in help: Save information to file""" args = self.split_arg(line) # Check argument validity if check: self.check_save(args) - + if args[0] == 'options': # First look at options which should be put in MG5DIR/input to_define = {} @@ -2295,7 +2295,7 @@ def do_save(self, line, check=True, to_keep={}): for key, default in self.options_madevent.items(): if self.options[key] != self.options_madevent[key]: to_define[key] = self.options[key] - + if '--all' in args: for key, default in self.options_madgraph.items(): if self.options[key] != self.options_madgraph[key]: @@ -2312,12 +2312,12 @@ def do_save(self, line, check=True, to_keep={}): filepath = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basefile = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basedir = self.me_dir - + if to_keep: to_define = to_keep self.write_configuration(filepath, basefile, basedir, to_define) - - + + def do_edit_cards(self, line): @@ -2326,80 +2326,80 @@ def do_edit_cards(self, line): # Check argument's validity mode = self.check_generate_events(args) self.ask_run_configuration(mode) - + return ############################################################################ - + ############################################################################ def do_restart_gridpack(self, line): """ syntax restart_gridpack --precision=1.0 --restart_zero collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) - + # initialize / remove lhapdf mode #self.run_card = 
banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) #self.configure_directory() - + gensym = gen_ximprove.gensym(self) - + min_precision = 1.0 resubmit_zero=False if '--precision=' in line: s = line.index('--precision=') + len('--precision=') arg=line[s:].split(1)[0] min_precision = float(arg) - + if '--restart_zero' in line: resubmit_zero = True - - + + gensym.resubmit(min_precision, resubmit_zero) self.monitor(run_type='All jobs submitted for gridpack', html=True) #will be done during the refine (more precisely in gen_ximprove) cross, error = sum_html.make_all_html_results(self) self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(gensym.run_statistics)) - + #self.exec_cmd('combine_events', postcmd=False) #self.exec_cmd('store_events', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) self.exec_cmd('create_gridpack', postcmd=False) - - - ############################################################################ + + + ############################################################################ ############################################################################ def do_generate_events(self, line): """Main Commands: launch the full chain """ - + self.banner = None self.Gdirs = None - + args = self.split_arg(line) # Check argument's validity mode = self.check_generate_events(args) switch_mode = self.ask_run_configuration(mode, args) if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir), None, 'parton') else: self.set_run_name(args[0], None, 'parton', True) args.pop(0) - + 
self.run_generate_events(switch_mode, args) self.postprocessing() @@ -2420,8 +2420,8 @@ def postprocessing(self): def rivet_postprocessing(self, rivet_config, postprocess_RIVET, postprocess_CONTUR): - # Check number of Rivet jobs to run - run_dirs = [pjoin(self.me_dir, 'Events',run_name) + # Check number of Rivet jobs to run + run_dirs = [pjoin(self.me_dir, 'Events',run_name) for run_name in self.postprocessing_dirs] nb_rivet = len(run_dirs) @@ -2550,10 +2550,10 @@ def wait_monitoring(Idle, Running, Done): wrapper = open(pjoin(self.me_dir, "Analysis", "contur", "run_contur.sh"), "w") wrapper.write(set_env) - + wrapper.write('{0}\n'.format(contur_cmd)) wrapper.close() - + misc.call(["run_contur.sh"], cwd=(pjoin(self.me_dir, "Analysis", "contur"))) logger.info("Contur outputs are stored in {0}".format(pjoin(self.me_dir, "Analysis", "contur","conturPlot"))) @@ -2572,7 +2572,7 @@ def run_generate_events(self, switch_mode, args): self.do_set('run_mode 2') self.do_set('nb_core 1') - if self.run_card['gridpack'] in self.true: + if self.run_card['gridpack'] in self.true: # Running gridpack warmup gridpack_opts=[('accuracy', 0.01), ('points', 2000), @@ -2593,7 +2593,7 @@ def run_generate_events(self, switch_mode, args): # Regular run mode logger.info('Generating %s events with run name %s' % (self.run_card['nevents'], self.run_name)) - + self.exec_cmd('survey %s %s' % (self.run_name,' '.join(args)), postcmd=False) nb_event = self.run_card['nevents'] @@ -2601,7 +2601,7 @@ def run_generate_events(self, switch_mode, args): self.exec_cmd('refine %s' % nb_event, postcmd=False) if not float(self.results.current['cross']): # Zero cross-section. Try to guess why - text = '''Survey return zero cross section. + text = '''Survey return zero cross section. Typical reasons are the following: 1) A massive s-channel particle has a width set to zero. 
2) The pdf are zero for at least one of the initial state particles @@ -2613,17 +2613,17 @@ def run_generate_events(self, switch_mode, args): raise ZeroResult('See https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/FAQ-General-14') else: bypass_run = True - + #we can bypass the following if scan and first result is zero if not bypass_run: self.exec_cmd('refine %s --treshold=%s' % (nb_event,self.run_card['second_refine_treshold']) , postcmd=False) - + self.exec_cmd('combine_events', postcmd=False,printcmd=False) self.print_results_in_shell(self.results.current) if self.run_card['use_syst']: - if self.run_card['systematics_program'] == 'auto': + if self.run_card['systematics_program'] == 'auto': scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): to_use = 'systematics' @@ -2634,26 +2634,26 @@ def run_generate_events(self, switch_mode, args): else: logger.critical('Unvalid options for systematics_program: bypass computation of systematics variations.') to_use = 'none' - + if to_use == 'systematics': if self.run_card['systematics_arguments'] != ['']: self.exec_cmd('systematics %s %s ' % (self.run_name, - ' '.join(self.run_card['systematics_arguments'])), + ' '.join(self.run_card['systematics_arguments'])), postcmd=False, printcmd=False) else: self.exec_cmd('systematics %s --from_card' % self.run_name, - postcmd=False,printcmd=False) + postcmd=False,printcmd=False) elif to_use == 'syscalc': self.run_syscalc('parton') - - - self.create_plot('parton') - self.exec_cmd('store_events', postcmd=False) + + + self.create_plot('parton') + self.exec_cmd('store_events', postcmd=False) if self.run_card['boost_event'].strip() and self.run_card['boost_event'] != 'False': self.boost_events() - - - self.exec_cmd('reweight -from_cards', postcmd=False) + + + self.exec_cmd('reweight -from_cards', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) if self.run_card['time_of_flight']>=0: self.exec_cmd("add_time_of_flight --threshold=%s" % 
self.run_card['time_of_flight'] ,postcmd=False) @@ -2664,43 +2664,43 @@ def run_generate_events(self, switch_mode, args): self.create_root_file(input , output) self.exec_cmd('madanalysis5_parton --no_default', postcmd=False, printcmd=False) - # shower launches pgs/delphes if needed + # shower launches pgs/delphes if needed self.exec_cmd('shower --no_default', postcmd=False, printcmd=False) self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) self.exec_cmd('rivet --no_default', postcmd=False, printcmd=False) self.store_result() - - if self.allow_notification_center: - misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), - '%s: %s +- %s ' % (self.results.current['run_name'], + + if self.allow_notification_center: + misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), + '%s: %s +- %s ' % (self.results.current['run_name'], self.results.current['cross'], self.results.current['error'])) - + def boost_events(self): - + if not self.run_card['boost_event']: return - + if self.run_card['boost_event'].startswith('lambda'): if not isinstance(self, cmd.CmdShell): raise Exception("boost not allowed online") filter = eval(self.run_card['boost_event']) else: raise Exception - + path = [pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')] - + for p in path: if os.path.exists(p): event_path = p break else: raise Exception("fail to find event file for the boost") - - + + lhe = lhe_parser.EventFile(event_path) with misc.TMP_directory() as tmp_dir: output = lhe_parser.EventFile(pjoin(tmp_dir, os.path.basename(event_path)), 'w') @@ -2711,28 +2711,28 @@ def boost_events(self): event.boost(filter) #write this modify event output.write(str(event)) - output.write('\n') + output.write('\n') lhe.close() - 
files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) - - - - - + files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) + + + + + def do_initMadLoop(self,line): - """Compile and run MadLoop for a certain number of PS point so as to + """Compile and run MadLoop for a certain number of PS point so as to initialize MadLoop (setup the zero helicity and loop filter.)""" - + args = line.split() # Check argument's validity options = self.check_initMadLoop(args) - + if not options['force']: self.ask_edit_cards(['MadLoopParams.dat'], mode='fixed', plot=False) self.exec_cmd('treatcards loop --no_MadLoopInit') if options['refresh']: - for filter in misc.glob('*Filter*', + for filter in misc.glob('*Filter*', pjoin(self.me_dir,'SubProcesses','MadLoop5_resources')): logger.debug("Resetting filter '%s'."%os.path.basename(filter)) os.remove(filter) @@ -2753,14 +2753,14 @@ def do_initMadLoop(self,line): def do_launch(self, line, *args, **opt): """Main Commands: exec generate_events for 2>N and calculate_width for 1>N""" - + if self.ninitial == 1: logger.info("Note that since 2.3. 
The launch for 1>N pass in event generation\n"+ " To have the previous behavior use the calculate_decay_widths function") # self.do_calculate_decay_widths(line, *args, **opt) #else: self.do_generate_events(line, *args, **opt) - + def print_results_in_shell(self, data): """Have a nice results prints in the shell, data should be of type: gen_crossxhtml.OneTagResults""" @@ -2770,7 +2770,7 @@ def print_results_in_shell(self, data): if data['run_statistics']: globalstat = sum_html.RunStatistics() - + logger.info(" " ) logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2786,13 +2786,13 @@ def print_results_in_shell(self, data): logger.warning(globalstat.get_warning_text()) logger.info(" ") - + logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) - + total_time = int(sum(_['cumulative_timing'] for _ in data['run_statistics'].values())) if total_time > 0: logger.info(" Cumulative sequential time for this run: %s"%misc.format_time(total_time)) - + if self.ninitial == 1: logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) else: @@ -2810,18 +2810,18 @@ def print_results_in_shell(self, data): if len(split)!=3: continue scale, cross, error = split - cross_sections[float(scale)] = (float(cross), float(error)) + cross_sections[float(scale)] = (float(cross), float(error)) if len(cross_sections)>0: logger.info(' Pythia8 merged cross-sections are:') for scale in sorted(cross_sections.keys()): logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ (scale,cross_sections[scale][0],cross_sections[scale][1])) - + else: if self.ninitial == 1: logger.info(" Matched width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) else: - logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) + logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) logger.info(" 
Nb of events after matching/merging : %d" % int(data['nb_event_pythia'])) if self.run_card['use_syst'] in self.true and \ (int(self.run_card['ickkw'])==1 or self.run_card['ktdurham']>0.0 @@ -2838,9 +2838,9 @@ def print_results_in_file(self, data, path, mode='w', format='full'): data should be of type: gen_crossxhtml.OneTagResults""" if not data: return - + fsock = open(path, mode) - + if data['run_statistics']: logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2851,7 +2851,7 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if format == "full": fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ (data['run_name'],data['tag'], os.path.basename(self.me_dir))) - + if self.ninitial == 1: fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) else: @@ -2861,20 +2861,20 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if self.ninitial == 1: fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) else: - fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) + fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) fsock.write(" \n" ) elif format == "short": if mode == "w": fsock.write("# run_name tag cross error Nb_event cross_after_matching nb_event_after matching\n") - + if data['cross_pythia'] and data['nb_event_pythia']: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s %(cross_pythia)s %(nb_event_pythia)s\n" else: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s\n" fsock.write(text % data) - - ############################################################################ + + ############################################################################ def do_calculate_decay_widths(self, 
line): """Main Commands: launch decay width calculation and automatic inclusion of calculated widths and BRs in the param_card.""" @@ -2887,21 +2887,21 @@ def do_calculate_decay_widths(self, line): self.Gdirs = None if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], reload_card=True) args.pop(0) self.configure_directory() - + # Running gridpack warmup opts=[('accuracy', accuracy), # default 0.01 ('points', 1000), ('iterations',9)] logger.info('Calculating decay widths with run name %s' % self.run_name) - + self.exec_cmd('survey %s %s' % \ (self.run_name, " ".join(['--' + opt + '=' + str(val) for (opt,val) \ @@ -2910,26 +2910,26 @@ def do_calculate_decay_widths(self, line): self.refine_mode = "old" # specify how to combine event self.exec_cmd('combine_events', postcmd=False) self.exec_cmd('store_events', postcmd=False) - + self.collect_decay_widths() self.print_results_in_shell(self.results.current) - self.update_status('calculate_decay_widths done', - level='parton', makehtml=False) + self.update_status('calculate_decay_widths done', + level='parton', makehtml=False) + - ############################################################################ def collect_decay_widths(self): - """ Collect the decay widths and calculate BRs for all particles, and put - in param_card form. + """ Collect the decay widths and calculate BRs for all particles, and put + in param_card form. """ - + particle_dict = {} # store the results run_name = self.run_name # Looping over the Subprocesses for P_path in SubProcesses.get_subP(self.me_dir): ids = SubProcesses.get_subP_ids(P_path) - # due to grouping we need to compute the ratio factor for the + # due to grouping we need to compute the ratio factor for the # ungroup resutls (that we need here). 
Note that initial particles # grouping are not at the same stage as final particle grouping nb_output = len(ids) / (len(set([p[0] for p in ids]))) @@ -2940,30 +2940,30 @@ def collect_decay_widths(self): particle_dict[particles[0]].append([particles[1:], result/nb_output]) except KeyError: particle_dict[particles[0]] = [[particles[1:], result/nb_output]] - + self.update_width_in_param_card(particle_dict, initial = pjoin(self.me_dir, 'Cards', 'param_card.dat'), output=pjoin(self.me_dir, 'Events', run_name, "param_card.dat")) - + @staticmethod def update_width_in_param_card(decay_info, initial=None, output=None): # Open the param_card.dat and insert the calculated decays and BRs - + if not output: output = initial - + param_card_file = open(initial) param_card = param_card_file.read().split('\n') param_card_file.close() decay_lines = [] line_number = 0 - # Read and remove all decays from the param_card + # Read and remove all decays from the param_card while line_number < len(param_card): line = param_card[line_number] if line.lower().startswith('decay'): - # Read decay if particle in decay_info - # DECAY 6 1.455100e+00 + # Read decay if particle in decay_info + # DECAY 6 1.455100e+00 line = param_card.pop(line_number) line = line.split() particle = 0 @@ -2996,7 +2996,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): break line=param_card[line_number] if particle and particle not in decay_info: - # No decays given, only total width + # No decays given, only total width decay_info[particle] = [[[], width]] else: # Not decay line_number += 1 @@ -3004,7 +3004,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): while not param_card[-1] or param_card[-1].startswith('#'): param_card.pop(-1) - # Append calculated and read decays to the param_card + # Append calculated and read decays to the param_card param_card.append("#\n#*************************") param_card.append("# Decay widths *") 
param_card.append("#*************************") @@ -3018,7 +3018,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): param_card.append("# BR NDA ID1 ID2 ...") brs = [[(val[1]/width).real, val[0]] for val in decay_info[key] if val[1]] for val in sorted(brs, reverse=True): - param_card.append(" %e %i %s # %s" % + param_card.append(" %e %i %s # %s" % (val[0].real, len(val[1]), " ".join([str(v) for v in val[1]]), val[0] * width @@ -3031,7 +3031,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): ############################################################################ def do_multi_run(self, line): - + args = self.split_arg(line) # Check argument's validity mode = self.check_multi_run(args) @@ -3047,7 +3047,7 @@ def do_multi_run(self, line): self.check_param_card(path, run=False) #store it locally to avoid relaunch param_card_iterator, self.param_card_iterator = self.param_card_iterator, [] - + crossoversig = 0 inv_sq_err = 0 nb_event = 0 @@ -3055,8 +3055,8 @@ def do_multi_run(self, line): self.nb_refine = 0 self.exec_cmd('generate_events %s_%s -f' % (main_name, i), postcmd=False) # Update collected value - nb_event += int(self.results[self.run_name][-1]['nb_event']) - self.results.add_detail('nb_event', nb_event , run=main_name) + nb_event += int(self.results[self.run_name][-1]['nb_event']) + self.results.add_detail('nb_event', nb_event , run=main_name) cross = self.results[self.run_name][-1]['cross'] error = self.results[self.run_name][-1]['error'] + 1e-99 crossoversig+=cross/error**2 @@ -3070,7 +3070,7 @@ def do_multi_run(self, line): os.mkdir(pjoin(self.me_dir,'Events', self.run_name)) except Exception: pass - os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' + os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' % {'bin': 
self.dirbin, 'event': pjoin(self.me_dir,'Events'), 'name': self.run_name}) @@ -3084,19 +3084,19 @@ def do_multi_run(self, line): self.create_root_file('%s/unweighted_events.lhe' % self.run_name, '%s/unweighted_events.root' % self.run_name) - - path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") + + path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") self.create_plot('parton', path, pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') ) - - if not os.path.exists('%s.gz' % path): + + if not os.path.exists('%s.gz' % path): misc.gzip(path) self.update_status('', level='parton') - self.print_results_in_shell(self.results.current) - + self.print_results_in_shell(self.results.current) + cpath = pjoin(self.me_dir,'Cards','param_card.dat') if param_card_iterator: @@ -3112,21 +3112,21 @@ def do_multi_run(self, line): path = pjoin(self.me_dir, 'Events','scan_%s.txt' % scan_name) logger.info("write all cross-section results in %s" % path, '$MG:BOLD') param_card_iterator.write_summary(path) - - ############################################################################ + + ############################################################################ def do_treatcards(self, line, mode=None, opt=None): """Advanced commands: create .inc files from param_card.dat/run_card.dat""" if not mode and not opt: args = self.split_arg(line) mode, opt = self.check_treatcards(args) - + # To decide whether to refresh MadLoop's helicity filters, it is necessary # to check if the model parameters where modified or not, before doing - # anything else. + # anything else. need_MadLoopFilterUpdate = False - # Just to record what triggered the reinitialization of MadLoop for a + # Just to record what triggered the reinitialization of MadLoop for a # nice debug message. 
type_of_change = '' if not opt['forbid_MadLoopInit'] and self.proc_characteristics['loop_induced'] \ @@ -3137,10 +3137,10 @@ def do_treatcards(self, line, mode=None, opt=None): (os.path.getmtime(paramDat)-os.path.getmtime(paramInc)) > 0.0: need_MadLoopFilterUpdate = True type_of_change = 'model' - + ML_in = pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat') ML_out = pjoin(self.me_dir,"SubProcesses", - "MadLoop5_resources", "MadLoopParams.dat") + "MadLoop5_resources", "MadLoopParams.dat") if (not os.path.isfile(ML_in)) or (not os.path.isfile(ML_out)) or \ (os.path.getmtime(ML_in)-os.path.getmtime(ML_out)) > 0.0: need_MadLoopFilterUpdate = True @@ -3148,7 +3148,7 @@ def do_treatcards(self, line, mode=None, opt=None): #check if no 'Auto' are present in the file self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) - + if mode in ['param', 'all']: model = self.find_model_name() tmp_model = os.path.basename(model) @@ -3160,9 +3160,9 @@ def do_treatcards(self, line, mode=None, opt=None): check_param_card.check_valid_param_card(mg5_param) opt['param_card'] = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') else: - check_param_card.check_valid_param_card(opt['param_card']) - - logger.debug('write compile file for card: %s' % opt['param_card']) + check_param_card.check_valid_param_card(opt['param_card']) + + logger.debug('write compile file for card: %s' % opt['param_card']) param_card = check_param_card.ParamCard(opt['param_card']) outfile = pjoin(opt['output_dir'], 'param_card.inc') ident_card = pjoin(self.me_dir,'Cards','ident_card.dat') @@ -3185,10 +3185,10 @@ def do_treatcards(self, line, mode=None, opt=None): devnull.close() default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat') - need_mp = self.proc_characteristics['loop_induced'] + need_mp = self.proc_characteristics['loop_induced'] param_card.write_inc_file(outfile, ident_card, default, need_mp=need_mp) - - + + if mode in ['run', 'all']: if not hasattr(self, 'run_card'): 
run_card = banner_mod.RunCard(opt['run_card'], path=pjoin(self.me_dir, 'Cards', 'run_card.dat')) @@ -3202,7 +3202,7 @@ def do_treatcards(self, line, mode=None, opt=None): run_card['lpp2'] = 0 run_card['ebeam1'] = 0 run_card['ebeam2'] = 0 - + # Ensure that the bias parameters has all the required input from the # run_card if run_card['bias_module'].lower() not in ['dummy','none']: @@ -3219,7 +3219,7 @@ def do_treatcards(self, line, mode=None, opt=None): mandatory_file,run_card['bias_module'])) misc.copytree(run_card['bias_module'], pjoin(self.me_dir,'Source','BIAS', os.path.basename(run_card['bias_module']))) - + #check expected parameters for the module. default_bias_parameters = {} start, last = False,False @@ -3244,50 +3244,50 @@ def do_treatcards(self, line, mode=None, opt=None): for pair in line.split(','): if not pair.strip(): continue - x,y =pair.split(':') + x,y =pair.split(':') x=x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y elif ':' in line: x,y = line.split(':') x = x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y for key,value in run_card['bias_parameters'].items(): if key not in default_bias_parameters: logger.warning('%s not supported by the bias module. We discard this entry.', key) else: default_bias_parameters[key] = value - run_card['bias_parameters'] = default_bias_parameters - - - # Finally write the include file + run_card['bias_parameters'] = default_bias_parameters + + + # Finally write the include file run_card.write_include_file(opt['output_dir']) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: - self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, + self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat')) # The writing out of MadLoop filter is potentially dangerous # when running in multi-core with a central disk. 
So it is turned - # off here. If these filters were not initialized then they will + # off here. If these filters were not initialized then they will # have to be re-computed at the beginning of each run. if 'WriteOutFilters' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('WriteOutFilters'): logger.info( -"""You chose to have MadLoop writing out filters. +"""You chose to have MadLoop writing out filters. Beware that this can be dangerous for local multicore runs.""") self.MadLoopparam.set('WriteOutFilters',False, changeifuserset=False) - + # The conservative settings below for 'CTModeInit' and 'ZeroThres' # help adress issues for processes like g g > h z, and g g > h g - # where there are some helicity configuration heavily suppressed - # (by several orders of magnitude) so that the helicity filter + # where there are some helicity configuration heavily suppressed + # (by several orders of magnitude) so that the helicity filter # needs high numerical accuracy to correctly handle this spread in # magnitude. Also, because one cannot use the Born as a reference - # scale, it is better to force quadruple precision *for the + # scale, it is better to force quadruple precision *for the # initialization points only*. This avoids numerical accuracy issues # when setting up the helicity filters and does not significantly # slow down the run. @@ -3298,21 +3298,21 @@ def do_treatcards(self, line, mode=None, opt=None): # It is a bit superficial to use the level 2 which tries to numerically # map matching helicities (because of CP symmetry typically) together. -# It is useless in the context of MC over helicities and it can +# It is useless in the context of MC over helicities and it can # potentially make the helicity double checking fail. self.MadLoopparam.set('HelicityFilterLevel',1, changeifuserset=False) # To be on the safe side however, we ask for 4 consecutive matching # helicity filters. 
self.MadLoopparam.set('CheckCycle',4, changeifuserset=False) - + # For now it is tricky to have each channel performing the helicity # double check. What we will end up doing is probably some kind # of new initialization round at the beginning of each launch - # command, to reset the filters. + # command, to reset the filters. self.MadLoopparam.set('DoubleCheckHelicityFilter',False, changeifuserset=False) - + # Thanks to TIR recycling, TIR is typically much faster for Loop-induced # processes when not doing MC over helicities, so that we place OPP last. if not hasattr(self, 'run_card'): @@ -3349,7 +3349,7 @@ def do_treatcards(self, line, mode=None, opt=None): logger.warning( """You chose to also use a lorentz rotation for stability tests (see parameter NRotations_[DP|QP]). Beware that, for optimization purposes, MadEvent uses manual TIR cache clearing which is not compatible - with the lorentz rotation stability test. The number of these rotations to be used will be reset to + with the lorentz rotation stability test. The number of these rotations to be used will be reset to zero by MadLoop. You can avoid this by changing the parameter 'FORCE_ML_HELICITY_SUM' int he matrix.f files to be .TRUE. so that the sum over helicity configurations is performed within MadLoop (in which case the helicity of final state particles cannot be speicfied in the LHE file.""") @@ -3363,15 +3363,15 @@ def do_treatcards(self, line, mode=None, opt=None): # self.MadLoopparam.set('NRotations_DP',0,changeifuserset=False) # Revert to the above to be slightly less robust but twice faster. 
self.MadLoopparam.set('NRotations_DP',1,changeifuserset=False) - self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) - + self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) + # Finally, the stability tests are slightly less reliable for process - # with less or equal than 4 final state particles because the + # with less or equal than 4 final state particles because the # accessible kinematic is very limited (i.e. lorentz rotations don't # shuffle invariants numerics much). In these cases, we therefore # increase the required accuracy to 10^-7. # This is important for getting g g > z z [QCD] working with a - # ptheavy cut as low as 1 GeV. + # ptheavy cut as low as 1 GeV. if self.proc_characteristics['nexternal']<=4: if ('MLStabThres' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('MLStabThres')>1.0e-7): @@ -3381,12 +3381,12 @@ def do_treatcards(self, line, mode=None, opt=None): than four external legs, so this is not recommended (especially not for g g > z z).""") self.MadLoopparam.set('MLStabThres',1.0e-7,changeifuserset=False) else: - self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) + self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) #write the output file self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses","MadLoop5_resources", "MadLoopParams.dat")) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: # Now Update MadLoop filters if necessary (if modifications were made to # the model parameters). 
@@ -3403,12 +3403,12 @@ def do_treatcards(self, line, mode=None, opt=None): elif not opt['forbid_MadLoopInit'] and \ MadLoopInitializer.need_MadLoopInit(self.me_dir): self.exec_cmd('initMadLoop -f') - - ############################################################################ + + ############################################################################ def do_survey(self, line): """Advanced commands: launch survey for the current process """ - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) @@ -3416,7 +3416,7 @@ def do_survey(self, line): if os.path.exists(pjoin(self.me_dir,'error')): os.remove(pjoin(self.me_dir,'error')) - + self.configure_directory() # Save original random number self.random_orig = self.random @@ -3435,9 +3435,9 @@ def do_survey(self, line): P_zero_result = [] # check the number of times where they are no phase-space # File for the loop (for loop induced) - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources')) and cluster.need_transfer(self.options): - tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', + tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', 'MadLoop5_resources.tar.gz'), 'w:gz', dereference=True) tf.add(pjoin(self.me_dir,'SubProcesses','MadLoop5_resources'), arcname='MadLoop5_resources') @@ -3467,7 +3467,7 @@ def do_survey(self, line): except Exception as error: logger.debug(error) pass - + jobs, P_zero_result = ajobcreator.launch() # Check if all or only some fails if P_zero_result: @@ -3481,60 +3481,60 @@ def do_survey(self, line): self.get_Gdir() for P in P_zero_result: self.Gdirs[0][pjoin(self.me_dir,'SubProcesses',P)] = [] - + self.monitor(run_type='All jobs submitted for survey', html=True) if not self.history or 'survey' in self.history[-1] or self.ninitial ==1 or \ self.run_card['gridpack']: #will be done during the refine (more precisely in gen_ximprove) cross, error = self.make_make_all_html_results() 
self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(ajobcreator.run_statistics)) self.update_status('End survey', 'parton', makehtml=False) ############################################################################ def pass_in_difficult_integration_mode(self, rate=1): """be more secure for the integration to not miss it due to strong cut""" - + # improve survey options if default if self.opts['points'] == self._survey_options['points'][1]: self.opts['points'] = (rate+2) * self._survey_options['points'][1] if self.opts['iterations'] == self._survey_options['iterations'][1]: self.opts['iterations'] = 1 + rate + self._survey_options['iterations'][1] if self.opts['accuracy'] == self._survey_options['accuracy'][1]: - self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) - + self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) + # Modify run_config.inc in order to improve the refine conf_path = pjoin(self.me_dir, 'Source','run_config.inc') files.cp(conf_path, conf_path + '.bk') # text = open(conf_path).read() - min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) - + min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) + text = re.sub('''\(min_events = \d+\)''', '(min_events = %i )' % min_evt, text) text = re.sub('''\(max_events = \d+\)''', '(max_events = %i )' % max_evt, text) fsock = open(conf_path, 'w') fsock.write(text) fsock.close() - + # Compile for name in ['../bin/internal/gen_ximprove', 'all']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) - - - ############################################################################ + + + ############################################################################ def 
do_refine(self, line): """Advanced commands: launch survey for the current process """ - devnull = open(os.devnull, 'w') + devnull = open(os.devnull, 'w') self.nb_refine += 1 args = self.split_arg(line) treshold=None - - + + for a in args: if a.startswith('--treshold='): treshold = float(a.split('=',1)[1]) @@ -3548,8 +3548,8 @@ def do_refine(self, line): break # Check argument's validity self.check_refine(args) - - refine_opt = {'err_goal': args[0], 'split_channels': True} + + refine_opt = {'err_goal': args[0], 'split_channels': True} precision = args[0] if len(args) == 2: refine_opt['max_process']= args[1] @@ -3560,15 +3560,15 @@ def do_refine(self, line): # Update random number self.update_random() self.save_random() - + if self.cluster_mode: logger.info('Creating Jobs') self.update_status('Refine results to %s' % precision, level=None) - + self.total_jobs = 0 - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + # cleanning the previous job for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() @@ -3589,14 +3589,14 @@ def do_refine(self, line): level = 5 if value.has_warning(): level = 10 - logger.log(level, + logger.log(level, value.nice_output(str('/'.join([key[0],'G%s'%key[1]]))). replace(' statistics','')) logger.debug(globalstat.nice_output('combined', no_warning=True)) - + if survey_statistics: x_improve.run_statistics = survey_statistics - + x_improve.launch() # create the ajob for the refinment. 
if not self.history or 'refine' not in self.history[-1]: cross, error = x_improve.update_html() #update html results for survey @@ -3610,9 +3610,9 @@ def do_refine(self, line): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) - + if os.path.exists(pjoin(Pdir, 'ajob1')): cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py @@ -3629,7 +3629,7 @@ def do_refine(self, line): ###self.compile(['all'], cwd=Pdir) alljobs = misc.glob('ajob*', Pdir) - + #remove associated results.dat (ensure to not mix with all data) Gre = re.compile("\s*j=(G[\d\.\w]+)") for job in alljobs: @@ -3637,49 +3637,49 @@ def do_refine(self, line): for Gdir in Gdirs: if os.path.exists(pjoin(Pdir, Gdir, 'results.dat')): os.remove(pjoin(Pdir, Gdir,'results.dat')) - - nb_tot = len(alljobs) + + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), - run_type='Refine number %s on %s (%s/%s)' % + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) - self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, html=True) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except Exception: pass - + if isinstance(x_improve, gen_ximprove.gen_ximprove_v4): # the merge of the events.lhe is handle in the x_improve class - # for splitted runs. (and partly in store_events). + # for splitted runs. (and partly in store_events). 
combine_runs.CombineRuns(self.me_dir) self.refine_mode = "old" else: self.refine_mode = "new" - + cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - self.results.add_detail('run_statistics', + self.results.add_detail('run_statistics', dict(self.results.get_detail('run_statistics'))) self.update_status('finish refine', 'parton', makehtml=False) devnull.close() - - ############################################################################ + + ############################################################################ def do_comine_iteration(self, line): """Not in help: Combine a given iteration combine_iteration Pdir Gdir S|R step - S is for survey + S is for survey R is for refine - step is the iteration number (not very critical)""" + step is the iteration number (not very critical)""" self.set_run_name("tmp") self.configure_directory(html_opening=False) @@ -3695,12 +3695,12 @@ def do_comine_iteration(self, line): gensym.combine_iteration(Pdir, Gdir, int(step)) elif mode == "R": refine = gen_ximprove.gen_ximprove_share(self) - refine.combine_iteration(Pdir, Gdir, int(step)) - - + refine.combine_iteration(Pdir, Gdir, int(step)) - - ############################################################################ + + + + ############################################################################ def do_combine_events(self, line): """Advanced commands: Launch combine events""" start=time.time() @@ -3710,11 +3710,11 @@ def do_combine_events(self, line): self.check_combine_events(args) self.update_status('Combining Events', level='parton') - + if self.run_card['gridpack'] and isinstance(self, GridPackCmd): return GridPackCmd.do_combine_events(self, line) - + # Define The Banner tag = self.run_card['run_tag'] # Update the banner with the pythia card @@ -3727,14 +3727,14 @@ def do_combine_events(self, line): self.banner.change_seed(self.random_orig) if not os.path.exists(pjoin(self.me_dir, 
'Events', self.run_name)): os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) - self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, + self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -3751,12 +3751,12 @@ def do_combine_events(self, line): os.remove(pjoin(Gdir, 'events.lhe')) continue - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec'), result.get('xerru'), result.get('axsec') ) - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.run_card['nevents']) @@ -3765,13 +3765,13 @@ def do_combine_events(self, line): AllEvent.add(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() if len(AllEvent) == 0: - nb_event = 0 + nb_event = 0 else: nb_event = AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.run_card['nevents'], @@ -3791,22 +3791,22 @@ def do_combine_events(self, line): os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) - + if self.run_card['bias_module'].lower() not in ['dummy', 'none'] and nb_event: self.correct_bias() elif 
self.run_card['custom_fcts']: self.correct_bias() logger.info("combination of events done in %s s ", time.time()-start) - + self.to_store.append('event') - - ############################################################################ + + ############################################################################ def correct_bias(self): - """check the first event and correct the weight by the bias + """check the first event and correct the weight by the bias and correct the cross-section. - If the event do not have the bias tag it means that the bias is + If the event do not have the bias tag it means that the bias is one modifying the cross-section/shape so we have nothing to do """ @@ -3834,7 +3834,7 @@ def correct_bias(self): output.write('') output.close() lhe.close() - + # MODIFY THE BANNER i.e. INIT BLOCK # ensure information compatible with normalisation choice total_cross = sum(cross[key] for key in cross) @@ -3846,8 +3846,8 @@ def correct_bias(self): elif self.run_card['event_norm'] == 'unity': total_cross = self.results.current['cross'] * total_cross / nb_event for key in cross: - cross[key] *= total_cross / nb_event - + cross[key] *= total_cross / nb_event + bannerfile = lhe_parser.EventFile(pjoin(self.me_dir, 'Events', self.run_name, '.banner.tmp.gz'),'w') banner = banner_mod.Banner(lhe.banner) banner.modify_init_cross(cross) @@ -3862,12 +3862,12 @@ def correct_bias(self): os.remove(lhe.name) os.remove(bannerfile.name) os.remove(output.name) - - + + self.results.current['cross'] = total_cross self.results.current['error'] = 0 - - ############################################################################ + + ############################################################################ def do_store_events(self, line): """Advanced commands: Launch store events""" @@ -3883,16 +3883,16 @@ def do_store_events(self, line): if not os.path.exists(pjoin(self.me_dir, 'Events', run)): os.mkdir(pjoin(self.me_dir, 'Events', run)) if not 
os.path.exists(pjoin(self.me_dir, 'HTML', run)): - os.mkdir(pjoin(self.me_dir, 'HTML', run)) - + os.mkdir(pjoin(self.me_dir, 'HTML', run)) + # 1) Store overall process information #input = pjoin(self.me_dir, 'SubProcesses', 'results.dat') #output = pjoin(self.me_dir, 'SubProcesses', '%s_results.dat' % run) - #files.cp(input, output) + #files.cp(input, output) # 2) Treat the files present in the P directory - # Ensure that the number of events is different of 0 + # Ensure that the number of events is different of 0 if self.results.current['nb_event'] == 0 and not self.run_card['gridpack']: logger.warning("No event detected. No cleaning performed! This should allow to run:\n" + " cd Subprocesses; ../bin/internal/combine_events\n"+ @@ -3910,18 +3910,18 @@ def do_store_events(self, line): # if os.path.exists(pjoin(G_path, 'results.dat')): # input = pjoin(G_path, 'results.dat') # output = pjoin(G_path, '%s_results.dat' % run) - # files.cp(input, output) + # files.cp(input, output) #except Exception: - # continue + # continue # Store log try: if os.path.exists(pjoin(G_path, 'log.txt')): input = pjoin(G_path, 'log.txt') output = pjoin(G_path, '%s_log.txt' % run) - files.mv(input, output) + files.mv(input, output) except Exception: continue - #try: + #try: # # Grid # for name in ['ftn26']: # if os.path.exists(pjoin(G_path, name)): @@ -3930,7 +3930,7 @@ def do_store_events(self, line): # input = pjoin(G_path, name) # output = pjoin(G_path, '%s_%s' % (run,name)) # files.mv(input, output) - # misc.gzip(pjoin(G_path, output), error=None) + # misc.gzip(pjoin(G_path, output), error=None) #except Exception: # continue # Delete ftn25 to ensure reproducible runs @@ -3940,11 +3940,11 @@ def do_store_events(self, line): # 3) Update the index.html self.gen_card_html() - + # 4) Move the Files present in Events directory E_path = pjoin(self.me_dir, 'Events') O_path = pjoin(self.me_dir, 'Events', run) - + # The events file for name in ['events.lhe', 'unweighted_events.lhe']: finput = 
pjoin(E_path, name) @@ -3960,30 +3960,30 @@ def do_store_events(self, line): # os.remove(pjoin(O_path, '%s.gz' % name)) # input = pjoin(E_path, name) ## output = pjoin(O_path, name) - + self.update_status('End Parton', level='parton', makehtml=False) devnull.close() - - - ############################################################################ + + + ############################################################################ def do_create_gridpack(self, line): """Advanced commands: Create gridpack from present run""" self.update_status('Creating gridpack', level='parton') # compile gen_ximprove misc.compile(['../bin/internal/gen_ximprove'], cwd=pjoin(self.me_dir, "Source")) - + Gdir = self.get_Gdir() Pdir = set([os.path.dirname(G) for G in Gdir]) - for P in Pdir: + for P in Pdir: allG = misc.glob('G*', path=P) for G in allG: if pjoin(P, G) not in Gdir: logger.debug('removing %s', pjoin(P,G)) shutil.rmtree(pjoin(P,G)) - - + + args = self.split_arg(line) self.check_combine_events(args) if not self.run_tag: self.run_tag = 'tag_1' @@ -3996,13 +3996,13 @@ def do_create_gridpack(self, line): cwd=self.me_dir) misc.call(['./bin/internal/clean'], cwd=self.me_dir) misc.call(['./bin/internal/make_gridpack'], cwd=self.me_dir) - files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), + files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), pjoin(self.me_dir, '%s_gridpack.tar.gz' % self.run_name)) os.system("sed -i.bak \"s/\s*.true.*=.*GridRun/ .false. 
= GridRun/g\" %s/Cards/grid_card.dat" \ % self.me_dir) self.update_status('gridpack created', level='gridpack') - - ############################################################################ + + ############################################################################ def do_shower(self, line): """launch the shower""" @@ -4010,7 +4010,7 @@ def do_shower(self, line): if len(args)>1 and args[0] in self._interfaced_showers: chosen_showers = [args.pop(0)] elif '--no_default' in line: - # If '--no_default' was specified in the arguments, then only one + # If '--no_default' was specified in the arguments, then only one # shower will be run, depending on which card is present. # but we each of them are called. (each of them check if the file exists) chosen_showers = list(self._interfaced_showers) @@ -4021,9 +4021,9 @@ def do_shower(self, line): shower_priority = ['pythia8','pythia'] chosen_showers = [sorted(chosen_showers,key=lambda sh: shower_priority.index(sh) if sh in shower_priority else len(shower_priority)+1)[0]] - + for shower in chosen_showers: - self.exec_cmd('%s %s'%(shower,' '.join(args)), + self.exec_cmd('%s %s'%(shower,' '.join(args)), postcmd=False, printcmd=False) def do_madanalysis5_parton(self, line): @@ -4039,11 +4039,11 @@ def do_madanalysis5_parton(self, line): def mg5amc_py8_interface_consistency_warning(options): """ Check the consistency of the mg5amc_py8_interface installed with the current MG5 and Pythia8 versions. 
""" - + # All this is only relevant is Pythia8 is interfaced to MG5 if not options['pythia8_path']: return None - + if not options['mg5amc_py8_interface_path']: return \ """ @@ -4053,7 +4053,7 @@ def mg5amc_py8_interface_consistency_warning(options): Consider installing the MG5_aMC-PY8 interface with the following command: MG5_aMC>install mg5amc_py8_interface """ - + mg5amc_py8_interface_path = options['mg5amc_py8_interface_path'] py8_path = options['pythia8_path'] # If the specified interface path is relative, make it absolut w.r.t MGDIR if @@ -4062,7 +4062,7 @@ def mg5amc_py8_interface_consistency_warning(options): mg5amc_py8_interface_path = pjoin(MG5DIR,mg5amc_py8_interface_path) py8_path = pjoin(MG5DIR,py8_path) - # Retrieve all the on-install and current versions + # Retrieve all the on-install and current versions fsock = open(pjoin(mg5amc_py8_interface_path, 'MG5AMC_VERSION_ON_INSTALL')) MG5_version_on_install = fsock.read().replace('\n','') fsock.close() @@ -4074,7 +4074,7 @@ def mg5amc_py8_interface_consistency_warning(options): MG5_curr_version =misc.get_pkg_info()['version'] try: p = subprocess.Popen(['./get_pythia8_version.py',py8_path], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mg5amc_py8_interface_path) (out, err) = p.communicate() out = out.decode(errors='ignore').replace('\n','') @@ -4084,37 +4084,37 @@ def mg5amc_py8_interface_consistency_warning(options): float(out) except: PY8_curr_version = None - + if not MG5_version_on_install is None and not MG5_curr_version is None: if MG5_version_on_install != MG5_curr_version: return \ """ The current version of MG5_aMC (v%s) is different than the one active when - installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). + installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). 
Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(MG5_curr_version, MG5_version_on_install) - + if not PY8_version_on_install is None and not PY8_curr_version is None: if PY8_version_on_install != PY8_curr_version: return \ """ The current version of Pythia8 (v%s) is different than the one active when - installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). + installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(PY8_curr_version,PY8_version_on_install) - + return None def setup_Pythia8RunAndCard(self, PY8_Card, run_type): """ Setup the Pythia8 Run environment and card. In particular all the process and run specific parameters of the card are automatically set here. This function returns the path where HEPMC events will be output, if any.""" - + HepMC_event_output = None tag = self.run_tag - + PY8_Card.subruns[0].systemSet('Beams:LHEF',"unweighted_events.lhe.gz") hepmc_format = PY8_Card['HEPMCoutput:file'].lower() @@ -4185,7 +4185,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): misc.mkfifo(fifo_path) # Use defaultSet not to overwrite the current userSet status PY8_Card.defaultSet('HEPMCoutput:file',fifo_path) - HepMC_event_output=fifo_path + HepMC_event_output=fifo_path elif hepmc_format in ['','/dev/null','None']: logger.warning('User disabled the HepMC output of Pythia8.') HepMC_event_output = None @@ -4206,7 +4206,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): # only if it is not already user_set. 
if PY8_Card['JetMatching:qCut']==-1.0: PY8_Card.MadGraphSet('JetMatching:qCut',1.5*self.run_card['xqcut'], force=True) - + if PY8_Card['JetMatching:qCut']<(1.5*self.run_card['xqcut']): logger.error( 'The MLM merging qCut parameter you chose (%f) is less than '%PY8_Card['JetMatching:qCut']+ @@ -4233,7 +4233,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): if PY8_Card['JetMatching:qCut'] not in qCutList: qCutList.append(PY8_Card['JetMatching:qCut']) PY8_Card.MadGraphSet('SysCalc:qCutList', qCutList, force=True) - + if PY8_Card['SysCalc:qCutList']!='auto': for scale in PY8_Card['SysCalc:qCutList']: @@ -4244,7 +4244,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): "'sys_matchscale' in the run_card) is less than 1.5*xqcut, where xqcut is"+ ' the run_card parameter (=%f)\n'%self.run_card['xqcut']+ 'It would be better/safer to use a larger qCut or a smaller xqcut.') - + # Specific MLM settings # PY8 should not implement the MLM veto since the driver should do it # if merging scale variation is turned on @@ -4294,18 +4294,18 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): CKKW_cut = 'ktdurham' elif self.run_card['ptlund']>0.0 and self.run_card['ktdurham']<=0.0: PY8_Card.subruns[0].MadGraphSet('Merging:doPTLundMerging',True) - CKKW_cut = 'ptlund' + CKKW_cut = 'ptlund' else: raise InvalidCmd("*Either* the 'ptlund' or 'ktdurham' cut in "+\ " the run_card must be turned on to activate CKKW(L) merging"+ " with Pythia8, but *both* cuts cannot be turned on at the same time."+ "\n ptlund=%f, ktdurham=%f."%(self.run_card['ptlund'],self.run_card['ktdurham'])) - + # Automatically set qWeed to the CKKWL cut if not defined by the user. if PY8_Card['SysCalc:qWeed']==-1.0: PY8_Card.MadGraphSet('SysCalc:qWeed',self.run_card[CKKW_cut], force=True) - + # MadGraphSet sets the corresponding value (in system mode) # only if it is not already user_set. 
if PY8_Card['Merging:TMS']==-1.0: @@ -4319,7 +4319,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): 'The CKKWl merging scale you chose (%f) is less than '%PY8_Card['Merging:TMS']+ 'the %s cut specified in the run_card parameter (=%f).\n'%(CKKW_cut,self.run_card[CKKW_cut])+ 'It is incorrect to use a smaller CKKWl scale than the generation-level %s cut!'%CKKW_cut) - + PY8_Card.MadGraphSet('TimeShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:rapidityOrder',False) @@ -4381,7 +4381,7 @@ def do_pythia8(self, line): try: import madgraph - except ImportError: + except ImportError: import internal.histograms as histograms else: import madgraph.various.histograms as histograms @@ -4400,16 +4400,16 @@ def do_pythia8(self, line): self.check_pythia8(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) - self.check_pythia8(args) + self.check_pythia8(args) # Update the banner with the pythia card if not self.banner or len(self.banner) <=1: # Here the level keyword 'pythia' must not be changed to 'pythia8'. self.banner = banner_mod.recover_banner(self.results, 'pythia') - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1], pythia_version=8, banner=self.banner) @@ -4425,7 +4425,7 @@ def do_pythia8(self, line): #"Please use 'event_norm = average' in the run_card to avoid this problem.") - + if not self.options['mg5amc_py8_interface_path'] or not \ os.path.exists(pjoin(self.options['mg5amc_py8_interface_path'], 'MG5aMC_PY8_interface')): @@ -4444,16 +4444,16 @@ def do_pythia8(self, line): # Again here 'pythia' is just a keyword for the simulation level. 
self.update_status('\033[92mRunning Pythia8 [arXiv:1410.3012]\033[0m', 'pythia8') - - tag = self.run_tag + + tag = self.run_tag # Now write Pythia8 card # Start by reading, starting from the default one so that the 'user_set' # tag are correctly set. - PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', + PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', 'pythia8_card_default.dat')) PY8_Card.read(pjoin(self.me_dir, 'Cards', 'pythia8_card.dat'), setter='user') - + run_type = 'default' merged_run_types = ['MLM','CKKW'] if int(self.run_card['ickkw'])==1: @@ -4471,7 +4471,7 @@ def do_pythia8(self, line): cmd_card = StringIO.StringIO() PY8_Card.write(cmd_card,pjoin(self.me_dir,'Cards','pythia8_card_default.dat'), direct_pythia_input=True) - + # Now setup the preamble to make sure that everything will use the locally # installed tools (if present) even if the user did not add it to its # environment variables. @@ -4486,13 +4486,13 @@ def do_pythia8(self, line): preamble = misc.get_HEPTools_location_setter( pjoin(MG5DIR,'HEPTools'),'lib') preamble += "\n unset PYTHIA8DATA\n" - + open(pythia_cmd_card,'w').write("""! ! It is possible to run this card manually with: ! %s %s ! 
"""%(preamble+pythia_main,os.path.basename(pythia_cmd_card))+cmd_card.getvalue()) - + # launch pythia8 pythia_log = pjoin(self.me_dir , 'Events', self.run_name , '%s_pythia8.log' % tag) @@ -4504,13 +4504,13 @@ def do_pythia8(self, line): shell_exe = None if os.path.exists('/usr/bin/env'): shell_exe = '/usr/bin/env %s'%shell - else: + else: shell_exe = misc.which(shell) if not shell_exe: raise self.InvalidCmd('No s hell could be found in your environment.\n'+ "Make sure that either '%s' is in your path or that the"%shell+\ " command '/usr/bin/env %s' exists and returns a valid path."%shell) - + exe_cmd = "#!%s\n%s"%(shell_exe,' '.join( [preamble+pythia_main, os.path.basename(pythia_cmd_card)])) @@ -4528,7 +4528,7 @@ def do_pythia8(self, line): ( os.path.exists(HepMC_event_output) and \ stat.S_ISFIFO(os.stat(HepMC_event_output).st_mode)) startPY8timer = time.time() - + # Information that will be extracted from this PY8 run PY8_extracted_information={ 'sigma_m':None, 'Nacc':None, 'Ntry':None, 'cross_sections':{} } @@ -4556,7 +4556,7 @@ def do_pythia8(self, line): n_cores = max(int(self.options['cluster_size']),1) elif self.options['run_mode']==2: n_cores = max(int(self.cluster.nb_core),1) - + lhe_file_name = os.path.basename(PY8_Card.subruns[0]['Beams:LHEF']) lhe_file = lhe_parser.EventFile(pjoin(self.me_dir,'Events', self.run_name,PY8_Card.subruns[0]['Beams:LHEF'])) @@ -4574,7 +4574,7 @@ def do_pythia8(self, line): if self.options['run_mode']==2: min_n_events_per_job = 100 elif self.options['run_mode']==1: - min_n_events_per_job = 1000 + min_n_events_per_job = 1000 min_n_core = n_events//min_n_events_per_job n_cores = max(min(min_n_core,n_cores),1) @@ -4584,8 +4584,8 @@ def do_pythia8(self, line): logger.info('Follow Pythia8 shower by running the '+ 'following command (in a separate terminal):\n tail -f %s'%pythia_log) - if self.options['run_mode']==2 and self.options['nb_core']>1: - ret_code = self.cluster.launch_and_wait(wrapper_path, + if 
self.options['run_mode']==2 and self.options['nb_core']>1: + ret_code = self.cluster.launch_and_wait(wrapper_path, argument= [], stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events',self.run_name)) else: @@ -4630,10 +4630,10 @@ def do_pythia8(self, line): wrapper = open(wrapper_path,'w') if self.options['cluster_temp_path'] is None: exe_cmd = \ -"""#!%s +"""#!%s ./%s PY8Card.dat >& PY8_log.txt """ - else: + else: exe_cmd = \ """#!%s ln -s ./events_$1.lhe.gz ./events.lhe.gz @@ -4663,21 +4663,21 @@ def do_pythia8(self, line): # Set it as executable st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) - + # Split the .lhe event file, create event partition partition=[n_available_events//n_cores]*n_cores for i in range(n_available_events%n_cores): partition[i] += 1 - + # Splitting according to the total number of events requested by the user # Will be used to determine the number of events to indicate in the PY8 split cards. partition_for_PY8=[n_events//n_cores]*n_cores for i in range(n_events%n_cores): partition_for_PY8[i] += 1 - - logger.info('Splitting .lhe event file for PY8 parallelization...') - n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) - + + logger.info('Splitting .lhe event file for PY8 parallelization...') + n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) + if n_splits!=len(partition): raise MadGraph5Error('Error during lhe file splitting. Expected %d files but obtained %d.' 
%(len(partition),n_splits)) @@ -4690,7 +4690,7 @@ def do_pythia8(self, line): # Add the necessary run content shutil.move(pjoin(parallelization_dir,lhe_file.name+'_%d.lhe.gz'%split_id), pjoin(parallelization_dir,split_files[-1])) - + logger.info('Submitting Pythia8 jobs...') for i, split_file in enumerate(split_files): # We must write a PY8Card tailored for each split so as to correct the normalization @@ -4706,7 +4706,7 @@ def do_pythia8(self, line): split_PY8_Card.write(pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,'PY8Card.dat'), add_missing=False) in_files = [pjoin(parallelization_dir,os.path.basename(pythia_main)), - pjoin(parallelization_dir,'PY8Card_%d.dat'%i), + pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,split_file)] if self.options['cluster_temp_path'] is None: out_files = [] @@ -4718,35 +4718,35 @@ def do_pythia8(self, line): if os.path.basename(in_file)==split_file: ln(in_file,selected_cwd,name='events.lhe.gz') elif os.path.basename(in_file).startswith('PY8Card'): - ln(in_file,selected_cwd,name='PY8Card.dat') + ln(in_file,selected_cwd,name='PY8Card.dat') else: - ln(in_file,selected_cwd) + ln(in_file,selected_cwd) in_files = [] wrapper_path = os.path.basename(wrapper_path) else: out_files = ['split_%d.tar.gz'%i] selected_cwd = parallelization_dir - self.cluster.submit2(wrapper_path, - argument=[str(i)], cwd=selected_cwd, + self.cluster.submit2(wrapper_path, + argument=[str(i)], cwd=selected_cwd, input_files=in_files, output_files=out_files, required_output=out_files) - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return logger.info('Pythia8 shower jobs: %d Idle, %d Running, %d Done [%s]'\ %(Idle, Running, Done, misc.format_time(time.time() - startPY8timer))) self.cluster.wait(parallelization_dir,wait_monitoring) - + logger.info('Merging results from the split PY8 runs...') if self.options['cluster_temp_path']: # Decompressing the output for i, split_file in 
enumerate(split_files): misc.call(['tar','-xzf','split_%d.tar.gz'%i],cwd=parallelization_dir) os.remove(pjoin(parallelization_dir,'split_%d.tar.gz'%i)) - + # Now merge logs pythia_log_file = open(pythia_log,'w') n_added = 0 @@ -4778,7 +4778,7 @@ def wait_monitoring(Idle, Running, Done): if n_added>0: PY8_extracted_information['sigma_m'] /= float(n_added) pythia_log_file.close() - + # djr plots djr_HwU = None n_added = 0 @@ -4845,7 +4845,7 @@ def wait_monitoring(Idle, Running, Done): if not os.path.isfile(hepmc_file): continue all_hepmc_files.append(hepmc_file) - + if len(all_hepmc_files)>0: hepmc_output = pjoin(self.me_dir,'Events',self.run_name,HepMC_event_output) with misc.TMP_directory() as tmp_dir: @@ -4860,8 +4860,8 @@ def wait_monitoring(Idle, Running, Done): break header.close() tail = open(pjoin(tmp_dir,'tail.hepmc'),'w') - n_tail = 0 - + n_tail = 0 + for line in misc.reverse_readline(all_hepmc_files[-1]): if line.startswith('HepMC::'): n_tail += 1 @@ -4871,7 +4871,7 @@ def wait_monitoring(Idle, Running, Done): tail.close() if n_tail>1: raise MadGraph5Error('HEPMC files should only have one trailing command.') - ###################################################################### + ###################################################################### # This is the most efficient way of putting together HEPMC's, *BUT* # # WARNING: NEED TO RENDER THE CODE BELOW SAFE TOWARDS INJECTION # ###################################################################### @@ -4888,12 +4888,12 @@ def wait_monitoring(Idle, Running, Done): elif sys.platform == 'darwin': # sed on MAC has slightly different synthax than on os.system(' '.join(['sed','-i',"''","'%s;$d'"% - (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) - else: - # other UNIX systems + (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) + else: + # other UNIX systems os.system(' '.join(['sed','-i']+["-e '%id'"%(i+1) for i in range(n_head)]+ ["-e '$d'",hepmc_file])) - + os.system(' 
'.join(['cat',pjoin(tmp_dir,'header.hepmc')]+all_hepmc_files+ [pjoin(tmp_dir,'tail.hepmc'),'>',hepmc_output])) @@ -4915,12 +4915,12 @@ def wait_monitoring(Idle, Running, Done): 'Inclusive cross section:' not in '\n'.join(open(pythia_log,'r').readlines()[-20:]): logger.warning('Fail to produce a pythia8 output. More info in \n %s'%pythia_log) return - + # Plot for Pythia8 successful = self.create_plot('Pythia8') if not successful: logger.warning('Failed to produce Pythia8 merging plots.') - + self.to_store.append('pythia8') # Study matched cross-sections @@ -4931,7 +4931,7 @@ def wait_monitoring(Idle, Running, Done): if self.options['run_mode']==0 or (self.options['run_mode']==2 and self.options['nb_core']==1): PY8_extracted_information['sigma_m'],PY8_extracted_information['Nacc'],\ PY8_extracted_information['Ntry'] = self.parse_PY8_log_file( - pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) + pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) else: logger.warning('Pythia8 cross-section could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1. 
YYYYY') @@ -4944,8 +4944,8 @@ def wait_monitoring(Idle, Running, Done): Ntry = PY8_extracted_information['Ntry'] sigma_m = PY8_extracted_information['sigma_m'] # Compute pythia error - error = self.results[self.run_name].return_tag(self.run_tag)['error'] - try: + error = self.results[self.run_name].return_tag(self.run_tag)['error'] + try: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) except ZeroDivisionError: # Cannot compute error @@ -4966,31 +4966,31 @@ def wait_monitoring(Idle, Running, Done): else: logger.warning('Pythia8 merged cross-sections could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1.XXXXX') - PY8_extracted_information['cross_sections'] = {} - + PY8_extracted_information['cross_sections'] = {} + cross_sections = PY8_extracted_information['cross_sections'] if cross_sections: - # Filter the cross_sections specified an keep only the ones + # Filter the cross_sections specified an keep only the ones # with central parameters and a different merging scale a_float_re = '[\+|-]?\d+(\.\d*)?([EeDd][\+|-]?\d+)?' 
central_merging_re = re.compile( '^\s*Weight_MERGING\s*=\s*(?P%s)\s*$'%a_float_re, - re.IGNORECASE) + re.IGNORECASE) cross_sections = dict( (float(central_merging_re.match(xsec).group('merging')),value) - for xsec, value in cross_sections.items() if not + for xsec, value in cross_sections.items() if not central_merging_re.match(xsec) is None) central_scale = PY8_Card['JetMatching:qCut'] if \ int(self.run_card['ickkw'])==1 else PY8_Card['Merging:TMS'] if central_scale in cross_sections: self.results.add_detail('cross_pythia8', cross_sections[central_scale][0]) self.results.add_detail('error_pythia8', cross_sections[central_scale][1]) - + #logger.info('Pythia8 merged cross-sections are:') #for scale in sorted(cross_sections.keys()): # logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ - # (scale,cross_sections[scale][0],cross_sections[scale][1])) - + # (scale,cross_sections[scale][0],cross_sections[scale][1])) + xsecs_file = open(pjoin(self.me_dir,'Events',self.run_name, '%s_merged_xsecs.txt'%tag),'w') if cross_sections: @@ -5003,9 +5003,9 @@ def wait_monitoring(Idle, Running, Done): xsecs_file.write('Cross-sections could not be read from the'+\ "XML node 'xsection' of the .dat file produced by Pythia8.") xsecs_file.close() - + #Update the banner - # We add directly the pythia command card because it has the full + # We add directly the pythia command card because it has the full # information self.banner.add(pythia_cmd_card) @@ -5022,13 +5022,13 @@ def wait_monitoring(Idle, Running, Done): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + def parse_PY8_log_file(self, log_file_path): """ Parse a log file to extract number of event and cross-section. 
""" pythiare = re.compile("Les Houches User Process\(es\)\s*\d+\s*\|\s*(?P\d+)\s*(?P\d+)\s*(?P\d+)\s*\|\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") pythia_xsec_re = re.compile("Inclusive cross section\s*:\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") sigma_m, Nacc, Ntry = None, None, None - for line in misc.BackRead(log_file_path): + for line in misc.BackRead(log_file_path): info = pythiare.search(line) if not info: # Also try to obtain the cross-section and error from the final xsec line of pythia8 log @@ -5058,7 +5058,7 @@ def parse_PY8_log_file(self, log_file_path): raise self.InvalidCmd("Could not find cross-section and event number information "+\ "in Pythia8 log\n '%s'."%log_file_path) - + def extract_cross_sections_from_DJR(self,djr_output): """Extract cross-sections from a djr XML output.""" import xml.dom.minidom as minidom @@ -5075,11 +5075,11 @@ def extract_cross_sections_from_DJR(self,djr_output): [float(xsec.childNodes[0].data.split()[0]), float(xsec.childNodes[0].data.split()[1])]) for xsec in xsections) - + def do_pythia(self, line): """launch pythia""" - - + + # Check argument's validity args = self.split_arg(line) if '--no_default' in args: @@ -5089,12 +5089,12 @@ def do_pythia(self, line): args.remove('--no_default') else: no_default = False - + if not self.run_name: self.check_pythia(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) self.check_pythia(args) @@ -5102,7 +5102,7 @@ def do_pythia(self, line): logger.error('pythia-pgs require event_norm to be on sum. 
Do not run pythia6') return - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1]) if self.options['automatic_html_opening']: @@ -5114,35 +5114,35 @@ def do_pythia(self, line): self.banner = banner_mod.recover_banner(self.results, 'pythia') pythia_src = pjoin(self.options['pythia-pgs_path'],'src') - + self.results.add_detail('run_mode', 'madevent') self.update_status('Running Pythia', 'pythia') try: os.remove(pjoin(self.me_dir,'Events','pythia.done')) except Exception: - pass - + pass + ## LAUNCHING PYTHIA # check that LHAPATH is define. if not re.search(r'^\s*LHAPATH=%s/PDFsets' % pythia_src, - open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), + open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), re.M): f = open(pjoin(self.me_dir,'Cards','pythia_card.dat'),'a') f.write('\n LHAPATH=%s/PDFsets' % pythia_src) f.close() tag = self.run_tag pythia_log = pjoin(self.me_dir, 'Events', self.run_name , '%s_pythia.log' % tag) - #self.cluster.launch_and_wait('../bin/internal/run_pythia', + #self.cluster.launch_and_wait('../bin/internal/run_pythia', # argument= [pythia_src], stdout= pythia_log, # stderr=subprocess.STDOUT, # cwd=pjoin(self.me_dir,'Events')) output_files = ['pythia_events.hep'] if self.run_card['use_syst']: output_files.append('syst.dat') - if self.run_card['ickkw'] == 1: + if self.run_card['ickkw'] == 1: output_files += ['beforeveto.tree', 'xsecs.tree', 'events.tree'] - + os.environ['PDG_MASS_TBL'] = pjoin(pythia_src,'mass_width_2004.mc') self.cluster.launch_and_wait(pjoin(pythia_src, 'pythia'), input_files=[pjoin(self.me_dir, "Events", "unweighted_events.lhe"), @@ -5152,23 +5152,23 @@ def do_pythia(self, line): stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events')) - + os.remove(pjoin(self.me_dir, "Events", "unweighted_events.lhe")) if not 
os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): logger.warning('Fail to produce pythia output. More info in \n %s' % pythia_log) return - + self.to_store.append('pythia') - + # Find the matched cross-section if int(self.run_card['ickkw']): # read the line from the bottom of the file - #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, + #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, # '%s_pythia.log' % tag)) - pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") - for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, + pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") + for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, '%s_pythia.log' % tag)): info = pythiare.search(line) if not info: @@ -5188,16 +5188,16 @@ def do_pythia(self, line): self.results.add_detail('nb_event_pythia', Nacc) #compute pythia error error = self.results[self.run_name].return_tag(self.run_tag)['error'] - if Nacc: + if Nacc: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) else: error_m = 10000 * sigma_m # works both for fixed number of generated events and fixed accepted events self.results.add_detail('error_pythia', error_m) - break + break #pythia_log.close() - + pydir = pjoin(self.options['pythia-pgs_path'], 'src') eradir = self.options['exrootanalysis_path'] madir = self.options['madanalysis_path'] @@ -5216,12 +5216,12 @@ def do_pythia(self, line): # Creating LHE file self.run_hep2lhe(banner_path) - + if int(self.run_card['ickkw']): misc.gzip(pjoin(self.me_dir,'Events','beforeveto.tree'), - stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + - if self.run_card['use_syst'] in self.true: # Calculate syscalc info based on 
syst.dat try: @@ -5233,7 +5233,7 @@ def do_pythia(self, line): # Store syst.dat misc.gzip(pjoin(self.me_dir,'Events', 'syst.dat'), stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_syst.dat.gz')) - + # Store syscalc.dat if os.path.exists(pjoin(self.me_dir, 'Events', 'syscalc.dat')): filename = pjoin(self.me_dir, 'Events' ,self.run_name, @@ -5253,7 +5253,7 @@ def do_pythia(self, line): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + ################################################################################ def do_remove(self, line): @@ -5263,8 +5263,8 @@ def do_remove(self, line): run, tag, mode = self.check_remove(args) if 'banner' in mode: mode.append('all') - - + + if run == 'all': # Check first if they are not a run with a name run. if os.path.exists(pjoin(self.me_dir, 'Events', 'all')): @@ -5280,7 +5280,7 @@ def do_remove(self, line): logger.info(error) pass # run already clear return - + # Check that run exists if not os.path.exists(pjoin(self.me_dir, 'Events', run)): raise self.InvalidCmd('No run \'%s\' detected' % run) @@ -5294,7 +5294,7 @@ def do_remove(self, line): # Found the file to delete - + to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) to_delete += misc.glob('*', pjoin(self.me_dir, 'HTML', run)) # forbid the banner to be removed @@ -5314,7 +5314,7 @@ def do_remove(self, line): if os.path.exists(pjoin(self.me_dir, 'Events', run, 'unweighted_events.lhe.gz')): to_delete.append('unweighted_events.lhe.gz') if os.path.exists(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')): - to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) + to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) if nb_rm != len(to_delete): logger.warning('Be carefull that partonic information are on the point to be removed.') if 'all' in mode: @@ -5327,8 +5327,8 @@ def do_remove(self, line): if 'delphes' not 
in mode: to_delete = [f for f in to_delete if 'delphes' not in f] if 'parton' not in mode: - to_delete = [f for f in to_delete if 'delphes' in f - or 'pgs' in f + to_delete = [f for f in to_delete if 'delphes' in f + or 'pgs' in f or 'pythia' in f] if not self.force and len(to_delete): question = 'Do you want to delete the following files?\n %s' % \ @@ -5336,7 +5336,7 @@ def do_remove(self, line): ans = self.ask(question, 'y', choices=['y','n']) else: ans = 'y' - + if ans == 'y': for file2rm in to_delete: if os.path.exists(pjoin(self.me_dir, 'Events', run, file2rm)): @@ -5374,7 +5374,7 @@ def do_remove(self, line): if ans == 'y': for file2rm in to_delete: os.remove(file2rm) - + if 'banner' in mode: to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) if tag: @@ -5389,8 +5389,8 @@ def do_remove(self, line): return elif any(['banner' not in os.path.basename(p) for p in to_delete]): if to_delete: - raise MadGraph5Error('''Some output still exists for this run. - Please remove those output first. Do for example: + raise MadGraph5Error('''Some output still exists for this run. + Please remove those output first. Do for example: remove %s all banner ''' % run) else: @@ -5400,7 +5400,7 @@ def do_remove(self, line): return else: logger.info('''The banner is not removed. In order to remove it run: - remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) + remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) # update database. 
self.results.clean(mode, run, tag) @@ -5420,7 +5420,7 @@ def do_plot(self, line): logger.info('plot for run %s' % self.run_name) if not self.force: self.ask_edit_cards(['plot_card.dat'], args, plot=True) - + if any([arg in ['all','parton'] for arg in args]): filename = pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe') if os.path.exists(filename+'.gz'): @@ -5438,8 +5438,8 @@ def do_plot(self, line): except Exception: pass else: - logger.info('No valid files for partonic plot') - + logger.info('No valid files for partonic plot') + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_events.lhe' % self.run_tag) @@ -5452,10 +5452,10 @@ def do_plot(self, line): stdout= "%s.gz" % filename) else: logger.info('No valid files for pythia plot') - - + + if any([arg in ['all','pgs'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) @@ -5464,15 +5464,15 @@ def do_plot(self, line): misc.gzip(filename) else: logger.info('No valid files for pgs plot') - + if any([arg in ['all','delphes'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) if os.path.exists(filename): self.create_plot('Delphes') - misc.gzip(filename) + misc.gzip(filename) else: logger.info('No valid files for delphes plot') @@ -5488,9 +5488,9 @@ def do_syscalc(self, line): if self.ninitial == 1: logger.error('SysCalc can\'t be run for decay processes') return - + logger.info('Calculating systematics for run %s' % self.run_name) - + self.ask_edit_cards(['run_card.dat'], args, plot=False) self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) 
if any([arg in ['all','parton'] for arg in args]): @@ -5504,7 +5504,7 @@ def do_syscalc(self, line): stdout="%s.gz" % filename) else: logger.info('No valid files for parton level systematics run.') - + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_syst.dat' % self.run_tag) @@ -5525,17 +5525,17 @@ def do_syscalc(self, line): else: logger.info('No valid files for pythia level') - + def store_result(self): - """ tar the pythia results. This is done when we are quite sure that + """ tar the pythia results. This is done when we are quite sure that the pythia output will not be use anymore """ if not self.run_name: return - + if not self.to_store: - return - + return + tag = self.run_card['run_tag'] self.update_status('storing files of previous run', level=None,\ error=True) @@ -5546,14 +5546,14 @@ def store_result(self): misc.gzip(pjoin(self.me_dir,'Events',self.run_name,"unweighted_events.lhe")) if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) - + if 'pythia' in self.to_store: self.update_status('Storing Pythia files of previous run', level='pythia', error=True) p = pjoin(self.me_dir,'Events') n = self.run_name t = tag self.to_store.remove('pythia') - misc.gzip(pjoin(p,'pythia_events.hep'), + misc.gzip(pjoin(p,'pythia_events.hep'), stdout=pjoin(p, str(n),'%s_pythia_events.hep' % t),forceexternal=True) if 'pythia8' in self.to_store: @@ -5581,26 +5581,26 @@ def store_result(self): os.system("mv " + file_path + hepmc_fileformat + " " + move_hepmc_path) self.update_status('Done', level='pythia',makehtml=False,error=True) - self.results.save() - + self.results.save() + self.to_store = [] - def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, + def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, run_type='', mode=None, **opt): """ """ argument = [str(arg) for arg in argument] if mode is 
None: mode = self.cluster_mode - + # ensure that exe is executable if os.path.exists(exe) and not os.access(exe, os.X_OK): os.system('chmod +x %s ' % exe) elif (cwd and os.path.exists(pjoin(cwd, exe))) and not \ os.access(pjoin(cwd, exe), os.X_OK): os.system('chmod +x %s ' % pjoin(cwd, exe)) - + if mode == 0: - self.update_status((remaining, 1, + self.update_status((remaining, 1, self.total_jobs - remaining -1, run_type), level=None, force=False) start = time.time() #os.system('cd %s; ./%s' % (cwd,exe)) @@ -5613,24 +5613,24 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, elif mode in [1,2]: exename = os.path.basename(exe) # For condor cluster, create the input/output files - if 'ajob' in exename: + if 'ajob' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat','dname.mg', pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) - + output_files = [] required_output = [] - + #Find the correct PDF input file input_files.append(self.get_pdf_input_filename()) - + #Find the correct ajob Gre = re.compile("\s*j=(G[\d\.\w]+)") origre = re.compile("grid_directory=(G[\d\.\w]+)") - try : + try : fsock = open(exe) except Exception: fsock = open(pjoin(cwd,exe)) @@ -5648,21 +5648,21 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if os.path.isdir(pjoin(cwd,G)): input_files.append(G) required_output.append('%s/results.dat' % G) - + if origre.search(text): G_grid = origre.search(text).groups()[0] input_files.append(pjoin(G_grid, 'ftn26')) - + #submitting - self.cluster.submit2(exe, stdout=stdout, cwd=cwd, + self.cluster.submit2(exe, stdout=stdout, cwd=cwd, input_files=input_files, output_files=output_files, required_output=required_output) elif 
'survey' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5671,7 +5671,7 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [] required_output = [] - + #Find the correct ajob suffix = "_%s" % int(float(argument[0])) if suffix == '_0': @@ -5685,12 +5685,12 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if '.' in argument[0]: offset = int(str(argument[0]).split('.')[1]) else: - offset = 0 - + offset = 0 + if offset ==0 or offset == int(float(argument[0])): if os.path.exists(pjoin(cwd, G, 'input_app.txt')): os.remove(pjoin(cwd, G, 'input_app.txt')) - + if os.path.exists(os.path.realpath(pjoin(cwd, G, 'ftn25'))): if offset == 0 or offset == int(float(argument[0])): os.remove(pjoin(cwd, G, 'ftn25')) @@ -5706,16 +5706,16 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, pass #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, required_output=required_output, **opt) elif "refine_splitted.sh" in exename: input_files = ['madevent','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - 
input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5725,25 +5725,25 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [argument[0]] required_output = [] for G in output_files: - required_output.append('%s/results.dat' % G) + required_output.append('%s/results.dat' % G) input_files.append(pjoin(argument[1], "input_app.txt")) input_files.append(pjoin(argument[1], "ftn26")) - + #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, - required_output=required_output, **opt) + required_output=required_output, **opt) + + - - else: self.cluster.submit(exe, argument=argument, stdout=stdout, cwd=cwd, **opt) - + ############################################################################ def find_madevent_mode(self): """Find if Madevent is in Group mode or not""" - + # The strategy is too look in the files Source/run_configs.inc # if we found: ChanPerJob=3 then it's a group mode. 
file_path = pjoin(self.me_dir, 'Source', 'run_config.inc') @@ -5752,11 +5752,11 @@ def find_madevent_mode(self): return 'group' else: return 'v4' - + ############################################################################ def monitor(self, run_type='monitor', mode=None, html=False): """ monitor the progress of running job """ - + starttime = time.time() if mode is None: @@ -5772,8 +5772,8 @@ def monitor(self, run_type='monitor', mode=None, html=False): else: update_status = lambda idle, run, finish: None update_first = None - try: - self.cluster.wait(self.me_dir, update_status, update_first=update_first) + try: + self.cluster.wait(self.me_dir, update_status, update_first=update_first) except Exception as error: logger.info(error) if not self.force: @@ -5788,24 +5788,24 @@ def monitor(self, run_type='monitor', mode=None, html=False): raise except KeyboardInterrupt as error: self.cluster.remove() - raise - - + raise + - ############################################################################ + + ############################################################################ def configure_directory(self, html_opening=True): - """ All action require before any type of run """ + """ All action require before any type of run """ # Basic check assert os.path.exists(pjoin(self.me_dir,'SubProcesses')) # environmental variables to be included in make_opts self.make_opts_var = {} - + #see when the last file was modified time_mod = max([os.path.getmtime(pjoin(self.me_dir,'Cards','run_card.dat')), os.path.getmtime(pjoin(self.me_dir,'Cards','param_card.dat'))]) - + if self.configured >= time_mod and hasattr(self, 'random') and hasattr(self, 'run_card'): #just ensure that cluster specific are correctly handled if self.cluster: @@ -5820,7 +5820,7 @@ def configure_directory(self, html_opening=True): #open only once the web page # Change current working directory self.launching_dir = os.getcwd() - + # Check if we need the MSSM special treatment model = self.find_model_name() 
if model == 'mssm' or model.startswith('mssm-'): @@ -5828,14 +5828,14 @@ def configure_directory(self, html_opening=True): mg5_param = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') check_param_card.convert_to_mg5card(param_card, mg5_param) check_param_card.check_valid_param_card(mg5_param) - + # limit the number of event to 100k self.check_nb_events() # this is in order to avoid conflicts between runs with and without # lhapdf. not needed anymore the makefile handles it automaticallu #misc.compile(['clean4pdf'], cwd = pjoin(self.me_dir, 'Source')) - + self.make_opts_var['pdlabel1'] = '' self.make_opts_var['pdlabel2'] = '' if self.run_card['pdlabel1'] in ['eva', 'iww']: @@ -5866,7 +5866,7 @@ def configure_directory(self, html_opening=True): self.copy_lep_densities(self.run_card['pdlabel'], pjoin(self.me_dir, 'Source')) self.make_opts_var['pdlabel1'] = 'ee' self.make_opts_var['pdlabel2'] = 'ee' - + # set random number if self.run_card['iseed'] != 0: self.random = int(self.run_card['iseed']) @@ -5885,18 +5885,18 @@ def configure_directory(self, html_opening=True): break else: self.random = random.randint(1, 30107) - + #set random seed for python part of the code if self.run_card['python_seed'] == -2: #-2 means same as run_card import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] elif self.run_card['python_seed'] >= 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] if self.run_card['ickkw'] == 2: logger.info('Running with CKKW matching') self.treat_ckkw_matching() @@ -5905,12 +5905,12 @@ def configure_directory(self, html_opening=True): self.update_make_opts(self.run_card) # reset 
list of Gdirectory self.Gdirs = None - + # create param_card.inc and run_card.inc self.do_treatcards('') - + logger.info("compile Source Directory") - + # Compile for name in [ 'all']:#, '../bin/internal/combine_events']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) @@ -5933,7 +5933,7 @@ def configure_directory(self, html_opening=True): os.remove(pjoin(self.me_dir, 'lib','libbias.a')) force_subproc_clean = True - + # Finally compile the bias module as well if self.run_card['bias_module'] not in ['dummy',None]: logger.debug("Compiling the bias module '%s'"%bias_name) @@ -5945,7 +5945,7 @@ def configure_directory(self, html_opening=True): 'INVALID' in str(bias_module_valid).upper(): raise InvalidCmd("The bias module '%s' cannot be used because of:\n%s"% (bias_name,bias_module_valid)) - + self.compile(arg=[], cwd=os.path.join(self.me_dir, 'Source','BIAS',bias_name)) self.proc_characteristics['bias_module']=bias_name # Update the proc_characterstics file @@ -5954,7 +5954,7 @@ def configure_directory(self, html_opening=True): if force_subproc_clean: # Make sure that madevent will be recompiled - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] for nb_proc,subdir in enumerate(subproc): Pdir = pjoin(self.me_dir, 'SubProcesses',subdir.strip()) @@ -5971,20 +5971,20 @@ def configure_directory(self, html_opening=True): ############################################################################ @staticmethod def check_dir(path, default=''): - """check if the directory exists. if so return the path otherwise the + """check if the directory exists. 
if so return the path otherwise the default""" - + if os.path.isdir(path): return path else: return default - + ############################################################################ def get_Gdir(self, Pdir=None, symfact=None): """get the list of Gdirectory if not yet saved.""" - + if hasattr(self, "Gdirs") and self.Gdirs: if self.me_dir in self.Gdirs[0]: if Pdir is None: @@ -6000,8 +6000,8 @@ def get_Gdir(self, Pdir=None, symfact=None): Pdirs = self.get_Pdir() - Gdirs = {self.me_dir:[]} - mfactors = {} + Gdirs = {self.me_dir:[]} + mfactors = {} for P in Pdirs: Gdirs[P] = [] #for the next line do not use P, since in readonly mode it might not have symfact @@ -6012,7 +6012,7 @@ def get_Gdir(self, Pdir=None, symfact=None): mfactors[pjoin(P, "G%s" % tag)] = mfactor self.Gdirs = (Gdirs, mfactors) return self.get_Gdir(Pdir, symfact=symfact) - + ############################################################################ def set_run_name(self, name, tag=None, level='parton', reload_card=False, allow_new_tag=True): @@ -6030,8 +6030,8 @@ def get_last_tag(self, level): tagRun = self.results[self.run_name][i] if tagRun.pythia or tagRun.shower or tagRun.pythia8 : return tagRun['tag'] - - + + # when are we force to change the tag new_run:previous run requiring changes upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','madanalysis5_hadron','madanalysis5_parton', 'rivet'], 'pythia': ['pythia','pgs','delphes','madanalysis5_hadron'], @@ -6044,7 +6044,7 @@ def get_last_tag(self, level): 'syscalc':[], 'rivet':['rivet']} - if name == self.run_name: + if name == self.run_name: if reload_card: run_card = pjoin(self.me_dir, 'Cards','run_card.dat') self.run_card = banner_mod.RunCard(run_card) @@ -6064,13 +6064,13 @@ def get_last_tag(self, level): break return get_last_tag(self, level) - + # save/clean previous run if self.run_name: self.store_result() # store new name self.run_name = name - + new_tag = False # First call for this run -> set the banner self.banner = 
banner_mod.recover_banner(self.results, level, name) @@ -6079,8 +6079,8 @@ def get_last_tag(self, level): else: # Read run_card run_card = pjoin(self.me_dir, 'Cards','run_card.dat') - self.run_card = banner_mod.RunCard(run_card) - + self.run_card = banner_mod.RunCard(run_card) + if tag: self.run_card['run_tag'] = tag new_tag = True @@ -6093,7 +6093,7 @@ def get_last_tag(self, level): self.results.update('add run %s' % name, 'all', makehtml=False) else: for tag in upgrade_tag[level]: - + if getattr(self.results[self.run_name][-1], tag): # LEVEL is already define in the last tag -> need to switch tag tag = self.get_available_tag() @@ -6103,8 +6103,8 @@ def get_last_tag(self, level): if not new_tag: # We can add the results to the current run tag = self.results[self.run_name][-1]['tag'] - self.run_card['run_tag'] = tag # ensure that run_tag is correct - + self.run_card['run_tag'] = tag # ensure that run_tag is correct + if allow_new_tag and (name in self.results and not new_tag): self.results.def_current(self.run_name) else: @@ -6113,15 +6113,15 @@ def get_last_tag(self, level): self.run_tag = self.run_card['run_tag'] return get_last_tag(self, level) - - - + + + ############################################################################ def check_nb_events(self): - """Find the number of event in the run_card, and check that this is not + """Find the number of event in the run_card, and check that this is not too large""" - + nb_event = int(self.run_card['nevents']) if nb_event > 1000000: logger.warning("Attempting to generate more than 1M events") @@ -6133,20 +6133,20 @@ def check_nb_events(self): return - - ############################################################################ + + ############################################################################ def update_random(self): """ change random number""" - + self.random += 3 if self.random > 30081*30081: # can't use too big random number raise MadGraph5Error('Random seed too large ' + str(self.random) + ' 
> 30081*30081') - if self.run_card['python_seed'] == -2: + if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.random) + random.seed(self.random) random.mg_seedset = self.random - + ############################################################################ def save_random(self): """save random number in appropirate file""" @@ -6155,14 +6155,14 @@ def save_random(self): fsock.writelines('r=%s\n' % self.random) def do_quit(self, *args, **opts): - + return common_run.CommonRunCmd.do_quit(self, *args, **opts) #return CmdExtended.do_quit(self, *args, **opts) - + ############################################################################ def treat_CKKW_matching(self): """check for ckkw""" - + lpp1 = self.run_card['lpp1'] lpp2 = self.run_card['lpp2'] e1 = self.run_card['ebeam1'] @@ -6170,19 +6170,19 @@ def treat_CKKW_matching(self): pd = self.run_card['pdlabel'] lha = self.run_card['lhaid'] xq = self.run_card['xqcut'] - translation = {'e1': e1, 'e2':e2, 'pd':pd, + translation = {'e1': e1, 'e2':e2, 'pd':pd, 'lha':lha, 'xq':xq} if lpp1 or lpp2: - # Remove ':s from pd + # Remove ':s from pd if pd.startswith("'"): pd = pd[1:] if pd.endswith("'"): - pd = pd[:-1] + pd = pd[:-1] if xq >2 or xq ==2: xq = 2 - + # find data file if pd == "lhapdf": issudfile = 'lib/issudgrid-%(e1)s-%(e2)s-%(pd)s-%(lha)s-%(xq)s.dat.gz' @@ -6192,9 +6192,9 @@ def treat_CKKW_matching(self): issudfile = pjoin(self.webbin, issudfile % translation) else: issudfile = pjoin(self.me_dir, issudfile % translation) - + logger.info('Sudakov grid file: %s' % issudfile) - + # check that filepath exists if os.path.exists(issudfile): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') @@ -6203,20 +6203,20 @@ def treat_CKKW_matching(self): msg = 'No sudakov grid file for parameter choice. Start to generate it. 
This might take a while' logger.info(msg) self.update_status('GENERATE SUDAKOV GRID', level='parton') - + for i in range(-2,6): - self.cluster.submit('%s/gensudgrid ' % self.dirbin, + self.cluster.submit('%s/gensudgrid ' % self.dirbin, argument = ['%d'%i], - cwd=self.me_dir, + cwd=self.me_dir, stdout=open(pjoin(self.me_dir, 'gensudgrid%s.log' % i),'w')) self.monitor() for i in range(-2,6): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') os.system('cat %s/gensudgrid%s.log >> %s' % (self.me_dir, path)) misc.gzip(path, stdout=issudfile) - + ############################################################################ - def create_root_file(self, input='unweighted_events.lhe', + def create_root_file(self, input='unweighted_events.lhe', output='unweighted_events.root' ): """create the LHE root file """ self.update_status('Creating root files', level='parton') @@ -6233,14 +6233,14 @@ def create_root_file(self, input='unweighted_events.lhe', totar = False torm = True input = input[:-3] - + try: - misc.call(['%s/ExRootLHEFConverter' % eradir, + misc.call(['%s/ExRootLHEFConverter' % eradir, input, output], cwd=pjoin(self.me_dir, 'Events')) except Exception: logger.warning('fail to produce Root output [problem with ExRootAnalysis]') - + if totar: if os.path.exists('%s.gz' % input): try: @@ -6251,13 +6251,13 @@ def create_root_file(self, input='unweighted_events.lhe', misc.gzip(input) if torm: os.remove(input) - + def run_syscalc(self, mode='parton', event_path=None, output=None): - """create the syscalc output""" + """create the syscalc output""" if self.run_card['use_syst'] not in self.true: return - + scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): return @@ -6265,12 +6265,12 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): if self.run_card['event_norm'] != 'sum': logger.critical('SysCalc works only when event_norm is on \'sum\'.') return - logger.info('running SysCalc on mode %s' % mode) - + logger.info('running 
SysCalc on mode %s' % mode) + # Restore the old default for SysCalc+PY6 if self.run_card['sys_matchscale']=='auto': self.run_card['sys_matchscale'] = "30 50" - + # Check that all pdfset are correctly installed lhaid = [self.run_card.get_lhapdf_id()] if '&&' in self.run_card['sys_pdf']: @@ -6285,20 +6285,20 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.debug(str(error)) logger.warning('Systematic computation requires lhapdf to run. Bypass SysCalc') return - + # Copy all the relevant PDF sets [self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid] - + to_syscalc={'sys_scalefact': self.run_card['sys_scalefact'], 'sys_alpsfact': self.run_card['sys_alpsfact'], 'sys_matchscale': self.run_card['sys_matchscale'], 'sys_scalecorrelation': self.run_card['sys_scalecorrelation'], 'sys_pdf': self.run_card['sys_pdf']} - - tag = self.run_card['run_tag'] + + tag = self.run_card['run_tag'] card = pjoin(self.me_dir, 'bin','internal', 'syscalc_card.dat') template = open(pjoin(self.me_dir, 'bin','internal', 'syscalc_template.dat')).read() - + if '&&' in to_syscalc['sys_pdf']: to_syscalc['sys_pdf'] = to_syscalc['sys_pdf'].split('#',1)[0].replace('&&',' \n ') else: @@ -6311,8 +6311,8 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): new.append(d) else: new[-1] += ' %s' % d - to_syscalc['sys_pdf'] = '\n'.join(new) - + to_syscalc['sys_pdf'] = '\n'.join(new) + if to_syscalc['sys_pdf'].lower() in ['', 'f', 'false', 'none', '.false.']: to_syscalc['sys_pdf'] = '' if to_syscalc['sys_alpsfact'].lower() in ['', 'f', 'false', 'none','.false.']: @@ -6320,17 +6320,17 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): - + # check if the scalecorrelation parameter is define: if not 'sys_scalecorrelation' in self.run_card: self.run_card['sys_scalecorrelation'] = -1 open(card,'w').write(template % self.run_card) - + if not os.path.exists(card): return False - - + + event_dir = pjoin(self.me_dir, 'Events') if not 
event_path: @@ -6353,19 +6353,19 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): raise SysCalcError('qcut value for sys_matchscale lower than qcut in pythia_card. Bypass syscalc') if float(value) < xqcut: raise SysCalcError('qcut value for sys_matchscale lower than xqcut in run_card. Bypass syscalc') - - + + event_path = pjoin(event_dir,'syst.dat') output = pjoin(event_dir, 'syscalc.dat') else: raise self.InvalidCmd('Invalid mode %s' % mode) - + if not os.path.exists(event_path): if os.path.exists(event_path+'.gz'): misc.gunzip(event_path+'.gz') else: raise SysCalcError('Events file %s does not exits' % event_path) - + self.update_status('Calculating systematics for %s level' % mode, level = mode.lower()) try: proc = misc.call([os.path.join(scdir, 'sys_calc'), @@ -6374,7 +6374,7 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): stderr = subprocess.STDOUT, cwd=event_dir) # Wait 5 s to make sure file is finished writing - time.sleep(5) + time.sleep(5) except OSError as error: logger.error('fail to run syscalc: %s. Please check that SysCalc is correctly installed.' % error) else: @@ -6382,11 +6382,11 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.warning('SysCalc Failed. Please read the associate log to see the reason. 
Did you install the associate PDF set?') elif mode == 'parton': files.mv(output, event_path) - + self.update_status('End syscalc for %s level' % mode, level = mode.lower(), makehtml=False) - - return True + + return True action_switcher = AskRun @@ -6399,23 +6399,23 @@ def ask_run_configuration(self, mode=None, args=[]): passing_cmd.append('reweight=ON') if '-M' in args or '--madspin' in args: passing_cmd.append('madspin=ON') - + switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, mode=mode, line_args=args, force=self.force, first_cmd=passing_cmd, return_instance=True) # - self.switch = switch # store the value of the switch for plugin purpose + self.switch = switch # store the value of the switch for plugin purpose if 'dynamical' in switch: mode = 'auto' - + # Now that we know in which mode we are check that all the card #exists (copy default if needed) - + cards = ['param_card.dat', 'run_card.dat'] if switch['shower'] == 'Pythia6': cards.append('pythia_card.dat') if switch['shower'] == 'Pythia8': - cards.append('pythia8_card.dat') + cards.append('pythia8_card.dat') if switch['detector'] in ['PGS','DELPHES+PGS']: cards.append('pgs_card.dat') if switch['detector'] in ['Delphes', 'DELPHES+PGS']: @@ -6438,29 +6438,29 @@ def ask_run_configuration(self, mode=None, args=[]): cards.append('rivet_card.dat') self.keep_cards(cards) - + first_cmd = cmd_switch.get_cardcmd() - + if os.path.isfile(pjoin(self.me_dir,'Cards','MadLoopParams.dat')): cards.append('MadLoopParams.dat') - + if self.force: self.check_param_card(pjoin(self.me_dir,'Cards','param_card.dat' )) return switch - + if 'dynamical' in switch and switch['dynamical']: self.ask_edit_cards(cards, plot=False, mode='auto', first_cmd=first_cmd) else: self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) return switch - + ############################################################################ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None): """Ask the 
question when launching pythia""" - + pythia_suffix = '' if pythia_version==6 else '%d'%pythia_version - + available_mode = ['0', '1'] if pythia_version==6: available_mode.append('2') @@ -6485,10 +6485,10 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = self.ask(question, '0', options) elif not mode: mode = 'auto' - + if mode.isdigit(): mode = name[mode] - + auto = False if mode == 'auto': auto = True @@ -6497,7 +6497,7 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = 'pgs' elif os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')): mode = 'delphes' - else: + else: mode = 'pythia%s'%pythia_suffix logger.info('Will run in mode %s' % mode) # Now that we know in which mode we are check that all the card @@ -6513,15 +6513,15 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) cards.append('delphes_trigger.dat') self.keep_cards(cards, ignore=['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat', 'plot_card.dat']) - + if self.force: return mode - + if not banner: banner = self.banner - + if auto: - self.ask_edit_cards(cards, from_banner=['param', 'run'], + self.ask_edit_cards(cards, from_banner=['param', 'run'], mode='auto', plot=(pythia_version==6), banner=banner ) else: @@ -6529,12 +6529,12 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) plot=(pythia_version==6), banner=banner) return mode - + #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmdShell(MadEventCmd, cmd.CmdShell): - """The command line processor of MadGraph""" + """The command line processor of MadGraph""" @@ -6548,11 +6548,11 @@ class SubProcesses(object): @classmethod def clean(cls): cls.name_to_pdg = {} - + @staticmethod def get_subP(me_dir): """return the list of Subprocesses""" - + out = [] for 
line in open(pjoin(me_dir,'SubProcesses', 'subproc.mg')): if not line: @@ -6560,9 +6560,9 @@ def get_subP(me_dir): name = line.strip() if os.path.exists(pjoin(me_dir, 'SubProcesses', name)): out.append(pjoin(me_dir, 'SubProcesses', name)) - + return out - + @staticmethod @@ -6623,9 +6623,9 @@ def get_subP_ids(path): particles = re.search("/([\d,-]+)/", line) all_ids.append([int(p) for p in particles.group(1).split(',')]) return all_ids - - -#=============================================================================== + + +#=============================================================================== class GridPackCmd(MadEventCmd): """The command for the gridpack --Those are not suppose to be use interactively--""" @@ -6639,7 +6639,7 @@ def __init__(self, me_dir = None, nb_event=0, seed=0, gran=-1, *completekey, **s self.random = seed self.random_orig = self.random self.granularity = gran - + self.options['automatic_html_opening'] = False #write the grid_card.dat on disk self.nb_event = int(nb_event) @@ -6680,7 +6680,7 @@ def write_RunWeb(self, me_dir): def write_gridcard(self, nb_event, seed, gran): """write the grid_card.dat file at appropriate location""" - + # first try to write grid_card within the gridpack. 
print("WRITE GRIDCARD", self.me_dir) if self.readonly: @@ -6689,35 +6689,35 @@ def write_gridcard(self, nb_event, seed, gran): fsock = open('grid_card.dat','w') else: fsock = open(pjoin(self.me_dir, 'Cards', 'grid_card.dat'),'w') - + gridpackcard = banner_mod.GridpackCard() gridpackcard['GridRun'] = True gridpackcard['gevents'] = nb_event gridpackcard['gseed'] = seed gridpackcard['ngran'] = gran - + gridpackcard.write(fsock) ############################################################################ def get_Pdir(self): """get the list of Pdirectory if not yet saved.""" - + if hasattr(self, "Pdirs"): if self.me_dir in self.Pdirs[0]: return self.Pdirs - + if not self.readonly: - self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) + self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] else: - self.Pdirs = [l.strip() - for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + self.Pdirs = [l.strip() + for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] + return self.Pdirs - + def prepare_local_dir(self): """create the P directory structure in the local directory""" - + if not self.readonly: os.chdir(self.me_dir) else: @@ -6726,7 +6726,7 @@ def prepare_local_dir(self): os.mkdir(p) files.cp(pjoin(self.me_dir,'SubProcesses',p,'symfact.dat'), pjoin(p, 'symfact.dat')) - + def launch(self, nb_event, seed): """ launch the generation for the grid """ @@ -6742,13 +6742,13 @@ def launch(self, nb_event, seed): if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(seed) + random.seed(seed) random.mg_seedset = seed elif self.run_card['python_seed'] > 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] # 2) Run the refine for the grid 
self.update_status('Generating Events', level=None) #misc.call([pjoin(self.me_dir,'bin','refine4grid'), @@ -6767,70 +6767,70 @@ def launch(self, nb_event, seed): self.exec_cmd('decay_events -from_cards', postcmd=False) elif self.run_card['use_syst'] and self.run_card['systematics_program'] == 'systematics': self.options['nb_core'] = 1 - self.exec_cmd('systematics %s --from_card' % + self.exec_cmd('systematics %s --from_card' % pjoin('Events', self.run_name, 'unweighted_events.lhe.gz'), postcmd=False,printcmd=False) - + def refine4grid(self, nb_event): """Special refine for gridpack run.""" self.nb_refine += 1 - + precision = nb_event self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) - + # initialize / remove lhapdf mode # self.configure_directory() # All this has been done before self.cluster_mode = 0 # force single machine # Store seed in randinit file, to be read by ranmar.f self.save_random() - + self.update_status('Refine results to %s' % precision, level=None) logger.info("Using random number seed offset = %s" % self.random) refine_opt = {'err_goal': nb_event, 'split_channels': False, - 'ngran':self.granularity, 'readonly': self.readonly} + 'ngran':self.granularity, 'readonly': self.readonly} x_improve = gen_ximprove.gen_ximprove_gridpack(self, refine_opt) x_improve.launch() # create the ajob for the refinment and run those! - self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack - - + self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack + + #bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) #print 'run combine!!!' 
#combine_runs.CombineRuns(self.me_dir) - + return #update html output Presults = sum_html.collect_result(self) cross, error = Presults.xsec, Presults.xerru self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + #self.update_status('finish refine', 'parton', makehtml=False) #devnull.close() - - - + + + return self.total_jobs = 0 - subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if + subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if P.startswith('P') and os.path.isdir(pjoin(self.me_dir,'SubProcesses', P))] devnull = open(os.devnull, 'w') for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) # clean previous run for match in misc.glob('*ajob*', Pdir): if os.path.basename(match)[:4] in ['ajob', 'wait', 'run.', 'done']: os.remove(pjoin(Pdir, match)) - + logfile = pjoin(Pdir, 'gen_ximprove.log') misc.call([pjoin(bindir, 'gen_ximprove')], @@ -6840,40 +6840,40 @@ def refine4grid(self, nb_event): if os.path.exists(pjoin(Pdir, 'ajob1')): alljobs = misc.glob('ajob*', Pdir) - nb_tot = len(alljobs) + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) if os.path.exists(pjoin(self.me_dir,'error')): self.monitor(html=True) raise MadEventError('Error detected in dir %s: %s' % \ (Pdir, open(pjoin(self.me_dir,'error')).read())) - self.monitor(run_type='All job submitted for refine number %s' % + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except 
Exception: pass - + bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) combine_runs.CombineRuns(self.me_dir) - + #update html output cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + self.update_status('finish refine', 'parton', makehtml=False) devnull.close() def do_combine_events(self, line): - """Advanced commands: Launch combine events""" + """Advanced commands: Launch combine events""" if self.readonly: outdir = 'Events' @@ -6895,17 +6895,17 @@ def do_combine_events(self, line): self.banner.add_generation_info(self.results.current['cross'], self.run_card['nevents']) if not hasattr(self, 'random_orig'): self.random_orig = 0 self.banner.change_seed(self.random_orig) - - + + if not os.path.exists(pjoin(outdir, self.run_name)): os.mkdir(pjoin(outdir, self.run_name)) - self.banner.write(pjoin(outdir, self.run_name, + self.banner.write(pjoin(outdir, self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -6915,7 +6915,7 @@ def do_combine_events(self, line): if os.path.exists(pjoin(Gdir, 'events.lhe')): result = sum_html.OneResult('') result.read_results(pjoin(Gdir, 'results.dat')) - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec')*gscalefact[Gdir], result.get('xerru')*gscalefact[Gdir], result.get('axsec')*gscalefact[Gdir] @@ -6924,7 +6924,7 @@ def do_combine_events(self, line): sum_xsec += result.get('xsec')*gscalefact[Gdir] sum_xerru.append(result.get('xerru')*gscalefact[Gdir]) sum_axsec += result.get('axsec')*gscalefact[Gdir] - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(outdir, 
self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.nb_event) @@ -6933,26 +6933,26 @@ def do_combine_events(self, line): AllEvent.add(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() - + self.banner.add_generation_info(sum_xsec, self.nb_event) nb_event = AllEvent.unweight(pjoin(outdir, self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.nb_event, log_level=logging.DEBUG, normalization=self.run_card['event_norm'], proc_charac=self.proc_characteristic) - - + + if partials: for i in range(partials): try: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) self.banner.add_generation_info(sum_xsec, nb_event) if self.run_card['bias_module'].lower() not in ['dummy', 'none']: @@ -6961,7 +6961,7 @@ def do_combine_events(self, line): class MadLoopInitializer(object): """ A container class for the various methods for initializing MadLoop. It is - placed in MadEventInterface because it is used by Madevent for loop-induced + placed in MadEventInterface because it is used by Madevent for loop-induced simulations. 
""" @staticmethod @@ -6974,7 +6974,7 @@ def make_and_run(dir_name,checkRam=False): if os.path.isfile(pjoin(dir_name,'check')): os.remove(pjoin(dir_name,'check')) os.remove(pjoin(dir_name,'check_sa.o')) - os.remove(pjoin(dir_name,'loop_matrix.o')) + os.remove(pjoin(dir_name,'loop_matrix.o')) # Now run make devnull = open(os.devnull, 'w') start=time.time() @@ -6996,7 +6996,7 @@ def make_and_run(dir_name,checkRam=False): stdout=devnull, stderr=devnull, close_fds=True) try: ptimer.execute() - #poll as often as possible; otherwise the subprocess might + #poll as often as possible; otherwise the subprocess might # "sneak" in some extra memory usage while you aren't looking # Accuracy of .2 seconds is enough for the timing. while ptimer.poll(): @@ -7028,7 +7028,7 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, If mu_r > 0.0, then the renormalization constant value will be hardcoded directly in check_sa.f, if is is 0 it will be set to Sqrt(s) and if it is < 0.0 the value in the param_card.dat is used. - If the split_orders target (i.e. the target squared coupling orders for + If the split_orders target (i.e. the target squared coupling orders for the computation) is != -1, it will be changed in check_sa.f via the subroutine CALL SET_COUPLINGORDERS_TARGET(split_orders).""" @@ -7043,12 +7043,12 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, file_path = pjoin(directories[0],'check_sa.f') if not os.path.isfile(file_path): raise MadGraph5Error('Could not find the location of check_sa.f'+\ - ' from the specified path %s.'%str(file_path)) + ' from the specified path %s.'%str(file_path)) file = open(file_path, 'r') check_sa = file.read() file.close() - + file = open(file_path, 'w') check_sa = re.sub(r"READPS = \S+\)","READPS = %s)"%('.TRUE.' 
if read_ps \ else '.FALSE.'), check_sa) @@ -7064,42 +7064,42 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, (("%.17e"%mu_r).replace('e','d')),check_sa) elif mu_r < 0.0: check_sa = re.sub(r"MU_R=SQRTS","",check_sa) - + if split_orders > 0: check_sa = re.sub(r"SET_COUPLINGORDERS_TARGET\(-?\d+\)", - "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) - + "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) + file.write(check_sa) file.close() - @staticmethod + @staticmethod def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ req_files = ['HelFilter.dat','LoopFilter.dat'], attempts = [4,15]): - """ Run the initialization of the process in 'run_dir' with success + """ Run the initialization of the process in 'run_dir' with success characterized by the creation of the files req_files in this directory. The directory containing the driving source code 'check_sa.f'. - The list attempt gives the successive number of PS points the + The list attempt gives the successive number of PS points the initialization should be tried with before calling it failed. Returns the number of PS points which were necessary for the init. Notice at least run_dir or SubProc_dir must be provided. A negative attempt number given in input means that quadprec will be forced for initialization.""" - + # If the user does not want detailed info, then set the dictionary # to a dummy one. 
if infos is None: infos={} - + if SubProc_dir is None and run_dir is None: raise MadGraph5Error('At least one of [SubProc_dir,run_dir] must'+\ ' be provided in run_initialization.') - + # If the user does not specify where is check_sa.f, then it is assumed # to be one levels above run_dir if SubProc_dir is None: SubProc_dir = os.path.abspath(pjoin(run_dir,os.pardir)) - + if run_dir is None: directories =[ dir for dir in misc.glob('P[0-9]*', SubProc_dir) if os.path.isdir(dir) ] @@ -7109,7 +7109,7 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find a valid running directory'+\ ' in %s.'%str(SubProc_dir)) - # Use the presence of the file born_matrix.f to decide if it is a + # Use the presence of the file born_matrix.f to decide if it is a # loop-induced process or not. It's not crucial, but just that because # of the dynamic adjustment of the ref scale used for deciding what are # the zero contributions, more points are neeeded for loop-induced. @@ -7128,9 +7128,9 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ %MLCardPath) else: - MLCard = banner_mod.MadLoopParam(MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) MLCard_orig = banner_mod.MadLoopParam(MLCard) - + # Make sure that LoopFilter really is needed. 
if not MLCard['UseLoopFilter']: try: @@ -7153,11 +7153,11 @@ def need_init(): proc_prefix+fname)) for fname in my_req_files]) or \ not os.path.isfile(pjoin(run_dir,'check')) or \ not os.access(pjoin(run_dir,'check'), os.X_OK) - + # Check if this is a process without born by checking the presence of the # file born_matrix.f is_loop_induced = os.path.exists(pjoin(run_dir,'born_matrix.f')) - + # For loop induced processes, always attempt quadruple precision if # double precision attempts fail and the user didn't specify himself # quadruple precision initializations attempts @@ -7166,11 +7166,11 @@ def need_init(): use_quad_prec = 1 curr_attempt = 1 - MLCard.set('WriteOutFilters',True) - + MLCard.set('WriteOutFilters',True) + while to_attempt!=[] and need_init(): curr_attempt = to_attempt.pop() - # if the attempt is a negative number it means we must force + # if the attempt is a negative number it means we must force # quadruple precision at initialization time if curr_attempt < 0: use_quad_prec = -1 @@ -7183,11 +7183,11 @@ def need_init(): MLCard.set('ZeroThres',1e-9) # Plus one because the filter are written on the next PS point after curr_attempt = abs(curr_attempt+1) - MLCard.set('MaxAttempts',curr_attempt) + MLCard.set('MaxAttempts',curr_attempt) MLCard.write(pjoin(SubProc_dir,'MadLoopParams.dat')) # initialization is performed. 
- MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, + MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, npoints = curr_attempt) compile_time, run_time, ram_usage = \ MadLoopInitializer.make_and_run(run_dir) @@ -7200,7 +7200,7 @@ def need_init(): infos['Process_compilation']==None: infos['Process_compilation'] = compile_time infos['Initialization'] = run_time - + MLCard_orig.write(pjoin(SubProc_dir,'MadLoopParams.dat')) if need_init(): return None @@ -7219,8 +7219,8 @@ def need_init(ML_resources_path, proc_prefix, r_files): MLCardPath = pjoin(proc_dir,'SubProcesses','MadLoopParams.dat') if not os.path.isfile(MLCardPath): raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ - %MLCardPath) - MLCard = banner_mod.MadLoopParam(MLCardPath) + %MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) req_files = ['HelFilter.dat','LoopFilter.dat'] # Make sure that LoopFilter really is needed. @@ -7234,9 +7234,9 @@ def need_init(ML_resources_path, proc_prefix, r_files): req_files.remove('HelFilter.dat') except ValueError: pass - + for v_folder in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)): + '%s*'%subproc_prefix)): # Make sure it is a valid MadLoop directory if not os.path.isdir(v_folder) or not os.path.isfile(\ pjoin(v_folder,'loop_matrix.f')): @@ -7247,7 +7247,7 @@ def need_init(ML_resources_path, proc_prefix, r_files): if need_init(pjoin(proc_dir,'SubProcesses','MadLoop5_resources'), proc_prefix, req_files): return True - + return False @staticmethod @@ -7265,7 +7265,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['treatCardsLoopNoInit'], cwd=pjoin(proc_dir,'Source')) else: interface.do_treatcards('all --no_MadLoopInit') - + # First make sure that IREGI and CUTTOOLS are compiled if needed if os.path.exists(pjoin(proc_dir,'Source','CutTools')): misc.compile(arg=['libcuttools'],cwd=pjoin(proc_dir,'Source')) @@ -7273,8 +7273,8 @@ def init_MadLoop(proc_dir, 
n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['libiregi'],cwd=pjoin(proc_dir,'Source')) # Then make sure DHELAS and MODEL are compiled misc.compile(arg=['libmodel'],cwd=pjoin(proc_dir,'Source')) - misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) - + misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) + # Now initialize the MadLoop outputs logger.info('Initializing MadLoop loop-induced matrix elements '+\ '(this can take some time)...') @@ -7283,7 +7283,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, if MG_options: if interface and hasattr(interface, 'cluster') and isinstance(interface.cluster, cluster.MultiCore): mcore = interface.cluster - else: + else: mcore = cluster.MultiCore(**MG_options) else: mcore = cluster.onecore @@ -7294,10 +7294,10 @@ def run_initialization_wrapper(run_dir, infos, attempts): run_dir=run_dir, infos=infos) else: n_PS = MadLoopInitializer.run_initialization( - run_dir=run_dir, infos=infos, attempts=attempts) + run_dir=run_dir, infos=infos, attempts=attempts) infos['nPS'] = n_PS return 0 - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return @@ -7307,21 +7307,21 @@ def wait_monitoring(Idle, Running, Done): init_info = {} # List all virtual folders while making sure they are valid MadLoop folders VirtualFolders = [f for f in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)) if (os.path.isdir(f) or + '%s*'%subproc_prefix)) if (os.path.isdir(f) or os.path.isfile(pjoin(f,'loop_matrix.f')))] logger.debug("Now Initializing MadLoop matrix element in %d folder%s:"%\ (len(VirtualFolders),'s' if len(VirtualFolders)>1 else '')) - logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in + logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in VirtualFolders)) for v_folder in VirtualFolders: init_info[v_folder] = {} - + # We try all multiples of n_PS from 1 to max_mult, first in DP and then # in QP before 
giving up, or use default values if n_PS is None. max_mult = 3 if n_PS is None: # Then use the default list of number of PS points to try - mcore.submit(run_initialization_wrapper, + mcore.submit(run_initialization_wrapper, [pjoin(v_folder), init_info[v_folder], None]) else: # Use specific set of PS points @@ -7348,8 +7348,8 @@ def wait_monitoring(Idle, Running, Done): '%d PS points (%s), in %.3g(compil.) + %.3g(init.) secs.'%( abs(init['nPS']),'DP' if init['nPS']>0 else 'QP', init['Process_compilation'],init['Initialization'])) - - logger.info('MadLoop initialization finished.') + + logger.info('MadLoop initialization finished.') AskforEditCard = common_run.AskforEditCard @@ -7364,16 +7364,16 @@ def wait_monitoring(Idle, Running, Done): import os import optparse - # Get the directory of the script real path (bin) - # and add it to the current PYTHONPATH + # Get the directory of the script real path (bin) + # and add it to the current PYTHONPATH #root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))) sys.path.insert(0, root_path) - class MyOptParser(optparse.OptionParser): + class MyOptParser(optparse.OptionParser): class InvalidOption(Exception): pass def error(self, msg=''): raise MyOptParser.InvalidOption(msg) - # Write out nice usage message if called with -h or --help + # Write out nice usage message if called with -h or --help usage = "usage: %prog [options] [FILE] " parser = MyOptParser(usage=usage) parser.add_option("-l", "--logging", default='INFO', @@ -7384,7 +7384,7 @@ def error(self, msg=''): help='force to launch debug mode') parser_error = '' done = False - + for i in range(len(sys.argv)-1): try: (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i]) @@ -7394,7 +7394,7 @@ def error(self, msg=''): else: args += sys.argv[len(sys.argv)-i:] if not done: - # raise correct error: + # raise correct error: try: (options, args) = parser.parse_args() except MyOptParser.InvalidOption as error: @@ -7407,8 +7407,8 @@ 
def error(self, msg=''): import subprocess import logging import logging.config - # Set logging level according to the logging level given by options - #logging.basicConfig(level=vars(logging)[options.logging]) + # Set logging level according to the logging level given by options + #logging.basicConfig(level=vars(logging)[options.logging]) import internal import internal.coloring_logging # internal.file = XXX/bin/internal/__init__.py @@ -7431,13 +7431,13 @@ def error(self, msg=''): raise pass - # Call the cmd interface main loop + # Call the cmd interface main loop try: if args: # a single command is provided if '--web' in args: - i = args.index('--web') - args.pop(i) + i = args.index('--web') + args.pop(i) cmd_line = MadEventCmd(me_dir, force_run=True) else: cmd_line = MadEventCmdShell(me_dir, force_run=True) @@ -7457,13 +7457,13 @@ def error(self, msg=''): pass - - - - - - - - + + + + + + + + diff --git a/epochX/cudacpp/gg_tt01g.mad/src/cudacpp_src.mk b/epochX/cudacpp/gg_tt01g.mad/src/cudacpp_src.mk index d4cc628aec..b4e446bc45 100644 --- a/epochX/cudacpp/gg_tt01g.mad/src/cudacpp_src.mk +++ b/epochX/cudacpp/gg_tt01g.mad/src/cudacpp_src.mk @@ -1,12 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) is used in the Subprocess and src directories - -THISMK = $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
#------------------------------------------------------------------------------- @@ -16,165 +11,24 @@ SHELL := /bin/bash #------------------------------------------------------------------------------- -#=== Configure common compiler flags for CUDA and C++ - -INCFLAGS = -I. -OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here - -#------------------------------------------------------------------------------- - #=== Configure the C++ compiler -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) $(USE_NVTX) -fPIC -Wall -Wshadow -Wextra +include ../Source/make_opts + +MG_CXXFLAGS += -fPIC -I. $(USE_NVTX) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS+= -ffast-math # see issue #117 +MG_CXXFLAGS += -ffast-math # see issue #117 endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY # Note: AR, CXX and FC are implicitly defined if not set externally # See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html ###RANLIB = ranlib -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -LDFLAGS = -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -LDFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler (note: NVCC is already exported including ccache) - -###$(info NVCC=$(NVCC)) - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ builds (note: NVCC is already exported including ccache) - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif - 
-#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for CUDA and C++ - -# Assuming uname is available, detect if architecture is PowerPC -UNAME_P := $(shell uname -p) - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # BUILD ERROR IF THIS ADDED IN SRC?! -else - ###AR=gcc-ar # needed by -flto - ###RANLIB=gcc-ranlib # needed by -flto - ###CXXFLAGS+= -flto # NB: build error from src/Makefile unless gcc-ar and gcc-ranlib are used - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -#------------------------------------------------------------------------------- - #=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the build flags appropriate to OMPFLAGS ###$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword 
registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -###$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -###$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -###$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -###$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - CXXFLAGS += -DMGONGPU_HAS_NO_CURAND -else ifneq ($(RNDGEN),hasCurand) - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- @@ -182,28 +36,18 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -###$(info Current directory is $(shell pwd)) -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIRREL = ../lib/$(BUILDDIR) - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR=1 is set)) -else - override BUILDDIR = . - override LIBDIRREL = ../lib - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) -endif -######$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) +# Build directory: +BUILDDIR := build.$(DIRTAG) +LIBDIRREL := ../lib/$(BUILDDIR) # Workaround for Mac #375 (I did not manage to fix rpath with @executable_path): use absolute paths for LIBDIR # (NB: this is quite ugly because it creates the directory if it does not exist - to avoid removing src by mistake) -UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Darwin) override LIBDIR = $(shell mkdir -p $(LIBDIRREL); cd $(LIBDIRREL); pwd) ifeq ($(wildcard $(LIBDIR)),) @@ -223,55 +67,35 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -# Target (and build options): debug -debug: OPTFLAGS = -g -O0 -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! 
-name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ #------------------------------------------------------------------------------- cxx_objects=$(addprefix $(BUILDDIR)/, Parameters_sm.o read_slha.o) -ifneq ($(NVCC),) +ifeq ($(AVX),cuda) +COMPILER=$(NVCC) cu_objects=$(addprefix $(BUILDDIR)/, Parameters_sm_cu.o) +else +COMPILER=$(CXX) +cu_objects= endif # Target (and build rules): common (src) library -ifneq ($(NVCC),) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) $(cu_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(NVCC) -shared -o $@ $(cxx_objects) $(cu_objects) $(LDFLAGS) -else -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(CXX) -shared -o $@ $(cxx_objects) $(LDFLAGS) -endif + mkdir -p $(LIBDIR) + $(COMPILER) -shared -o $@ $(cxx_objects) $(cu_objects) $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- @@ -279,19 +103,7 @@ endif .PHONY: clean clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) -else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe -endif - -cleanall: - @echo - $(MAKE) clean -f $(THISMK) - @echo - rm -rf $(LIBDIR)/build.* - rm -rf build.* + $(RM) -f ../lib/build.*/*.so + $(RM) -rf build.* #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_tt01g.mad/src/mgOnGpuCxtypes.h b/epochX/cudacpp/gg_tt01g.mad/src/mgOnGpuCxtypes.h index ca9a9f00c0..3290d314d6 100644 --- a/epochX/cudacpp/gg_tt01g.mad/src/mgOnGpuCxtypes.h +++ b/epochX/cudacpp/gg_tt01g.mad/src/mgOnGpuCxtypes.h @@ -21,10 +21,14 @@ // Complex type in cuda: thrust or cucomplex or cxsmpl #ifdef 
__CUDACC__ #if defined MGONGPU_CUCXTYPE_THRUST +#ifdef __CLANG__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" // for icpx2021/clang13 (https://stackoverflow.com/a/15864661) +#endif #include +#ifdef __CLANG__ #pragma clang diagnostic pop +#endif #elif defined MGONGPU_CUCXTYPE_CUCOMPLEX #include #elif not defined MGONGPU_CUCXTYPE_CXSMPL diff --git a/epochX/cudacpp/gg_ttg.mad/CODEGEN_mad_gg_ttg_log.txt b/epochX/cudacpp/gg_ttg.mad/CODEGEN_mad_gg_ttg_log.txt index b52dc31122..2b336b0d56 100644 --- a/epochX/cudacpp/gg_ttg.mad/CODEGEN_mad_gg_ttg_log.txt +++ b/epochX/cudacpp/gg_ttg.mad/CODEGEN_mad_gg_ttg_log.txt @@ -52,7 +52,7 @@ Note that you can still compile and run aMC@NLO with the built-in PDFs Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt import /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttg.mg The import format was not given, so we guess it as command set stdout_level DEBUG @@ -62,7 +62,7 @@ generate g g > t t~ g No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.0053288936614990234  +DEBUG: model prefixing takes 0.005417346954345703  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. 
INFO: Trying coupling order WEIGHTED<=3: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ g WEIGHTED<=3 @1 INFO: Process has 16 diagrams -1 processes with 16 diagrams generated in 0.021 s +1 processes with 16 diagrams generated in 0.022 s Total: 1 processes with 16 diagrams output madevent ../TMPOUT/CODEGEN_mad_gg_ttg --hel_recycling=False --vector_size=32 --me_exporter=standalone_cudacpp Load PLUGIN.CUDACPP_OUTPUT @@ -175,7 +175,7 @@ INFO: Generating Helas calls for process: g g > t t~ g WEIGHTED<=3 @1 INFO: Processing color information for process: g g > t t~ g @1 INFO: Creating files in directory P1_gg_ttxg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -190,15 +190,15 @@ INFO: Created files CPPProcess.h and CPPProcess.cc in directory ./. DEBUG: vector, subproc_group,self.opt['vector_size'] =  32 True 32 [export_v4.py at line 1872]  INFO: Generating Feynman diagrams for Process: g g > t t~ g WEIGHTED<=3 @1 INFO: Finding symmetric diagrams for subprocess group gg_ttxg -Generated helas calls for 1 subprocesses (16 diagrams) in 0.037 s -Wrote files for 36 helas calls in 0.155 s +Generated helas calls for 1 subprocesses (16 diagrams) in 0.038 s +Wrote files for 36 helas calls in 0.148 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 set of routines with options: P0 ALOHA: aloha creates VVVV3 set of routines with options: P0 ALOHA: aloha creates VVVV4 set of routines with options: P0 -ALOHA: aloha creates 5 routines in 0.326 s +ALOHA: aloha creates 5 routines in 0.322 s DEBUG: Entering PLUGIN_ProcessExporter.convert_model (create the model) [output.py at line 202]  ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha 
creates VVV1 routines @@ -206,7 +206,7 @@ ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 set of routines with options: P0 ALOHA: aloha creates VVVV3 set of routines with options: P0 ALOHA: aloha creates VVVV4 set of routines with options: P0 -ALOHA: aloha creates 10 routines in 0.327 s +ALOHA: aloha creates 10 routines in 0.311 s VVV1 VVV1 FFV1 @@ -230,12 +230,14 @@ save configuration file to /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CO INFO: Use Fortran compiler gfortran INFO: Use c++ compiler g++ INFO: Generate web pages +DEBUG: standardise /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttg/Source/make_opts (fix f2py3 and sort make_opts_variables) before applying patch.common DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttg; patch -p4 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common patching file Source/genps.inc +patching file Source/make_opts patching file Source/makefile patching file SubProcesses/makefile +patching file bin/internal/banner.py patching file bin/internal/gen_ximprove.py -Hunk #1 succeeded at 391 (offset 6 lines). patching file bin/internal/madevent_interface.py DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttg/SubProcesses/P1_gg_ttxg; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f @@ -252,9 +254,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. 
quit -real 0m2.189s -user 0m1.943s -sys 0m0.223s +real 0m2.447s +user 0m1.940s +sys 0m0.238s ************************************************************ * * * W E L C O M E to * @@ -280,7 +282,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttg/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt treatcards run quit INFO: @@ -310,7 +312,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttg/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". 
Set another one in ./input/mg5_configuration.txt treatcards param quit INFO: diff --git a/epochX/cudacpp/gg_ttg.mad/Source/make_opts b/epochX/cudacpp/gg_ttg.mad/Source/make_opts index e4b87ee6ad..435bed0dc7 100644 --- a/epochX/cudacpp/gg_ttg.mad/Source/make_opts +++ b/epochX/cudacpp/gg_ttg.mad/Source/make_opts @@ -1,7 +1,7 @@ DEFAULT_CPP_COMPILER=g++ DEFAULT_F2PY_COMPILER=f2py3 DEFAULT_F_COMPILER=gfortran -GLOBAL_FLAG=-O3 -ffast-math -fbounds-check +GLOBAL_FLAG=-O3 -ffast-math MACFLAG= MG5AMC_VERSION=SpecifiedByMG5aMCAtRunTime PYTHIA8_PATH=NotInstalled @@ -13,31 +13,53 @@ BIASLIBDIR=../../../lib/ BIASLIBRARY=libbias.$(libext) # Rest of the makefile -ifeq ($(origin FFLAGS),undefined) -FFLAGS= -w -fPIC -#FFLAGS+= -g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none -endif -FFLAGS += $(GLOBAL_FLAG) +#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) + +# Detect O/S kernel (Linux, Darwin...) +UNAME_S := $(shell uname -s) + +# Detect architecture (x86_64, ppc64le...) +UNAME_P := $(shell uname -p) + +#------------------------------------------------------------------------------- # REMOVE MACFLAG IF NOT ON MAC OR FOR F2PY -UNAME := $(shell uname -s) ifdef f2pymode MACFLAG= else -ifneq ($(UNAME), Darwin) +ifneq ($(UNAME_S), Darwin) MACFLAG= endif endif +############################################################ +# Default compiler flags +# To change optimisation level, override these as follows: +# make CXXFLAGS="-O0 -g" +# or export them as environment variables +# For debugging Fortran, one could e.g. 
use: +# FCFLAGS="-g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none" +############################################################ +FCFLAGS ?= $(GLOBAL_FLAG) -fbounds-check +CXXFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG +NVCCFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG -use_fast_math -lineinfo +LDFLAGS ?= $(STDLIB) -ifeq ($(origin CXXFLAGS),undefined) -CXXFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +ifneq ($(FFLAGS),) +# Madgraph used to use FFLAGS, so the user probably tries to change the flags specifically for madgraph: +FCFLAGS = $(FFLAGS) endif -ifeq ($(origin CFLAGS),undefined) -CFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +# Madgraph-specific flags: +WARNFLAGS = -Wall -Wshadow -Wextra +ifeq (,$(findstring -std=,$(CXXFLAGS))) +CXXSTANDARD= -std=c++17 endif +MG_FCFLAGS += -fPIC -w +MG_CXXFLAGS += -fPIC $(CXXSTANDARD) $(WARNFLAGS) $(MACFLAG) +MG_NVCCFLAGS += -fPIC $(CXXSTANDARD) --forward-unknown-to-host-compiler $(WARNFLAGS) +MG_LDFLAGS += $(MACFLAG) # Set FC unless it's defined by an environment variable ifeq ($(origin FC),default) @@ -49,45 +71,40 @@ endif # Increase the number of allowed charcters in a Fortran line ifeq ($(FC), ftn) -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler else VERS="$(shell $(FC) --version | grep ifort -i)" ifeq ($(VERS), "") -FFLAGS+= -ffixed-line-length-132 +MG_FCFLAGS += -ffixed-line-length-132 else -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler endif endif -UNAME := $(shell uname -s) -ifeq ($(origin LDFLAGS), undefined) -LDFLAGS=$(STDLIB) $(MACFLAG) -endif - # Options: dynamic, lhapdf # Option dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) dylibext=dylib else dylibext=so endif ifdef dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) libext=dylib -FFLAGS+= -fno-common -LDFLAGS += -bundle +MG_FCFLAGS += -fno-common +MG_LDFLAGS += -bundle define CREATELIB $(FC) -dynamiclib 
-undefined dynamic_lookup -o $(1) $(2) endef else libext=so -FFLAGS+= -fPIC -LDFLAGS += -shared +MG_FCFLAGS += -fPIC +MG_LDFLAGS += -shared define CREATELIB -$(FC) $(FFLAGS) $(LDFLAGS) -o $(1) $(2) +$(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MG_LDFLAGS) $(LDFLAGS) -o $(1) $(2) endef endif else @@ -101,17 +118,9 @@ endif # Option lhapdf ifneq ($(lhapdf),) -CXXFLAGS += $(shell $(lhapdf) --cppflags) +MG_CXXFLAGS += $(shell $(lhapdf) --cppflags) alfas_functions=alfas_functions_lhapdf llhapdf+= $(shell $(lhapdf) --cflags --libs) -lLHAPDF -# check if we need to activate c++11 (for lhapdf6.2) -ifeq ($(origin CXX),default) -ifeq ($lhapdfversion$lhapdfsubversion,62) -CXX=$(DEFAULT_CPP_COMPILER) -std=c++11 -else -CXX=$(DEFAULT_CPP_COMPILER) -endif -endif else alfas_functions=alfas_functions llhapdf= @@ -120,4 +129,207 @@ endif # Helper function to check MG5 version define CHECK_MG5AMC_VERSION python -c 'import re; from distutils.version import StrictVersion; print StrictVersion("$(MG5AMC_VERSION)") >= StrictVersion("$(1)") if re.match("^[\d\.]+$$","$(MG5AMC_VERSION)") else True;' -endef \ No newline at end of file +endef + +#------------------------------------------------------------------------------- + +# Set special cases for non-gcc/clang builds +# AVX below gets overridden from outside in architecture-specific builds +AVX ?= none +# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] +# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] +$(info AVX=$(AVX)) +ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + endif +else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for 
clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif +endif + +# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? +MG_CXXFLAGS+= $(AVXFLAGS) + +#------------------------------------------------------------------------------- + +#=== Configure the CUDA compiler if available + +# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) +# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below +ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside + $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") + override CUDA_HOME=disabled +endif + +# If CUDA_HOME is not set, try to set it from the location of nvcc +ifndef CUDA_HOME + CUDA_HOME = $(patsubst %bin/nvcc,%,$(shell which nvcc 2>/dev/null)) + $(info CUDA_HOME="$(CUDA_HOME)") +endif + +# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists +ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) + NVCC = $(CUDA_HOME)/bin/nvcc + USE_NVTX ?=-DUSE_NVTX + # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html + # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ + # Default: use compute capability 70 (Volta architecture), and embed PTX to support later architectures, too. + # Set MADGRAPH_CUDA_ARCHITECTURE to the desired value to change the default. + # Build for multiple architectures using a space-separated list, e.g. 
MADGRAPH_CUDA_ARCHITECTURE="70 80" + MADGRAPH_CUDA_ARCHITECTURE ?= 70 + # Generate PTX for the first architecture: + CUARCHFLAGS := --generate-code arch=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)),code=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)) + # Generate device code for all architectures: + CUARCHFLAGS += $(foreach arch,$(MADGRAPH_CUDA_ARCHITECTURE), --generate-code arch=compute_$(arch),code=sm_$(arch)) + + CUINC = -I$(CUDA_HOME)/include/ + CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! + MG_LDFLAGS += $(CURANDLIBFLAGS) + MG_NVCCFLAGS += $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) + +else ifeq ($(AVX),cuda) + $(error nvcc is not visible in PATH. Either add it to PATH or export CUDA_HOME to compile with cuda) + ifeq ($(AVX),cuda) + $(error Cannot compile for cuda without NVCC) + endif +endif + +# Set the host C++ compiler for nvcc via "-ccbin " +# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." is not supported) +MG_NVCCFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) + +# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) +ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) +MG_NVCCFLAGS += -allow-unsupported-compiler +endif + +#------------------------------------------------------------------------------- + +#=== Configure ccache for C++ and CUDA builds + +# Enable ccache if USECCACHE=1 +ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) + override CXX:=ccache $(CXX) +endif + +ifneq ($(NVCC),) + ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) + override NVCC:=ccache $(NVCC) + endif +endif + +#------------------------------------------------------------------------------- + +#=== Configure PowerPC-specific compiler flags for C++ and CUDA + +# PowerPC-specific CXX / CUDA compiler flags (being reviewed) +ifeq ($(UNAME_P),ppc64le) + MG_CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none 
and sse4 + MG_NVCCFLAGS+= -Xcompiler -mno-float128 + + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + endif +endif + +#------------------------------------------------------------------------------- +#=== Apple-specific compiler/linker options + +# Add -std=c++17 explicitly to avoid build errors on macOS +# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" +ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) +MG_CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 +endif + +ifeq ($(UNAME_S),Darwin) +STDLIB = -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) +MG_LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" +else +MG_LDFLAGS += -Xlinker --no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +endif + +#------------------------------------------------------------------------------- + +#=== C++/CUDA-specific flags for floating-point types and random generators to use + +# Set the default FPTYPE (floating point type) choice +FPTYPE ?= m + +# Set the default HELINL (inline helicities?) choice +HELINL ?= 0 + +# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice +HRDCOD ?= 0 + +# Set the default RNDGEN (random number generator) choice +ifeq ($(NVCC),) + RNDGEN ?= hasNoCurand +else + RNDGEN ?= hasCurand +endif + +# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so sub-makes don't go back to the defaults +export AVX +export AVXFLAGS +export FPTYPE +export HELINL +export HRDCOD +export RNDGEN + +#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN + +# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") +# $(info FPTYPE=$(FPTYPE)) +ifeq ($(FPTYPE),d) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE +else ifeq ($(FPTYPE),f) + COMMONFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT +else ifeq ($(FPTYPE),m) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT +else + $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) +endif + +# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") +# $(info HELINL=$(HELINL)) +ifeq ($(HELINL),1) + COMMONFLAGS += -DMGONGPU_INLINE_HELAMPS +else ifneq ($(HELINL),0) + $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") +# $(info HRDCOD=$(HRDCOD)) +ifeq ($(HRDCOD),1) + COMMONFLAGS += -DMGONGPU_HARDCODE_PARAM +else ifneq ($(HRDCOD),0) + $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") +$(info RNDGEN=$(RNDGEN)) +ifeq ($(RNDGEN),hasNoCurand) + override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND + override CURANDLIBFLAGS = +else ifeq ($(RNDGEN),hasCurand) + CXXFLAGSCURAND = $(CUINC) +else + $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) +endif + +MG_CXXFLAGS += $(COMMONFLAGS) +MG_NVCCFLAGS += $(COMMONFLAGS) + 
+#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttg.mad/Source/makefile b/epochX/cudacpp/gg_ttg.mad/Source/makefile index 00c73099a0..407b1b753e 100644 --- a/epochX/cudacpp/gg_ttg.mad/Source/makefile +++ b/epochX/cudacpp/gg_ttg.mad/Source/makefile @@ -10,8 +10,8 @@ include make_opts # Source files -PROCESS= hfill.o matrix.o myamp.o -DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o +PROCESS = hfill.o matrix.o myamp.o +DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o HBOOK = hfill.o hcurve.o hbook1.o hbook2.o GENERIC = $(alfas_functions).o transpole.o invarients.o hfill.o pawgraphs.o ran1.o \ rw_events.o rw_routines.o kin_functions.o open_file.o basecode.o setrun.o \ @@ -22,7 +22,7 @@ GENSUDGRID = gensudgrid.o is-sud.o setrun_gen.o rw_routines.o open_file.o # Locally compiled libraries -LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) +LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) # Binaries @@ -32,6 +32,9 @@ BINARIES = $(BINDIR)gen_ximprove $(BINDIR)gensudgrid $(BINDIR)combine_runs all: $(LIBRARIES) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(LIBDIR)libbias.$(libext) +%.o: %.f *.inc + $(FC) -I. $(MG_FCFLAGS) $(FCFLAGS) -c $< -o $@ + # Libraries $(LIBDIR)libdsample.$(libext): $(DSAMPLE) @@ -39,36 +42,35 @@ $(LIBDIR)libdsample.$(libext): $(DSAMPLE) $(LIBDIR)libgeneric.$(libext): $(GENERIC) $(call CREATELIB, $@, $^) $(LIBDIR)libdhelas.$(libext): DHELAS - cd DHELAS; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libpdf.$(libext): PDF make_opts - cd PDF; make; cd .. 
+ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" ifneq (,$(filter edff chff, $(pdlabel1) $(pdlabel2))) $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make ; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" else $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make -f makefile_dummy; cd ../../ -endif + $(MAKE) -C $< -f makefile_dummy FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" +endif $(LIBDIR)libcernlib.$(libext): CERNLIB - cd CERNLIB; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" # The bias library is here the dummy by default; compilation of other ones specified in the run_card will be done by MG5aMC directly. $(LIBDIR)libbias.$(libext): BIAS/dummy - cd BIAS/dummy; make; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libmodel.$(libext): MODEL param_card.inc - cd MODEL; make + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" param_card.inc: ../Cards/param_card.dat ../bin/madevent treatcards param + touch $@ # madevent doesn't update the time stamp if there's nothing to do -$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o - $(FC) $(LDFLAGS) -o $@ $^ -#$(BINDIR)combine_events: $(COMBINE) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) run_card.inc $(LIBDIR)libbias.$(libext) -# $(FC) -o $@ $(COMBINE) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC $(llhapdf) $(LDFLAGS) -lbias +$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o + $(FC) $(MG_LDFLAGS) $(LDFLAGS) -o $@ $^ $(BINDIR)gensudgrid: $(GENSUDGRID) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libcernlib.$(libext) - $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(LDFLAGS) + $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) 
-lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(MG_LDFLAGS) $(LDFLAGS) # Dependencies @@ -85,6 +87,7 @@ rw_events.o: rw_events.f run_config.inc run_card.inc: ../Cards/run_card.dat ../bin/madevent treatcards run + touch $@ # madevent doesn't update the time stamp if there's nothing to do clean4pdf: rm -f ../lib/libpdf.$(libext) @@ -120,7 +123,7 @@ $(LIBDIR)libiregi.a: $(IREGIDIR) cd $(IREGIDIR); make ln -sf ../Source/$(IREGIDIR)libiregi.a $(LIBDIR)libiregi.a -cleanSource: +clean: $(RM) *.o $(LIBRARIES) $(BINARIES) cd PDF; make clean; cd .. cd PDF/gammaUPC; make clean; cd ../../ @@ -132,11 +135,3 @@ cleanSource: cd BIAS/ptj_bias; make clean; cd ../.. if [ -d $(CUTTOOLSDIR) ]; then cd $(CUTTOOLSDIR); make clean; cd ..; fi if [ -d $(IREGIDIR) ]; then cd $(IREGIDIR); make clean; cd ..; fi - -clean: cleanSource - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make clean; cd -; done; - -cleanavx: - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; -cleanall: cleanSource # THIS IS THE ONE - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; diff --git a/epochX/cudacpp/gg_ttg.mad/SubProcesses/Bridge.h b/epochX/cudacpp/gg_ttg.mad/SubProcesses/Bridge.h index bf8b5e024d..c263f39a62 100644 --- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/Bridge.h +++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/Bridge.h @@ -236,7 +236,7 @@ namespace mg5amcCpu #ifdef __CUDACC__ if( ( m_nevt < s_gputhreadsmin ) || ( m_nevt % s_gputhreadsmin != 0 ) ) throw std::runtime_error( "Bridge constructor: nevt should be a multiple of " + std::to_string( s_gputhreadsmin ) ); - while( m_nevt != m_gpublocks * m_gputhreads ) + while( m_nevt != static_cast( m_gpublocks * m_gputhreads ) ) { m_gputhreads /= 2; if( m_gputhreads < s_gputhreadsmin ) @@ -266,7 +266,7 @@ namespace mg5amcCpu template void Bridge::set_gpugrid( const int gpublocks, const int gputhreads ) { - if( m_nevt != gpublocks * gputhreads ) + if( m_nevt != static_cast( gpublocks * gputhreads ) ) throw 
std::runtime_error( "Bridge: gpublocks*gputhreads must equal m_nevt in set_gpugrid" ); m_gpublocks = gpublocks; m_gputhreads = gputhreads; diff --git a/epochX/cudacpp/gg_ttg.mad/SubProcesses/MadgraphTest.h b/epochX/cudacpp/gg_ttg.mad/SubProcesses/MadgraphTest.h index ef40624c88..b0f2250c25 100644 --- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/MadgraphTest.h +++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/MadgraphTest.h @@ -199,10 +199,6 @@ class MadgraphTest : public testing::TestWithParam } }; -// Since we link both the CPU-only and GPU tests into the same executable, we prevent -// a multiply defined symbol by only compiling this in the non-CUDA phase: -#ifndef __CUDACC__ - /// Compare momenta and matrix elements. /// This uses an implementation of TestDriverBase to run a madgraph workflow, /// and compares momenta and matrix elements with a reference file. @@ -307,6 +303,4 @@ TEST_P( MadgraphTest, CompareMomentaAndME ) } } -#endif // __CUDACC__ - #endif /* MADGRAPHTEST_H_ */ diff --git a/epochX/cudacpp/gg_ttg.mad/SubProcesses/MatrixElementKernels.cc b/epochX/cudacpp/gg_ttg.mad/SubProcesses/MatrixElementKernels.cc index 74b5239ebf..2d6f27cd5d 100644 --- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/MatrixElementKernels.cc +++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/MatrixElementKernels.cc @@ -196,6 +196,9 @@ namespace mg5amcGpu void MatrixElementKernelDevice::setGrid( const int gpublocks, const int gputhreads ) { + m_gpublocks = gpublocks; + m_gputhreads = gputhreads; + if( m_gpublocks == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gpublocks must be > 0 in setGrid" ); if( m_gputhreads == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gputhreads must be > 0 in setGrid" ); if( this->nevt() != m_gpublocks * m_gputhreads ) throw std::runtime_error( "MatrixElementKernelDevice: nevt mismatch in setGrid" ); diff --git a/epochX/cudacpp/gg_ttg.mad/SubProcesses/P1_gg_ttxg/check_sa.cc b/epochX/cudacpp/gg_ttg.mad/SubProcesses/P1_gg_ttxg/check_sa.cc index 
3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/P1_gg_ttxg/check_sa.cc +++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/P1_gg_ttxg/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/gg_ttg.mad/SubProcesses/P1_gg_ttxg/counters.cc b/epochX/cudacpp/gg_ttg.mad/SubProcesses/P1_gg_ttxg/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/P1_gg_ttxg/counters.cc +++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/P1_gg_ttxg/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk index 509307506b..a522ddb335 100644 --- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk @@ -1,56 +1,41 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: use ':=' to ensure that the value of CUDACPP_MAKEFILE is not modified further down after including make_opts -#=== NB: use 'override' to ensure that the value can not be modified from the outside -override CUDACPP_MAKEFILE := $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) -###$(info CUDACPP_MAKEFILE='$(CUDACPP_MAKEFILE)') - -#=== NB: different names (e.g. 
cudacpp.mk and cudacpp_src.mk) are used in the Subprocess and src directories -override CUDACPP_SRC_MAKEFILE = cudacpp_src.mk - -#------------------------------------------------------------------------------- - -#=== Use bash in the Makefile (https://www.gnu.org/software/make/manual/html_node/Choosing-the-Shell.html) - -SHELL := /bin/bash - -#------------------------------------------------------------------------------- - -#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) - -# Detect O/S kernel (Linux, Darwin...) -UNAME_S := $(shell uname -s) -###$(info UNAME_S='$(UNAME_S)') - -# Detect architecture (x86_64, ppc64le...) -UNAME_P := $(shell uname -p) -###$(info UNAME_P='$(UNAME_P)') - -#------------------------------------------------------------------------------- - -#=== Include the common MG5aMC Makefile options - -# OM: this is crucial for MG5aMC flag consistency/documentation -# AV: temporarely comment this out because it breaks cudacpp builds -ifneq ($(wildcard ../../Source/make_opts),) -include ../../Source/make_opts -endif +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
+ +# This makefile extends the Fortran makefile called "makefile" + +CUDACPP_SRC_MAKEFILE = cudacpp_src.mk + +# Self-invocation with adapted flags: +cppnative: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=native AVXFLAGS="-march=native" cppbuild +cppnone: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=none AVXFLAGS= cppbuild +cppsse4: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=sse4 AVXFLAGS=-march=nehalem cppbuild +cppavx2: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=avx2 AVXFLAGS=-march=haswell cppbuild +cppavx512y: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512y AVXFLAGS="-march=skylake-avx512 -mprefer-vector-width=256" cppbuild +cppavx512z: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512z AVXFLAGS="-march=skylake-avx512 -DMGONGPU_PVW512" cppbuild +cuda: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=cuda cudabuild #------------------------------------------------------------------------------- #=== Configure common compiler flags for C++ and CUDA +# NB: The base flags are defined in the fortran "makefile" + +# Include directories +INCFLAGS = -I. -I../../src -INCFLAGS = -I. 
-OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here +MG_CXXFLAGS += $(INCFLAGS) +MG_NVCCFLAGS += $(INCFLAGS) # Dependency on src directory -MG5AMC_COMMONLIB = mg5amc_common -LIBFLAGS = -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -INCFLAGS += -I../../src +MG5AMC_COMMONLIB = mg5amc_common # Compiler-specific googletest build directory (#125 and #738) ifneq ($(shell $(CXX) --version | grep '^Intel(R) oneAPI DPC++/C++ Compiler'),) @@ -99,356 +84,42 @@ endif #------------------------------------------------------------------------------- -#=== Configure the C++ compiler - -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) -Wall -Wshadow -Wextra -ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS += -ffast-math # see issue #117 -endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY - -# Optionally add debug flags to display the full list of flags (eg on Darwin) -###CXXFLAGS+= -v - -# Note: AR, CXX and FC are implicitly defined if not set externally -# See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html - -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler - -# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) -# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below -ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside - $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") - override CUDA_HOME=disabled -endif - -# If CUDA_HOME is not set, try to set it from the location of nvcc -ifndef CUDA_HOME - CUDA_HOME = $(patsubst 
%bin/nvcc,%,$(shell which nvcc 2>/dev/null)) - $(warning CUDA_HOME was not set: using "$(CUDA_HOME)") -endif - -# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists -ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) - NVCC = $(CUDA_HOME)/bin/nvcc - USE_NVTX ?=-DUSE_NVTX - # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html - # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ - # Default: use compute capability 70 for V100 (CERN lxbatch, CERN itscrd, Juwels Cluster). - # Embed device code for 70, and PTX for 70+. - # Export MADGRAPH_CUDA_ARCHITECTURE (comma-separated list) to use another value or list of values (see #533). - # Examples: use 60 for P100 (Piz Daint), 80 for A100 (Juwels Booster, NVidia raplab/Curiosity). - MADGRAPH_CUDA_ARCHITECTURE ?= 70 - ###CUARCHFLAGS = -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=compute_$(MADGRAPH_CUDA_ARCHITECTURE) -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=sm_$(MADGRAPH_CUDA_ARCHITECTURE) # Older implementation (AV): go back to this one for multi-GPU support #533 - ###CUARCHFLAGS = --gpu-architecture=compute_$(MADGRAPH_CUDA_ARCHITECTURE) --gpu-code=sm_$(MADGRAPH_CUDA_ARCHITECTURE),compute_$(MADGRAPH_CUDA_ARCHITECTURE) # Newer implementation (SH): cannot use this as-is for multi-GPU support #533 - comma:=, - CUARCHFLAGS = $(foreach arch,$(subst $(comma), ,$(MADGRAPH_CUDA_ARCHITECTURE)),-gencode arch=compute_$(arch),code=compute_$(arch) -gencode arch=compute_$(arch),code=sm_$(arch)) - CUINC = -I$(CUDA_HOME)/include/ - ifeq ($(RNDGEN),hasNoCurand) - CURANDLIBFLAGS= - else - CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! 
- endif - CUOPTFLAGS = -lineinfo - CUFLAGS = $(foreach opt, $(OPTFLAGS), -Xcompiler $(opt)) $(CUOPTFLAGS) $(INCFLAGS) $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) -use_fast_math - ###CUFLAGS += -Xcompiler -Wall -Xcompiler -Wextra -Xcompiler -Wshadow - ###NVCC_VERSION = $(shell $(NVCC) --version | grep 'Cuda compilation tools' | cut -d' ' -f5 | cut -d, -f1) - CUFLAGS += -std=c++17 # need CUDA >= 11.2 (see #333): this is enforced in mgOnGpuConfig.h - # Without -maxrregcount: baseline throughput: 6.5E8 (16384 32 12) up to 7.3E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 160 # improves throughput: 6.9E8 (16384 32 12) up to 7.7E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 128 # improves throughput: 7.3E8 (16384 32 12) up to 7.6E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 96 # degrades throughput: 4.1E8 (16384 32 12) up to 4.5E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 64 # degrades throughput: 1.7E8 (16384 32 12) flat at 1.7E8 (65536 128 12) -else ifneq ($(origin REQUIRE_CUDA),undefined) - # If REQUIRE_CUDA is set but no cuda is found, stop here (e.g. for CI tests on GPU #443) - $(error No cuda installation found (set CUDA_HOME or make nvcc visible in PATH)) -else - # No cuda. Switch cuda compilation off and go to common random numbers in C++ - $(warning CUDA_HOME is not set or is invalid: export CUDA_HOME to compile with cuda) - override NVCC= - override USE_NVTX= - override CUINC= - override CURANDLIBFLAGS= -endif -export NVCC -export CUFLAGS - -# Set the host C++ compiler for nvcc via "-ccbin " -# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." 
is not supported) -CUFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) - -# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) -ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) -CUFLAGS += -allow-unsupported-compiler -endif - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ and CUDA builds - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif -ifneq ($(NVCC),) - ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) - override NVCC:=ccache $(NVCC) - endif -endif - -#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for C++ and CUDA - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # would increase to none=4.08-4.12E6, sse4=4.99-5.03E6! -else - ###CXXFLAGS+= -flto # also on Intel this would increase throughputs by a factor 2 to 4... - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -# PowerPC-specific CUDA compiler flags (to be reviewed!) 
-ifeq ($(UNAME_P),ppc64le) - CUFLAGS+= -Xcompiler -mno-float128 -endif - -#------------------------------------------------------------------------------- - #=== Configure defaults and check if user-defined choices exist for OMPFLAGS, AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the default OMPFLAGS choice -ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on Intel (was ok without nvcc but not ok with nvcc before #578) -else ifneq ($(shell $(CXX) --version | egrep '^(clang)'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on clang (was not ok without or with nvcc before #578) -###else ifneq ($(shell $(CXX) --version | egrep '^(Apple clang)'),) # AV for Mac (Apple clang compiler) -else ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) +OMPFLAGS ?= -fopenmp +ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) override OMPFLAGS = # AV disable OpenMP MT on Apple clang (builds fail in the CI #578) -###override OMPFLAGS = -fopenmp # OM reenable OpenMP MT on Apple clang? 
(AV Oct 2023: this still fails in the CI) -else -override OMPFLAGS = -fopenmp # enable OpenMP MT by default on all other platforms -###override OMPFLAGS = # disable OpenMP MT on all other platforms (default before #575) -endif - -# Set the default AVX (vectorization) choice -ifeq ($(AVX),) - ifeq ($(UNAME_P),ppc64le) - ###override AVX = none - override AVX = sse4 - else ifeq ($(UNAME_P),arm) - ###override AVX = none - override AVX = sse4 - else ifeq ($(wildcard /proc/cpuinfo),) - override AVX = none - $(warning Using AVX='$(AVX)' because host SIMD features cannot be read from /proc/cpuinfo) - else ifeq ($(shell grep -m1 -c avx512vl /proc/cpuinfo)$(shell $(CXX) --version | grep ^clang),1) - override AVX = 512y - ###$(info Using AVX='$(AVX)' as no user input exists) - else - override AVX = avx2 - ifneq ($(shell grep -m1 -c avx512vl /proc/cpuinfo),1) - $(warning Using AVX='$(AVX)' because host does not support avx512vl) - else - $(warning Using AVX='$(AVX)' because this is faster than avx512vl for clang) - endif - endif -else - ###$(info Using AVX='$(AVX)' according to user input) -endif - -# Set the default FPTYPE (floating point type) choice -ifeq ($(FPTYPE),) - override FPTYPE = d -endif - -# Set the default HELINL (inline helicities?) choice -ifeq ($(HELINL),) - override HELINL = 0 -endif - -# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice -ifeq ($(HRDCOD),) - override HRDCOD = 0 -endif - -# Set the default RNDGEN (random number generator) choice -ifeq ($(RNDGEN),) - ifeq ($(NVCC),) - override RNDGEN = hasNoCurand - else ifeq ($(RNDGEN),) - override RNDGEN = hasCurand - endif endif -# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so that it is not necessary to pass them to the src Makefile too -export AVX -export FPTYPE -export HELINL -export HRDCOD -export RNDGEN +# Export here, so sub makes don't fall back to the defaults: export OMPFLAGS -#------------------------------------------------------------------------------- - -#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN - -# Set the build flags appropriate to OMPFLAGS -$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override 
AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS - CUFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM - CUFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND -else ifeq ($(RNDGEN),hasCurand) - override CXXFLAGSCURAND = -else - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- #=== Configure build directories and build lockfiles === -# Build directory "short" tag (defines target and path to the optional build directory) -# (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) - -# Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) -# (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) - -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIR = ../../lib/$(BUILDDIR) - override LIBDIRRPATH = '$$ORIGIN/../$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is set = 1)) -else - override BUILDDIR = . - override LIBDIR = ../../lib - override LIBDIRRPATH = '$$ORIGIN/$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) +# Build directory "short" tag (defines target and path to the build directory) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +CUDACPP_BUILDDIR = build.$(DIRTAG) +CUDACPP_LIBDIR := ../../lib/$(CUDACPP_BUILDDIR) +LIBDIRRPATH := '$$ORIGIN:$$ORIGIN/../$(CUDACPP_LIBDIR)' +ifneq ($(AVX),) + $(info Building CUDACPP in CUDACPP_BUILDDIR=$(CUDACPP_BUILDDIR). 
Libs in $(CUDACPP_LIBDIR)) endif -###override INCDIR = ../../include -###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) -# On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH +# On Linux, set rpath to CUDACPP_LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables or shared libraries ($ORIGIN on Linux) -# On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary +# On Darwin, building libraries with absolute paths in CUDACPP_LIBDIR makes this unnecessary ifeq ($(UNAME_S),Darwin) override CXXLIBFLAGSRPATH = override CULIBFLAGSRPATH = - override CXXLIBFLAGSRPATH2 = - override CULIBFLAGSRPATH2 = else # RPATH to cuda/cpp libs when linking executables override CXXLIBFLAGSRPATH = -Wl,-rpath,$(LIBDIRRPATH) override CULIBFLAGSRPATH = -Xlinker -rpath,$(LIBDIRRPATH) - # RPATH to common lib when linking cuda/cpp libs - override CXXLIBFLAGSRPATH2 = -Wl,-rpath,'$$ORIGIN' - override CULIBFLAGSRPATH2 = -Xlinker -rpath,'$$ORIGIN' endif # Setting LD_LIBRARY_PATH or DYLD_LIBRARY_PATH in the RUNTIME is no longer necessary (neither on Linux nor on Mac) @@ -458,107 +129,68 @@ override RUNTIME = #=== Makefile TARGETS and build rules below #=============================================================================== -cxx_main=$(BUILDDIR)/check.exe -fcxx_main=$(BUILDDIR)/fcheck.exe +cxx_main=$(CUDACPP_BUILDDIR)/check.exe +fcxx_main=$(CUDACPP_BUILDDIR)/fcheck.exe -ifneq ($(NVCC),) -cu_main=$(BUILDDIR)/gcheck.exe -fcu_main=$(BUILDDIR)/fgcheck.exe -else -cu_main= -fcu_main= -endif - -testmain=$(BUILDDIR)/runTest.exe +cu_main=$(CUDACPP_BUILDDIR)/gcheck.exe +fcu_main=$(CUDACPP_BUILDDIR)/fgcheck.exe ifneq ($(GTESTLIBS),) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) $(testmain) -else -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) 
+testmain=$(CUDACPP_BUILDDIR)/runTest.exe +cutestmain=$(CUDACPP_BUILDDIR)/runTest_cuda.exe endif -# Target (and build options): debug -MAKEDEBUG= -debug: OPTFLAGS = -g -O0 -debug: CUOPTFLAGS = -G -debug: MAKEDEBUG := debug -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -$(BUILDDIR)/.build.$(TAG): - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @if [ "$(oldtagsb)" != "" ]; then echo "Cannot build for tag=$(TAG) as old builds exist for other tags:"; echo " $(oldtagsb)"; echo "Please run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @touch $(BUILDDIR)/.build.$(TAG) +cppbuild: $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(cxx_main) $(fcxx_main) $(testmain) +cudabuild: $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(cu_main) $(fcu_main) $(cutestmain) # Generic target and build rules: objects from CUDA compilation -ifneq ($(NVCC),) -$(BUILDDIR)/%.o : %.cu *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cu *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c $< -o $@ -$(BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ -endif +$(CUDACPP_BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ # Generic target and build rules: objects from C++ compilation # (NB do not include CUINC here! 
add it only for NVTX or curand #679) -$(BUILDDIR)/%.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Apply special build flags only to CrossSectionKernel.cc and gCrossSectionKernel.cu (no fast math, see #117 and #516) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS := $(filter-out -ffast-math,$(CXXFLAGS)) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math +$(CUDACPP_BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math ifneq ($(NVCC),) -$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -fno-fast-math +$(CUDACPP_BUILDDIR)/gCrossSectionKernels.o: NVCCFLAGS += -Xcompiler -fno-fast-math endif endif # Apply special build flags only to check_sa.o and gcheck_sa.o (NVTX in timermap.h, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) -$(BUILDDIR)/gcheck_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) # Apply special build flags only to check_sa and CurandRandomNumberKernel (curand headers, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gcheck_sa.o: CUFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gCurandRandomNumberKernel.o: CUFLAGS += $(CXXFLAGSCURAND) -ifeq ($(RNDGEN),hasCurand) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CUINC) -endif +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) 
+$(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) + # Avoid "warning: builtin __has_trivial_... is deprecated; use __is_trivially_... instead" in nvcc with icx2023 (#592) ifneq ($(shell $(CXX) --version | egrep '^(Intel)'),) ifneq ($(NVCC),) -CUFLAGS += -Xcompiler -Wno-deprecated-builtins +MG_NVCCFLAGS += -Xcompiler -Wno-deprecated-builtins endif endif -# Avoid clang warning "overriding '-ffp-contract=fast' option with '-ffp-contract=on'" (#516) -# This patch does remove the warning, but I prefer to keep it disabled for the moment... -###ifneq ($(shell $(CXX) --version | egrep '^(clang|Apple clang|Intel)'),) -###$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -Wno-overriding-t-option -###ifneq ($(NVCC),) -###$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -Wno-overriding-t-option -###endif -###endif - #### Apply special build flags only to CPPProcess.cc (-flto) ###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += -flto -#### Apply special build flags only to CPPProcess.cc (AVXFLAGS) -###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += $(AVXFLAGS) - #------------------------------------------------------------------------------- -# Target (and build rules): common (src) library -commonlib : $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc $(BUILDDIR)/.build.$(TAG) - $(MAKE) -C ../../src $(MAKEDEBUG) -f $(CUDACPP_SRC_MAKEFILE) +$(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc + $(MAKE) AVX=$(AVX) AVXFLAGS="$(AVXFLAGS)" -C ../../src -f $(CUDACPP_SRC_MAKEFILE) #------------------------------------------------------------------------------- @@ -566,162 +198,123 @@ processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') ###$(info processid_short=$(processid_short)) MG5AMC_CXXLIB = mg5amc_$(processid_short)_cpp -cxx_objects_lib=$(BUILDDIR)/CPPProcess.o $(BUILDDIR)/MatrixElementKernels.o $(BUILDDIR)/BridgeKernels.o $(BUILDDIR)/CrossSectionKernels.o 
-cxx_objects_exe=$(BUILDDIR)/CommonRandomNumberKernel.o $(BUILDDIR)/RamboSamplingKernels.o +cxx_objects_lib=$(CUDACPP_BUILDDIR)/CPPProcess.o $(CUDACPP_BUILDDIR)/MatrixElementKernels.o $(CUDACPP_BUILDDIR)/BridgeKernels.o $(CUDACPP_BUILDDIR)/CrossSectionKernels.o +cxx_objects_exe=$(CUDACPP_BUILDDIR)/CommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/RamboSamplingKernels.o -ifneq ($(NVCC),) MG5AMC_CULIB = mg5amc_$(processid_short)_cuda -cu_objects_lib=$(BUILDDIR)/gCPPProcess.o $(BUILDDIR)/gMatrixElementKernels.o $(BUILDDIR)/gBridgeKernels.o $(BUILDDIR)/gCrossSectionKernels.o -cu_objects_exe=$(BUILDDIR)/gCommonRandomNumberKernel.o $(BUILDDIR)/gRamboSamplingKernels.o -endif +cu_objects_lib=$(CUDACPP_BUILDDIR)/gCPPProcess.o $(CUDACPP_BUILDDIR)/gMatrixElementKernels.o $(CUDACPP_BUILDDIR)/gBridgeKernels.o $(CUDACPP_BUILDDIR)/gCrossSectionKernels.o +cu_objects_exe=$(CUDACPP_BUILDDIR)/gCommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/gRamboSamplingKernels.o # Target (and build rules): C++ and CUDA shared libraries -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) - $(CXX) -shared -o $@ $(cxx_objects_lib) $(CXXLIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) - -ifneq ($(NVCC),) -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) - $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -endif +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) + $(CXX) -shared -o $@ 
$(cxx_objects_lib) $(CXXLIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) $(MG_LDFLAGS) $(LDFLAGS) -#------------------------------------------------------------------------------- - -# Target (and build rules): Fortran include files -###$(INCDIR)/%.inc : ../%.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### \cp $< $@ +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) + $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) #------------------------------------------------------------------------------- # Target (and build rules): C++ and CUDA standalone executables -$(cxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cxx_main): $(BUILDDIR)/check_sa.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o - $(CXX) -o $@ $(BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o $(CURANDLIBFLAGS) -ifneq ($(NVCC),) +$(cxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(cxx_main): $(CUDACPP_BUILDDIR)/check_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) + ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(cu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(cu_main): LIBFLAGS 
+= -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(cu_main): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(cu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cu_main): $(BUILDDIR)/gcheck_sa.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o - $(NVCC) -o $@ $(BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o $(CURANDLIBFLAGS) +$(cu_main): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif +$(cu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(cu_main): $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- - -# Generic target and build rules: objects from Fortran compilation -$(BUILDDIR)/%.o : %.f *.inc - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(FC) -I. -c $< -o $@ - -# Generic target and build rules: objects from Fortran compilation -###$(BUILDDIR)/%.o : %.f *.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi -### $(FC) -I. -I$(INCDIR) -c $< -o $@ - -# Target (and build rules): Fortran standalone executables -###$(BUILDDIR)/fcheck_sa.o : $(INCDIR)/fbridge.inc +# Check executables: ifeq ($(UNAME_S),Darwin) -$(fcxx_main): LIBFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 +$(fcxx_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif -$(fcxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcxx_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) - $(CXX) -o $@ $(BUILDDIR)/fcheck_sa.o $(OMPFLAGS) $(BUILDDIR)/fsampler.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) +$(fcxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(fcxx_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(cxx_objects_exe) $(OMPFLAGS) $(CUDACPP_BUILDDIR)/fsampler.o -lgfortran -L$(CUDACPP_LIBDIR) $(MG_LDFLAGS) $(LDFLAGS) -ifneq ($(NVCC),) ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(fcu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(fcu_main): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(fcu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(fcu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') endif ifeq ($(UNAME_S),Darwin) -$(fcu_main): LIBFLAGS += -L$(shell dirname 
$(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 -endif -$(fcu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcu_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) - $(NVCC) -o $@ $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) +$(fcu_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif +$(fcu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(fcu_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(cu_objects_exe) -lgfortran $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- # Target (and build rules): test objects and test executable -$(BUILDDIR)/testxxx.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx_cu.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx_cu.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -endif +$(testmain) $(cutestmain): $(GTESTLIBS) +$(testmain) $(cutestmain): INCFLAGS += $(GTESTINC) +$(testmain) $(cutestmain): MG_LDFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main -$(BUILDDIR)/testmisc.o: $(GTESTLIBS) 
-$(BUILDDIR)/testmisc.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(CUDACPP_BUILDDIR)/testxxx.o $(CUDACPP_BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) testxxx_cc_ref.txt +$(testmain): $(CUDACPP_BUILDDIR)/testxxx.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions +$(cutestmain): $(CUDACPP_BUILDDIR)/testxxx_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testmisc_cu.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests -endif -$(BUILDDIR)/runTest.o: $(GTESTLIBS) -$(BUILDDIR)/runTest.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/runTest.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/runTest.o +$(CUDACPP_BUILDDIR)/testmisc.o $(CUDACPP_BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/testmisc.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(cutestmain): $(CUDACPP_BUILDDIR)/testmisc_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests + + +$(CUDACPP_BUILDDIR)/runTest.o $(CUDACPP_BUILDDIR)/runTest_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/runTest.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/runTest.o +$(cutestmain): $(CUDACPP_BUILDDIR)/runTest_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/runTest_cu.o + -ifneq ($(NVCC),) -$(BUILDDIR)/runTest_cu.o: $(GTESTLIBS) -$(BUILDDIR)/runTest_cu.o: INCFLAGS += $(GTESTINC) ifneq ($(shell $(CXX) --version | grep ^Intel),) 
-$(testmain): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(testmain): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cutestmain): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cutestmain): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(testmain): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(testmain): $(BUILDDIR)/runTest_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/runTest_cu.o +$(cutestmain): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif -$(testmain): $(GTESTLIBS) -$(testmain): INCFLAGS += $(GTESTINC) -$(testmain): LIBFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main ifneq ($(OMPFLAGS),) ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -$(testmain): LIBFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) +$(testmain): MG_LDFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -$(testmain): LIBFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 +$(testmain): MG_LDFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 ###else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) ###$(testmain): LIBFLAGS += ???? 
# OMP is not supported yet by cudacpp for Apple clang (see #578 and #604) else -$(testmain): LIBFLAGS += -lgomp +$(testmain): MG_LDFLAGS += -lgomp endif endif -ifeq ($(NVCC),) # link only runTest.o -$(testmain): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) - $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -ldl -pthread $(LIBFLAGS) -else # link both runTest.o and runTest_cu.o -$(testmain): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) - $(NVCC) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) -ldl $(LIBFLAGS) -lcuda -endif +$(testmain): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(testmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) + $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -pthread $(MG_LDFLAGS) $(LDFLAGS) + +$(cutestmain): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cutestmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) + $(NVCC) -o $@ $(cu_objects_lib) $(cu_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -lcuda $(MG_LDFLAGS) $(LDFLAGS) # Use target gtestlibs to build only googletest ifneq ($(GTESTLIBS),) @@ -731,72 +324,15 @@ endif # Use flock (Linux only, no Mac) to allow 'make -j' if googletest has not yet been downloaded https://stackoverflow.com/a/32666215 $(GTESTLIBS): ifneq ($(shell which flock 2>/dev/null),) - @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - flock $(BUILDDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) + flock $(TESTDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) else if [ -d $(TESTDIR) ]; then $(MAKE) -C $(TESTDIR); fi endif #------------------------------------------------------------------------------- -# Target: build all targets in all AVX modes (each AVX mode in a separate build directory) -# Split the avxall target into five separate targets to allow parallel 'make -j avxall' builds -# (Hack: add a fbridge.inc dependency to avxall, to ensure it is only copied once for all AVX modes) -avxnone: - @echo - $(MAKE) USEBUILDDIR=1 AVX=none -f $(CUDACPP_MAKEFILE) - -avxsse4: - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 -f $(CUDACPP_MAKEFILE) - -avxavx2: - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 -f $(CUDACPP_MAKEFILE) - -avx512y: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y -f $(CUDACPP_MAKEFILE) - -avx512z: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z -f $(CUDACPP_MAKEFILE) - -ifeq ($(UNAME_P),ppc64le) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else ifeq ($(UNAME_P),arm) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 avxavx2 avx512y avx512z -avxall: avxnone avxsse4 avxavx2 avx512y avx512z -endif - -#------------------------------------------------------------------------------- - -# Target: clean the builds -.PHONY: clean - -clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(BUILDDIR) -else - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe - rm -f $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(LIBDIR)/lib$(MG5AMC_CULIB).so -endif - $(MAKE) -C ../../src clean -f $(CUDACPP_SRC_MAKEFILE) -### rm -rf $(INCDIR) - -cleanall: - @echo - $(MAKE) USEBUILDDIR=0 clean -f $(CUDACPP_MAKEFILE) - @echo - $(MAKE) USEBUILDDIR=0 -C ../../src cleanall -f $(CUDACPP_SRC_MAKEFILE) - rm -rf build.* - # Target: clean the builds as well as the gtest installation(s) 
-distclean: cleanall +distclean: clean cleansrc ifneq ($(wildcard $(TESTDIRCOMMON)),) $(MAKE) -C $(TESTDIRCOMMON) clean endif @@ -848,50 +384,55 @@ endif #------------------------------------------------------------------------------- -# Target: check (run the C++ test executable) +# Target: check/gcheck (run the C++ test executable) # [NB THIS IS WHAT IS USED IN THE GITHUB CI!] -ifneq ($(NVCC),) -check: runTest cmpFcheck cmpFGcheck -else check: runTest cmpFcheck -endif +gcheck: + $(MAKE) AVX=cuda runTest cmpFGcheck # Target: runTest (run the C++ test executable runTest.exe) -runTest: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/runTest.exe +ifneq ($(AVX),cuda) +runTest: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest.exe +else +runTest: cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest_cuda.exe +endif + # Target: runCheck (run the C++ standalone executable check.exe, with a small number of events) -runCheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/check.exe -p 2 32 2 +runCheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe -p 2 32 2 # Target: runGcheck (run the CUDA standalone executable gcheck.exe, with a small number of events) -runGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/gcheck.exe -p 2 32 2 +runGcheck: AVX=cuda +runGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe -p 2 32 2 # Target: runFcheck (run the Fortran standalone executable - with C++ MEs - fcheck.exe, with a small number of events) -runFcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 +runFcheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 # Target: runFGcheck (run the Fortran standalone executable - with CUDA MEs - fgcheck.exe, with a small number of events) -runFGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 +runFGcheck: AVX=cuda +runFGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 # Target: cmpFcheck (compare ME results from the C++ and Fortran with C++ MEs standalone executables, with 
a small number of events) -cmpFcheck: all.$(TAG) +cmpFcheck: cppbuild @echo - @echo "$(BUILDDIR)/check.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi # Target: cmpFGcheck (compare ME results from the CUDA and Fortran with CUDA MEs standalone executables, with a small number of events) -cmpFGcheck: all.$(TAG) +cmpFGcheck: AVX=cuda +cmpFGcheck: + $(MAKE) AVX=cuda cudabuild @echo - @echo "$(BUILDDIR)/gcheck.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fgcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi -# Target: memcheck (run the CUDA standalone executable gcheck.exe with a small number of events through cuda-memcheck) -memcheck: all.$(TAG) - $(RUNTIME) $(CUDA_HOME)/bin/cuda-memcheck --check-api-memory-access yes --check-deprecated-instr yes --check-device-heap yes --demangle full --language c --leak-check full --racecheck-report all --report-api-errors all --show-backtrace yes --tool memcheck --track-unused-memory yes $(BUILDDIR)/gcheck.exe -p 2 32 2 - -#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttg.mad/SubProcesses/makefile b/epochX/cudacpp/gg_ttg.mad/SubProcesses/makefile index d572486c2e..b69917ee1f 100644 --- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/makefile +++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/makefile @@ -1,27 +1,30 @@ SHELL := /bin/bash -include ../../Source/make_opts -FFLAGS+= -w +# Include general setup +OPTIONS_MAKEFILE := ../../Source/make_opts +include $(OPTIONS_MAKEFILE) # Enable the C preprocessor https://gcc.gnu.org/onlinedocs/gfortran/Preprocessing-Options.html -FFLAGS+= -cpp +MG_FCFLAGS += -cpp +MG_CXXFLAGS += -I. -# Compile counters with -O3 as in the cudacpp makefile (avoid being "unfair" to Fortran #740) -CXXFLAGS = -O3 -Wall -Wshadow -Wextra +all: help cppnative + +# Target if user does not specify target +help: + $(info No target specified.) 
+ $(info Viable targets are 'cppnative' (default), 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z' and 'cuda') + $(info Or 'cppall' for all C++ targets) + $(info Or 'ALL' for all C++ and cuda targets) -# Add -std=c++17 explicitly to avoid build errors on macOS -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 -endif -# Enable ccache if USECCACHE=1 +# Enable ccache for C++ if USECCACHE=1 (do not enable it for Fortran since it is not supported for Fortran) ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) override CXX:=ccache $(CXX) endif -ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) - override FC:=ccache $(FC) -endif +###ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) +### override FC:=ccache $(FC) +###endif # Load additional dependencies of the bias module, if present ifeq (,$(wildcard ../bias_dependencies)) @@ -46,34 +49,25 @@ else MADLOOP_LIB = endif -LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias - -processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') -CUDACPP_MAKEFILE=cudacpp.mk -# NB1 Using ":=" below instead of "=" is much faster (it only runs the subprocess once instead of many times) -# NB2 Use '|&' in CUDACPP_BUILDDIR to avoid confusing errors about googletest #507 -# NB3 Do not add a comment inlined "CUDACPP_BUILDDIR=$(shell ...) # comment" as otherwise a trailing space is included... -# NB4 The variables relevant to the cudacpp Makefile must be explicitly passed to $(shell...) 
-CUDACPP_MAKEENV:=$(shell echo '$(.VARIABLES)' | tr " " "\n" | egrep "(USEBUILDDIR|AVX|FPTYPE|HELINL|HRDCOD)") -###$(info CUDACPP_MAKEENV=$(CUDACPP_MAKEENV)) -###$(info $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))")) -CUDACPP_BUILDDIR:=$(shell $(MAKE) $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))") -f $(CUDACPP_MAKEFILE) -pn 2>&1 | awk '/Building/{print $$3}' | sed s/BUILDDIR=//) -ifeq ($(CUDACPP_BUILDDIR),) -$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) -else -$(info CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)') -endif -CUDACPP_COMMONLIB=mg5amc_common -CUDACPP_CXXLIB=mg5amc_$(processid_short)_cpp -CUDACPP_CULIB=mg5amc_$(processid_short)_cuda - +LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias LIBS = $(LIBDIR)libbias.$(libext) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(MADLOOP_LIB) $(LOOP_LIBS) ifneq ("$(wildcard ../../Source/RUNNING)","") LINKLIBS += -lrunning - LIBS += $(LIBDIR)librunning.$(libext) + LIBS += $(LIBDIR)librunning.$(libext) endif +SOURCEDIR_GUARD:=../../Source/.timestamp_guard +# We use $(SOURCEDIR_GUARD) to figure out if Source is out of date. The Source makefile doesn't correctly +# update all files, so we need a proxy that is updated every time we run "$(MAKE) -C ../../Source". 
+$(SOURCEDIR_GUARD) ../../Source/discretesampler.mod &: ../../Source/*.f ../../Cards/param_card.dat ../../Cards/run_card.dat +ifneq ($(shell which flock 2>/dev/null),) + flock ../../Source/.lock -c "$(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD)" +else + $(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD) +endif + +$(LIBS): $(SOURCEDIR_GUARD) # Source files @@ -91,82 +85,83 @@ PROCESS= myamp.o genps.o unwgt.o setcuts.o get_color.o \ DSIG=driver.o $(patsubst %.f, %.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) DSIG_cudacpp=driver_cudacpp.o $(patsubst %.f, %_cudacpp.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) -SYMMETRY = symmetry.o idenparts.o +SYMMETRY = symmetry.o idenparts.o -# Binaries +# cudacpp targets: +CUDACPP_MAKEFILE := cudacpp.mk +ifneq (,$(wildcard $(CUDACPP_MAKEFILE))) +include $(CUDACPP_MAKEFILE) +endif -ifeq ($(UNAME),Darwin) -LDFLAGS += -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) -LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" -else -LDFLAGS += -Wl,--no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +ifeq ($(CUDACPP_BUILDDIR),) +$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) 
endif +CUDACPP_COMMONLIB=mg5amc_common +CUDACPP_CXXLIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so +CUDACPP_CULIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so -all: $(PROG)_fortran $(CUDACPP_BUILDDIR)/$(PROG)_cpp # also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (#503) +# Set up OpenMP if supported +OMPFLAGS ?= -fopenmp ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp LINKLIBS += -liomp5 # see #578 LINKLIBS += -lintlc # undefined reference to `_intel_fast_memcpy' else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -override OMPFLAGS = -fopenmp $(CUDACPP_BUILDDIR)/$(PROG)_cpp: LINKLIBS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -override OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang -else -override OMPFLAGS = -fopenmp +OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang endif -$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o - $(FC) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) - -$(LIBS): .libs -.libs: ../../Cards/param_card.dat ../../Cards/run_card.dat - cd ../../Source; make - touch $@ +# Binaries -$(CUDACPP_BUILDDIR)/.cudacpplibs: - $(MAKE) -f $(CUDACPP_MAKEFILE) - touch $@ +$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) # On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables ($ORIGIN on Linux) # On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary -ifeq ($(UNAME_S),Darwin) - override LIBFLAGSRPATH = -else ifeq ($(USEBUILDDIR),1) 
- override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' -else - override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/$(LIBDIR)' +ifneq ($(UNAME_S),Darwin) + LIBFLAGSRPATH := -Wl,-rpath,'$$ORIGIN:$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' endif -.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link +.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link madevent_cppnone_link madevent_cppsse4_link madevent_cppavx2_link madevent_cpp512y_link madevent_cpp512z_link clean cleanall cleansrc madevent_fortran_link: $(PROG)_fortran rm -f $(PROG) ln -s $(PROG)_fortran $(PROG) -madevent_cpp_link: $(CUDACPP_BUILDDIR)/$(PROG)_cpp - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) +madevent_cppnone_link: AVX=none +madevent_cppnone_link: cppnone + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -madevent_cuda_link: $(CUDACPP_BUILDDIR)/$(PROG)_cuda - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) +madevent_cppavx2_link: AVX=avx2 +madevent_cppavx2_link: cppavx2 + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512y_link: AVX=512y +madevent_cpp512y_link: cppavx512y + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512z_link: AVX=512z +madevent_cpp512z_link: cppavx512z + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -# Building $(PROG)_cpp also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (improved patch for cpp-only builds #503) -$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o $(CUDACPP_BUILDDIR)/.cudacpplibs - $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CXXLIB) $(LIBFLAGSRPATH) $(LDFLAGS) - if [ -f $(LIBDIR)/$(CUDACPP_BUILDDIR)/lib$(CUDACPP_CULIB).* ]; then $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROCESS) $(DSIG_cudacpp) 
auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CULIB) $(LIBFLAGSRPATH) $(LDFLAGS); fi +madevent_cuda_link: AVX=cuda +madevent_cuda_link: cuda + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) -$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(CUDACPP_BUILDDIR)/$(PROG)_cpp +$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(LIBS) $(CUDACPP_CXXLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) + +$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(LIBS) $(CUDACPP_CULIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) counters.o: counters.cc timer.h - $(CXX) $(CXXFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ ompnumthreads.o: ompnumthreads.cc ompnumthreads.h - $(CXX) -I. $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) $(OMPFLAGS) @@ -174,27 +169,14 @@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) gensym: $(SYMMETRY) configs.inc $(LIBS) $(FC) -o gensym $(SYMMETRY) -L$(LIBDIR) $(LINKLIBS) $(LDFLAGS) -###ifeq (,$(wildcard fbridge.inc)) # Pointless: fbridge.inc always exists as this is the cudacpp-modified makefile! 
-###$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat -### cd ../../Source/MODEL; make -### -###$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat -### cd ../../Source; make -### -###$(LIBDIR)libpdf.$(libext): -### cd ../../Source/PDF; make -### -###$(LIBDIR)libgammaUPC.$(libext): -### cd ../../Source/PDF/gammaUPC; make -###endif # Add source so that the compiler finds the DiscreteSampler module. $(MATRIX): %.o: %.f - $(FC) $(FFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC -%.o: %.f - $(FC) $(FFLAGS) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC + $(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC +%.o $(CUDACPP_BUILDDIR)/%.o: %.f + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -I../../Source/ -I../../Source/PDF/gammaUPC -c $< -o $@ %_cudacpp.o: %.f - $(FC) $(FFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ # Dependencies @@ -215,60 +197,42 @@ unwgt.o: genps.inc nexternal.inc symswap.inc cluster.inc run.inc message.inc \ initcluster.o: message.inc # Extra dependencies on discretesampler.mod +../../Source/discretesampler.mod: ../../Source/DiscreteSampler.f -auto_dsig.o: .libs -driver.o: .libs -driver_cudacpp.o: .libs -$(MATRIX): .libs -genps.o: .libs +auto_dsig.o: ../../Source/discretesampler.mod +driver.o: ../../Source/discretesampler.mod +driver_cudacpp.o: ../../Source/discretesampler.mod +$(MATRIX): ../../Source/discretesampler.mod +genps.o: ../../Source/discretesampler.mod # Cudacpp avxall targets -UNAME_P := $(shell uname -p) ifeq ($(UNAME_P),ppc64le) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else ifeq ($(UNAME_P),arm) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else -avxall: avxnone avxsse4 avxavx2 avx512y avx512z +cppall: cppnative cppnone cppsse4 cppavx2 cppavx512y cppavx512z endif -avxnone: $(PROG)_fortran $(DSIG_cudacpp) - @echo 
- $(MAKE) USEBUILDDIR=1 AVX=none - -avxsse4: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 - -avxavx2: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 - -avx512y: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y - -avx512z: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z - -###endif - -# Clean (NB: 'make clean' in Source calls 'make clean' in all P*) +ALL: cppall cuda -clean: # Clean builds: fortran in this Pn; cudacpp executables for one AVX in this Pn - $(RM) *.o gensym $(PROG) $(PROG)_fortran $(PROG)_forhel $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(CUDACPP_BUILDDIR)/$(PROG)_cuda +# Clean all architecture-specific builds: +clean: + $(RM) *.o gensym $(PROG) $(PROG)_* + $(RM) -rf build.*/*{.o,.so,.exe,.dylib,madevent_*} + @for dir in build.*; do if [ -z "$$(ls -A $${dir})" ]; then rm -r $${dir}; else echo "Not cleaning $${dir}; not empty"; fi; done -cleanavxs: clean # Clean builds: fortran in this Pn; cudacpp for all AVX in this Pn and in src - $(MAKE) -f $(CUDACPP_MAKEFILE) cleanall - rm -f $(CUDACPP_BUILDDIR)/.cudacpplibs - rm -f .libs +cleanall: cleansrc + for PROCESS in ../P[0-9]*; do $(MAKE) -C $${PROCESS} clean; done -cleanall: # Clean builds: fortran in all P* and in Source; cudacpp for all AVX in all P* and in src - make -C ../../Source cleanall - rm -rf $(LIBDIR)libbias.$(libext) - rm -f ../../Source/*.mod ../../Source/*/*.mod +# Clean one architecture-specific build +clean%: + $(RM) -r build.$*_* -distclean: cleanall # Clean all fortran and cudacpp builds as well as the googletest installation - $(MAKE) -f $(CUDACPP_MAKEFILE) distclean +# Clean common source directories (interferes with other P*) +cleansrc: + make -C ../../Source clean + $(RM) -f $(SOURCEDIR_GUARD) ../../Source/{*.mod,.lock} ../../Source/*/*.mod + $(RM) -r $(LIBDIR)libbias.$(libext) + if [ -d ../../src ]; then $(MAKE) -C ../../src -f cudacpp_src.mk clean; fi diff --git 
a/epochX/cudacpp/gg_ttg.mad/SubProcesses/runTest.cc b/epochX/cudacpp/gg_ttg.mad/SubProcesses/runTest.cc index d4a760a71b..6c77775fb2 100644 --- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/runTest.cc +++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/runTest.cc @@ -243,18 +243,20 @@ struct CUDATest : public CUDA_CPU_TestBase // Use two levels of macros to force stringification at the right level // (see https://gcc.gnu.org/onlinedocs/gcc-3.0.1/cpp_3.html#SEC17 and https://stackoverflow.com/a/3419392) // Google macro is in https://github.com/google/googletest/blob/master/googletest/include/gtest/gtest-param-test.h +/* clang-format off */ #define TESTID_CPU( s ) s##_CPU #define XTESTID_CPU( s ) TESTID_CPU( s ) #define MG_INSTANTIATE_TEST_SUITE_CPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); #define TESTID_GPU( s ) s##_GPU #define XTESTID_GPU( s ) TESTID_GPU( s ) #define MG_INSTANTIATE_TEST_SUITE_GPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); +/* clang-format on */ #ifdef __CUDACC__ MG_INSTANTIATE_TEST_SUITE_GPU( XTESTID_GPU( MG_EPOCH_PROCESS_ID ), MadgraphTest ); diff --git a/epochX/cudacpp/gg_ttg.mad/SubProcesses/testxxx.cc b/epochX/cudacpp/gg_ttg.mad/SubProcesses/testxxx.cc index 3361fe5aa9..1d315f6d75 100644 --- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/testxxx.cc +++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/testxxx.cc @@ -40,7 +40,7 @@ namespace mg5amcCpu { std::string FPEhandlerMessage = "unknown"; int FPEhandlerIevt = -1; - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef 
__CUDACC__ std::cerr << "Floating Point Exception (GPU): '" << FPEhandlerMessage << "' ievt=" << FPEhandlerIevt << std::endl; @@ -71,11 +71,10 @@ TEST( XTESTID( MG_EPOCH_PROCESS_ID ), testxxx ) constexpr bool testEvents = !dumpEvents; // run the test? constexpr fptype toleranceXXXs = std::is_same::value ? 1.E-15 : 1.E-5; // Constant parameters - constexpr int neppM = MemoryAccessMomenta::neppM; // AOSOA layout constexpr int np4 = CPPProcess::np4; - const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') - assert( nevt % neppM == 0 ); // nevt must be a multiple of neppM - assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV + const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') + assert( nevt % MemoryAccessMomenta::neppM == 0 ); // nevt must be a multiple of neppM + assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV // Fill in the input momenta #ifdef __CUDACC__ mg5amcGpu::PinnedHostBufferMomenta hstMomenta( nevt ); // AOSOA[npagM][npar=4][np4=4][neppM] diff --git a/epochX/cudacpp/gg_ttg.mad/bin/internal/banner.py b/epochX/cudacpp/gg_ttg.mad/bin/internal/banner.py index bd1517985f..b408679c2f 100755 --- a/epochX/cudacpp/gg_ttg.mad/bin/internal/banner.py +++ b/epochX/cudacpp/gg_ttg.mad/bin/internal/banner.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. 
# # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,7 +53,7 @@ MADEVENT = False import madgraph.various.misc as misc import madgraph.iolibs.file_writers as file_writers - import madgraph.iolibs.files as files + import madgraph.iolibs.files as files import models.check_param_card as param_card_reader from madgraph import MG5DIR, MadGraph5Error, InvalidCmd @@ -80,36 +80,36 @@ class Banner(dict): 'mgproccard': 'MGProcCard', 'mgruncard': 'MGRunCard', 'ma5card_parton' : 'MA5Card_parton', - 'ma5card_hadron' : 'MA5Card_hadron', + 'ma5card_hadron' : 'MA5Card_hadron', 'mggenerationinfo': 'MGGenerationInfo', 'mgpythiacard': 'MGPythiaCard', 'mgpgscard': 'MGPGSCard', 'mgdelphescard': 'MGDelphesCard', 'mgdelphestrigger': 'MGDelphesTrigger', 'mgshowercard': 'MGShowerCard' } - + forbid_cdata = ['initrwgt'] - + def __init__(self, banner_path=None): """ """ if isinstance(banner_path, Banner): dict.__init__(self, banner_path) self.lhe_version = banner_path.lhe_version - return + return else: dict.__init__(self) - + #Look at the version if MADEVENT: self['mgversion'] = '#%s\n' % open(pjoin(MEDIR, 'MGMEVersion.txt')).read() else: info = misc.get_pkg_info() self['mgversion'] = info['version']+'\n' - + self.lhe_version = None - + if banner_path: self.read_banner(banner_path) @@ -123,7 +123,7 @@ def __init__(self, banner_path=None): 'mgruncard':'run_card.dat', 'mgpythiacard':'pythia_card.dat', 'mgpgscard' : 'pgs_card.dat', - 'mgdelphescard':'delphes_card.dat', + 'mgdelphescard':'delphes_card.dat', 'mgdelphestrigger':'delphes_trigger.dat', 'mg5proccard':'proc_card_mg5.dat', 'mgproccard': 'proc_card.dat', @@ -137,10 +137,10 @@ def __init__(self, banner_path=None): 'mgshowercard':'shower_card.dat', 'pythia8':'pythia8_card.dat', 'ma5card_parton':'madanalysis5_parton_card.dat', - 'ma5card_hadron':'madanalysis5_hadron_card.dat', + 'ma5card_hadron':'madanalysis5_hadron_card.dat', 'run_settings':'' } - + def read_banner(self, input_path): """read a banner""" 
@@ -151,7 +151,7 @@ def read_banner(self, input_path): def split_iter(string): return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL)) input_path = split_iter(input_path) - + text = '' store = False for line in input_path: @@ -170,13 +170,13 @@ def split_iter(string): text += line else: text += '%s%s' % (line, '\n') - - #reaching end of the banner in a event file avoid to read full file + + #reaching end of the banner in a event file avoid to read full file if "
" in line: break elif "" in line: break - + def __getattribute__(self, attr): """allow auto-build for the run_card/param_card/... """ try: @@ -187,23 +187,23 @@ def __getattribute__(self, attr): return self.charge_card(attr) - + def change_lhe_version(self, version): """change the lhe version associate to the banner""" - + version = float(version) if version < 3: version = 1 elif version > 3: raise Exception("Not Supported version") self.lhe_version = version - + def get_cross(self, witherror=False): """return the cross-section of the file""" if "init" not in self: raise Exception - + text = self["init"].split('\n') cross = 0 error = 0 @@ -217,13 +217,13 @@ def get_cross(self, witherror=False): return cross else: return cross, math.sqrt(error) - + def scale_init_cross(self, ratio): """modify the init information with the associate scale""" assert "init" in self - + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -231,29 +231,29 @@ def scale_init_cross(self, ratio): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break pid = int(pid) - + line = " %+13.7e %+13.7e %+13.7e %i" % \ (ratio*float(xsec), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + def get_pdg_beam(self): """return the pdg of each beam""" - + assert "init" in self - + all_lines = self["init"].split('\n') pdg1,pdg2,_ = all_lines[0].split(None, 2) return int(pdg1), int(pdg2) - + def load_basic(self, medir): """ Load the proc_card /param_card and run_card """ - + self.add(pjoin(medir,'Cards', 'param_card.dat')) self.add(pjoin(medir,'Cards', 'run_card.dat')) if os.path.exists(pjoin(medir, 'SubProcesses', 'procdef_mg5.dat')): @@ -261,29 +261,29 @@ def load_basic(self, medir): self.add(pjoin(medir,'Cards', 'proc_card_mg5.dat')) else: self.add(pjoin(medir,'Cards', 'proc_card.dat')) - + def change_seed(self, 
seed): """Change the seed value in the banner""" # 0 = iseed p = re.compile(r'''^\s*\d+\s*=\s*iseed''', re.M) new_seed_str = " %s = iseed" % seed self['mgruncard'] = p.sub(new_seed_str, self['mgruncard']) - + def add_generation_info(self, cross, nb_event): """add info on MGGeneration""" - + text = """ # Number of Events : %s # Integrated weight (pb) : %s """ % (nb_event, cross) self['MGGenerationInfo'] = text - + ############################################################################ # SPLIT BANNER ############################################################################ def split(self, me_dir, proc_card=True): """write the banner in the Cards directory. - proc_card argument is present to avoid the overwrite of proc_card + proc_card argument is present to avoid the overwrite of proc_card information""" for tag, text in self.items(): @@ -305,37 +305,37 @@ def check_pid(self, pid2label): """special routine removing width/mass of particles not present in the model This is usefull in case of loop model card, when we want to use the non loop model.""" - + if not hasattr(self, 'param_card'): self.charge_card('slha') - + for tag in ['mass', 'decay']: block = self.param_card.get(tag) for data in block: pid = data.lhacode[0] - if pid not in list(pid2label.keys()): + if pid not in list(pid2label.keys()): block.remove((pid,)) def get_lha_strategy(self): """get the lha_strategy: how the weight have to be handle by the shower""" - + if not self["init"]: raise Exception("No init block define") - + data = self["init"].split('\n')[0].split() if len(data) != 10: misc.sprint(len(data), self['init']) raise Exception("init block has a wrong format") return int(float(data[-2])) - + def set_lha_strategy(self, value): """set the lha_strategy: how the weight have to be handle by the shower""" - + if not (-4 <= int(value) <= 4): six.reraise(Exception, "wrong value for lha_strategy", value) if not self["init"]: raise Exception("No init block define") - + all_lines = 
self["init"].split('\n') data = all_lines[0].split() if len(data) != 10: @@ -351,13 +351,13 @@ def modify_init_cross(self, cross, allow_zero=False): assert isinstance(cross, dict) # assert "all" in cross assert "init" in self - + cross = dict(cross) for key in cross.keys(): if isinstance(key, str) and key.isdigit() and int(key) not in cross: cross[int(key)] = cross[key] - - + + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -365,7 +365,7 @@ def modify_init_cross(self, cross, allow_zero=False): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break @@ -383,23 +383,23 @@ def modify_init_cross(self, cross, allow_zero=False): (float(cross[pid]), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + ############################################################################ # WRITE BANNER ############################################################################ def write(self, output_path, close_tag=True, exclude=[]): """write the banner""" - + if isinstance(output_path, str): ff = open(output_path, 'w') else: ff = output_path - + if MADEVENT: header = open(pjoin(MEDIR, 'Source', 'banner_header.txt')).read() else: header = open(pjoin(MG5DIR,'Template', 'LO', 'Source', 'banner_header.txt')).read() - + if not self.lhe_version: self.lhe_version = self.get('run_card', 'lhe_version', default=1.0) if float(self.lhe_version) < 3: @@ -412,7 +412,7 @@ def write(self, output_path, close_tag=True, exclude=[]): for tag in [t for t in self.ordered_items if t in list(self.keys())]+ \ [t for t in self.keys() if t not in self.ordered_items]: - if tag in ['init'] or tag in exclude: + if tag in ['init'] or tag in exclude: continue capitalized_tag = self.capitalized_items[tag] if tag in self.capitalized_items else tag start_data, stop_data = '', '' @@ -422,19 +422,19 @@ def write(self, 
output_path, close_tag=True, exclude=[]): stop_data = ']]>\n' out = '<%(tag)s>%(start_data)s\n%(text)s\n%(stop_data)s\n' % \ {'tag':capitalized_tag, 'text':self[tag].strip(), - 'start_data': start_data, 'stop_data':stop_data} + 'start_data': start_data, 'stop_data':stop_data} try: ff.write(out) except: ff.write(out.encode('utf-8')) - - + + if not '/header' in exclude: out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) if 'init' in self and not 'init' in exclude: text = self['init'] @@ -444,22 +444,22 @@ def write(self, output_path, close_tag=True, exclude=[]): ff.write(out) except: ff.write(out.encode('utf-8')) - + if close_tag: - out = '\n' + out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) return ff - - + + ############################################################################ # BANNER ############################################################################ def add(self, path, tag=None): """Add the content of the file to the banner""" - + if not tag: card_name = os.path.basename(path) if 'param_card' in card_name: @@ -505,33 +505,33 @@ def add_text(self, tag, text): if tag == 'param_card': tag = 'slha' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' - + self[tag.lower()] = text - - + + def charge_card(self, tag): """Build the python object associated to the card""" - + if tag in ['param_card', 'param']: tag = 'slha' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag - + if tag == 
'slha': param_card = self[tag].split('\n') self.param_card = param_card_reader.ParamCard(param_card) @@ -544,56 +544,56 @@ def charge_card(self, tag): self.proc_card = ProcCard(proc_card) return self.proc_card elif tag =='mgshowercard': - shower_content = self[tag] + shower_content = self[tag] if MADEVENT: import internal.shower_card as shower_card else: import madgraph.various.shower_card as shower_card self.shower_card = shower_card.ShowerCard(shower_content, True) - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.shower_card.testing = False return self.shower_card elif tag =='foanalyse': - analyse_content = self[tag] + analyse_content = self[tag] if MADEVENT: import internal.FO_analyse_card as FO_analyse_card else: import madgraph.various.FO_analyse_card as FO_analyse_card - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True) self.FOanalyse_card.testing = False return self.FOanalyse_card - + def get_detail(self, tag, *arg, **opt): """return a specific """ - + if tag in ['param_card', 'param']: tag = 'slha' attr_tag = 'param_card' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], '%s not recognized' % tag - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) + 
self.charge_card(attr_tag) card = getattr(self, attr_tag) if len(arg) == 0: @@ -613,7 +613,7 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 2 and tag == 'slha': try: return card[arg[0]].get(arg[1:]) @@ -621,15 +621,15 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 0: return card else: raise Exception("Unknow command") - + #convenient alias get = get_detail - + def set(self, tag, *args): """modify one of the cards""" @@ -637,27 +637,27 @@ def set(self, tag, *args): tag = 'slha' attr_tag = 'param_card' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized' - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) - + self.charge_card(attr_tag) + card = getattr(self, attr_tag) if len(args) ==2: if tag == 'mg5proccard': @@ -666,20 +666,20 @@ def set(self, tag, *args): card[args[0]] = args[1] else: card[args[:-1]] = args[-1] - - + + @misc.multiple_try() def add_to_file(self, path, seed=None, out=None): """Add the banner to a file and change the associate seed in the banner""" if seed is not None: self.set("run_card", "iseed", seed) - + if not out: path_out = "%s.tmp" % path else: path_out = out - + ff = self.write(path_out, close_tag=False, exclude=['MGGenerationInfo', '/header', 'init']) ff.write("## END BANNER##\n") @@ -698,44 +698,44 @@ def add_to_file(self, path, seed=None, out=None): files.mv(path_out, path) - + def 
split_banner(banner_path, me_dir, proc_card=True): """a simple way to split a banner""" - + banner = Banner(banner_path) banner.split(me_dir, proc_card) - + def recover_banner(results_object, level, run=None, tag=None): """as input we receive a gen_crossxhtml.AllResults object. This define the current banner and load it """ - + if not run: - try: - _run = results_object.current['run_name'] - _tag = results_object.current['tag'] + try: + _run = results_object.current['run_name'] + _tag = results_object.current['tag'] except Exception: return Banner() else: _run = run if not tag: - try: - _tag = results_object[run].tags[-1] + try: + _tag = results_object[run].tags[-1] except Exception as error: if os.path.exists( pjoin(results_object.path,'Events','%s_banner.txt' % (run))): tag = None else: - return Banner() + return Banner() else: _tag = tag - - path = results_object.path - if tag: + + path = results_object.path + if tag: banner_path = pjoin(path,'Events',run,'%s_%s_banner.txt' % (run, tag)) else: banner_path = pjoin(results_object.path,'Events','%s_banner.txt' % (run)) - + if not os.path.exists(banner_path): if level != "parton" and tag != _tag: return recover_banner(results_object, level, _run, results_object[_run].tags[0]) @@ -754,12 +754,12 @@ def recover_banner(results_object, level, run=None, tag=None): return Banner(lhe.banner) # security if the banner was remove (or program canceled before created it) - return Banner() - + return Banner() + banner = Banner(banner_path) - - - + + + if level == 'pythia': if 'mgpythiacard' in banner: del banner['mgpythiacard'] @@ -768,13 +768,13 @@ def recover_banner(results_object, level, run=None, tag=None): if tag in banner: del banner[tag] return banner - + class InvalidRunCard(InvalidCmd): pass class ProcCard(list): """Basic Proccard object""" - + history_header = \ '#************************************************************\n' + \ '#* MadGraph5_aMC@NLO *\n' + \ @@ -798,10 +798,10 @@ class ProcCard(list): '#* run as 
./bin/mg5_aMC filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - - - - + + + + def __init__(self, init=None): """ initialize a basic proc_card""" self.info = {'model': 'sm', 'generate':None, @@ -810,13 +810,13 @@ def __init__(self, init=None): if init: self.read(init) - + def read(self, init): """read the proc_card and save the information""" - + if isinstance(init, str): #path to file init = open(init, 'r') - + store_line = '' for line in init: line = line.rstrip() @@ -828,28 +828,28 @@ def read(self, init): store_line = "" if store_line: raise Exception("WRONG CARD FORMAT") - - + + def move_to_last(self, cmd): """move an element to the last history.""" for line in self[:]: if line.startswith(cmd): self.remove(line) list.append(self, line) - + def append(self, line): """"add a line in the proc_card perform automatically cleaning""" - + line = line.strip() cmds = line.split() if len(cmds) == 0: return - + list.append(self, line) - + # command type: cmd = cmds[0] - + if cmd == 'output': # Remove previous outputs from history self.clean(allow_for_removal = ['output'], keep_switch=True, @@ -875,7 +875,7 @@ def append(self, line): elif cmds[1] == 'proc_v4': #full cleaning self[:] = [] - + def clean(self, to_keep=['set','add','load'], remove_bef_last=None, @@ -884,13 +884,13 @@ def clean(self, to_keep=['set','add','load'], keep_switch=False): """Remove command in arguments from history. All command before the last occurrence of 'remove_bef_last' - (including it) will be removed (but if another options tells the opposite). + (including it) will be removed (but if another options tells the opposite). 'to_keep' is a set of line to always keep. - 'to_remove' is a set of line to always remove (don't care about remove_bef_ + 'to_remove' is a set of line to always remove (don't care about remove_bef_ status but keep_switch acts.). 
- if 'allow_for_removal' is define only the command in that list can be + if 'allow_for_removal' is define only the command in that list can be remove of the history for older command that remove_bef_lb1. all parameter - present in to_remove are always remove even if they are not part of this + present in to_remove are always remove even if they are not part of this list. keep_switch force to keep the statement remove_bef_??? which changes starts the removal mode. @@ -900,8 +900,8 @@ def clean(self, to_keep=['set','add','load'], if __debug__ and allow_for_removal: for arg in to_keep: assert arg not in allow_for_removal - - + + nline = -1 removal = False #looping backward @@ -912,7 +912,7 @@ def clean(self, to_keep=['set','add','load'], if not removal and remove_bef_last: if self[nline].startswith(remove_bef_last): removal = True - switch = True + switch = True # if this is the switch and is protected pass to the next element if switch and keep_switch: @@ -923,12 +923,12 @@ def clean(self, to_keep=['set','add','load'], if any([self[nline].startswith(arg) for arg in to_remove]): self.pop(nline) continue - + # Only if removal mode is active! 
if removal: if allow_for_removal: # Only a subset of command can be removed - if any([self[nline].startswith(arg) + if any([self[nline].startswith(arg) for arg in allow_for_removal]): self.pop(nline) continue @@ -936,10 +936,10 @@ def clean(self, to_keep=['set','add','load'], # All command have to be remove but protected self.pop(nline) continue - + # update the counter to pass to the next element nline -= 1 - + def get(self, tag, default=None): if isinstance(tag, int): list.__getattr__(self, tag) @@ -954,32 +954,32 @@ def get(self, tag, default=None): except ValueError: name, content = line[7:].split(None,1) out.append((name, content)) - return out + return out else: return self.info[tag] - + def write(self, path): """write the proc_card to a given path""" - + fsock = open(path, 'w') fsock.write(self.history_header) for line in self: while len(line) > 70: - sub, line = line[:70]+"\\" , line[70:] + sub, line = line[:70]+"\\" , line[70:] fsock.write(sub+"\n") else: fsock.write(line+"\n") - -class InvalidCardEdition(InvalidCmd): pass - + +class InvalidCardEdition(InvalidCmd): pass + class ConfigFile(dict): """ a class for storing/dealing with input file. - """ + """ def __init__(self, finput=None, **opt): """initialize a new instance. input can be an instance of MadLoopParam, - a file, a path to a file, or simply Nothing""" - + a file, a path to a file, or simply Nothing""" + if isinstance(finput, self.__class__): dict.__init__(self) for key in finput.__dict__: @@ -989,7 +989,7 @@ def __init__(self, finput=None, **opt): return else: dict.__init__(self) - + # Initialize it with all the default value self.user_set = set() self.auto_set = set() @@ -1000,15 +1000,15 @@ def __init__(self, finput=None, **opt): self.comments = {} # comment associated to parameters. can be display via help message # store the valid options for a given parameter. 
self.allowed_value = {} - + self.default_setup() self.plugin_input(finput) - + # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, **opt) - + @@ -1028,7 +1028,7 @@ def __add__(self, other): base = self.__class__(self) #base = copy.copy(self) base.update((key.lower(),value) for key, value in other.items()) - + return base def __radd__(self, other): @@ -1036,26 +1036,26 @@ def __radd__(self, other): new = copy.copy(other) new.update((key, value) for key, value in self.items()) return new - + def __contains__(self, key): return dict.__contains__(self, key.lower()) def __iter__(self): - + for name in super(ConfigFile, self).__iter__(): yield self.lower_to_case[name.lower()] - - + + #iter = super(ConfigFile, self).__iter__() #misc.sprint(iter) #return (self.lower_to_case[name] for name in iter) - + def keys(self): return [name for name in self] - + def items(self): return [(name,self[name]) for name in self] - + @staticmethod def warn(text, level, raiseerror=False): """convenient proxy to raiseerror/print warning""" @@ -1071,11 +1071,11 @@ def warn(text, level, raiseerror=False): log = lambda t: logger.log(level, t) elif level: log = level - + return log(text) def post_set(self, name, value, change_userdefine, raiseerror): - + if value is None: value = self[name] @@ -1087,25 +1087,25 @@ def post_set(self, name, value, change_userdefine, raiseerror): return getattr(self, 'post_set_%s' % name)(value, change_userdefine, raiseerror) else: raise - + def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): """set the attribute and set correctly the type if the value is a string. change_userdefine on True if we have to add the parameter in user_set """ - + if not len(self): #Should never happen but when deepcopy/pickle self.__init__() - + name = name.strip() - lower_name = name.lower() - + lower_name = name.lower() + # 0. 
check if this parameter is a system only one if change_userdefine and lower_name in self.system_only: text='%s is a private entry which can not be modify by the user. Keep value at %s' % (name,self[name]) self.warn(text, 'critical', raiseerror) return - + #1. check if the parameter is set to auto -> pass it to special if lower_name in self: targettype = type(dict.__getitem__(self, lower_name)) @@ -1115,22 +1115,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): self.user_set.remove(lower_name) #keep old value. self.post_set(lower_name, 'auto', change_userdefine, raiseerror) - return + return elif lower_name in self.auto_set: self.auto_set.remove(lower_name) - + # 2. Find the type of the attribute that we want if lower_name in self.list_parameter: targettype = self.list_parameter[lower_name] - - - + + + if isinstance(value, str): # split for each comma/space value = value.strip() if value.startswith('[') and value.endswith(']'): value = value[1:-1] - #do not perform split within a " or ' block + #do not perform split within a " or ' block data = re.split(r"((? bad input dropped.append(val) - + if not new_values: text= "value '%s' for entry '%s' is not valid. Preserving previous value: '%s'.\n" \ % (value, name, self[lower_name]) text += "allowed values are any list composed of the following entries: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) - return self.warn(text, 'warning', raiseerror) - elif dropped: + return self.warn(text, 'warning', raiseerror) + elif dropped: text = "some value for entry '%s' are not valid. 
Invalid items are: '%s'.\n" \ % (name, dropped) text += "value will be set to %s" % new_values - text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) + text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) self.warn(text, 'warning') values = new_values # make the assignment - dict.__setitem__(self, lower_name, values) + dict.__setitem__(self, lower_name, values) if change_userdefine: self.user_set.add(lower_name) #check for specific action - return self.post_set(lower_name, None, change_userdefine, raiseerror) + return self.post_set(lower_name, None, change_userdefine, raiseerror) elif lower_name in self.dict_parameter: - targettype = self.dict_parameter[lower_name] + targettype = self.dict_parameter[lower_name] full_reset = True #check if we just update the current dict or not - + if isinstance(value, str): value = value.strip() # allowed entry: @@ -1209,7 +1209,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): # name , value => just add the entry # name value => just add the entry # {name1:value1, name2:value2} => full reset - + # split for each comma/space if value.startswith('{') and value.endswith('}'): new_value = {} @@ -1219,23 +1219,23 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): x, y = pair.split(':') x, y = x.strip(), y.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] new_value[x] = y value = new_value elif ',' in value: x,y = value.split(',') value = {x.strip():y.strip()} full_reset = False - + elif ':' in value: x,y = value.split(':') value = {x.strip():y.strip()} - full_reset = False + full_reset = False else: x,y = value.split() value = {x:y} - full_reset = False - + full_reset = False + if isinstance(value, dict): for key in value: value[key] = self.format_variable(value[key], targettype, name=name) @@ -1248,7 +1248,7 @@ def __setitem__(self, 
name, value, change_userdefine=False,raiseerror=False): if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - elif name in self: + elif name in self: targettype = type(self[name]) else: logger.debug('Trying to add argument %s in %s. ' % (name, self.__class__.__name__) +\ @@ -1256,22 +1256,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): suggestions = [k for k in self.keys() if k.startswith(name[0].lower())] if len(suggestions)>0: logger.debug("Did you mean one of the following: %s"%suggestions) - self.add_param(lower_name, self.format_variable(UnknownType(value), + self.add_param(lower_name, self.format_variable(UnknownType(value), UnknownType, name)) self.lower_to_case[lower_name] = name if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - + value = self.format_variable(value, targettype, name=name) #check that the value is allowed: if lower_name in self.allowed_value and '*' not in self.allowed_value[lower_name]: valid = False allowed = self.allowed_value[lower_name] - + # check if the current value is allowed or not (set valid to True) if value in allowed: - valid=True + valid=True elif isinstance(value, str): value = value.lower().strip() allowed = [str(v).lower() for v in allowed] @@ -1279,7 +1279,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): i = allowed.index(value) value = self.allowed_value[lower_name][i] valid=True - + if not valid: # act if not valid: text = "value '%s' for entry '%s' is not valid. 
Preserving previous value: '%s'.\n" \ @@ -1303,7 +1303,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, if __debug__: if lower_name in self: raise Exception("Duplicate case for %s in %s" % (name,self.__class__)) - + dict.__setitem__(self, lower_name, value) self.lower_to_case[lower_name] = name if isinstance(value, list): @@ -1318,12 +1318,12 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, elif isinstance(value, dict): allvalues = list(value.values()) if any([type(allvalues[0]) != type(v) for v in allvalues]): - raise Exception("All entry should have the same type") - self.dict_parameter[lower_name] = type(allvalues[0]) + raise Exception("All entry should have the same type") + self.dict_parameter[lower_name] = type(allvalues[0]) if '__type__' in value: del value['__type__'] dict.__setitem__(self, lower_name, value) - + if allowed and allowed != ['*']: self.allowed_value[lower_name] = allowed if lower_name in self.list_parameter: @@ -1333,8 +1333,8 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, assert value in allowed or '*' in allowed #elif isinstance(value, bool) and allowed != ['*']: # self.allowed_value[name] = [True, False] - - + + if system: self.system_only.add(lower_name) if comment: @@ -1342,7 +1342,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, def do_help(self, name): """return a minimal help for the parameter""" - + out = "## Information on parameter %s from class %s\n" % (name, self.__class__.__name__) if name.lower() in self: out += "## current value: %s (parameter should be of type %s)\n" % (self[name], type(self[name])) @@ -1351,7 +1351,7 @@ def do_help(self, name): else: out += "## Unknown for this class\n" if name.lower() in self.user_set: - out += "## This value is considered as being set by the user\n" + out += "## This value is considered as being set by the user\n" else: out += "## This value is considered as 
being set by the system\n" if name.lower() in self.allowed_value: @@ -1359,17 +1359,17 @@ def do_help(self, name): out += "Allowed value are: %s\n" % ','.join([str(p) for p in self.allowed_value[name.lower()]]) else: out += "Suggested value are : %s\n " % ','.join([str(p) for p in self.allowed_value[name.lower()] if p!='*']) - + logger.info(out) return out @staticmethod def guess_type_from_value(value): "try to guess the type of the string --do not use eval as it might not be safe" - + if not isinstance(value, str): return str(value.__class__.__name__) - + #use ast.literal_eval to be safe since value is untrusted # add a timeout to mitigate infinite loop, memory stack attack with misc.stdchannel_redirected(sys.stdout, os.devnull): @@ -1388,7 +1388,7 @@ def guess_type_from_value(value): @staticmethod def format_variable(value, targettype, name="unknown"): """assign the value to the attribute for the given format""" - + if isinstance(targettype, str): if targettype in ['str', 'int', 'float', 'bool']: targettype = eval(targettype) @@ -1412,7 +1412,7 @@ def format_variable(value, targettype, name="unknown"): (name, type(value), targettype, value)) else: raise InvalidCmd("Wrong input type for %s found %s and expecting %s for value %s" %\ - (name, type(value), targettype, value)) + (name, type(value), targettype, value)) else: if targettype != UnknownType: value = value.strip() @@ -1441,8 +1441,8 @@ def format_variable(value, targettype, name="unknown"): value = int(value) elif value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} - value =int(value[:-1]) * convert[value[-1]] - elif '/' in value or '*' in value: + value =int(value[:-1]) * convert[value[-1]] + elif '/' in value or '*' in value: try: split = re.split('(\*|/)',value) v = float(split[0]) @@ -1461,7 +1461,7 @@ def format_variable(value, targettype, name="unknown"): try: value = float(value.replace('d','e')) except ValueError: - raise InvalidCmd("%s can not be mapped to an 
integer" % value) + raise InvalidCmd("%s can not be mapped to an integer" % value) try: new_value = int(value) except ValueError: @@ -1471,7 +1471,7 @@ def format_variable(value, targettype, name="unknown"): value = new_value else: raise InvalidCmd("incorect input: %s need an integer for %s" % (value,name)) - + elif targettype == float: if value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} @@ -1496,33 +1496,33 @@ def format_variable(value, targettype, name="unknown"): value = v else: raise InvalidCmd("type %s is not handle by the card" % targettype) - + return value - - + + def __getitem__(self, name): - + lower_name = name.lower() if __debug__: if lower_name not in self: if lower_name in [key.lower() for key in self] : raise Exception("Some key are not lower case %s. Invalid use of the class!"\ % [key for key in self if key.lower() != key]) - + if lower_name in self.auto_set: return 'auto' - + return dict.__getitem__(self, name.lower()) - + get = __getitem__ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): """convenient way to change attribute. changeifuserset=False means that the value is NOT change is the value is not on default. - user=True, means that the value will be marked as modified by the user - (potentially preventing future change to the value) + user=True, means that the value will be marked as modified by the user + (potentially preventing future change to the value) """ # changeifuserset=False -> we need to check if the user force a value. 
@@ -1530,8 +1530,8 @@ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): if name.lower() in self.user_set: #value modified by the user -> do nothing return - self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) - + self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) + class RivetCard(ConfigFile): @@ -1706,7 +1706,7 @@ def setRelevantParamCard(self, f_params, f_relparams): yexec_dict = {} yexec_line = exec_line + "yaxis_relvar = " + self['yaxis_relvar'] exec(yexec_line, locals(), yexec_dict) - if self['yaxis_label'] == "": + if self['yaxis_label'] == "": self['yaxis_label'] = "yaxis_relvar" f_relparams.write("{0} = {1}\n".format(self['yaxis_label'], yexec_dict['yaxis_relvar'])) else: @@ -1715,11 +1715,11 @@ def setRelevantParamCard(self, f_params, f_relparams): class ProcCharacteristic(ConfigFile): """A class to handle information which are passed from MadGraph to the madevent - interface.""" - + interface.""" + def default_setup(self): """initialize the directory to the default value""" - + self.add_param('loop_induced', False) self.add_param('has_isr', False) self.add_param('has_fsr', False) @@ -1735,16 +1735,16 @@ def default_setup(self): self.add_param('pdg_initial1', [0]) self.add_param('pdg_initial2', [0]) self.add_param('splitting_types',[], typelist=str) - self.add_param('perturbation_order', [], typelist=str) - self.add_param('limitations', [], typelist=str) - self.add_param('hel_recycling', False) + self.add_param('perturbation_order', [], typelist=str) + self.add_param('limitations', [], typelist=str) + self.add_param('hel_recycling', False) self.add_param('single_color', True) - self.add_param('nlo_mixed_expansion', True) + self.add_param('nlo_mixed_expansion', True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if 
isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1752,49 +1752,49 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: if '#' in line: line = line.split('#',1)[0] if not line: continue - + if '=' in line: key, value = line.split('=',1) self[key.strip()] = value - + def write(self, outputpath): """write the file""" template ="# Information about the process #\n" template +="#########################################\n" - + fsock = open(outputpath, 'w') fsock.write(template) - + for key, value in self.items(): fsock.write(" %s = %s \n" % (key, value)) - - fsock.close() - + + fsock.close() + class GridpackCard(ConfigFile): """an object for the GridpackCard""" - + def default_setup(self): """default value for the GridpackCard""" - + self.add_param("GridRun", True) self.add_param("gevents", 2500) self.add_param("gseed", 1) - self.add_param("ngran", -1) - + self.add_param("ngran", -1) + def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1802,7 +1802,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -1812,19 +1812,19 @@ def read(self, finput): self[line[1].strip()] = line[0].replace('\'','').strip() def write(self, output_file, template=None): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'grid_card_default.dat') else: template = pjoin(MEDIR, 'Cards', 'grid_card_default.dat') - + text = 
"" - for line in open(template,'r'): + for line in open(template,'r'): nline = line.split('#')[0] nline = nline.split('!')[0] comment = line[len(nline):] @@ -1832,19 +1832,19 @@ def write(self, output_file, template=None): if len(nline) != 2: text += line elif nline[1].strip() in self: - text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) + text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) else: logger.info('Adding missing parameter %s to current run_card (with default value)' % nline[1].strip()) - text += line - + text += line + if isinstance(output_file, str): fsock = open(output_file,'w') else: fsock = output_file - + fsock.write(text) fsock.close() - + class PY8Card(ConfigFile): """ Implements the Pythia8 card.""" @@ -1868,7 +1868,7 @@ def add_default_subruns(self, type): def default_setup(self): """ Sets up the list of available PY8 parameters.""" - + # Visible parameters # ================== self.add_param("Main:numberOfEvents", -1) @@ -1877,11 +1877,11 @@ def default_setup(self): self.add_param("JetMatching:qCut", -1.0, always_write_to_card=False) self.add_param("JetMatching:doShowerKt",False,always_write_to_card=False) # -1 means that it is automatically set. - self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) + self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) # for CKKWL merging self.add_param("Merging:TMS", -1.0, always_write_to_card=False) self.add_param("Merging:Process", '', always_write_to_card=False) - # -1 means that it is automatically set. + # -1 means that it is automatically set. self.add_param("Merging:nJetMax", -1, always_write_to_card=False) # for both merging, chose whether to also consider different merging # scale values for the extra weights related to scale and PDF variations. 
@@ -1918,10 +1918,10 @@ def default_setup(self): comment='This allows to turn on/off hadronization alltogether.') self.add_param("partonlevel:mpi", True, hidden=True, always_write_to_card=False, comment='This allows to turn on/off MPI alltogether.') - self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, + self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, always_write_to_card=False, comment='This parameter is automatically set to True by MG5aMC when doing MLM merging with PY8.') - + # for MLM merging self.add_param("JetMatching:merge", False, hidden=True, always_write_to_card=False, comment='Specifiy if we are merging sample of different multiplicity.') @@ -1931,9 +1931,9 @@ def default_setup(self): comment='Value of the merging scale below which one does not even write the HepMC event.') self.add_param("JetMatching:doVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. in SysCalc).') - self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) + self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) self.add_param("JetMatching:setMad", False, hidden=True, always_write_to_card=False, - comment='Specify one must read inputs from the MadGraph banner.') + comment='Specify one must read inputs from the MadGraph banner.') self.add_param("JetMatching:coneRadius", 1.0, hidden=True, always_write_to_card=False) self.add_param("JetMatching:nQmatch",4,hidden=True, always_write_to_card=False) # for CKKWL merging (common with UMEPS, UNLOPS) @@ -1946,7 +1946,7 @@ def default_setup(self): self.add_param("Merging:applyVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. 
in SysCalc).') self.add_param("Merging:includeWeightInXsection", True, hidden=True, always_write_to_card=False, - comment='If turned off, then the option belows forces PY8 to keep the original weight.') + comment='If turned off, then the option belows forces PY8 to keep the original weight.') self.add_param("Merging:muRen", 91.188, hidden=True, always_write_to_card=False, comment='Set renormalization scales of the 2->2 process.') self.add_param("Merging:muFacInME", 91.188, hidden=True, always_write_to_card=False, @@ -1958,7 +1958,7 @@ def default_setup(self): # To be added in subruns for CKKWL self.add_param("Merging:mayRemoveDecayProducts", False, hidden=True, always_write_to_card=False) self.add_param("Merging:doKTMerging", False, hidden=True, always_write_to_card=False) - self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) + self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) self.add_param("Merging:doPTLundMerging", False, hidden=True, always_write_to_card=False) # Special Pythia8 paremeters useful to simplify the shower. @@ -1975,33 +1975,33 @@ def default_setup(self): # Add parameters controlling the subruns execution flow. # These parameters should not be part of PY8SubRun daughter. self.add_default_subruns('parameters') - + def __init__(self, *args, **opts): - # Parameters which are not printed in the card unless they are - # 'user_set' or 'system_set' or part of the + # Parameters which are not printed in the card unless they are + # 'user_set' or 'system_set' or part of the # self.hidden_params_to_always_print set. self.hidden_param = [] self.hidden_params_to_always_write = set() self.visible_params_to_always_write = set() # List of parameters that should never be written out given the current context. self.params_to_never_write = set() - + # Parameters which have been set by the system (i.e. 
MG5 itself during # the regular course of the shower interface) self.system_set = set() - + # Add attributes controlling the subruns execution flow. # These attributes should not be part of PY8SubRun daughter. self.add_default_subruns('attributes') - - # Parameters which have been set by the + + # Parameters which have been set by the super(PY8Card, self).__init__(*args, **opts) - def add_param(self, name, value, hidden=False, always_write_to_card=True, + def add_param(self, name, value, hidden=False, always_write_to_card=True, comment=None): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. The option 'hidden' decides whether the parameter should be visible to the user. The option 'always_write_to_card' decides whether it should @@ -2017,7 +2017,7 @@ def add_param(self, name, value, hidden=False, always_write_to_card=True, self.hidden_params_to_always_write.add(name) else: if always_write_to_card: - self.visible_params_to_always_write.add(name) + self.visible_params_to_always_write.add(name) if not comment is None: if not isinstance(comment, str): raise MadGraph5Error("Option 'comment' must be a string, not"+\ @@ -2036,7 +2036,7 @@ def add_subrun(self, py8_subrun): self.subruns[py8_subrun['Main:subrun']] = py8_subrun if not 'LHEFInputs:nSubruns' in self.user_set: self['LHEFInputs:nSubruns'] = max(self.subruns.keys()) - + def userSet(self, name, value, **opts): """Set an attribute of this card, following a user_request""" self.__setitem__(name, value, change_userdefine=True, **opts) @@ -2044,10 +2044,10 @@ def userSet(self, name, value, **opts): self.system_set.remove(name.lower()) def vetoParamWriteOut(self, name): - """ Forbid the writeout of a specific parameter of this card when the + """ Forbid the writeout of a specific parameter of this card when the "write" function will be invoked.""" 
self.params_to_never_write.add(name.lower()) - + def systemSet(self, name, value, **opts): """Set an attribute of this card, independently of a specific user request and only if not already user_set.""" @@ -2058,7 +2058,7 @@ def systemSet(self, name, value, **opts): if force or name.lower() not in self.user_set: self.__setitem__(name, value, change_userdefine=False, **opts) self.system_set.add(name.lower()) - + def MadGraphSet(self, name, value, **opts): """ Sets a card attribute, but only if it is absent or not already user_set.""" @@ -2068,18 +2068,18 @@ def MadGraphSet(self, name, value, **opts): force = False if name.lower() not in self or (force or name.lower() not in self.user_set): self.__setitem__(name, value, change_userdefine=False, **opts) - self.system_set.add(name.lower()) - + self.system_set.add(name.lower()) + def defaultSet(self, name, value, **opts): self.__setitem__(name, value, change_userdefine=False, **opts) - + @staticmethod def pythia8_formatting(value, formatv=None): """format the variable into pythia8 card convention. 
The type is detected by default""" if not formatv: if isinstance(value,UnknownType): - formatv = 'unknown' + formatv = 'unknown' elif isinstance(value, bool): formatv = 'bool' elif isinstance(value, int): @@ -2095,7 +2095,7 @@ def pythia8_formatting(value, formatv=None): formatv = 'str' else: assert formatv - + if formatv == 'unknown': # No formatting then return str(value) @@ -2116,7 +2116,7 @@ def pythia8_formatting(value, formatv=None): elif formatv == 'float': return '%.10e' % float(value) elif formatv == 'shortfloat': - return '%.3f' % float(value) + return '%.3f' % float(value) elif formatv == 'str': return "%s" % value elif formatv == 'list': @@ -2124,9 +2124,9 @@ def pythia8_formatting(value, formatv=None): return ','.join([PY8Card.pythia8_formatting(arg, 'shortfloat') for arg in value]) else: return ','.join([PY8Card.pythia8_formatting(arg) for arg in value]) - - def write(self, output_file, template, read_subrun=False, + + def write(self, output_file, template, read_subrun=False, print_only_visible=False, direct_pythia_input=False, add_missing=True): """ Write the card to output_file using a specific template. 
> 'print_only_visible' specifies whether or not the hidden parameters @@ -2143,28 +2143,28 @@ def write(self, output_file, template, read_subrun=False, or p.lower() in self.user_set] # Filter against list of parameters vetoed for write-out visible_param = [p for p in visible_param if p.lower() not in self.params_to_never_write] - + # Now the hidden param which must be written out if print_only_visible: hidden_output_param = [] else: hidden_output_param = [p for p in self if p.lower() in self.hidden_param and not p.lower() in self.user_set and - (p.lower() in self.hidden_params_to_always_write or + (p.lower() in self.hidden_params_to_always_write or p.lower() in self.system_set)] # Filter against list of parameters vetoed for write-out hidden_output_param = [p for p in hidden_output_param if p not in self.params_to_never_write] - + if print_only_visible: subruns = [] else: if not read_subrun: subruns = sorted(self.subruns.keys()) - + # Store the subruns to write in a dictionary, with its ID in key # and the corresponding stringstream in value subruns_to_write = {} - + # Sort these parameters nicely so as to put together parameters # belonging to the same group (i.e. prefix before the ':' in their name). def group_params(params): @@ -2191,7 +2191,7 @@ def group_params(params): # First dump in a temporary_output (might need to have a second pass # at the very end to update 'LHEFInputs:nSubruns') output = StringIO.StringIO() - + # Setup template from which to read if isinstance(template, str): if os.path.isfile(template): @@ -2199,7 +2199,7 @@ def group_params(params): elif '\n' in template: tmpl = StringIO.StringIO(template) else: - raise Exception("File input '%s' not found." % file_input) + raise Exception("File input '%s' not found." 
% file_input) elif template is None: # Then use a dummy empty StringIO, hence skipping the reading tmpl = StringIO.StringIO() @@ -2257,8 +2257,8 @@ def group_params(params): # Remove all of its variables (so that nothing is overwritten) DummySubrun.clear() DummySubrun.write(subruns_to_write[int(value)], - tmpl, read_subrun=True, - print_only_visible=print_only_visible, + tmpl, read_subrun=True, + print_only_visible=print_only_visible, direct_pythia_input=direct_pythia_input) logger.info('Adding new unknown subrun with ID %d.'% @@ -2267,7 +2267,7 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - + # Change parameters which must be output if param in visible_param: new_value = PY8Card.pythia8_formatting(self[param]) @@ -2286,10 +2286,10 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - - # Substitute the value. + + # Substitute the value. # If it is directly the pytia input, then don't write the param if it - # is not in the list of visible_params_to_always_write and was + # is not in the list of visible_params_to_always_write and was # not user_set or system_set if ((not direct_pythia_input) or (param.lower() in self.visible_params_to_always_write) or @@ -2304,16 +2304,16 @@ def group_params(params): output.write(template%(param_entry, value_entry.replace(value,new_value))) - + # Proceed to next line last_pos = tmpl.tell() line = tmpl.readline() - + # If add_missing is False, make sure to empty the list of remaining parameters if not add_missing: visible_param = [] hidden_output_param = [] - + # Now output the missing parameters. Warn about visible ones. if len(visible_param)>0 and not template is None: output.write( @@ -2343,12 +2343,12 @@ def group_params(params): """%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else '')) for param in hidden_output_param: if param.lower() in self.comments: - comment = '\n'.join('! %s'%c for c in + comment = '\n'.join('! 
%s'%c for c in self.comments[param.lower()].split('\n')) output.write(comment+'\n') output.write('%s=%s\n'%(param,PY8Card.pythia8_formatting(self[param]))) - - # Don't close the file if we were reading a subrun, but simply write + + # Don't close the file if we were reading a subrun, but simply write # output and return now if read_subrun: output_file.write(output.getvalue()) @@ -2382,12 +2382,12 @@ def group_params(params): out.close() else: output_file.write(output.getvalue()) - + def read(self, file_input, read_subrun=False, setter='default'): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file. - The setter option choses the authority that sets potential - modified/new parameters. It can be either: + The setter option choses the authority that sets potential + modified/new parameters. It can be either: 'default' or 'user' or 'system'""" if isinstance(file_input, str): if "\n" in file_input: @@ -2423,8 +2423,8 @@ def read(self, file_input, read_subrun=False, setter='default'): raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%\ line) if '!' in value: - value,_ = value.split('!',1) - + value,_ = value.split('!',1) + # Read a subrun if detected: if param=='Main:subrun': if read_subrun: @@ -2451,7 +2451,7 @@ def read(self, file_input, read_subrun=False, setter='default'): last_pos = finput.tell() line = finput.readline() continue - + # Read parameter. The case of a parameter not defined in the card is # handled directly in ConfigFile. @@ -2478,7 +2478,7 @@ def add_default_subruns(self, type): def __init__(self, *args, **opts): """ Initialize a subrun """ - + # Force user to set it manually. 
subrunID = -1 if 'subrun_id' in opts: @@ -2489,7 +2489,7 @@ def __init__(self, *args, **opts): def default_setup(self): """Sets up the list of available PY8SubRun parameters.""" - + # Add all default PY8Card parameters super(PY8SubRun, self).default_setup() # Make sure they are all hidden @@ -2501,33 +2501,33 @@ def default_setup(self): self.add_param("Main:subrun", -1) self.add_param("Beams:LHEF", "events.lhe.gz") - + class RunBlock(object): """ Class for a series of parameter in the run_card that can be either visible or hidden. - name: allow to set in the default run_card $name to set where that + name: allow to set in the default run_card $name to set where that block need to be inserted template_on: information to include is block is active template_off: information to include is block is not active on_fields/off_fields: paramater associated to the block - can be specify but are otherwise automatically but + can be specify but are otherwise automatically but otherwise determined from the template. - + function: status(self,run_card) -> return which template need to be used check_validity(self, runcard) -> sanity check - create_default_for_process(self, run_card, proc_characteristic, - history, proc_def) + create_default_for_process(self, run_card, proc_characteristic, + history, proc_def) post_set_XXXX(card, value, change_userdefine, raiseerror) -> fct called when XXXXX is set post_set(card, value, change_userdefine, raiseerror, **opt) -> fct called when a parameter is changed - -> no access to parameter name + -> no access to parameter name -> not called if post_set_XXXX is defined """ - - + + def __init__(self, name, template_on, template_off, on_fields=False, off_fields=False): self.name = name @@ -2550,7 +2550,7 @@ def fields(self): def find_fields_from_template(template): """ return the list of fields from a template. 
checking line like %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ - + return re.findall(r"^\s*%\((.*)\)s\s*=\s*\1", template, re.M) def get_template(self, card): @@ -2565,7 +2565,7 @@ def get_unused_template(self, card): if self.status(card): return self.template_off else: - return self.template_on + return self.template_on def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -2594,20 +2594,20 @@ def manage_parameters(self, card, written, to_write): written.add(name) if name in to_write: to_write.remove(name) - + def check_validity(self, runcard): """run self consistency check here --avoid to use runcard[''] = xxx here since it can trigger post_set function""" return def create_default_for_process(self, run_card, proc_characteristic, history, proc_def): - return + return # @staticmethod # def post_set(card, value, change_userdefine, raiseerror, **opt): # """default action to run when a parameter of the block is defined. # Here we do not know which parameter is modified. if this is needed. 
# then one need to define post_set_XXXXX(card, value, change_userdefine, raiseerror) -# and then only that function is used +# and then only that function is used # """ # # if 'pdlabel' in card.user_set: @@ -2621,7 +2621,7 @@ class RunCard(ConfigFile): blocks = [] parameter_in_block = {} - allowed_lep_densities = {} + allowed_lep_densities = {} default_include_file = 'run_card.inc' default_autodef_file = 'run.inc' donewarning = [] @@ -2637,7 +2637,7 @@ def plugin_input(self, finput): curr_dir = os.path.dirname(os.path.dirname(finput.name)) elif isinstance(finput, str): curr_dir = os.path.dirname(os.path.dirname(finput)) - + if curr_dir: if os.path.exists(pjoin(curr_dir, 'bin', 'internal', 'plugin_run_card')): # expected format {} passing everything as optional argument @@ -2646,7 +2646,7 @@ def plugin_input(self, finput): continue opts = dict(eval(line)) self.add_param(**opts) - + @classmethod def fill_post_set_from_blocks(cls): """set the post_set function for any parameter defined in a run_block""" @@ -2659,8 +2659,8 @@ def fill_post_set_from_blocks(cls): elif hasattr(block, 'post_set'): setattr(cls, 'post_set_%s' % parameter, block.post_set) cls.parameter_in_block[parameter] = block - - + + def __new__(cls, finput=None, **opt): cls.fill_post_set_from_blocks() @@ -2718,9 +2718,9 @@ def __new__(cls, finput=None, **opt): return super(RunCard, cls).__new__(cls, finput, **opt) def __init__(self, *args, **opts): - + # The following parameter are updated in the defaultsetup stage. - + #parameter for which no warning should be raised if not define self.hidden_param = [] # in which include file the parameer should be written @@ -2739,11 +2739,11 @@ def __init__(self, *args, **opts): self.cuts_parameter = {} # parameter added where legacy requires an older value. 
self.system_default = {} - + self.display_block = [] # set some block to be displayed self.fct_mod = {} # {param: (fct_pointer, *argument, **opts)} - self.cut_class = {} + self.cut_class = {} self.warned=False @@ -2776,11 +2776,11 @@ def get_lepton_densities(cls): else: cls.allowed_lep_densities[identity].append(name) - def add_param(self, name, value, fortran_name=None, include=True, + def add_param(self, name, value, fortran_name=None, include=True, hidden=False, legacy=False, cut=False, system=False, sys_default=None, autodef=False, fct_mod=None, **opts): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. fortran_name: defines what is the associate name in the f77 code include: defines if we have to put the value in the include file @@ -2795,7 +2795,7 @@ def add_param(self, name, value, fortran_name=None, include=True, fct_mod: defines a function to run if the parameter is modify in the include file options of **opts: - allowed: list of valid options. '*' means anything else should be allowed. - empty list means anything possible as well. + empty list means anything possible as well. 
- comment: add comment for writing/help - typelist: type of the list if default is empty """ @@ -2823,9 +2823,9 @@ def add_param(self, name, value, fortran_name=None, include=True, self.fct_mod[name] = fct_mod def read(self, finput, consistency=True, unknown_warning=True, **opt): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -2836,7 +2836,7 @@ def read(self, finput, consistency=True, unknown_warning=True, **opt): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -2864,8 +2864,8 @@ def add_unknown_entry(self, name, value, unknow_warning): This is based on the guess_entry_fromname for the various syntax providing input. This then call add_param accordingly. - This function does not returns anything. - """ + This function does not returns anything. + """ if name == "dsqrt_q2fact1" and not self.LO: raise InvalidRunCard("Looks like you passed a LO run_card for a NLO run. Please correct") @@ -2903,7 +2903,7 @@ def add_unknown_entry(self, name, value, unknow_warning): " The type was assigned to %s. 
\n"+\ " The definition of that variable will %sbe automatically added to fortran file %s\n"+\ " The value of that variable will %sbe passed to the fortran code via fortran file %s",\ - name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, + name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, "" if opts.get('autodef', False) else "not", "" if opts.get('autodef', False) in [True,False] else opts.get('autodef'), "" if opts.get('include', True) else "not", "" if opts.get('include', True) in [True,False] else opts.get('include')) RunCard.donewarning.append(name) @@ -2923,19 +2923,19 @@ def valid_line(self, line, tmp): return False elif line.strip().startswith('%'): parameter = line[line.find('(')+1:line.find(')')] - + try: cond = self.cuts_parameter[parameter] except KeyError: return True - - + + if template_options.get(cond, default) or cond is True: return True else: - return False + return False else: - return True + return True def reset_simd(self, old_value, new_value, name, *args, **opts): @@ -2946,28 +2946,28 @@ def make_clean(self,old_value, new_value, name, dir): raise Exception('pass make clean for ', dir) def make_Ptouch(self,old_value, new_value, name, reset): - raise Exception('pass Ptouch for ', reset) - + raise Exception('pass Ptouch for ', reset) + def write(self, output_file, template=None, python_template=False, write_hidden=False, template_options=None, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" - to_write = set(self.user_set) + to_write = set(self.user_set) written = set() if not template: raise Exception if not template_options: template_options = collections.defaultdict(str) - + if python_template: text = open(template,'r').read() - text = text.split('\n') + text = text.split('\n') # remove if templating - text = [l if not l.startswith('#IF') else 
l[l.find(')# ')+2:] + text = [l if not l.startswith('#IF') else l[l.find(')# ')+2:] for l in text if self.valid_line(l, template_options)] text ='\n'.join(text) - + if python_template and not to_write: import string if self.blocks: @@ -2981,14 +2981,14 @@ def write(self, output_file, template=None, python_template=False, if not self.list_parameter: text = text % self else: - data = dict((key.lower(),value) for key, value in self.items()) + data = dict((key.lower(),value) for key, value in self.items()) for name in self.list_parameter: if self.list_parameter[name] != str: data[name] = ', '.join(str(v) for v in data[name]) else: data[name] = "['%s']" % "', '".join(str(v) for v in data[name]) text = text % data - else: + else: text = "" for line in open(template,'r'): nline = line.split('#')[0] @@ -3005,11 +3005,11 @@ def write(self, output_file, template=None, python_template=False, this_group = this_group[0] text += this_group.get_template(self) % self this_group.manage_parameters(self, written, to_write) - + elif len(nline) != 2: text += line elif nline[1].strip() in self: - + name = nline[1].strip().lower() value = self[name] if name in self.list_parameter: @@ -3026,15 +3026,15 @@ def write(self, output_file, template=None, python_template=False, else: endline = '' text += ' %s\t= %s %s%s' % (value, name, comment, endline) - written.add(name) + written.add(name) if name in to_write: to_write.remove(name) else: logger.info('Adding missing parameter %s to current %s (with default value)', (name, self.filename)) - written.add(name) - text += line + written.add(name) + text += line for b in self.blocks: if b.status(self): @@ -3057,7 +3057,7 @@ def write(self, output_file, template=None, python_template=False, else: #partial writting -> add only what is needed to_add = [] - for line in b.get_template(self).split('\n'): + for line in b.get_template(self).split('\n'): nline = line.split('#')[0] nline = nline.split('!')[0] nline = nline.split('=') @@ -3072,8 +3072,8 @@ 
def write(self, output_file, template=None, python_template=False, continue #already include before else: to_add.append(line % {nline[1].strip():value, name:value}) - written.add(name) - + written.add(name) + if name in to_write: to_write.remove(name) else: @@ -3095,13 +3095,13 @@ def write(self, output_file, template=None, python_template=False, text += '\n'.join(to_add) if to_write or write_hidden: - text+="""#********************************************************************* + text+="""#********************************************************************* # Additional hidden parameters #********************************************************************* -""" +""" if write_hidden: # - # do not write hidden parameter not hidden for this template + # do not write hidden parameter not hidden for this template # if python_template: written = written.union(set(re.findall('\%\((\w*)\)s', open(template,'r').read(), re.M))) @@ -3129,7 +3129,7 @@ def get_last_value_include(self, output_dir): if inc file does not exist we will return the current value (i.e. set has no change) """ - #remember that + #remember that # default_include_file is a class variable # self.includepath is on the form include_path : [list of param ] out = {} @@ -3165,7 +3165,7 @@ def get_value_from_include(self, path, list_of_params, output_dir): with open(pjoin(output_dir,path), 'r') as fsock: text = fsock.read() - + for name in list_of_params: misc.sprint(name, name in self.fortran_name) misc.sprint(self.fortran_name[name] if name in self.fortran_name[name] else name) @@ -3191,11 +3191,11 @@ def get_value_from_include(self, path, list_of_params, output_dir): misc.sprint(self.fortran_name) misc.sprint(text) raise Exception - return out + return out def get_default(self, name, default=None, log_level=None): - """return self[name] if exist otherwise default. log control if we + """return self[name] if exist otherwise default. 
log control if we put a warning or not if we use the default value""" lower_name = name.lower() @@ -3216,13 +3216,13 @@ def get_default(self, name, default=None, log_level=None): log_level = 20 if not default: default = dict.__getitem__(self, name.lower()) - + logger.log(log_level, '%s missed argument %s. Takes default: %s' % (self.filename, name, default)) self[name] = default return default else: - return self[name] + return self[name] def mod_inc_pdlabel(self, value): """flag pdlabel has 'dressed' if one of the special lepton PDF with beamstralung. @@ -3237,16 +3237,16 @@ def edit_dummy_fct_from_file(self, filelist, outdir): filelist is a list of input files (given by the user) containing a series of function to be placed in replacement of standard (typically dummy) functions of the code. - This use LO/NLO class attribute that defines which function name need to - be placed in which file. + This use LO/NLO class attribute that defines which function name need to + be placed in which file. First time this is used, a backup of the original file is done in order to - recover if the user remove some of those files. + recover if the user remove some of those files. The function present in the file are determined automatically via regular expression. and only that function is replaced in the associated file. 
- function in the filelist starting with user_ will also be include within the + function in the filelist starting with user_ will also be include within the dummy_fct.f file """ @@ -3269,7 +3269,7 @@ def edit_dummy_fct_from_file(self, filelist, outdir): fsock = file_writers.FortranWriter(tmp,'w') function_text = fsock.remove_routine(text, fct) fsock.close() - test = open(tmp,'r').read() + test = open(tmp,'r').read() if fct not in self.dummy_fct_file: if fct.startswith('user_'): self.dummy_fct_file[fct] = self.dummy_fct_file['user_'] @@ -3315,22 +3315,22 @@ def guess_entry_fromname(self, name, value): - vartype: type of the variable - name: name of the variable (stripped from metadata) - options: additional options for the add_param - rules: - - if name starts with str_, int_, float_, bool_, list_, dict_ then + rules: + - if name starts with str_, int_, float_, bool_, list_, dict_ then - vartype is set accordingly - name is strip accordingly - otherwise guessed from value (which is string) - if name contains min/max - vartype is set to float - options has an added {'cut':True} - - suffixes like + - suffixes like - will be removed from named - will be added in options (for add_param) as {'cut':True} see add_param documentation for the list of supported options - if include is on False set autodef to False (i.e. 
enforce it False for future change) """ - # local function + # local function def update_typelist(value, name, opts): """convert a string to a list and update opts to keep track of the type """ value = value.strip() @@ -3358,7 +3358,7 @@ def update_typelist(value, name, opts): opts[key] = val name = name.replace("<%s=%s>" %(key,val), '') - # get vartype + # get vartype # first check that name does not force it supported_type = ["str", "float", "int", "bool", "list", "dict"] if "_" in name and name.split("_")[0].lower() in supported_type: @@ -3406,13 +3406,13 @@ def f77_formatting(value, formatv=None): value = str(value).lower() else: assert formatv - + if formatv == 'bool': if str(value) in ['1','T','.true.','True']: return '.true.' else: return '.false.' - + elif formatv == 'int': try: return str(int(value)) @@ -3422,12 +3422,12 @@ def f77_formatting(value, formatv=None): return str(int(fl)) else: raise - + elif formatv == 'float': if isinstance(value, str): value = value.replace('d','e') return ('%.10e' % float(value)).replace('e','d') - + elif formatv == 'str': # Check if it is a list if value.strip().startswith('[') and value.strip().endswith(']'): @@ -3437,20 +3437,20 @@ def f77_formatting(value, formatv=None): enumerate(elements)] else: return "'%s'" % value - - + + def check_validity(self, log_level=30): """check that parameter missing in the card are set to the expected value""" for name, value in self.system_default.items(): self.set(name, value, changeifuserset=False) - + for name in self.includepath[False]: to_bypass = self.hidden_param + list(self.legacy_parameter.keys()) if name not in to_bypass: - self.get_default(name, log_level=log_level) + self.get_default(name, log_level=log_level) for name in self.legacy_parameter: if self[name] != self.legacy_parameter[name]: @@ -3458,28 +3458,28 @@ def check_validity(self, log_level=30): for block in self.blocks: block.check_validity(self) - + def update_system_parameter_for_include(self): - """update hidden 
system only parameter for the correct writtin in the + """update hidden system only parameter for the correct writtin in the include""" return - + def write_include_file(self, output_dir, output_file=None): """Write the various include file in output_dir. The entry True of self.includepath will be written in run_card.inc The entry False will not be written anywhere output_file allows testing by providing stream. - This also call the function to add variable definition for the - variable with autodef=True (handle by write_autodef function) + This also call the function to add variable definition for the + variable with autodef=True (handle by write_autodef function) """ - + # ensure that all parameter are coherent and fix those if needed self.check_validity() - + #ensusre that system only parameter are correctly set self.update_system_parameter_for_include() @@ -3490,10 +3490,10 @@ def write_include_file(self, output_dir, output_file=None): self.write_autodef(output_dir, output_file=None) # check/fix status of customised functions self.edit_dummy_fct_from_file(self["custom_fcts"], os.path.dirname(output_dir)) - + for incname in self.includepath: self.write_one_include_file(output_dir, incname, output_file) - + for name,value in value_in_old_include.items(): if value != self[name]: self.fct_mod[name][0](value, self[name], name, *self.fct_mod[name][1],**self.fct_mod[name][2]) @@ -3515,13 +3515,13 @@ def write_one_include_file(self, output_dir, incname, output_file=None): fsock = file_writers.FortranWriter(pjoin(output_dir,pathinc+'.tmp')) - for key in self.includepath[incname]: + for key in self.includepath[incname]: #define the fortran name if key in self.fortran_name: fortran_name = self.fortran_name[key] else: fortran_name = key - + if incname in self.include_as_parameter: fsock.writelines('INTEGER %s\n' % fortran_name) #get the value with warning if the user didn't set it @@ -3534,7 +3534,7 @@ def write_one_include_file(self, output_dir, incname, 
output_file=None): # in case of a list, add the length of the list as 0th # element in fortran. Only in case of integer or float # list (not for bool nor string) - targettype = self.list_parameter[key] + targettype = self.list_parameter[key] if targettype is bool: pass elif targettype is int: @@ -3550,7 +3550,7 @@ def write_one_include_file(self, output_dir, incname, output_file=None): elif isinstance(value, dict): for fortran_name, onevalue in value.items(): line = '%s = %s \n' % (fortran_name, self.f77_formatting(onevalue)) - fsock.writelines(line) + fsock.writelines(line) elif isinstance(incname,str) and 'compile' in incname: if incname in self.include_as_parameter: line = 'PARAMETER (%s=%s)' %( fortran_name, value) @@ -3585,7 +3585,7 @@ def write_autodef(self, output_dir, output_file=None): filetocheck = dict(self.definition_path) if True not in self.definition_path: filetocheck[True] = [] - + for incname in filetocheck: if incname is True: @@ -3598,7 +3598,7 @@ def write_autodef(self, output_dir, output_file=None): if output_file: fsock = output_file input = fsock.getvalue() - + else: input = open(pjoin(output_dir,pathinc),'r').read() # do not define fsock here since we might not need to overwrite it @@ -3608,7 +3608,7 @@ def write_autodef(self, output_dir, output_file=None): previous = re.findall(re_pat, input, re.M) # now check which one needed to be added (and remove those identicaly defined) to_add = [] - for key in filetocheck[incname]: + for key in filetocheck[incname]: curr_type = self[key].__class__.__name__ length = "" if curr_type in [list, "list"]: @@ -3640,10 +3640,10 @@ def write_autodef(self, output_dir, output_file=None): fsock.truncate(0) fsock.seek(0) - # remove outdated lines + # remove outdated lines lines = input.split('\n') if previous: - out = [line for line in lines if not re.search(re_pat, line, re.M) or + out = [line for line in lines if not re.search(re_pat, line, re.M) or re.search(re_pat, line, re.M).groups() not in previous] else: 
out = lines @@ -3662,7 +3662,7 @@ def write_autodef(self, output_dir, output_file=None): stop = out.index('C STOP USER COMMON BLOCK') out = out[:start]+ out[stop+1:] #add new common-block - if self.definition_path[incname]: + if self.definition_path[incname]: out.append("C START USER COMMON BLOCK") if isinstance(pathinc , str): filename = os.path.basename(pathinc).split('.',1)[0] @@ -3675,10 +3675,10 @@ def write_autodef(self, output_dir, output_file=None): filename = filename.upper() out.append(" COMMON/USER_CUSTOM_%s/%s" %(filename,','.join( self.definition_path[incname]))) out.append('C STOP USER COMMON BLOCK') - + if not output_file: fsock.writelines(out) - fsock.close() + fsock.close() else: # for iotest out = ["%s\n" %l for l in out] @@ -3702,7 +3702,7 @@ def get_idbmup(lpp): def get_banner_init_information(self): """return a dictionary with the information needed to write the first line of the block of the lhe file.""" - + output = {} output["idbmup1"] = self.get_idbmup(self['lpp1']) output["idbmup2"] = self.get_idbmup(self['lpp2']) @@ -3713,7 +3713,7 @@ def get_banner_init_information(self): output["pdfsup1"] = self.get_pdf_id(self["pdlabel"]) output["pdfsup2"] = self.get_pdf_id(self["pdlabel"]) return output - + def get_pdf_id(self, pdf): if pdf == "lhapdf": lhaid = self["lhaid"] @@ -3721,19 +3721,19 @@ def get_pdf_id(self, pdf): return lhaid[0] else: return lhaid - else: + else: try: return {'none': 0, 'iww': 0, 'eva':0, 'edff':0, 'chff':0, 'cteq6_m':10000,'cteq6_l':10041,'cteq6l1':10042, 'nn23lo':246800,'nn23lo1':247000,'nn23nlo':244800 - }[pdf] + }[pdf] except: - return 0 - + return 0 + def get_lhapdf_id(self): return self.get_pdf_id(self['pdlabel']) - def remove_all_cut(self): + def remove_all_cut(self): """remove all the cut""" for name in self.cuts_parameter: @@ -3749,7 +3749,7 @@ def remove_all_cut(self): elif 'eta' in name: self[name] = -1 else: - self[name] = 0 + self[name] = 0 
################################################################################################ ### Define various template subpart for the LO Run_card @@ -3767,11 +3767,11 @@ def remove_all_cut(self): %(nb_proton1)s = nb_proton1 # number of proton for the first beam %(nb_neutron1)s = nb_neutron1 # number of neutron for the first beam %(mass_ion1)s = mass_ion1 # mass of the heavy ion (first beam) -# Note that seting differently the two beams only work if you use +# Note that seting differently the two beams only work if you use # group_subprocess=False when generating your matrix-element %(nb_proton2)s = nb_proton2 # number of proton for the second beam %(nb_neutron2)s = nb_neutron2 # number of neutron for the second beam - %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) + %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ template_off = "# To see heavy ion options: type \"update ion_pdf\"" @@ -3834,11 +3834,11 @@ def remove_all_cut(self): # Frame for polarization ------------------------------------------------------------------------------------ template_on = \ """#********************************************************************* -# Frame where to evaluate the matrix-element (not the cut!) for polarization +# Frame where to evaluate the matrix-element (not the cut!) for polarization #********************************************************************* %(me_frame)s = me_frame ! list of particles to sum-up to define the rest-frame ! in which to evaluate the matrix-element - ! [1,2] means the partonic center of mass + ! 
[1,2] means the partonic center of mass """ template_off = "" frame_block = RunBlock('frame', template_on=template_on, template_off=template_off) @@ -3891,7 +3891,7 @@ def remove_all_cut(self): # CONTROL The extra running scale (not QCD) * # Such running is NOT include in systematics computation * #*********************************************************************** - %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale + %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale %(mue_ref_fixed)s = mue_ref_fixed ! scale to use if fixed scale mode %(mue_over_ref)s = mue_over_ref ! ratio to mur if dynamical scale """ @@ -3908,10 +3908,10 @@ def remove_all_cut(self): %(tmin_for_channel)s = tmin_for_channel ! limit the non-singular reach of --some-- channel of integration related to T-channel diagram (value between -1 and 0), -1 is no impact %(survey_splitting)s = survey_splitting ! for loop-induced control how many core are used at survey for the computation of a single iteration. %(survey_nchannel_per_job)s = survey_nchannel_per_job ! control how many Channel are integrated inside a single job on cluster/multicore - %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) + %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) +#********************************************************************* +# Compilation flag. #********************************************************************* -# Compilation flag. -#********************************************************************* %(global_flag)s = global_flag ! fortran optimization flag use for the all code. %(aloha_flag)s = aloha_flag ! fortran optimization flag for aloha function. Suggestions: '-ffast-math' %(matrix_flag)s = matrix_flag ! fortran optimization flag for matrix.f function. 
Suggestions: '-O3' @@ -3948,7 +3948,7 @@ def check_validity(self, card): if card['pdlabel'] != card['pdlabel1']: dict.__setitem__(card, 'pdlabel', card['pdlabel1']) elif card['pdlabel1'] in sum(card.allowed_lep_densities.values(),[]): - raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") + raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel2'] in sum(card.allowed_lep_densities.values(),[]): raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel1'] == 'none': @@ -3962,7 +3962,7 @@ def check_validity(self, card): dict.__setitem__(card, 'pdlabel2', card['pdlabel']) if abs(card['lpp1']) == 1 == abs(card['lpp2']) and card['pdlabel1'] != card['pdlabel2']: - raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") + raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -4028,7 +4028,7 @@ def post_set(card, value, change_userdefine, raiseerror, name='unknown', **opt): if name == 'fixed_fac_scale2' and 'fixed_fac_scale1' not in card.user_set: dict.__setitem__(card, 'fixed_fac_scale1', card['fixed_fac_scale']) if name == 'fixed_fac_scale1' and 'fixed_fac_scale2' not in card.user_set: - dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) + dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) def status(self, card): @@ -4061,32 +4061,32 @@ def status(self, card): class RunCardLO(RunCard): """an object to handle in a nice way the run_card information""" - + blocks = [heavy_ion_block, beam_pol_block, syscalc_block, ecut_block, frame_block, eva_scale_block, mlm_block, ckkw_block, psoptim_block, pdlabel_block, fixedfacscale, running_block] dummy_fct_file = {"dummy_cuts": 
pjoin("SubProcesses","dummy_fct.f"), "get_dummy_x1": pjoin("SubProcesses","dummy_fct.f"), - "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), + "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), "dummy_boostframe": pjoin("SubProcesses","dummy_fct.f"), "user_dynamical_scale": pjoin("SubProcesses","dummy_fct.f"), "bias_wgt_custom": pjoin("SubProcesses","dummy_fct.f"), "user_": pjoin("SubProcesses","dummy_fct.f") # all function starting by user will be added to that file } - + include_as_parameter = ['vector.inc'] if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_lo.dat") - + def default_setup(self): """default value for the run_card.dat""" - + self.add_param("run_tag", "tag_1", include=False) self.add_param("gridpack", False) self.add_param("time_of_flight", -1.0, include=False) - self.add_param("nevents", 10000) + self.add_param("nevents", 10000) self.add_param("iseed", 0) self.add_param("python_seed", -2, include=False, hidden=True, comment="controlling python seed [handling in particular the final unweighting].\n -1 means use default from random module.\n -2 means set to same value as iseed") self.add_param("lpp1", 1, fortran_name="lpp(1)", allowed=[-1,1,0,2,3,9,-2,-3,4,-4], @@ -4106,7 +4106,7 @@ def default_setup(self): self.add_param('nb_neutron1', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(1)", comment='For heavy ion physics nb of neutron in the ion (for both beam but if group_subprocess was False)') self.add_param('nb_neutron2', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(2)", - comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') + comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') self.add_param('mass_ion1', -1.0, hidden=True, fortran_name="mass_ion(1)", allowed=[-1,0, 0.938, 207.9766521*0.938, 0.000511, 0.105, '*'], comment='For heavy ion physics mass in GeV of the ion (of beam 1)') 
@@ -4133,11 +4133,11 @@ def default_setup(self): self.add_param("mue_over_ref", 1.0, hidden=True, comment='ratio mu_other/mu for dynamical scale') self.add_param("ievo_eva",0,hidden=True, allowed=[0,1],fortran_name="ievo_eva", comment='eva: 0 for EW pdf muf evolution by q^2; 1 for evo by pT^2') - + # Bias module options self.add_param("bias_module", 'None', include=False, hidden=True) self.add_param('bias_parameters', {'__type__':1.0}, include='BIAS/bias.inc', hidden=True) - + #matching self.add_param("scalefact", 1.0) self.add_param("ickkw", 0, allowed=[0,1], hidden=True, comment="\'0\' for standard fixed order computation.\n\'1\' for MLM merging activates alphas and pdf re-weighting according to a kt clustering of the QCD radiation.") @@ -4221,7 +4221,7 @@ def default_setup(self): self.add_param("mmaa", 0.0, cut='aa') self.add_param("mmll", 0.0, cut='ll') self.add_param("mmjjmax", -1.0, cut='jj') - self.add_param("mmbbmax", -1.0, cut='bb') + self.add_param("mmbbmax", -1.0, cut='bb') self.add_param("mmaamax", -1.0, cut='aa') self.add_param("mmllmax", -1.0, cut='ll') self.add_param("mmnl", 0.0, cut='LL') @@ -4231,9 +4231,9 @@ def default_setup(self): self.add_param("ptllmax", -1.0, cut='ll') self.add_param("xptj", 0.0, cut='jj') self.add_param("xptb", 0.0, cut='bb') - self.add_param("xpta", 0.0, cut='aa') + self.add_param("xpta", 0.0, cut='aa') self.add_param("xptl", 0.0, cut='ll') - # ordered pt jet + # ordered pt jet self.add_param("ptj1min", 0.0, cut='jj') self.add_param("ptj1max", -1.0, cut='jj') self.add_param("ptj2min", 0.0, cut='jj') @@ -4241,7 +4241,7 @@ def default_setup(self): self.add_param("ptj3min", 0.0, cut='jjj') self.add_param("ptj3max", -1.0, cut='jjj') self.add_param("ptj4min", 0.0, cut='j'*4) - self.add_param("ptj4max", -1.0, cut='j'*4) + self.add_param("ptj4max", -1.0, cut='j'*4) self.add_param("cutuse", 0, cut='jj') # ordered pt lepton self.add_param("ptl1min", 0.0, cut='l'*2) @@ -4249,7 +4249,7 @@ def default_setup(self): 
self.add_param("ptl2min", 0.0, cut='l'*2) self.add_param("ptl2max", -1.0, cut='l'*2) self.add_param("ptl3min", 0.0, cut='l'*3) - self.add_param("ptl3max", -1.0, cut='l'*3) + self.add_param("ptl3max", -1.0, cut='l'*3) self.add_param("ptl4min", 0.0, cut='l'*4) self.add_param("ptl4max", -1.0, cut='l'*4) # Ht sum of jets @@ -4257,7 +4257,7 @@ def default_setup(self): self.add_param("htjmax", -1.0, cut='j'*2) self.add_param("ihtmin", 0.0, cut='J'*2) self.add_param("ihtmax", -1.0, cut='J'*2) - self.add_param("ht2min", 0.0, cut='J'*3) + self.add_param("ht2min", 0.0, cut='J'*3) self.add_param("ht3min", 0.0, cut='J'*3) self.add_param("ht4min", 0.0, cut='J'*4) self.add_param("ht2max", -1.0, cut='J'*3) @@ -4267,7 +4267,7 @@ def default_setup(self): self.add_param("ptgmin", 0.0, cut='aj') self.add_param("r0gamma", 0.4, hidden=True) self.add_param("xn", 1.0, hidden=True) - self.add_param("epsgamma", 1.0, hidden=True) + self.add_param("epsgamma", 1.0, hidden=True) self.add_param("isoem", True, hidden=True) self.add_param("xetamin", 0.0, cut='jj') self.add_param("deltaeta", 0.0, cut='j'*2) @@ -4280,7 +4280,7 @@ def default_setup(self): self.add_param("use_syst", True) self.add_param('systematics_program', 'systematics', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics, syscalc') self.add_param('systematics_arguments', ['--mur=0.5,1,2', '--muf=0.5,1,2', '--pdf=errorset'], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. 
Look at the help of the systematics function for more details.') - + self.add_param("sys_scalefact", "0.5 1 2", include=False, hidden=True) self.add_param("sys_alpsfact", "None", include=False, hidden=True) self.add_param("sys_matchscale", "auto", include=False, hidden=True) @@ -4315,8 +4315,8 @@ def default_setup(self): self.add_param('aloha_flag', '', include=False, hidden=True, comment='global fortran compilation flag, suggestion: -ffast-math', fct_mod=(self.make_clean, ('Source/DHELAS'),{})) self.add_param('matrix_flag', '', include=False, hidden=True, comment='fortran compilation flag for the matrix-element files, suggestion -O3', - fct_mod=(self.make_Ptouch, ('matrix'),{})) - self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', + fct_mod=(self.make_Ptouch, ('matrix'),{})) + self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', fortran_name='VECSIZE_MEMMAX', fct_mod=(self.reset_simd,(),{})) # parameter allowing to define simple cut via the pdg @@ -4329,24 +4329,24 @@ def default_setup(self): self.add_param('eta_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False) - + self.add_param('pdg_cut',[0], system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], system=True) # store pt min self.add_param('ptmax4pdg',[-1.], system=True) self.add_param('Emin4pdg',[0.], system=True) # store pt min - self.add_param('Emax4pdg',[-1.], system=True) + self.add_param('Emax4pdg',[-1.], system=True) self.add_param('etamin4pdg',[0.], system=True) # store pt min - self.add_param('etamax4pdg',[-1.], system=True) + self.add_param('etamax4pdg',[-1.], system=True) self.add_param('mxxmin4pdg',[-1.], system=True) self.add_param('mxxpart_antipart', [False], system=True) - - - + + + def check_validity(self): """ """ 
- + super(RunCardLO, self).check_validity() - + #Make sure that nhel is only either 0 (i.e. no MC over hel) or #1 (MC over hel with importance sampling). In particular, it can #no longer be > 1. @@ -4357,12 +4357,12 @@ def check_validity(self): "not %s." % self['nhel']) if int(self['maxjetflavor']) > 6: raise InvalidRunCard('maxjetflavor should be lower than 5! (6 is partly supported)') - + if len(self['pdgs_for_merging_cut']) > 1000: raise InvalidRunCard("The number of elements in "+\ "'pdgs_for_merging_cut' should not exceed 1000.") - + # some cut need to be deactivated in presence of isolation if self['ptgmin'] > 0: if self['pta'] > 0: @@ -4370,18 +4370,18 @@ def check_validity(self): self['pta'] = 0.0 if self['draj'] > 0: logger.warning('draj cut discarded since photon isolation is used') - self['draj'] = 0.0 - - # special treatment for gridpack use the gseed instead of the iseed + self['draj'] = 0.0 + + # special treatment for gridpack use the gseed instead of the iseed if self['gridrun']: self['iseed'] = self['gseed'] - + #Some parameter need to be fixed when using syscalc #if self['use_syst']: # if self['scalefact'] != 1.0: # logger.warning('Since use_syst=T, changing the value of \'scalefact\' to 1') # self['scalefact'] = 1.0 - + # CKKW Treatment if self['ickkw'] > 0: if self['ickkw'] != 1: @@ -4399,7 +4399,7 @@ def check_validity(self): raise InvalidRunCard('maxjetflavor at 6 is NOT supported for matching!') if self['ickkw'] == 2: # add warning if ckkw selected but the associate parameter are empty - self.get_default('highestmult', log_level=20) + self.get_default('highestmult', log_level=20) self.get_default('issgridfile', 'issudgrid.dat', log_level=20) if self['xqcut'] > 0: if self['ickkw'] == 0: @@ -4412,13 +4412,13 @@ def check_validity(self): if self['drjl'] != 0: if 'drjl' in self.user_set: logger.warning('Since icckw>0, changing the value of \'drjl\' to 0') - self['drjl'] = 0 - if not self['auto_ptj_mjj']: + self['drjl'] = 0 + if not 
self['auto_ptj_mjj']: if self['mmjj'] > self['xqcut']: logger.warning('mmjj > xqcut (and auto_ptj_mjj = F). MMJJ set to 0') - self['mmjj'] = 0.0 - - # check validity of the pdf set + self['mmjj'] = 0.0 + + # check validity of the pdf set # note that pdlabel is automatically set to lhapdf if pdlabel1 or pdlabel2 is set to lhapdf if self['pdlabel'] == 'lhapdf': #add warning if lhaid not define @@ -4426,7 +4426,7 @@ def check_validity(self): mod = False for i in [1,2]: - lpp = 'lpp%i' %i + lpp = 'lpp%i' %i pdlabelX = 'pdlabel%i' % i if self[lpp] == 0: # nopdf if self[pdlabelX] != 'none': @@ -4459,12 +4459,12 @@ def check_validity(self): raise InvalidRunCard( "Heavy ion mode is only supported for lpp1=1/2") if self['lpp2'] not in [1,2]: if self['nb_proton2'] !=1 or self['nb_neutron2'] !=0: - raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") + raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") # check that fixed_fac_scale(1/2) is setting as expected # if lpp=2/3/4 -> default is that beam in fixed scale - # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are + # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are # check that both fixed_fac_scale1/2 are defined together # ensure that fixed_fac_scale1 and fixed_fac_scale2 are setup as needed if 'fixed_fac_scale1' in self.user_set: @@ -4475,13 +4475,13 @@ def check_validity(self): elif 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale1 are defined but not fixed_fac_scale2. The value of fixed_fac_scale2 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale2'] = self['fixed_fac_scale'] - elif self['lpp2'] !=0: + elif self['lpp2'] !=0: raise Exception('fixed_fac_scale2 not defined while fixed_fac_scale1 is. Please fix your run_card.') elif 'fixed_fac_scale2' in self.user_set: if 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale2 are defined but not fixed_fac_scale1. 
The value of fixed_fac_scale1 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale1'] = self['fixed_fac_scale'] - elif self['lpp1'] !=0: + elif self['lpp1'] !=0: raise Exception('fixed_fac_scale1 not defined while fixed_fac_scale2 is. Please fix your run_card.') else: if 'fixed_fac_scale' in self.user_set: @@ -4500,12 +4500,12 @@ def check_validity(self): logger.warning('fixed_fac_scale1 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale1']) logger.warning('fixed_fac_scale2 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale2']) - # check if lpp = + # check if lpp = if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]): for i in [1,2]: if abs(self['lpp%s' % i ]) in [3,4] and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: logger.warning("Vector boson from lepton PDF is using fixed scale value of muf [dsqrt_q2fact%s]. Looks like you kept the default value (Mz). Is this really the cut-off that you want to use?" % i) - + if abs(self['lpp%s' % i ]) == 2 and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: if self['pdlabel'] in ['edff','chff']: logger.warning("Since 3.5.0 exclusive photon-photon processes in ultraperipheral proton and nuclear collisions from gamma-UPC (arXiv:2207.03012) will ignore the factorisation scale.") @@ -4515,10 +4515,10 @@ def check_validity(self): if six.PY2 and self['hel_recycling']: self['hel_recycling'] = False - logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. + logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. In general this optimization speeds up the computation by a factor of two.""") - + # check that ebeam is bigger than the associated mass. 
for i in [1,2]: if self['lpp%s' % i ] not in [1,2]: @@ -4529,13 +4529,13 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") - elif self['ebeam%i' % i] < self['mass_ion%i' % i]: + raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + elif self['ebeam%i' % i] < self['mass_ion%i' % i]: if self['ebeam%i' %i] == 0: logger.warning("At rest ion mode set: Energy beam set to %s" % self['mass_ion%i' % i]) self.set('ebeam%i' %i, self['mass_ion%i' % i]) - - + + # check the tmin_for_channel is negative if self['tmin_for_channel'] == 0: raise InvalidRunCard('tmin_for_channel can not be set to 0.') @@ -4543,15 +4543,15 @@ def check_validity(self): logger.warning('tmin_for_channel should be negative. Will be using -%f instead' % self['tmin_for_channel']) self.set('tmin_for_channel', -self['tmin_for_channel']) - + def update_system_parameter_for_include(self): """system parameter need to be setupe""" - + # polarization self['frame_id'] = sum(2**(n) for n in self['me_frame']) - + # set the pdg_for_cut fortran parameter - pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + + pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + list(self['e_min_pdg'].keys()) +list(self['e_max_pdg'].keys()) + list(self['eta_min_pdg'].keys()) +list(self['eta_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys()) + list(self['mxx_only_part_antipart'].keys())) @@ -4559,15 +4559,15 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different pdgs are allowed for pdg specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. 
Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative pdg code') - - + + if any(pdg in pdg_to_cut for pdg in [1,2,3,4,5,21,22,11,13,15]): raise Exception("Can not use PDG related cut for light quark/b quark/lepton/gluon/photon") - + if pdg_to_cut: self['pdg_cut'] = list(pdg_to_cut) self['ptmin4pdg'] = [] @@ -4595,7 +4595,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -4605,11 +4605,11 @@ def update_system_parameter_for_include(self): self['ptmax4pdg'] = [-1.] self['Emax4pdg'] = [-1.] self['etamax4pdg'] =[-1.] - self['mxxmin4pdg'] =[0.] + self['mxxmin4pdg'] =[0.] self['mxxpart_antipart'] = [False] - - - + + + def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules process 1->N all cut set on off. 
@@ -4626,7 +4626,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if proc_characteristic['loop_induced']: self['nhel'] = 1 self['pdgs_for_merging_cut'] = proc_characteristic['colored_pdgs'] - + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() @@ -4636,7 +4636,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): # check for beam_id beam_id = set() beam_id_split = [set(), set()] - for proc in proc_def: + for proc in proc_def: for oneproc in proc: for i,leg in enumerate(oneproc['legs']): if not leg['state']: @@ -4654,20 +4654,20 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): maxjetflavor = max([4]+[abs(i) for i in beam_id if -7< i < 7]) self['maxjetflavor'] = maxjetflavor self['asrwgtflavor'] = maxjetflavor - + if any(i in beam_id for i in [1,-1,2,-2,3,-3,4,-4,5,-5,21,22]): # check for e p collision if any(id in beam_id for id in [11,-11,13,-13]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [11,-11,13,-13]): - self['lpp1'] = 0 - self['lpp2'] = 1 - self['ebeam1'] = '1k' - self['ebeam2'] = '6500' + self['lpp1'] = 0 + self['lpp2'] = 1 + self['ebeam1'] = '1k' + self['ebeam2'] = '6500' else: - self['lpp1'] = 1 - self['lpp2'] = 0 - self['ebeam1'] = '6500' + self['lpp1'] = 1 + self['lpp2'] = 0 + self['ebeam1'] = '6500' self['ebeam2'] = '1k' # UPC for p p collision @@ -4677,7 +4677,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam1'] = '6500' self['ebeam2'] = '6500' self['pdlabel'] = 'edff' - + elif any(id in beam_id for id in [11,-11,13,-13]): self['lpp1'] = 0 self['lpp2'] = 0 @@ -4688,7 +4688,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('ecut') self.display_block.append('beam_pol') - + # check for possibility of eva eva_in_b1 = any(i in beam_id_split[0] for i in [23,24,-24]) #,12,-12,14,-14]) @@ -4701,10 
+4701,10 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['nhel'] = 1 self['pdlabel'] = 'eva' self['fixed_fac_scale'] = True - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') elif eva_in_b1: - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') self['pdlabel1'] = 'eva' self['fixed_fac_scale1'] = True self['nhel'] = 1 @@ -4724,7 +4724,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['pdlabel2'] = 'eva' self['fixed_fac_scale2'] = True self['nhel'] = 1 - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') for i in beam_id_split[0]: if abs(i) == 11: self['lpp1'] = math.copysign(3,i) @@ -4740,34 +4740,34 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if any(i in beam_id for i in [22,23,24,-24,12,-12,14,-14]): self.display_block.append('eva_scale') - # automatic polarisation of the beam if neutrino beam + # automatic polarisation of the beam if neutrino beam if any(id in beam_id for id in [12,-12,14,-14,16,-16]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [12,14,16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = -100 if not all(id in [12,14,16] for id in beam_id_split[0]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1]. %s') elif any(id in beam_id_split[0] for id in [-12,-14,-16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[0]): - logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1].') + logger.warning('Issue with default beam setup of neutrino in the run_card. 
Please check it up [polbeam1].') if any(id in beam_id_split[1] for id in [12,14,16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = -100 if not all(id in [12,14,16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') elif any(id in beam_id_split[1] for id in [-12,-14,-16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') - + # Check if need matching min_particle = 99 max_particle = 0 @@ -4798,12 +4798,12 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - + break + if matching: self['ickkw'] = 1 self['xqcut'] = 30 - #self['use_syst'] = False + #self['use_syst'] = False self['drjj'] = 0 self['drjl'] = 0 self['sys_alpsfact'] = "0.5 1 2" @@ -4811,8 +4811,8 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('mlm') self.display_block.append('ckkw') self['dynamical_scale_choice'] = -1 - - + + # For interference module, the systematics are wrong. 
# automatically set use_syst=F and set systematics_program=none no_systematics = False @@ -4826,14 +4826,14 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): continue break - + if interference or no_systematics: self['use_syst'] = False self['systematics_program'] = 'none' if interference: self['dynamical_scale_choice'] = 3 self['sde_strategy'] = 2 - + # set default integration strategy # interference case is already handle above # here pick strategy 2 if only one QCD color flow @@ -4852,7 +4852,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if pure_lepton and proton_initial: self['sde_strategy'] = 1 else: - # check if multi-jet j + # check if multi-jet j is_multijet = True for proc in proc_def: if any(abs(j.get('id')) not in jet_id for j in proc[0]['legs']): @@ -4860,7 +4860,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): break if is_multijet: self['sde_strategy'] = 2 - + # if polarization is used, set the choice of the frame in the run_card # But only if polarization is used for massive particles for plist in proc_def: @@ -4870,7 +4870,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): model = proc.get('model') particle = model.get_particle(l.get('id')) if particle.get('mass').lower() != 'zero': - self.display_block.append('frame') + self.display_block.append('frame') break else: continue @@ -4894,15 +4894,15 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): proc = proc_list[0] if proc['forbidden_onsh_s_channels']: self['sde_strategy'] = 1 - + if 'fix_scale' in proc_characteristic['limitations']: self['fixed_ren_scale'] = 1 self['fixed_fac_scale'] = 1 if self['ickkw'] == 1: logger.critical("MLM matching/merging not compatible with the model! 
You need to use another method to remove the double counting!") self['ickkw'] = 0 - - # define class of particles present to hide all the cuts associated to + + # define class of particles present to hide all the cuts associated to # not present class cut_class = collections.defaultdict(int) for proc in proc_def: @@ -4925,41 +4925,41 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): one_proc_cut['L'] += 1 elif abs(pdg) in [12,14,16]: one_proc_cut['n'] += 1 - one_proc_cut['L'] += 1 + one_proc_cut['L'] += 1 elif str(oneproc.get('model').get_particle(pdg)['mass']) != 'ZERO': one_proc_cut['H'] += 1 - + for key, nb in one_proc_cut.items(): cut_class[key] = max(cut_class[key], nb) self.cut_class = dict(cut_class) self.cut_class[''] = True #avoid empty - + # If model has running functionality add the additional parameter model = proc_def[0][0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Read file input/default_run_card_lo.dat # This has to be LAST !! 
if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'run_card.dat') python_template = True else: template = pjoin(MEDIR, 'Cards', 'run_card_default.dat') python_template = False - + hid_lines = {'default':True}#collections.defaultdict(itertools.repeat(True).next) if isinstance(output_file, str): @@ -4975,9 +4975,9 @@ def write(self, output_file, template=None, python_template=False, hid_lines[k1+k2] = True super(RunCardLO, self).write(output_file, template=template, - python_template=python_template, + python_template=python_template, template_options=hid_lines, - **opt) + **opt) class InvalidMadAnalysis5Card(InvalidCmd): @@ -4986,19 +4986,19 @@ class InvalidMadAnalysis5Card(InvalidCmd): class MadAnalysis5Card(dict): """ A class to store a MadAnalysis5 card. 
Very basic since it is basically free format.""" - + _MG5aMC_escape_tag = '@MG5aMC' - + _default_hadron_inputs = ['*.hepmc', '*.hep', '*.stdhep', '*.lhco','*.root'] _default_parton_inputs = ['*.lhe'] _skip_analysis = False - + @classmethod def events_can_be_reconstructed(cls, file_path): """ Checks from the type of an event file whether it can be reconstructed or not.""" return not (file_path.endswith('.lhco') or file_path.endswith('.lhco.gz') or \ file_path.endswith('.root') or file_path.endswith('.root.gz')) - + @classmethod def empty_analysis(cls): """ A method returning the structure of an empty analysis """ @@ -5012,7 +5012,7 @@ def empty_reconstruction(cls): 'reco_output':'lhe'} def default_setup(self): - """define the default value""" + """define the default value""" self['mode'] = 'parton' self['inputs'] = [] # None is the default stdout level, it will be set automatically by MG5aMC @@ -5025,8 +5025,8 @@ def default_setup(self): # of this class and some other property could be added to this dictionary # in the future. self['analyses'] = {} - # The recasting structure contains on set of commands and one set of - # card lines. + # The recasting structure contains on set of commands and one set of + # card lines. 
self['recasting'] = {'commands':[],'card':[]} # Add the default trivial reconstruction to use an lhco input # This is just for the structure @@ -5035,7 +5035,7 @@ def default_setup(self): 'root_input': MadAnalysis5Card.empty_reconstruction()} self['reconstruction']['lhco_input']['reco_output']='lhco' - self['reconstruction']['root_input']['reco_output']='root' + self['reconstruction']['root_input']['reco_output']='root' # Specify in which order the analysis/recasting were specified self['order'] = [] @@ -5049,7 +5049,7 @@ def __init__(self, finput=None,mode=None): return else: dict.__init__(self) - + # Initialize it with all the default value self.default_setup() if not mode is None: @@ -5058,15 +5058,15 @@ def __init__(self, finput=None,mode=None): # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, mode=mode) - + def read(self, input, mode=None): """ Read an MA5 card""" - + if mode not in [None,'parton','hadron']: raise MadGraph5Error('A MadAnalysis5Card can be read online the modes'+ "'parton' or 'hadron'") card_mode = mode - + if isinstance(input, (file, StringIO.StringIO)): input_stream = input elif isinstance(input, str): @@ -5099,10 +5099,10 @@ def read(self, input, mode=None): except ValueError: option = line[len(self._MG5aMC_escape_tag):] option = option.strip() - + if option=='inputs': self['inputs'].extend([v.strip() for v in value.split(',')]) - + elif option == 'skip_analysis': self._skip_analysis = True @@ -5118,7 +5118,7 @@ def read(self, input, mode=None): except: raise InvalidMadAnalysis5Card( "MA5 output level specification '%s' is incorrect."%str(value)) - + elif option=='analysis_name': current_type = 'analyses' current_name = value @@ -5127,7 +5127,7 @@ def read(self, input, mode=None): "Analysis '%s' already defined in MadAnalysis5 card"%current_name) else: self[current_type][current_name] = MadAnalysis5Card.empty_analysis() - + elif option=='set_reconstructions': try: reconstructions = 
eval(value) @@ -5142,7 +5142,7 @@ def read(self, input, mode=None): "analysis in a MadAnalysis5 card.") self[current_type][current_name]['reconstructions']=reconstructions continue - + elif option=='reconstruction_name': current_type = 'reconstruction' current_name = value @@ -5161,7 +5161,7 @@ def read(self, input, mode=None): raise InvalidMadAnalysis5Card( "Option '%s' can only take the values 'lhe' or 'root'"%option) self['reconstruction'][current_name]['reco_output'] = value.lower() - + elif option.startswith('recasting'): current_type = 'recasting' try: @@ -5171,11 +5171,11 @@ def read(self, input, mode=None): if len(self['recasting'][current_name])>0: raise InvalidMadAnalysis5Card( "Only one recasting can be defined in MadAnalysis5 hadron card") - + else: raise InvalidMadAnalysis5Card( "Unreckognized MG5aMC instruction in MadAnalysis5 card: '%s'"%option) - + if option in ['analysis_name','reconstruction_name'] or \ option.startswith('recasting'): self['order'].append((current_type,current_name)) @@ -5209,7 +5209,7 @@ def read(self, input, mode=None): self['inputs'] = self._default_hadron_inputs else: self['inputs'] = self._default_parton_inputs - + # Make sure at least one reconstruction is specified for each hadron # level analysis and that it exists. if self['mode']=='hadron': @@ -5221,7 +5221,7 @@ def read(self, input, mode=None): analysis['reconstructions']): raise InvalidMadAnalysis5Card('A reconstructions specified in'+\ " analysis '%s' is not defined."%analysis_name) - + def write(self, output): """ Write an MA5 card.""" @@ -5232,7 +5232,7 @@ def write(self, output): else: raise MadGraph5Error('Incorrect input for the write function of'+\ ' the MadAnalysis5Card card. 
Received argument type is: %s'%str(type(output))) - + output_lines = [] if self._skip_analysis: output_lines.append('%s skip_analysis'%self._MG5aMC_escape_tag) @@ -5240,11 +5240,11 @@ def write(self, output): if not self['stdout_lvl'] is None: output_lines.append('%s stdout_lvl=%s'%(self._MG5aMC_escape_tag,self['stdout_lvl'])) for definition_type, name in self['order']: - + if definition_type=='analyses': output_lines.append('%s analysis_name = %s'%(self._MG5aMC_escape_tag,name)) output_lines.append('%s set_reconstructions = %s'%(self._MG5aMC_escape_tag, - str(self['analyses'][name]['reconstructions']))) + str(self['analyses'][name]['reconstructions']))) elif definition_type=='reconstruction': output_lines.append('%s reconstruction_name = %s'%(self._MG5aMC_escape_tag,name)) elif definition_type=='recasting': @@ -5254,23 +5254,23 @@ def write(self, output): output_lines.extend(self[definition_type][name]) elif definition_type in ['reconstruction']: output_lines.append('%s reco_output = %s'%(self._MG5aMC_escape_tag, - self[definition_type][name]['reco_output'])) + self[definition_type][name]['reco_output'])) output_lines.extend(self[definition_type][name]['commands']) elif definition_type in ['analyses']: - output_lines.extend(self[definition_type][name]['commands']) - + output_lines.extend(self[definition_type][name]['commands']) + output_stream.write('\n'.join(output_lines)) - + return - - def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, + + def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, UFO_model_path=None, run_tag=''): - """ Returns a list of tuples ('AnalysisTag',['commands']) specifying - the commands of the MadAnalysis runs required from this card. - At parton-level, the number of such commands is the number of analysis + """ Returns a list of tuples ('AnalysisTag',['commands']) specifying + the commands of the MadAnalysis runs required from this card. 
+ At parton-level, the number of such commands is the number of analysis asked for. In the future, the idea is that the entire card can be processed in one go from MA5 directly.""" - + if isinstance(inputs_arg, list): inputs = inputs_arg elif isinstance(inputs_arg, str): @@ -5278,21 +5278,21 @@ def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, else: raise MadGraph5Error("The function 'get_MA5_cmds' can only take "+\ " a string or a list for the argument 'inputs_arg'") - + if len(inputs)==0: raise MadGraph5Error("The function 'get_MA5_cmds' must have "+\ " at least one input specified'") - + if run_dir_path is None: run_dir_path = os.path.dirname(inputs_arg) - + cmds_list = [] - + UFO_load = [] # first import the UFO if provided if UFO_model_path: UFO_load.append('import %s'%UFO_model_path) - + def get_import(input, type=None): """ Generates the MA5 import commands for that event file. """ dataset_name = os.path.basename(input).split('.')[0] @@ -5304,7 +5304,7 @@ def get_import(input, type=None): if not type is None: res.append('set %s.type = %s'%(dataset_name, type)) return res - + fifo_status = {'warned_fifo':False,'fifo_used_up':False} def warn_fifo(input): if not input.endswith('.fifo'): @@ -5317,7 +5317,7 @@ def warn_fifo(input): logger.warning('Only the first MA5 analysis/reconstructions can be run on a fifo. Subsequent runs will skip fifo inputs.') fifo_status['warned_fifo'] = True return True - + # Then the event file(s) input(s) inputs_load = [] for input in inputs: @@ -5325,16 +5325,16 @@ def warn_fifo(input): if len(inputs) > 1: inputs_load.append('set main.stacking_method = superimpose') - + submit_command = 'submit %s'%submit_folder+'_%s' - + # Keep track of the reconstruction outpus in the MA5 workflow # Keys are reconstruction names and values are .lhe.gz reco file paths. 
# We put by default already the lhco/root ones present reconstruction_outputs = { - 'lhco_input':[f for f in inputs if + 'lhco_input':[f for f in inputs if f.endswith('.lhco') or f.endswith('.lhco.gz')], - 'root_input':[f for f in inputs if + 'root_input':[f for f in inputs if f.endswith('.root') or f.endswith('.root.gz')]} # If a recasting card has to be written out, chose here its path @@ -5343,7 +5343,7 @@ def warn_fifo(input): # Make sure to only run over one analysis over each fifo. for definition_type, name in self['order']: - if definition_type == 'reconstruction': + if definition_type == 'reconstruction': analysis_cmds = list(self['reconstruction'][name]['commands']) reco_outputs = [] for i_input, input in enumerate(inputs): @@ -5365,8 +5365,8 @@ def warn_fifo(input): analysis_cmds.append( submit_command%('reco_%s_%d'%(name,i_input+1))) analysis_cmds.append('remove reco_events') - - reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) + + reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) for rec_out in reco_outputs] if len(reco_outputs)>0: cmds_list.append(('_reco_%s'%name,analysis_cmds)) @@ -5386,7 +5386,7 @@ def warn_fifo(input): analysis_cmds = ['set main.mode = parton'] else: analysis_cmds = [] - analysis_cmds.extend(sum([get_import(rec_out) for + analysis_cmds.extend(sum([get_import(rec_out) for rec_out in reconstruction_outputs[reco]],[])) analysis_cmds.extend(self['analyses'][name]['commands']) analysis_cmds.append(submit_command%('%s_%s'%(name,reco))) @@ -5427,12 +5427,12 @@ def warn_fifo(input): %(mue_ref_fixed)s = mue_ref_fixed ! 
scale to use if fixed scale mode """ running_block_nlo = RunBlock('RUNNING', template_on=template_on, template_off="") - + class RunCardNLO(RunCard): """A class object for the run_card for a (aMC@)NLO pocess""" - + LO = False - + blocks = [running_block_nlo] dummy_fct_file = {"dummy_cuts": pjoin("SubProcesses","dummy_fct.f"), @@ -5443,11 +5443,11 @@ class RunCardNLO(RunCard): if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_nlo.dat") - - + + def default_setup(self): """define the default value""" - + self.add_param('run_tag', 'tag_1', include=False) self.add_param('nevents', 10000) self.add_param('req_acc', -1.0, include=False) @@ -5455,27 +5455,27 @@ def default_setup(self): self.add_param("time_of_flight", -1.0, include=False) self.add_param('event_norm', 'average') #FO parameter - self.add_param('req_acc_fo', 0.01, include=False) + self.add_param('req_acc_fo', 0.01, include=False) self.add_param('npoints_fo_grid', 5000, include=False) self.add_param('niters_fo_grid', 4, include=False) - self.add_param('npoints_fo', 10000, include=False) + self.add_param('npoints_fo', 10000, include=False) self.add_param('niters_fo', 6, include=False) #seed and collider self.add_param('iseed', 0) - self.add_param('lpp1', 1, fortran_name='lpp(1)') - self.add_param('lpp2', 1, fortran_name='lpp(2)') + self.add_param('lpp1', 1, fortran_name='lpp(1)') + self.add_param('lpp2', 1, fortran_name='lpp(2)') self.add_param('ebeam1', 6500.0, fortran_name='ebeam(1)') - self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') + self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') self.add_param('pdlabel', 'nn23nlo', allowed=['lhapdf', 'emela', 'cteq6_m','cteq6_d','cteq6_l','cteq6l1', 'nn23lo','nn23lo1','nn23nlo','ct14q00','ct14q07','ct14q14','ct14q21'] +\ - sum(self.allowed_lep_densities.values(),[]) ) + sum(self.allowed_lep_densities.values(),[]) ) self.add_param('lhaid', [244600],fortran_name='lhaPDFid') self.add_param('pdfscheme', 0) # whether to include or not 
photon-initiated processes in lepton collisions self.add_param('photons_from_lepton', True) self.add_param('lhapdfsetname', ['internal_use_only'], system=True) - # stuff for lepton collisions - # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set - # whether the current PDF set has or not beamstrahlung + # stuff for lepton collisions + # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set + # whether the current PDF set has or not beamstrahlung self.add_param('has_bstrahl', False, system=True) # renormalisation scheme of alpha self.add_param('alphascheme', 0, system=True) @@ -5486,31 +5486,31 @@ def default_setup(self): # w contribution included or not in the running of alpha self.add_param('w_run', 1, system=True) #shower and scale - self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') + self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') self.add_param('shower_scale_factor',1.0) self.add_param('mcatnlo_delta', False) self.add_param('fixed_ren_scale', False) self.add_param('fixed_fac_scale', False) self.add_param('fixed_extra_scale', True, hidden=True, system=True) # set system since running from Ellis-Sexton scale not implemented - self.add_param('mur_ref_fixed', 91.118) + self.add_param('mur_ref_fixed', 91.118) self.add_param('muf1_ref_fixed', -1.0, hidden=True) - self.add_param('muf_ref_fixed', 91.118) + self.add_param('muf_ref_fixed', 91.118) self.add_param('muf2_ref_fixed', -1.0, hidden=True) - self.add_param('mue_ref_fixed', 91.118, hidden=True) - self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', + self.add_param('mue_ref_fixed', 91.118, hidden=True) + self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', allowed = [-2,-1,0,1,2,3,10], comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n 
'3' is HT/2, '0' allows to use the user_hook definition (need to be defined via custom_fct entry) ") self.add_param('fixed_qes_scale', False, hidden=True) self.add_param('qes_ref_fixed', -1.0, hidden=True) self.add_param('mur_over_ref', 1.0) - self.add_param('muf_over_ref', 1.0) - self.add_param('muf1_over_ref', -1.0, hidden=True) + self.add_param('muf_over_ref', 1.0) + self.add_param('muf1_over_ref', -1.0, hidden=True) self.add_param('muf2_over_ref', -1.0, hidden=True) self.add_param('mue_over_ref', 1.0, hidden=True, system=True) # forbid the user to modigy due to incorrect handling of the Ellis-Sexton scale self.add_param('qes_over_ref', -1.0, hidden=True) self.add_param('reweight_scale', [True], fortran_name='lscalevar') - self.add_param('rw_rscale_down', -1.0, hidden=True) + self.add_param('rw_rscale_down', -1.0, hidden=True) self.add_param('rw_rscale_up', -1.0, hidden=True) - self.add_param('rw_fscale_down', -1.0, hidden=True) + self.add_param('rw_fscale_down', -1.0, hidden=True) self.add_param('rw_fscale_up', -1.0, hidden=True) self.add_param('rw_rscale', [1.0,2.0,0.5], fortran_name='scalevarR') self.add_param('rw_fscale', [1.0,2.0,0.5], fortran_name='scalevarF') @@ -5523,60 +5523,60 @@ def default_setup(self): #technical self.add_param('folding', [1,1,1], include=False) - + #merging self.add_param('ickkw', 0, allowed=[-1,0,3,4], comment=" - 0: No merging\n - 3: FxFx Merging : http://amcatnlo.cern.ch/FxFx_merging.htm\n - 4: UNLOPS merging (No interface within MG5aMC)\n - -1: NNLL+NLO jet-veto computation. 
See arxiv:1412.8408 [hep-ph]") self.add_param('bwcutoff', 15.0) - #cuts + #cuts self.add_param('jetalgo', 1.0) - self.add_param('jetradius', 0.7) + self.add_param('jetradius', 0.7) self.add_param('ptj', 10.0 , cut=True) - self.add_param('etaj', -1.0, cut=True) - self.add_param('gamma_is_j', True) + self.add_param('etaj', -1.0, cut=True) + self.add_param('gamma_is_j', True) self.add_param('ptl', 0.0, cut=True) - self.add_param('etal', -1.0, cut=True) + self.add_param('etal', -1.0, cut=True) self.add_param('drll', 0.0, cut=True) - self.add_param('drll_sf', 0.0, cut=True) + self.add_param('drll_sf', 0.0, cut=True) self.add_param('mll', 0.0, cut=True) - self.add_param('mll_sf', 30.0, cut=True) - self.add_param('rphreco', 0.1) - self.add_param('etaphreco', -1.0) - self.add_param('lepphreco', True) - self.add_param('quarkphreco', True) + self.add_param('mll_sf', 30.0, cut=True) + self.add_param('rphreco', 0.1) + self.add_param('etaphreco', -1.0) + self.add_param('lepphreco', True) + self.add_param('quarkphreco', True) self.add_param('ptgmin', 20.0, cut=True) - self.add_param('etagamma', -1.0) + self.add_param('etagamma', -1.0) self.add_param('r0gamma', 0.4) - self.add_param('xn', 1.0) + self.add_param('xn', 1.0) self.add_param('epsgamma', 1.0) - self.add_param('isoem', True) + self.add_param('isoem', True) self.add_param('maxjetflavor', 4, hidden=True) - self.add_param('pineappl', False) + self.add_param('pineappl', False) self.add_param('lhe_version', 3, hidden=True, include=False) - + # customization self.add_param("custom_fcts",[],typelist="str", include=False, comment="list of files containing function that overwritte dummy function of the code (like adding cuts/...)") #internal variable related to FO_analyse_card self.add_param('FO_LHE_weight_ratio',1e-3, hidden=True, system=True) - self.add_param('FO_LHE_postprocessing',['grouping','random'], + self.add_param('FO_LHE_postprocessing',['grouping','random'], hidden=True, system=True, include=False) - + # parameter 
allowing to define simple cut via the pdg self.add_param('pt_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('pt_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True) - + #hidden parameter that are transfer to the fortran code self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min self.add_param('ptmax4pdg',[-1.], hidden=True, system=True) self.add_param('mxxmin4pdg',[0.], hidden=True, system=True) self.add_param('mxxpart_antipart', [False], hidden=True, system=True) - + def check_validity(self): """check the validity of the various input""" - + super(RunCardNLO, self).check_validity() # for lepton-lepton collisions, ignore 'pdlabel' and 'lhaid' @@ -5588,12 +5588,12 @@ def check_validity(self): # for dressed lepton collisions, check that the lhaid is a valid one if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]) + ['emela']: raise InvalidRunCard('pdlabel %s not allowed for dressed-lepton collisions' % self['pdlabel']) - + elif self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' self['reweight_pdf']=[False] logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') - + if self['lpp1'] == 0 == self['lpp2']: if self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' @@ -5601,8 +5601,8 @@ def check_validity(self): logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') # For FxFx merging, make sure that the following parameters are set correctly: - if self['ickkw'] == 3: - # 1. Renormalization and factorization (and ellis-sexton scales) are not fixed + if self['ickkw'] == 3: + # 1. 
Renormalization and factorization (and ellis-sexton scales) are not fixed scales=['fixed_ren_scale','fixed_fac_scale','fixed_QES_scale'] for scale in scales: if self[scale]: @@ -5615,7 +5615,7 @@ def check_validity(self): self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency in FxFx merging, dynamical_scale_choice has been set to -1 (default)''' ,'$MG:BOLD') - + # 2. Use kT algorithm for jets with pseudo-code size R=1.0 jetparams=['jetradius','jetalgo'] for jetparam in jetparams: @@ -5628,8 +5628,8 @@ def check_validity(self): self["dynamical_scale_choice"] = [-1] self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency with the jet veto, the scale which will be used is ptj. dynamical_scale_choice will be set at -1.''' - ,'$MG:BOLD') - + ,'$MG:BOLD') + # For interface to PINEAPPL, need to use LHAPDF and reweighting to get scale uncertainties if self['pineappl'] and self['pdlabel'].lower() != 'lhapdf': raise InvalidRunCard('PineAPPL generation only possible with the use of LHAPDF') @@ -5661,7 +5661,7 @@ def check_validity(self): if (self['rw_fscale_down'] != -1.0 and ['rw_fscale_down'] not in self['rw_fscale']) or\ (self['rw_fscale_up'] != -1.0 and ['rw_fscale_up'] not in self['rw_fscale']): self['rw_fscale']=[1.0,self['rw_fscale_up'],self['rw_fscale_down']] - + # PDF reweighting check if any(self['reweight_pdf']): # check that we use lhapdf if reweighting is ON @@ -5672,7 +5672,7 @@ def check_validity(self): if self['pdlabel'] != "lhapdf": self['reweight_pdf']=[self['reweight_pdf'][0]] self['lhaid']=[self['lhaid'][0]] - + # make sure set have reweight_scale and dyn_scale_choice of length 1 when fixed scales: if self['fixed_ren_scale'] and self['fixed_fac_scale']: self['reweight_scale']=[self['reweight_scale'][0]] @@ -5685,7 +5685,7 @@ def check_validity(self): self['reweight_pdf']=self['reweight_pdf']*len(self['lhaid']) logger.warning("Setting 'reweight_pdf' for all 'lhaid' to %s" % self['reweight_pdf'][0]) 
if len(self['reweight_scale']) == 1 and len(self['dynamical_scale_choice']) != 1: - self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) + self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) logger.warning("Setting 'reweight_scale' for all 'dynamical_scale_choice' to %s" % self['reweight_pdf'][0]) # Check that there are no identical elements in lhaid or dynamical_scale_choice @@ -5693,7 +5693,7 @@ def check_validity(self): raise InvalidRunCard("'lhaid' has two or more identical entries. They have to be all different for the code to work correctly.") if len(self['dynamical_scale_choice']) != len(set(self['dynamical_scale_choice'])): raise InvalidRunCard("'dynamical_scale_choice' has two or more identical entries. They have to be all different for the code to work correctly.") - + # Check that lenght of lists are consistent if len(self['reweight_pdf']) != len(self['lhaid']): raise InvalidRunCard("'reweight_pdf' and 'lhaid' lists should have the same length") @@ -5730,7 +5730,7 @@ def check_validity(self): if len(self['folding']) != 3: raise InvalidRunCard("'folding' should contain exactly three integers") for ifold in self['folding']: - if ifold not in [1,2,4,8]: + if ifold not in [1,2,4,8]: raise InvalidRunCard("The three 'folding' parameters should be equal to 1, 2, 4, or 8.") # Check MC@NLO-Delta if self['mcatnlo_delta'] and not self['parton_shower'].lower() == 'pythia8': @@ -5746,11 +5746,11 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938 GeV") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + raise InvalidRunCard("Energy for beam %i lower than proton mass. 
Please fix this") def update_system_parameter_for_include(self): - + # set the pdg_for_cut fortran parameter pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys())+ list(self['mxx_only_part_antipart'].keys())) @@ -5758,12 +5758,12 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different PDGs are allowed for PDG specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative PDG codes') - - + + if any(pdg in pdg_to_cut for pdg in [21,22,11,13,15]+ list(range(self['maxjetflavor']+1))): # Note that this will double check in the fortran code raise Exception("Can not use PDG related cuts for massless SM particles/leptons") @@ -5790,7 +5790,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -5800,12 +5800,12 @@ def update_system_parameter_for_include(self): self['mxxpart_antipart'] = [False] def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', 'run_card.dat') python_template = True else: @@ -5818,7 +5818,7 @@ def 
write(self, output_file, template=None, python_template=False, **opt): def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules - e+ e- beam -> lpp:0 ebeam:500 + e+ e- beam -> lpp:0 ebeam:500 p p beam -> set maxjetflavor automatically process with tagged photons -> gamma_is_j = false process without QED splittings -> gamma_is_j = false, recombination = false @@ -5844,19 +5844,19 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam2'] = 500 else: self['lpp1'] = 0 - self['lpp2'] = 0 - + self['lpp2'] = 0 + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() # check for tagged photons tagged_particles = set() - + # If model has running functionality add the additional parameter model = proc_def[0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Check if need matching min_particle = 99 @@ -5885,7 +5885,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: idsmin = [l['id'] for l in procmin['legs']] break - + for procmax in proc_def: if len(procmax['legs']) != max_particle: continue @@ -5901,9 +5901,9 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - - if matching: + break + + if matching: self['ickkw'] = 3 self['fixed_ren_scale'] = False self["fixed_fac_scale"] = False @@ -5911,17 +5911,17 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self["jetalgo"] = 1 self["jetradius"] = 1 self["parton_shower"] = "PYTHIA8" - + # Read file input/default_run_card_nlo.dat # This has to be LAST !! if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + class MadLoopParam(ConfigFile): """ a class for storing/dealing with the file MadLoopParam.dat contains a parser to read it, facilities to write a new file,... 
""" - + _ID_reduction_tool_map = {1:'CutTools', 2:'PJFry++', 3:'IREGI', @@ -5929,10 +5929,10 @@ class MadLoopParam(ConfigFile): 5:'Samurai', 6:'Ninja', 7:'COLLIER'} - + def default_setup(self): """initialize the directory to the default value""" - + self.add_param("MLReductionLib", "6|7|1") self.add_param("IREGIMODE", 2) self.add_param("IREGIRECY", True) @@ -5954,7 +5954,7 @@ def default_setup(self): self.add_param("HelicityFilterLevel", 2) self.add_param("LoopInitStartOver", False) self.add_param("HelInitStartOver", False) - self.add_param("UseQPIntegrandForNinja", True) + self.add_param("UseQPIntegrandForNinja", True) self.add_param("UseQPIntegrandForCutTools", True) self.add_param("COLLIERMode", 1) self.add_param("COLLIERComputeUVpoles", True) @@ -5966,9 +5966,9 @@ def default_setup(self): self.add_param("COLLIERUseInternalStabilityTest",True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -5976,7 +5976,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % input) - + previous_line= '' for line in finput: if previous_line.startswith('#'): @@ -5985,20 +5985,20 @@ def read(self, finput): if len(value) and value[0] not in ['#', '!']: self.__setitem__(name, value, change_userdefine=True) previous_line = line - - + + def write(self, outputpath, template=None,commentdefault=False): - + if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', + template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', 'Cards', 'MadLoopParams.dat') else: template = pjoin(MEDIR, 'Cards', 'MadLoopParams_default.dat') fsock = open(template, 'r') template = fsock.readlines() fsock.close() - + if isinstance(outputpath, str): output = open(outputpath, 'w') else: @@ -6019,7 
+6019,7 @@ def f77format(value): return value else: raise Exception("Can not format input %s" % type(value)) - + name = '' done = set() for line in template: @@ -6034,12 +6034,12 @@ def f77format(value): elif line.startswith('#'): name = line[1:].split()[0] output.write(line) - - - - - -class eMELA_info(ConfigFile): + + + + + +class eMELA_info(ConfigFile): """ a class for eMELA (LHAPDF-like) info files """ path = '' @@ -6053,7 +6053,7 @@ def __init__(self, finput, me_dir): def read(self, finput): - if isinstance(finput, file): + if isinstance(finput, file): lines = finput.open().read().split('\n') self.path = finput.name else: @@ -6066,7 +6066,7 @@ def read(self, finput): k, v = l.split(':', 1) # ignore further occurrences of : try: self[k.strip()] = eval(v) - except (NameError, SyntaxError): + except (NameError, SyntaxError): self[k.strip()] = v def default_setup(self): @@ -6091,7 +6091,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): +"powers of alpha should be reweighted a posteriori") - logger.info('Updating variables according to %s' % self.path) + logger.info('Updating variables according to %s' % self.path) # Flavours in the running of alpha nd, nu, nl = self['eMELA_ActiveFlavoursAlpha'] self.log_and_update(banner, 'run_card', 'ndnq_run', nd) @@ -6130,8 +6130,8 @@ def update_epdf_emela_variables(self, banner, uvscheme): logger.warning('Cannot treat the following renormalisation schemes for ME and PDFs: %d, %d' \ % (uvscheme, uvscheme_pdf)) - # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref - # also check that the com energy is equal to qref, otherwise print a + # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref + # also check that the com energy is equal to qref, otherwise print a # warning if uvscheme_pdf == 1: qref = self['eMELA_AlphaQref'] @@ -6144,23 +6144,23 @@ def update_epdf_emela_variables(self, banner, uvscheme): # LL / NLL PDF (0/1) pdforder = self['eMELA_PerturbativeOrder'] - # pdfscheme = 
0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) + # pdfscheme = 0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) # 4->mixed (leptonic); 5-> nobeta (leptonic); 6->delta (leptonic) # if LL, use nobeta scheme unless LEGACYLLPDF > 0 if pdforder == 0: if 'eMELA_LEGACYLLPDF' not in self.keys() or self['eMELA_LEGACYLLPDF'] in [-1, 0]: self.log_and_update(banner, 'run_card', 'pdfscheme', 5) - elif self['eMELA_LEGACYLLPDF'] == 1: + elif self['eMELA_LEGACYLLPDF'] == 1: # mixed self.log_and_update(banner, 'run_card', 'pdfscheme', 4) - elif self['eMELA_LEGACYLLPDF'] == 2: + elif self['eMELA_LEGACYLLPDF'] == 2: # eta self.log_and_update(banner, 'run_card', 'pdfscheme', 2) - elif self['eMELA_LEGACYLLPDF'] == 3: + elif self['eMELA_LEGACYLLPDF'] == 3: # beta self.log_and_update(banner, 'run_card', 'pdfscheme', 3) elif pdforder == 1: - # for NLL, use eMELA_FactorisationSchemeInt = 0/1 + # for NLL, use eMELA_FactorisationSchemeInt = 0/1 # for delta/MSbar if self['eMELA_FactorisationSchemeInt'] == 0: # MSbar @@ -6177,7 +6177,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): - + def log_and_update(self, banner, card, par, v): """update the card parameter par to value v diff --git a/epochX/cudacpp/gg_ttg.mad/bin/internal/gen_ximprove.py b/epochX/cudacpp/gg_ttg.mad/bin/internal/gen_ximprove.py index 5fd170d18d..cc842aa50f 100755 --- a/epochX/cudacpp/gg_ttg.mad/bin/internal/gen_ximprove.py +++ b/epochX/cudacpp/gg_ttg.mad/bin/internal/gen_ximprove.py @@ -2,18 +2,18 @@ # # Copyright (c) 2014 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. 
# -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch # ################################################################################ """ A python file to replace the fortran script gen_ximprove. - This script analyses the result of the survey/ previous refine and + This script analyses the result of the survey/ previous refine and creates the jobs for the following script. """ from __future__ import division @@ -66,77 +66,77 @@ class gensym(object): """a class to call the fortran gensym executable and handle it's output in order to create the various job that are needed for the survey""" - + #convenient shortcut for the formatting of variable @ staticmethod def format_variable(*args): return bannermod.ConfigFile.format_variable(*args) - + combining_job = 2 # number of channel by ajob - splitted_grid = False + splitted_grid = False min_iterations = 3 mode= "survey" - + def __init__(self, cmd, opt=None): - + try: super(gensym, self).__init__(cmd, opt) except TypeError: pass - - # Run statistics, a dictionary of RunStatistics(), with + + # Run statistics, a dictionary of RunStatistics(), with self.run_statistics = {} - + self.cmd = cmd self.run_card = cmd.run_card self.me_dir = cmd.me_dir - - + + # dictionary to keep track of the precision when combining iteration self.cross = collections.defaultdict(int) self.abscross = collections.defaultdict(int) self.sigma = collections.defaultdict(int) self.chi2 = collections.defaultdict(int) - + self.splitted_grid = False if self.cmd.proc_characteristics['loop_induced']: nexternal = self.cmd.proc_characteristics['nexternal'] self.splitted_grid = max(2, (nexternal-2)**2) if hasattr(self.cmd, "opts") and self.cmd.opts['accuracy'] == 0.1: self.cmd.opts['accuracy'] = 0.02 - + if isinstance(cmd.cluster, cluster.MultiCore) and 
self.splitted_grid > 1: self.splitted_grid = int(cmd.cluster.nb_core**0.5) if self.splitted_grid == 1 and cmd.cluster.nb_core >1: self.splitted_grid = 2 - + #if the user defines it in the run_card: if self.run_card['survey_splitting'] != -1: self.splitted_grid = self.run_card['survey_splitting'] if self.run_card['survey_nchannel_per_job'] != 1 and 'survey_nchannel_per_job' in self.run_card.user_set: - self.combining_job = self.run_card['survey_nchannel_per_job'] + self.combining_job = self.run_card['survey_nchannel_per_job'] elif self.run_card['hard_survey'] > 1: self.combining_job = 1 - - + + self.splitted_Pdir = {} self.splitted_for_dir = lambda x,y: self.splitted_grid self.combining_job_for_Pdir = lambda x: self.combining_job self.lastoffset = {} - + done_warning_zero_coupling = False def get_helicity(self, to_submit=True, clean=True): """launch a single call to madevent to get the list of non zero helicity""" - - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc P_zero_result = [] nb_tot_proc = len(subproc) - job_list = {} - - + job_list = {} + + for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.' 
% \ (nb_proc+1,nb_tot_proc), level=None) @@ -154,7 +154,7 @@ def get_helicity(self, to_submit=True, clean=True): p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts - + (stdout, _) = p.communicate(''.encode()) stdout = stdout.decode('ascii',errors='ignore') if stdout: @@ -166,11 +166,11 @@ def get_helicity(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir, 'error')): os.remove(pjoin(self.me_dir, 'error')) continue # bypass bad process - + self.cmd.compile(['madevent_forhel'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'madevent_forhel')): - raise Exception('Error make madevent_forhel not successful') - + raise Exception('Error make madevent_forhel not successful') + if not os.path.exists(pjoin(Pdir, 'Hel')): os.mkdir(pjoin(Pdir, 'Hel')) ff = open(pjoin(Pdir, 'Hel', 'input_app.txt'),'w') @@ -180,15 +180,15 @@ def get_helicity(self, to_submit=True, clean=True): try: os.remove(pjoin(Pdir, 'Hel','results.dat')) except Exception: - pass + pass # Launch gensym - p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, + p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=pjoin(Pdir,'Hel'), shell=True) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(" ".encode()) stdout = stdout.decode('ascii',errors='ignore') if os.path.exists(pjoin(self.me_dir, 'error')): - raise Exception(pjoin(self.me_dir,'error')) + raise Exception(pjoin(self.me_dir,'error')) # note a continue is not enough here, we have in top to link # the matrixX_optim.f to matrixX_orig.f to let the code to work # after this error. 
@@ -203,7 +203,7 @@ def get_helicity(self, to_submit=True, clean=True): zero_gc = list() all_zampperhel = set() all_bad_amps_perhel = set() - + for line in stdout.splitlines(): if "=" not in line and ":" not in line: continue @@ -229,22 +229,22 @@ def get_helicity(self, to_submit=True, clean=True): "%s\n" % (' '.join(zero_gc)) +\ "This will slow down the computation. Please consider using restricted model:\n" +\ "https://answers.launchpad.net/mg5amcnlo/+faq/2312") - - + + all_good_hels = collections.defaultdict(list) for me_index, hel in all_hel: - all_good_hels[me_index].append(int(hel)) - + all_good_hels[me_index].append(int(hel)) + #print(all_hel) if self.run_card['hel_zeroamp']: all_bad_amps = collections.defaultdict(list) for me_index, amp in all_zamp: all_bad_amps[me_index].append(int(amp)) - + all_bad_amps_perhel = collections.defaultdict(list) for me_index, hel, amp in all_zampperhel: - all_bad_amps_perhel[me_index].append((int(hel),int(amp))) - + all_bad_amps_perhel[me_index].append((int(hel),int(amp))) + elif all_zamp: nb_zero = sum(int(a[1]) for a in all_zamp) if zero_gc: @@ -254,7 +254,7 @@ def get_helicity(self, to_submit=True, clean=True): else: logger.warning("The optimization detected that you have %i zero matrix-element for this SubProcess: %s.\n" % nb_zero +\ "This part can optimize if you set the flag hel_zeroamp to True in the run_card.") - + #check if we need to do something and write associate information" data = [all_hel, all_zamp, all_bad_amps_perhel] if not self.run_card['hel_zeroamp']: @@ -266,14 +266,14 @@ def get_helicity(self, to_submit=True, clean=True): old_data = open(pjoin(Pdir,'Hel','selection')).read() if old_data == data: continue - - + + with open(pjoin(Pdir,'Hel','selection'),'w') as fsock: - fsock.write(data) - - + fsock.write(data) + + for matrix_file in misc.glob('matrix*orig.f', Pdir): - + split_file = matrix_file.split('/') me_index = split_file[-1][len('matrix'):-len('_orig.f')] @@ -289,11 +289,11 @@ def 
get_helicity(self, to_submit=True, clean=True): #good_hels = sorted(list(good_hels)) good_hels = [str(x) for x in sorted(all_good_hels[me_index])] if self.run_card['hel_zeroamp']: - + bad_amps = [str(x) for x in sorted(all_bad_amps[me_index])] bad_amps_perhel = [x for x in sorted(all_bad_amps_perhel[me_index])] else: - bad_amps = [] + bad_amps = [] bad_amps_perhel = [] if __debug__: mtext = open(matrix_file).read() @@ -310,7 +310,7 @@ def get_helicity(self, to_submit=True, clean=True): recycler.set_input(matrix_file) recycler.set_output(out_file) - recycler.set_template(templ_file) + recycler.set_template(templ_file) recycler.generate_output_file() del recycler @@ -321,19 +321,19 @@ def get_helicity(self, to_submit=True, clean=True): return {}, P_zero_result - + def launch(self, to_submit=True, clean=True): """ """ if not hasattr(self, 'subproc'): - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc - + P_zero_result = [] # check the number of times where they are no phase-space - + nb_tot_proc = len(subproc) - job_list = {} + job_list = {} for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.
(previous processes already running)' % \ (nb_proc+1,nb_tot_proc), level=None) @@ -341,7 +341,7 @@ def launch(self, to_submit=True, clean=True): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) logger.info(' %s ' % subdir) - + # clean previous run if clean: for match in misc.glob('*ajob*', Pdir): @@ -349,17 +349,17 @@ def launch(self, to_submit=True, clean=True): os.remove(match) for match in misc.glob('G*', Pdir): if os.path.exists(pjoin(match,'results.dat')): - os.remove(pjoin(match, 'results.dat')) + os.remove(pjoin(match, 'results.dat')) if os.path.exists(pjoin(match, 'ftn25')): - os.remove(pjoin(match, 'ftn25')) - + os.remove(pjoin(match, 'ftn25')) + #compile gensym self.cmd.compile(['gensym'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'gensym')): - raise Exception('Error make gensym not successful') - + raise Exception('Error make gensym not successful') + # Launch gensym - p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, + p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(''.encode()) @@ -367,8 +367,8 @@ def launch(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir,'error')): files.mv(pjoin(self.me_dir,'error'), pjoin(Pdir,'ajob.no_ps.log')) P_zero_result.append(subdir) - continue - + continue + jobs = stdout.split() job_list[Pdir] = jobs try: @@ -386,8 +386,8 @@ def launch(self, to_submit=True, clean=True): continue else: if done: - raise Exception('Parsing error in gensym: %s' % stdout) - job_list[Pdir] = l.split() + raise Exception('Parsing error in gensym: %s' % stdout) + job_list[Pdir] = l.split() done = True if not done: raise Exception('Parsing error in gensym: %s' % stdout) @@ -408,16 +408,16 @@ def launch(self, to_submit=True, clean=True): if to_submit: self.submit_to_cluster(job_list) job_list = {} - + return job_list, P_zero_result - + def resubmit(self, 
min_precision=1.0, resubmit_zero=False): """collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - + job_list, P_zero_result = self.launch(to_submit=False, clean=False) - + for P , jobs in dict(job_list).items(): misc.sprint(jobs) to_resub = [] @@ -434,7 +434,7 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): elif max(one_result.xerru, one_result.xerrc)/one_result.xsec > min_precision: to_resub.append(job) else: - to_resub.append(job) + to_resub.append(job) if to_resub: for G in to_resub: try: @@ -442,19 +442,19 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): except Exception as error: misc.sprint(error) pass - misc.sprint(to_resub) + misc.sprint(to_resub) self.submit_to_cluster({P: to_resub}) - - - - - - - - - - - + + + + + + + + + + + def submit_to_cluster(self, job_list): """ """ @@ -467,7 +467,7 @@ def submit_to_cluster(self, job_list): nexternal = self.cmd.proc_characteristics['nexternal'] current = open(pjoin(path, "nexternal.inc")).read() ext = re.search(r"PARAMETER \(NEXTERNAL=(\d+)\)", current).group(1) - + if self.run_card['job_strategy'] == 2: self.splitted_grid = 2 if nexternal == int(ext): @@ -498,18 +498,18 @@ def submit_to_cluster(self, job_list): return self.submit_to_cluster_no_splitting(job_list) else: return self.submit_to_cluster_splitted(job_list) - - + + def submit_to_cluster_no_splitting(self, job_list): """submit the survey without the parralelization. 
This is the old mode which is still usefull in single core""" - - # write the template file for the parameter file + + # write the template file for the parameter file self.write_parameter(parralelization=False, Pdirs=list(job_list.keys())) - - + + # launch the job with the appropriate grouping - for Pdir, jobs in job_list.items(): + for Pdir, jobs in job_list.items(): jobs = list(jobs) i=0 while jobs: @@ -518,16 +518,16 @@ def submit_to_cluster_no_splitting(self, job_list): for _ in range(self.combining_job_for_Pdir(Pdir)): if jobs: to_submit.append(jobs.pop(0)) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=to_submit, cwd=pjoin(self.me_dir,'SubProcesses' , Pdir)) - + def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): """prepare the input_file for submitting the channel""" - + if 'SubProcesses' not in Pdir: Pdir = pjoin(self.me_dir, 'SubProcesses', Pdir) @@ -535,8 +535,8 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): self.splitted_Pdir[(Pdir, G)] = int(nb_job) - # 1. write the new input_app.txt - run_card = self.cmd.run_card + # 1. write the new input_app.txt + run_card = self.cmd.run_card options = {'event' : submit_ps, 'maxiter': 1, 'miniter': 1, @@ -545,29 +545,29 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): else run_card['nhel'], 'gridmode': -2, 'channel' : G - } - + } + Gdir = pjoin(Pdir, 'G%s' % G) - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + # 2. check that ftn25 exists. - assert os.path.exists(pjoin(Gdir, "ftn25")) - - + assert os.path.exists(pjoin(Gdir, "ftn25")) + + # 3. 
Submit the new jobs #call back function - packet = cluster.Packet((Pdir, G, step+1), + packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, (Pdir, G, step+1)) - + if step ==0: - self.lastoffset[(Pdir, G)] = 0 - - # resubmit the new jobs + self.lastoffset[(Pdir, G)] = 0 + + # resubmit the new jobs for i in range(int(nb_job)): name = "G%s_%s" % (G,i+1) self.lastoffset[(Pdir, G)] += 1 - offset = self.lastoffset[(Pdir, G)] + offset = self.lastoffset[(Pdir, G)] self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'refine_splitted.sh'), argument=[name, 'G%s'%G, offset], cwd= Pdir, @@ -575,9 +575,9 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): def submit_to_cluster_splitted(self, job_list): - """ submit the version of the survey with splitted grid creation - """ - + """ submit the version of the survey with splitted grid creation + """ + #if self.splitted_grid <= 1: # return self.submit_to_cluster_no_splitting(job_list) @@ -592,7 +592,7 @@ def submit_to_cluster_splitted(self, job_list): for job in jobs: packet = cluster.Packet((Pdir, job, 1), self.combine_iteration, (Pdir, job, 1)) - for i in range(self.splitted_for_dir(Pdir, job)): + for i in range(self.splitted_for_dir(Pdir, job)): self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[i+1, job], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), @@ -601,15 +601,15 @@ def submit_to_cluster_splitted(self, job_list): def combine_iteration(self, Pdir, G, step): grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - - # Compute the number of events used for this run. + + # Compute the number of events used for this run. nb_events = grid_calculator.target_evt Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) - + # 4. 
make the submission of the next iteration # Three cases - less than 3 iteration -> continue # - more than 3 and less than 5 -> check error @@ -627,15 +627,15 @@ def combine_iteration(self, Pdir, G, step): need_submit = False else: need_submit = True - + elif step >= self.cmd.opts['iterations']: need_submit = False elif self.cmd.opts['accuracy'] < 0: #check for luminosity raise Exception("Not Implemented") elif self.abscross[(Pdir,G)] == 0: - need_submit = False - else: + need_submit = False + else: across = self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) tot_across = self.get_current_axsec() if across == 0: @@ -646,20 +646,20 @@ def combine_iteration(self, Pdir, G, step): need_submit = True else: need_submit = False - - + + if cross: grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_events,mode=self.mode, conservative_factor=5.0) - - xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) - if float(cross)!=0.0 and float(error)!=0.0 else 8) + + xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) + if float(cross)!=0.0 and float(error)!=0.0 else 8) if need_submit: message = "%%s/G%%s is at %%%s +- %%.3g pb. 
Now submitting iteration #%s."%(xsec_format, step+1) logger.info(message%\ - (os.path.basename(Pdir), G, float(cross), + (os.path.basename(Pdir), G, float(cross), float(error)*float(cross))) self.resubmit_survey(Pdir,G, Gdirs, step) elif cross: @@ -670,26 +670,26 @@ def combine_iteration(self, Pdir, G, step): newGpath = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(newGpath): os.mkdir(newGpath) - + # copy the new grid: - files.cp(pjoin(Gdirs[0], 'ftn25'), + files.cp(pjoin(Gdirs[0], 'ftn25'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, 'ftn26')) - + # copy the events fsock = open(pjoin(newGpath, 'events.lhe'), 'w') for Gdir in Gdirs: - fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) - + fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) + # copy one log - files.cp(pjoin(Gdirs[0], 'log.txt'), + files.cp(pjoin(Gdirs[0], 'log.txt'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G)) - - + + # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) else: logger.info("Survey finished for %s/G%s [0 cross]", os.path.basename(Pdir),G) - + Gdir = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(Gdir): os.mkdir(Gdir) @@ -697,21 +697,21 @@ def combine_iteration(self, Pdir, G, step): files.cp(pjoin(Gdirs[0], 'log.txt'), Gdir) # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) - + return 0 def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): """ exclude_sub_jobs is to remove some of the subjobs if a numerical issue is detected in one of them. Warning is issue when this occurs. """ - + # 1. 
create an object to combine the grid information and fill it grid_calculator = combine_grid.grid_information(self.run_card['nhel']) - + for i in range(self.splitted_for_dir(Pdir, G)): if i in exclude_sub_jobs: continue - path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) fsock = misc.mult_try_open(pjoin(path, 'results.dat')) one_result = grid_calculator.add_results_information(fsock) fsock.close() @@ -723,9 +723,9 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): fsock.close() os.remove(pjoin(path, 'results.dat')) #os.remove(pjoin(path, 'grid_information')) - - - + + + #2. combine the information about the total crossection / error # start by keep the interation in memory cross, across, sigma = grid_calculator.get_cross_section() @@ -736,12 +736,12 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): if maxwgt: nunwgt = grid_calculator.get_nunwgt(maxwgt) # Make sure not to apply the security below during the first step of the - # survey. Also, disregard channels with a contribution relative to the + # survey. Also, disregard channels with a contribution relative to the # total cross-section smaller than 1e-8 since in this case it is unlikely # that this channel will need more than 1 event anyway. 
apply_instability_security = False rel_contrib = 0.0 - if (self.__class__ != gensym or step > 1): + if (self.__class__ != gensym or step > 1): Pdir_across = 0.0 Gdir_across = 0.0 for (mPdir,mG) in self.abscross.keys(): @@ -750,7 +750,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): (self.sigma[(mPdir,mG)]+1e-99)) if mG == G: Gdir_across += (self.abscross[(mPdir,mG)]/ - (self.sigma[(mPdir,mG)]+1e-99)) + (self.sigma[(mPdir,mG)]+1e-99)) rel_contrib = abs(Gdir_across/(Pdir_across+1e-99)) if rel_contrib > (1.0e-8) and \ nunwgt < 2 and len(grid_calculator.results) > 1: @@ -770,14 +770,14 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): exclude_sub_jobs = list(exclude_sub_jobs) exclude_sub_jobs.append(th_maxwgt[-1][1]) grid_calculator.results.run_statistics['skipped_subchannel'] += 1 - + # Add some monitoring of the problematic events - gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) + gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) if os.path.isfile(pjoin(gPath,'events.lhe')): lhe_file = lhe_parser.EventFile(pjoin(gPath,'events.lhe')) discardedPath = pjoin(Pdir,'DiscardedUnstableEvents') if not os.path.exists(discardedPath): - os.mkdir(discardedPath) + os.mkdir(discardedPath) if os.path.isdir(discardedPath): # Keep only the event with a maximum weight, as it surely # is the problematic one. 
@@ -790,10 +790,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): lhe_file.close() evtRecord.write(pjoin(gPath,'events.lhe').read()) evtRecord.close() - + return self.combine_grid(Pdir, G, step, exclude_sub_jobs) - + if across !=0: if sigma != 0: self.cross[(Pdir,G)] += cross**3/sigma**2 @@ -814,10 +814,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): self.chi2[(Pdir,G)] = 0 cross = self.cross[(Pdir,G)] error = 0 - + else: error = 0 - + grid_calculator.results.compute_values(update_statistics=True) if (str(os.path.basename(Pdir)), G) in self.run_statistics: self.run_statistics[(str(os.path.basename(Pdir)), G)]\ @@ -825,8 +825,8 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): else: self.run_statistics[(str(os.path.basename(Pdir)), G)] = \ grid_calculator.results.run_statistics - - self.warnings_from_statistics(G, grid_calculator.results.run_statistics) + + self.warnings_from_statistics(G, grid_calculator.results.run_statistics) stats_msg = grid_calculator.results.run_statistics.nice_output( '/'.join([os.path.basename(Pdir),'G%s'%G])) @@ -836,7 +836,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): # Clean up grid_information to avoid border effects in case of a crash for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) - try: + try: os.remove(pjoin(path, 'grid_information')) except OSError as oneerror: if oneerror.errno != 2: @@ -850,7 +850,7 @@ def warnings_from_statistics(self,G,stats): return EPS_fraction = float(stats['exceptional_points'])/stats['n_madloop_calls'] - + msg = "Channel %s has encountered a fraction of %.3g\n"+ \ "of numerically unstable loop matrix element computations\n"+\ "(which could not be rescued using quadruple precision).\n"+\ @@ -861,16 +861,16 @@ def warnings_from_statistics(self,G,stats): elif EPS_fraction > 0.01: logger.critical((msg%(G,EPS_fraction)).replace('might', 'can')) raise Exception((msg%(G,EPS_fraction)).replace('might', 'can')) 
- + def get_current_axsec(self): - + across = 0 for (Pdir,G) in self.abscross: across += self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) return across - + def write_results(self, grid_calculator, cross, error, Pdir, G, step): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -888,7 +888,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step): maxwgt = grid_calculator.get_max_wgt() nunwgt = grid_calculator.get_nunwgt() luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -897,20 +897,20 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s %s 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross), fstr(maxwgt)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - + def resubmit_survey(self, Pdir, G, Gdirs, step): """submit the next iteration of the survey""" # 1. write the new input_app.txt to double the number of points - run_card = self.cmd.run_card + run_card = self.cmd.run_card options = {'event' : 2**(step) * self.cmd.opts['points'] / self.splitted_grid, 'maxiter': 1, 'miniter': 1, @@ -919,18 +919,18 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): else run_card['nhel'], 'gridmode': -2, 'channel' : '' - } - + } + if int(options['helicity']) == 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + for Gdir in Gdirs: - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + + #2. 
resubmit the new jobs packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, \ - (Pdir, G, step+1)) + (Pdir, G, step+1)) nb_step = len(Gdirs) * (step+1) for i,subdir in enumerate(Gdirs): subdir = subdir.rsplit('_',1)[1] @@ -938,34 +938,34 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): offset = nb_step+i+1 offset=str(offset) tag = "%s.%s" % (subdir, offset) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[tag, G], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), packet_member=packet) - + def write_parameter_file(self, path, options): """ """ - + template =""" %(event)s %(maxiter)s %(miniter)s !Number of events and max and min iterations %(accuracy)s !Accuracy %(gridmode)s !Grid Adjustment 0=none, 2=adjust 1 !Suppress Amplitude 1=yes %(helicity)s !Helicity Sum/event 0=exact - %(channel)s """ + %(channel)s """ options['event'] = int(options['event']) open(path, 'w').write(template % options) - - + + def write_parameter(self, parralelization, Pdirs=None): """Write the parameter of the survey run""" run_card = self.cmd.run_card - + options = {'event' : self.cmd.opts['points'], 'maxiter': self.cmd.opts['iterations'], 'miniter': self.min_iterations, @@ -975,36 +975,36 @@ def write_parameter(self, parralelization, Pdirs=None): 'gridmode': 2, 'channel': '' } - + if int(options['helicity'])== 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + if parralelization: options['gridmode'] = -2 options['maxiter'] = 1 #this is automatic in dsample anyway options['miniter'] = 1 #this is automatic in dsample anyway options['event'] /= self.splitted_grid - + if not Pdirs: Pdirs = self.subproc - + for Pdir in Pdirs: - path =pjoin(Pdir, 'input_app.txt') + path =pjoin(Pdir, 'input_app.txt') self.write_parameter_file(path, options) - - -class gen_ximprove(object): - - + + +class gen_ximprove(object): + + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the 
number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job @@ -1022,7 +1022,7 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_gridpack) elif cls.force_class == 'loop_induced': return super(gen_ximprove, cls).__new__(gen_ximprove_share) - + if cmd.proc_characteristics['loop_induced']: return super(gen_ximprove, cls).__new__(gen_ximprove_share) elif gen_ximprove.format_variable(cmd.run_card['gridpack'], bool): @@ -1031,31 +1031,31 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_share) else: return super(gen_ximprove, cls).__new__(gen_ximprove_v4) - - + + def __init__(self, cmd, opt=None): - + try: super(gen_ximprove, self).__init__(cmd, opt) except TypeError: pass - + self.run_statistics = {} self.cmd = cmd self.run_card = cmd.run_card run_card = self.run_card self.me_dir = cmd.me_dir - + #extract from the run_card the information that we need. 
self.gridpack = run_card['gridpack'] self.nhel = run_card['nhel'] if "nhel_refine" in run_card: self.nhel = run_card["nhel_refine"] - + if self.run_card['refine_evt_by_job'] != -1: self.max_request_event = run_card['refine_evt_by_job'] - - + + # Default option for the run self.gen_events = True self.parralel = False @@ -1066,7 +1066,7 @@ def __init__(self, cmd, opt=None): # parameter for the gridpack run self.nreq = 2000 self.iseed = 4321 - + # placeholder for information self.results = 0 #updated in launch/update_html @@ -1074,16 +1074,16 @@ def __init__(self, cmd, opt=None): self.configure(opt) elif isinstance(opt, bannermod.GridpackCard): self.configure_gridpack(opt) - + def __call__(self): return self.launch() - + def launch(self): - """running """ - + """running """ + #start the run self.handle_seed() - self.results = sum_html.collect_result(self.cmd, + self.results = sum_html.collect_result(self.cmd, main_dir=pjoin(self.cmd.me_dir,'SubProcesses')) #main_dir is for gridpack readonly mode if self.gen_events: # We run to provide a given number of events @@ -1095,15 +1095,15 @@ def launch(self): def configure(self, opt): """Defines some parameter of the run""" - + for key, value in opt.items(): if key in self.__dict__: targettype = type(getattr(self, key)) setattr(self, key, self.format_variable(value, targettype, key)) else: raise Exception('%s not define' % key) - - + + # special treatment always do outside the loop to avoid side effect if 'err_goal' in opt: if self.err_goal < 1: @@ -1113,24 +1113,24 @@ def configure(self, opt): logger.info("Generating %s unweighted events." 
% self.err_goal) self.gen_events = True self.err_goal = self.err_goal * self.gen_events_security # security - + def handle_seed(self): """not needed but for gridpack --which is not handle here for the moment""" return - - + + def find_job_for_event(self): """return the list of channel that need to be improved""" - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) - - goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 + + goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) - all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) - + all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) + to_refine = [] for C in all_channels: if C.get('axsec') == 0: @@ -1141,61 +1141,61 @@ def find_job_for_event(self): elif C.get('xerr') > max(C.get('axsec'), (1/(100*math.sqrt(self.err_goal)))*all_channels[-1].get('axsec')): to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 
'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru + - class gen_ximprove_v4(gen_ximprove): - + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + super(gen_ximprove_v4, self).__init__(cmd, opt) - + if cmd.opts['accuracy'] < cmd._survey_options['accuracy'][1]: self.increase_precision(cmd._survey_options['accuracy'][1]/cmd.opts['accuracy']) @@ -1203,7 +1203,7 @@ def reset_multijob(self): for path in misc.glob(pjoin('*', '*','multijob.dat'), pjoin(self.me_dir, 'SubProcesses')): open(path,'w').write('0\n') - + def write_multijob(self, Channel, nb_split): """ """ if nb_split <=1: @@ -1211,7 +1211,7 @@ def write_multijob(self, Channel, nb_split): f = open(pjoin(self.me_dir, 'SubProcesses', Channel.get('name'), 'multijob.dat'), 'w') f.write('%i\n' % nb_split) f.close() - + def increase_precision(self, rate=3): #misc.sprint(rate) if rate < 3: @@ -1222,25 +1222,25 @@ def increase_precision(self, rate=3): rate = rate -2 self.max_event_in_iter = int((rate+1) * 10000) self.min_events = int(rate+2) * 2500 - self.gen_events_security = 1 + 0.1 * (rate+2) - + 
self.gen_events_security = 1 + 0.1 * (rate+2) + if int(self.nhel) == 1: self.min_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//3) self.max_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//2) - - + + alphabet = "abcdefghijklmnopqrstuvwxyz" def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() #reset the potential multijob of previous run self.reset_multijob() - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. @@ -1257,17 +1257,17 @@ def get_job_for_event(self): else: for i in range(len(to_refine) //3): new_order.append(to_refine[i]) - new_order.append(to_refine[-2*i-1]) + new_order.append(to_refine[-2*i-1]) new_order.append(to_refine[-2*i-2]) if len(to_refine) % 3 == 1: - new_order.append(to_refine[i+1]) + new_order.append(to_refine[i+1]) elif len(to_refine) % 3 == 2: - new_order.append(to_refine[i+2]) + new_order.append(to_refine[i+2]) #ensure that the reordering is done nicely assert set([id(C) for C in to_refine]) == set([id(C) for C in new_order]) - to_refine = new_order - - + to_refine = new_order + + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target @@ -1279,7 +1279,7 @@ def get_job_for_event(self): nb_split = self.max_splitting nb_split=max(1, nb_split) - + #2. estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1296,21 +1296,21 @@ def get_job_for_event(self): nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. 
Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + # write the multi-job information self.write_multijob(C, nb_split) - + packet = cluster.Packet((C.parent_name, C.name), combine_runs.CombineRuns, (pjoin(self.me_dir, 'SubProcesses', C.parent_name)), {"subproc": C.name, "nb_split":nb_split}) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1321,7 +1321,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': packet, + 'packet': packet, } if nb_split == 1: @@ -1334,19 +1334,19 @@ def get_job_for_event(self): if self.keep_grid_for_refine: new_info['base_directory'] = info['directory'] jobs.append(new_info) - - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def create_ajob(self, template, jobs, write_dir=None): """create the ajob""" - + if not jobs: return if not write_dir: write_dir = pjoin(self.me_dir, 'SubProcesses') - + #filter the job according to their SubProcess directory # no mix submition P2job= collections.defaultdict(list) for j in jobs: @@ -1355,11 +1355,11 @@ def create_ajob(self, template, jobs, write_dir=None): for P in P2job.values(): self.create_ajob(template, P, write_dir) return - - + + #Here we can assume that all job are for the same directory. 
path = pjoin(write_dir, jobs[0]['P_dir']) - + template_text = open(template, 'r').read() # special treatment if needed to combine the script # computes how many submition miss one job @@ -1384,8 +1384,8 @@ def create_ajob(self, template, jobs, write_dir=None): skip1=0 combining_job =1 nb_sub = len(jobs) - - + + nb_use = 0 for i in range(nb_sub): script_number = i+1 @@ -1404,14 +1404,14 @@ def create_ajob(self, template, jobs, write_dir=None): info["base_directory"] = "./" fsock.write(template_text % info) nb_use += nb_job - + fsock.close() return script_number def get_job_for_precision(self): """create the ajob to achieve a give precision on the total cross-section""" - + assert self.err_goal <=1 xtot = abs(self.results.xsec) logger.info("Working on precision: %s %%" %(100*self.err_goal)) @@ -1428,46 +1428,46 @@ def get_job_for_precision(self): rerr *=rerr if not len(to_refine): return - - # change limit since most don't contribute + + # change limit since most don't contribute limit = math.sqrt((self.err_goal * xtot)**2 - rerr/math.sqrt(len(to_refine))) for C in to_refine[:]: cerr = C.mfactor*(C.xerru + len(to_refine)*C.xerrc) if cerr < limit: to_refine.remove(C) - + # all the channel are now selected. create the channel information logger.info('need to improve %s channels' % len(to_refine)) - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. # loop over the channel to refine for C in to_refine: - + #1. 
Determine how many events we need in each iteration yerr = C.mfactor*(C.xerru+len(to_refine)*C.xerrc) nevents = 0.2*C.nevents*(yerr/limit)**2 - + nb_split = int((nevents*(C.nunwgt/C.nevents)/self.max_request_event/ (2**self.min_iter-1))**(2/3)) nb_split = max(nb_split, 1) - # **(2/3) to slow down the increase in number of jobs + # **(2/3) to slow down the increase in number of jobs if nb_split > self.max_splitting: nb_split = self.max_splitting - + if nb_split >1: nevents = nevents / nb_split self.write_multijob(C, nb_split) # forbid too low/too large value nevents = min(self.min_event_in_iter, max(self.max_event_in_iter, nevents)) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1487,38 +1487,38 @@ def get_job_for_precision(self): new_info['offset'] = i+1 new_info['directory'] += self.alphabet[i % 26] + str((i+1)//26) jobs.append(new_info) - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 
'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru @@ -1528,27 +1528,27 @@ class gen_ximprove_v4_nogridupdate(gen_ximprove_v4): # some hardcoded value which impact the generation gen_events_security = 1.1 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 400 # split jobs if a channel if it needs more than that + max_request_event = 400 # split jobs if a channel if it needs more than that max_event_in_iter = 500 min_event_in_iter = 250 - max_splitting = 260 # maximum duplication of a given channel - min_iter = 2 + max_splitting = 260 # maximum duplication of a given channel + min_iter = 2 max_iter = 6 keep_grid_for_refine = True - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + gen_ximprove.__init__(cmd, opt) - + if cmd.proc_characteristics['loopinduced'] and \ cmd.proc_characteristics['nexternal'] > 2: self.increase_parralelization(cmd.proc_characteristics['nexternal']) - + def increase_parralelization(self, nexternal): - self.max_splitting = 1000 - + self.max_splitting = 1000 + if self.run_card['refine_evt_by_job'] != -1: pass elif nexternal == 3: @@ -1563,27 +1563,27 @@ def increase_parralelization(self, nexternal): class gen_ximprove_share(gen_ximprove, gensym): """Doing the refine in multicore. Each core handle a couple of PS point.""" - nb_ps_by_job = 2000 + nb_ps_by_job = 2000 mode = "refine" gen_events_security = 1.15 # Note the real security is lower since we stop the jobs if they are at 96% # of this target. 
def __init__(self, *args, **opts): - + super(gen_ximprove_share, self).__init__(*args, **opts) self.generated_events = {} self.splitted_for_dir = lambda x,y : self.splitted_Pdir[(x,y)] - + def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - + goal_lum, to_refine = self.find_job_for_event() self.goal_lum = goal_lum - + # loop over the channel to refine to find the number of PS point to launch total_ps_points = 0 channel_to_ps_point = [] @@ -1593,7 +1593,7 @@ def get_job_for_event(self): os.remove(pjoin(self.me_dir, "SubProcesses",C.parent_name, C.name, "events.lhe")) except: pass - + #1. Compute the number of points are needed to reach target needed_event = goal_lum*C.get('axsec') if needed_event == 0: @@ -1609,18 +1609,18 @@ def get_job_for_event(self): nb_split = 1 if nb_split > self.max_splitting: nb_split = self.max_splitting - nevents = self.max_event_in_iter * self.max_splitting + nevents = self.max_event_in_iter * self.max_splitting else: nevents = self.max_event_in_iter * nb_split if nevents > self.max_splitting*self.max_event_in_iter: logger.warning("Channel %s/%s has a very low efficiency of unweighting. 
Might not be possible to reach target" % \ (C.name, C.parent_name)) - nevents = self.max_event_in_iter * self.max_splitting - - total_ps_points += nevents - channel_to_ps_point.append((C, nevents)) - + nevents = self.max_event_in_iter * self.max_splitting + + total_ps_points += nevents + channel_to_ps_point.append((C, nevents)) + if self.cmd.options["run_mode"] == 1: if self.cmd.options["cluster_size"]: nb_ps_by_job = total_ps_points /int(self.cmd.options["cluster_size"]) @@ -1634,7 +1634,7 @@ def get_job_for_event(self): nb_ps_by_job = total_ps_points / self.cmd.options["nb_core"] else: nb_ps_by_job = self.nb_ps_by_job - + nb_ps_by_job = int(max(nb_ps_by_job, 500)) for C, nevents in channel_to_ps_point: @@ -1648,20 +1648,20 @@ def get_job_for_event(self): self.create_resubmit_one_iter(C.parent_name, C.name[1:], submit_ps, nb_job, step=0) needed_event = goal_lum*C.get('xsec') logger.debug("%s/%s : need %s event. Need %s split job of %s points", C.parent_name, C.name, needed_event, nb_job, submit_ps) - - + + def combine_iteration(self, Pdir, G, step): - + grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - + # collect all the generated_event Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) assert len(grid_calculator.results) == len(Gdirs) == self.splitted_for_dir(Pdir, G) - - + + # Check how many events are going to be kept after un-weighting. needed_event = cross * self.goal_lum if needed_event == 0: @@ -1671,19 +1671,19 @@ def combine_iteration(self, Pdir, G, step): if self.err_goal >=1: if needed_event > self.gen_events_security * self.err_goal: needed_event = int(self.gen_events_security * self.err_goal) - + if (Pdir, G) in self.generated_events: old_nunwgt, old_maxwgt = self.generated_events[(Pdir, G)] else: old_nunwgt, old_maxwgt = 0, 0 - + if old_nunwgt == 0 and os.path.exists(pjoin(Pdir,"G%s" % G, "events.lhe")): # possible for second refine. 
lhe = lhe_parser.EventFile(pjoin(Pdir,"G%s" % G, "events.lhe")) old_nunwgt = lhe.unweight(None, trunc_error=0.005, log_level=0) old_maxwgt = lhe.max_wgt - - + + maxwgt = max(grid_calculator.get_max_wgt(), old_maxwgt) new_evt = grid_calculator.get_nunwgt(maxwgt) @@ -1695,35 +1695,35 @@ def combine_iteration(self, Pdir, G, step): one_iter_nb_event = max(grid_calculator.get_nunwgt(),1) drop_previous_iteration = False # compare the number of events to generate if we discard the previous iteration - n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) n_target_combined = (needed_event-nunwgt) / efficiency if n_target_one_iter < n_target_combined: # the last iteration alone has more event that the combine iteration. - # it is therefore interesting to drop previous iteration. + # it is therefore interesting to drop previous iteration. drop_previous_iteration = True nunwgt = one_iter_nb_event maxwgt = grid_calculator.get_max_wgt() new_evt = nunwgt - efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) - + efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + try: if drop_previous_iteration: raise IOError output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'a') except IOError: output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'w') - + misc.call(["cat"] + [pjoin(d, "events.lhe") for d in Gdirs], stdout=output_file) output_file.close() # For large number of iteration. check the number of event by doing the # real unweighting. 
- if nunwgt < 0.6 * needed_event and step > self.min_iter: + if nunwgt < 0.6 * needed_event and step > self.min_iter: lhe = lhe_parser.EventFile(output_file.name) old_nunwgt =nunwgt nunwgt = lhe.unweight(None, trunc_error=0.01, log_level=0) - - + + self.generated_events[(Pdir, G)] = (nunwgt, maxwgt) # misc.sprint("Adding %s event to %s. Currently at %s" % (new_evt, G, nunwgt)) @@ -1742,21 +1742,21 @@ def combine_iteration(self, Pdir, G, step): nevents = grid_calculator.results[0].nevents if nevents == 0: # possible if some integral returns 0 nevents = max(g.nevents for g in grid_calculator.results) - + need_ps_point = (needed_event - nunwgt)/(efficiency+1e-99) - need_job = need_ps_point // nevents + 1 - + need_job = need_ps_point // nevents + 1 + if step < self.min_iter: # This is normal but check if we are on the good track - job_at_first_iter = nb_split_before/2**(step-1) + job_at_first_iter = nb_split_before/2**(step-1) expected_total_job = job_at_first_iter * (2**self.min_iter-1) done_job = job_at_first_iter * (2**step-1) expected_remaining_job = expected_total_job - done_job - logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) + logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) # increase if needed but not too much need_job = min(need_job, expected_remaining_job*1.25) - + nb_job = (need_job-0.5)//(2**(self.min_iter-step)-1) + 1 nb_job = max(1, nb_job) grid_calculator.write_grid_for_submission(Pdir,G, @@ -1768,7 +1768,7 @@ def combine_iteration(self, Pdir, G, step): nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) #self.create_job(Pdir, G, nb_job, nevents, step) - + elif step < self.max_iter: if step + 1 == self.max_iter: need_job = 1.20 * need_job # avoid to have just too few event. 
@@ -1777,21 +1777,21 @@ def combine_iteration(self, Pdir, G, step): grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_job*nevents ,mode=self.mode, conservative_factor=self.max_iter) - - + + logger.info("%s/G%s is at %i/%i ('%.2g%%') event. Resubmit %i job at iteration %i." \ % (os.path.basename(Pdir), G, int(nunwgt),int(needed_event)+1, (float(nunwgt)/needed_event)*100.0 if needed_event>0.0 else 0.0, nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) - - + + return 0 - - + + def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -1807,7 +1807,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency nevents = nunwgt # make the unweighting to compute the number of events: luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -1816,23 +1816,23 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s 0.0 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - - - + + + class gen_ximprove_gridpack(gen_ximprove_v4): - - min_iter = 1 + + min_iter = 1 max_iter = 13 - max_request_event = 1e12 # split jobs if a channel if it needs more than that + max_request_event = 1e12 # split jobs if a channel if it needs more than that max_event_in_iter = 4000 min_event_in_iter = 500 combining_job = sys.maxsize @@ -1844,7 +1844,7 @@ def __new__(cls, *args, **opts): return super(gen_ximprove_gridpack, cls).__new__(cls, *args, **opts) def __init__(self, *args, **opts): - + self.ngran = -1 self.gscalefact = {} self.readonly = False 
@@ -1855,23 +1855,23 @@ def __init__(self, *args, **opts): self.readonly = opts['readonly'] super(gen_ximprove_gridpack,self).__init__(*args, **opts) if self.ngran == -1: - self.ngran = 1 - + self.ngran = 1 + def find_job_for_event(self): """return the list of channel that need to be improved""" import random - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) self.gscalefact = {} - + xtot = self.results.axsec - goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 + goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 # logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) all_channels.sort(key=lambda x : x.get('luminosity'), reverse=True) - + to_refine = [] for C in all_channels: tag = C.get('name') @@ -1885,27 +1885,27 @@ def find_job_for_event(self): #need to generate events logger.debug('request events for ', C.get('name'), 'cross=', C.get('axsec'), 'needed events = ', goal_lum * C.get('axsec')) - to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + to_refine.append(C) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. - + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target needed_event = max(goal_lum*C.get('axsec'), self.ngran) nb_split = 1 - + #2. 
estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1920,13 +1920,13 @@ def get_job_for_event(self): # forbid too low/too large value nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': os.path.basename(C.parent_name), + 'P_dir': os.path.basename(C.parent_name), 'offset': 1, # need to be change for splitted job 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'nevents': nevents, #int(nevents*self.gen_events_security)+1, @@ -1938,7 +1938,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': None, + 'packet': None, } if self.readonly: @@ -1946,11 +1946,11 @@ def get_job_for_event(self): info['base_directory'] = basedir jobs.append(info) - - write_dir = '.' if self.readonly else None - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) - + + write_dir = '.' if self.readonly else None + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) + done = [] for j in jobs: if j['P_dir'] in done: @@ -1967,22 +1967,22 @@ def get_job_for_event(self): write_dir = '.' 
if self.readonly else pjoin(self.me_dir, 'SubProcesses') self.check_events(goal_lum, to_refine, jobs, write_dir) - + def check_events(self, goal_lum, to_refine, jobs, Sdir): """check that we get the number of requested events if not resubmit.""" - + new_jobs = [] - + for C, job_info in zip(to_refine, jobs): - P = job_info['P_dir'] + P = job_info['P_dir'] G = job_info['channel'] axsec = C.get('axsec') - requested_events= job_info['requested_event'] - + requested_events= job_info['requested_event'] + new_results = sum_html.OneResult((P,G)) new_results.read_results(pjoin(Sdir,P, 'G%s'%G, 'results.dat')) - + # need to resubmit? if new_results.get('nunwgt') < requested_events: pwd = pjoin(os.getcwd(),job_info['P_dir'],'G%s'%G) if self.readonly else \ @@ -1992,10 +1992,10 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): job_info['offset'] += 1 new_jobs.append(job_info) files.mv(pjoin(pwd, 'events.lhe'), pjoin(pwd, 'events.lhe.previous')) - + if new_jobs: - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) + done = [] for j in new_jobs: if j['P_dir'] in done: @@ -2015,9 +2015,9 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): files.put_at_end(pjoin(pwd, 'events.lhe'),pjoin(pwd, 'events.lhe.previous')) return self.check_events(goal_lum, to_refine, new_jobs, Sdir) - - - - + + + + diff --git a/epochX/cudacpp/gg_ttg.mad/bin/internal/madevent_interface.py b/epochX/cudacpp/gg_ttg.mad/bin/internal/madevent_interface.py index cb6bf4ca57..8abba3f33f 100755 --- a/epochX/cudacpp/gg_ttg.mad/bin/internal/madevent_interface.py +++ b/epochX/cudacpp/gg_ttg.mad/bin/internal/madevent_interface.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # 
automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,10 +53,10 @@ # Special logger for the Cmd Interface logger = logging.getLogger('madevent.stdout') # -> stdout logger_stderr = logging.getLogger('madevent.stderr') # ->stderr - + try: import madgraph -except ImportError as error: +except ImportError as error: # import from madevent directory MADEVENT = True import internal.extended_cmd as cmd @@ -92,7 +92,7 @@ import madgraph.various.lhe_parser as lhe_parser # import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code import models.check_param_card as check_param_card - from madgraph.iolibs.files import ln + from madgraph.iolibs.files import ln from madgraph import InvalidCmd, MadGraph5Error, MG5DIR, ReadWrite @@ -113,10 +113,10 @@ class CmdExtended(common_run.CommonRunCmd): next_possibility = { 'start': [], } - + debug_output = 'ME5_debug' error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n' - error_debug += 'More information is found in \'%(debug)s\'.\n' + error_debug += 'More information is found in \'%(debug)s\'.\n' error_debug += 'Please attach this file to your report.' 
config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n' @@ -124,18 +124,18 @@ class CmdExtended(common_run.CommonRunCmd): keyboard_stop_msg = """stopping all operation in order to quit MadGraph5_aMC@NLO please enter exit""" - + # Define the Error InvalidCmd = InvalidCmd ConfigurationError = MadGraph5Error def __init__(self, me_dir, options, *arg, **opt): """Init history and line continuation""" - + # Tag allowing/forbiding question self.force = False - - # If possible, build an info line with current version number + + # If possible, build an info line with current version number # and date, from the VERSION text file info = misc.get_pkg_info() info_line = "" @@ -150,7 +150,7 @@ def __init__(self, me_dir, options, *arg, **opt): else: version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip() info_line = "#* VERSION %s %s *\n" % \ - (version, (24 - len(version)) * ' ') + (version, (24 - len(version)) * ' ') # Create a header for the history file. # Remember to fill in time at writeout time! 
@@ -177,7 +177,7 @@ def __init__(self, me_dir, options, *arg, **opt): '#* run as ./bin/madevent.py filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - + if info_line: info_line = info_line[1:] @@ -203,11 +203,11 @@ def __init__(self, me_dir, options, *arg, **opt): "* *\n" + \ "************************************************************") super(CmdExtended, self).__init__(me_dir, options, *arg, **opt) - + def get_history_header(self): - """return the history header""" + """return the history header""" return self.history_header % misc.get_time_info() - + def stop_on_keyboard_stop(self): """action to perform to close nicely on a keyboard interupt""" try: @@ -219,20 +219,20 @@ def stop_on_keyboard_stop(self): self.add_error_log_in_html(KeyboardInterrupt) except: pass - + def postcmd(self, stop, line): """ Update the status of the run for finishing interactive command """ - - stop = super(CmdExtended, self).postcmd(stop, line) + + stop = super(CmdExtended, self).postcmd(stop, line) # relaxing the tag forbidding question self.force = False - + if not self.use_rawinput: return stop - + if self.results and not self.results.current: return stop - + arg = line.split() if len(arg) == 0: return stop @@ -240,41 +240,41 @@ def postcmd(self, stop, line): return stop if isinstance(self.results.status, str) and self.results.status == 'Stop by the user': self.update_status('%s Stop by the user' % arg[0], level=None, error=True) - return stop + return stop elif not self.results.status: return stop elif str(arg[0]) in ['exit','quit','EOF']: return stop - + try: - self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], + self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], level=None, error=True) except Exception: misc.sprint('update_status fails') pass - - + + def nice_user_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() - return cmd.Cmd.nice_user_error(self, error, line) - + return cmd.Cmd.nice_user_error(self, error, line) + def nice_config_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() stop = cmd.Cmd.nice_config_error(self, error, line) - - + + try: debug_file = open(self.debug_output, 'a') debug_file.write(open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat'))) debug_file.close() except: - pass + pass return stop - + def nice_error_handling(self, error, line): """If a ME run is currently running add a link in the html output""" @@ -294,7 +294,7 @@ def nice_error_handling(self, error, line): proc_card = pjoin(self.me_dir,'Cards','proc_card_mg5.dat') if os.path.exists(proc_card): self.banner.add(proc_card) - + out_dir = pjoin(self.me_dir, 'Events', self.run_name) if not os.path.isdir(out_dir): os.mkdir(out_dir) @@ -307,7 +307,7 @@ def nice_error_handling(self, error, line): else: pass else: - self.add_error_log_in_html() + self.add_error_log_in_html() stop = cmd.Cmd.nice_error_handling(self, error, line) try: debug_file = open(self.debug_output, 'a') @@ -316,14 +316,14 @@ def nice_error_handling(self, error, line): except: pass return stop - - + + #=============================================================================== # HelpToCmd #=============================================================================== class HelpToCmd(object): """ The Series of help routine for the MadEventCmd""" - + def help_pythia(self): logger.info("syntax: pythia [RUN] [--run_options]") logger.info("-- run pythia on RUN (current one by default)") @@ -352,29 +352,29 @@ def help_banner_run(self): logger.info(" Path should be the path of a valid banner.") 
logger.info(" RUN should be the name of a run of the current directory") self.run_options_help([('-f','answer all question by default'), - ('--name=X', 'Define the name associated with the new run')]) - + ('--name=X', 'Define the name associated with the new run')]) + def help_open(self): logger.info("syntax: open FILE ") logger.info("-- open a file with the appropriate editor.") logger.info(' If FILE belongs to index.html, param_card.dat, run_card.dat') logger.info(' the path to the last created/used directory is used') logger.info(' The program used to open those files can be chosen in the') - logger.info(' configuration file ./input/mg5_configuration.txt') - - + logger.info(' configuration file ./input/mg5_configuration.txt') + + def run_options_help(self, data): if data: logger.info('-- local options:') for name, info in data: logger.info(' %s : %s' % (name, info)) - + logger.info("-- session options:") - logger.info(" Note that those options will be kept for the current session") + logger.info(" Note that those options will be kept for the current session") logger.info(" --cluster : Submit to the cluster. 
Current cluster: %s" % self.options['cluster_type']) logger.info(" --multicore : Run in multi-core configuration") logger.info(" --nb_core=X : limit the number of core to use to X.") - + def help_generate_events(self): logger.info("syntax: generate_events [run_name] [options]",) @@ -398,16 +398,16 @@ def help_initMadLoop(self): logger.info(" -f : Bypass the edition of MadLoopParams.dat.",'$MG:color:BLUE') logger.info(" -r : Refresh of the existing filters (erasing them if already present).",'$MG:color:BLUE') logger.info(" --nPS= : Specify how many phase-space points should be tried to set up the filters.",'$MG:color:BLUE') - + def help_calculate_decay_widths(self): - + if self.ninitial != 1: logger.warning("This command is only valid for processes of type A > B C.") logger.warning("This command can not be run in current context.") logger.warning("") - + logger.info("syntax: calculate_decay_widths [run_name] [options])") logger.info("-- Calculate decay widths and enter widths and BRs in param_card") logger.info(" for a series of processes of type A > B C ...") @@ -428,8 +428,8 @@ def help_survey(self): logger.info("-- evaluate the different channel associate to the process") self.run_options_help([("--" + key,value[-1]) for (key,value) in \ self._survey_options.items()]) - - + + def help_restart_gridpack(self): logger.info("syntax: restart_gridpack --precision= --restart_zero") @@ -439,14 +439,14 @@ def help_launch(self): logger.info("syntax: launch [run_name] [options])") logger.info(" --alias for either generate_events/calculate_decay_widths") logger.info(" depending of the number of particles in the initial state.") - + if self.ninitial == 1: logger.info("For this directory this is equivalent to calculate_decay_widths") self.help_calculate_decay_widths() else: logger.info("For this directory this is equivalent to $generate_events") self.help_generate_events() - + def help_refine(self): logger.info("syntax: refine require_precision [max_channel] [--run_options]") 
logger.info("-- refine the LAST run to achieve a given precision.") @@ -454,14 +454,14 @@ def help_refine(self): logger.info(' or the required relative error') logger.info(' max_channel:[5] maximal number of channel per job') self.run_options_help([]) - + def help_combine_events(self): """ """ logger.info("syntax: combine_events [run_name] [--tag=tag_name] [--run_options]") logger.info("-- Combine the last run in order to write the number of events") logger.info(" asked in the run_card.") self.run_options_help([]) - + def help_store_events(self): """ """ logger.info("syntax: store_events [--run_options]") @@ -481,7 +481,7 @@ def help_import(self): logger.info("syntax: import command PATH") logger.info("-- Execute the command present in the file") self.run_options_help([]) - + def help_syscalc(self): logger.info("syntax: syscalc [RUN] [%s] [-f | --tag=]" % '|'.join(self._plot_mode)) logger.info("-- calculate systematics information for the RUN (current run by default)") @@ -506,18 +506,18 @@ class AskRun(cmd.ControlSwitch): ('madspin', 'Decay onshell particles'), ('reweight', 'Add weights to events for new hypp.') ] - + def __init__(self, question, line_args=[], mode=None, force=False, *args, **opt): - + self.check_available_module(opt['mother_interface'].options) self.me_dir = opt['mother_interface'].me_dir super(AskRun,self).__init__(self.to_control, opt['mother_interface'], *args, **opt) - - + + def check_available_module(self, options): - + self.available_module = set() if options['pythia-pgs_path']: self.available_module.add('PY6') @@ -540,32 +540,32 @@ def check_available_module(self, options): self.available_module.add('Rivet') else: logger.warning("Rivet program installed but no parton shower with hepmc output detected.\n Please install pythia8") - + if not MADEVENT or ('mg5_path' in options and options['mg5_path']): self.available_module.add('MadSpin') if misc.has_f2py() or options['f2py_compiler']: self.available_module.add('reweight') -# old mode to 
activate the shower +# old mode to activate the shower def ans_parton(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if value is None: self.set_all_off() else: logger.warning('Invalid command: parton=%s' % value) - - + + # -# HANDLING SHOWER +# HANDLING SHOWER # def get_allowed_shower(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_shower'): return self.allowed_shower - + self.allowed_shower = [] if 'PY6' in self.available_module: self.allowed_shower.append('Pythia6') @@ -574,9 +574,9 @@ def get_allowed_shower(self): if self.allowed_shower: self.allowed_shower.append('OFF') return self.allowed_shower - + def set_default_shower(self): - + if 'PY6' in self.available_module and\ os.path.exists(pjoin(self.me_dir,'Cards','pythia_card.dat')): self.switch['shower'] = 'Pythia6' @@ -590,10 +590,10 @@ def set_default_shower(self): def check_value_shower(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_shower(): return True - + value =value.lower() if value in ['py6','p6','pythia_6'] and 'PY6' in self.available_module: return 'Pythia6' @@ -601,13 +601,13 @@ def check_value_shower(self, value): return 'Pythia8' else: return False - - -# old mode to activate the shower + + +# old mode to activate the shower def ans_pythia(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if 'PY6' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return @@ -621,13 +621,13 @@ def ans_pythia(self, value=None): self.set_switch('shower', 'OFF') else: logger.warning('Invalid command: pythia=%s' % value) - - + + def consistency_shower_detector(self, vshower, vdetector): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower == 'OFF': @@ -635,35 +635,35 @@ def consistency_shower_detector(self, vshower, vdetector): return 'OFF' if vshower == 'Pythia8' and vdetector == 'PGS': return 'OFF' - + return None - + # # HANDLING DETECTOR # def get_allowed_detector(self): """return valid entry for the switch""" - + if hasattr(self, 'allowed_detector'): - return self.allowed_detector - + return self.allowed_detector + self.allowed_detector = [] if 'PGS' in self.available_module: self.allowed_detector.append('PGS') if 'Delphes' in self.available_module: self.allowed_detector.append('Delphes') - + if self.allowed_detector: self.allowed_detector.append('OFF') - return self.allowed_detector + return self.allowed_detector def set_default_detector(self): - + self.set_default_shower() #ensure that this one is called first! - + if 'PGS' in self.available_module and self.switch['shower'] == 'Pythia6'\ and os.path.exists(pjoin(self.me_dir,'Cards','pgs_card.dat')): self.switch['detector'] = 'PGS' @@ -674,16 +674,16 @@ def set_default_detector(self): self.switch['detector'] = 'OFF' else: self.switch['detector'] = 'Not Avail.' - -# old mode to activate pgs + +# old mode to activate pgs def ans_pgs(self, value=None): """None: means that the user type 'pgs' - value: means that the user type pgs=value""" - + value: means that the user type pgs=value""" + if 'PGS' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return - + if value is None: self.set_all_off() self.switch['shower'] = 'Pythia6' @@ -696,16 +696,16 @@ def ans_pgs(self, value=None): else: logger.warning('Invalid command: pgs=%s' % value) - + # old mode to activate Delphes def ans_delphes(self, value=None): """None: means that the user type 'delphes' - value: means that the user type delphes=value""" - + value: means that the user type delphes=value""" + if 'Delphes' not in self.available_module: logger.warning('Delphes not available. Ignore commmand') return - + if value is None: self.set_all_off() if 'PY6' in self.available_module: @@ -718,15 +718,15 @@ def ans_delphes(self, value=None): elif value == 'off': self.set_switch('detector', 'OFF') else: - logger.warning('Invalid command: pgs=%s' % value) + logger.warning('Invalid command: pgs=%s' % value) def consistency_detector_shower(self,vdetector, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ - + if vdetector == 'PGS' and vshower != 'Pythia6': return 'Pythia6' if vdetector == 'Delphes' and vshower not in ['Pythia6', 'Pythia8']: @@ -744,28 +744,28 @@ def consistency_detector_shower(self,vdetector, vshower): # def get_allowed_analysis(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_analysis'): return self.allowed_analysis - + self.allowed_analysis = [] if 'ExRoot' in self.available_module: self.allowed_analysis.append('ExRoot') if 'MA4' in self.available_module: self.allowed_analysis.append('MadAnalysis4') if 'MA5' in self.available_module: - self.allowed_analysis.append('MadAnalysis5') + self.allowed_analysis.append('MadAnalysis5') if 'Rivet' in self.available_module: - self.allowed_analysis.append('Rivet') - + self.allowed_analysis.append('Rivet') + if self.allowed_analysis: self.allowed_analysis.append('OFF') - + return 
self.allowed_analysis - + def check_analysis(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_analysis(): return True if value.lower() in ['ma4', 'madanalysis4', 'madanalysis_4','4']: @@ -786,30 +786,30 @@ def consistency_shower_analysis(self, vshower, vanalysis): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'OFF' #new value for analysis - + return None - + def consistency_analysis_shower(self, vanalysis, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'Pythia8' #new value for analysis - + return None def set_default_analysis(self): """initialise the switch for analysis""" - + if 'MA4' in self.available_module and \ os.path.exists(pjoin(self.me_dir,'Cards','plot_card.dat')): self.switch['analysis'] = 'MadAnalysis4' @@ -818,46 +818,46 @@ def set_default_analysis(self): or os.path.exists(pjoin(self.me_dir,'Cards', 'madanalysis5_hadron_card.dat'))): self.switch['analysis'] = 'MadAnalysis5' elif 'ExRoot' in self.available_module: - self.switch['analysis'] = 'ExRoot' - elif self.get_allowed_analysis(): + self.switch['analysis'] = 'ExRoot' + elif self.get_allowed_analysis(): self.switch['analysis'] = 'OFF' else: self.switch['analysis'] = 'Not Avail.' 
- + # # MADSPIN handling # def get_allowed_madspin(self): """ ON|OFF|onshell """ - + if hasattr(self, 'allowed_madspin'): return self.allowed_madspin - + self.allowed_madspin = [] if 'MadSpin' in self.available_module: self.allowed_madspin = ['OFF',"ON",'onshell',"full"] return self.allowed_madspin - + def check_value_madspin(self, value): """handle alias and valid option not present in get_allowed_madspin""" - + if value.upper() in self.get_allowed_madspin(): return True elif value.lower() in self.get_allowed_madspin(): return True - + if 'MadSpin' not in self.available_module: return False - + if value.lower() in ['madspin', 'full']: return 'full' elif value.lower() in ['none']: return 'none' - - + + def set_default_madspin(self): """initialise the switch for madspin""" - + if 'MadSpin' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): self.switch['madspin'] = 'ON' @@ -865,10 +865,10 @@ def set_default_madspin(self): self.switch['madspin'] = 'OFF' else: self.switch['madspin'] = 'Not Avail.' 
- + def get_cardcmd_for_madspin(self, value): """set some command to run before allowing the user to modify the cards.""" - + if value == 'onshell': return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"] elif value in ['full', 'madspin']: @@ -877,36 +877,36 @@ def get_cardcmd_for_madspin(self, value): return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"] else: return [] - + # # ReWeight handling # def get_allowed_reweight(self): """ return the list of valid option for reweight=XXX """ - + if hasattr(self, 'allowed_reweight'): return getattr(self, 'allowed_reweight') - + if 'reweight' not in self.available_module: self.allowed_reweight = [] return self.allowed_reweight = ['OFF', 'ON'] - + # check for plugin mode plugin_path = self.mother_interface.plugin_path opts = misc.from_plugin_import(plugin_path, 'new_reweight', warning=False) self.allowed_reweight += opts - + def set_default_reweight(self): """initialise the switch for reweight""" - + if 'reweight' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')): self.switch['reweight'] = 'ON' else: self.switch['reweight'] = 'OFF' else: - self.switch['reweight'] = 'Not Avail.' + self.switch['reweight'] = 'Not Avail.' 
#=============================================================================== # CheckValidForCmd @@ -916,14 +916,14 @@ class CheckValidForCmd(object): def check_banner_run(self, args): """check the validity of line""" - + if len(args) == 0: self.help_banner_run() raise self.InvalidCmd('banner_run requires at least one argument.') - + tag = [a[6:] for a in args if a.startswith('--tag=')] - - + + if os.path.exists(args[0]): type ='banner' format = self.detect_card_type(args[0]) @@ -931,7 +931,7 @@ def check_banner_run(self, args): raise self.InvalidCmd('The file is not a valid banner.') elif tag: args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) + (args[0], tag)) if not os.path.exists(args[0]): raise self.InvalidCmd('No banner associates to this name and tag.') else: @@ -939,7 +939,7 @@ def check_banner_run(self, args): type = 'run' banners = misc.glob('*_banner.txt', pjoin(self.me_dir,'Events', args[0])) if not banners: - raise self.InvalidCmd('No banner associates to this name.') + raise self.InvalidCmd('No banner associates to this name.') elif len(banners) == 1: args[0] = banners[0] else: @@ -947,8 +947,8 @@ def check_banner_run(self, args): tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners] tag = self.ask('which tag do you want to use?', tags[0], tags) args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) - + (args[0], tag)) + run_name = [arg[7:] for arg in args if arg.startswith('--name=')] if run_name: try: @@ -970,14 +970,14 @@ def check_banner_run(self, args): except Exception: pass self.set_run_name(name) - + def check_history(self, args): """check the validity of line""" - + if len(args) > 1: self.help_history() raise self.InvalidCmd('\"history\" command takes at most one argument') - + if not len(args): return elif args[0] != 'clean': @@ -985,16 +985,16 @@ def check_history(self, args): if dirpath and not os.path.exists(dirpath) or \ os.path.isdir(args[0]): raise 
self.InvalidCmd("invalid path %s " % dirpath) - + def check_save(self, args): """ check the validity of the line""" - + if len(args) == 0: args.append('options') if args[0] not in self._save_opts: raise self.InvalidCmd('wrong \"save\" format') - + if args[0] != 'options' and len(args) != 2: self.help_save() raise self.InvalidCmd('wrong \"save\" format') @@ -1003,7 +1003,7 @@ def check_save(self, args): if not os.path.exists(basename): raise self.InvalidCmd('%s is not a valid path, please retry' % \ args[1]) - + if args[0] == 'options': has_path = None for arg in args[1:]: @@ -1024,9 +1024,9 @@ def check_save(self, args): has_path = True if not has_path: if '--auto' in arg and self.options['mg5_path']: - args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) + args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) else: - args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) + args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) def check_set(self, args): """ check the validity of the line""" @@ -1039,20 +1039,20 @@ def check_set(self, args): self.help_set() raise self.InvalidCmd('Possible options for set are %s' % \ self._set_options) - + if args[0] in ['stdout_level']: if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \ and not args[1].isdigit(): raise self.InvalidCmd('output_level needs ' + \ - 'a valid level') - + 'a valid level') + if args[0] in ['timeout']: if not args[1].isdigit(): - raise self.InvalidCmd('timeout values should be a integer') - + raise self.InvalidCmd('timeout values should be a integer') + def check_open(self, args): """ check the validity of the line """ - + if len(args) != 1: self.help_open() raise self.InvalidCmd('OPEN command requires exactly one argument') @@ -1069,7 +1069,7 @@ def check_open(self, args): raise self.InvalidCmd('No MadEvent path defined. 
Unable to associate this name to a file') else: return True - + path = self.me_dir if os.path.isfile(os.path.join(path,args[0])): args[0] = os.path.join(path,args[0]) @@ -1078,7 +1078,7 @@ def check_open(self, args): elif os.path.isfile(os.path.join(path,'HTML',args[0])): args[0] = os.path.join(path,'HTML',args[0]) # special for card with _default define: copy the default and open it - elif '_card.dat' in args[0]: + elif '_card.dat' in args[0]: name = args[0].replace('_card.dat','_card_default.dat') if os.path.isfile(os.path.join(path,'Cards', name)): files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0])) @@ -1086,13 +1086,13 @@ def check_open(self, args): else: raise self.InvalidCmd('No default path for this file') elif not os.path.isfile(args[0]): - raise self.InvalidCmd('No default path for this file') - + raise self.InvalidCmd('No default path for this file') + def check_initMadLoop(self, args): """ check initMadLoop command arguments are valid.""" - + opt = {'refresh': False, 'nPS': None, 'force': False} - + for arg in args: if arg in ['-r','--refresh']: opt['refresh'] = True @@ -1105,14 +1105,14 @@ def check_initMadLoop(self, args): except ValueError: raise InvalidCmd("The number of attempts specified "+ "'%s' is not a valid integer."%n_attempts) - + return opt - + def check_treatcards(self, args): """check that treatcards arguments are valid [param|run|all] [--output_dir=] [--param_card=] [--run_card=] """ - + opt = {'output_dir':pjoin(self.me_dir,'Source'), 'param_card':pjoin(self.me_dir,'Cards','param_card.dat'), 'run_card':pjoin(self.me_dir,'Cards','run_card.dat'), @@ -1129,14 +1129,14 @@ def check_treatcards(self, args): if os.path.isfile(value): card_name = self.detect_card_type(value) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' % (card_name, key)) opt[key] = value elif 
os.path.isfile(pjoin(self.me_dir,value)): card_name = self.detect_card_type(pjoin(self.me_dir,value)) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' - % (card_name, key)) + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + % (card_name, key)) opt[key] = value else: raise self.InvalidCmd('No such file: %s ' % value) @@ -1154,14 +1154,14 @@ def check_treatcards(self, args): else: self.help_treatcards() raise self.InvalidCmd('Unvalid argument %s' % arg) - - return mode, opt - - + + return mode, opt + + def check_survey(self, args, cmd='survey'): """check that the argument for survey are valid""" - - + + self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) @@ -1183,41 +1183,41 @@ def check_survey(self, args, cmd='survey'): self.help_survey() raise self.InvalidCmd('Too many argument for %s command' % cmd) elif not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], None,'parton', True) args.pop(0) - + return True def check_generate_events(self, args): """check that the argument for generate_events are valid""" - + run = None if args and args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['auto','parton', 'pythia', 'pgs', 'delphes']: self.help_generate_events() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. 
Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + #if len(args) > 1: # self.help_generate_events() # raise self.InvalidCmd('Too many argument for generate_events command: %s' % cmd) - + return run def check_calculate_decay_widths(self, args): """check that the argument for calculate_decay_widths are valid""" - + if self.ninitial != 1: raise self.InvalidCmd('Can only calculate decay widths for decay processes A > B C ...') @@ -1232,7 +1232,7 @@ def check_calculate_decay_widths(self, args): if len(args) > 1: self.help_calculate_decay_widths() raise self.InvalidCmd('Too many argument for calculate_decay_widths command: %s' % cmd) - + return accuracy @@ -1241,25 +1241,25 @@ def check_multi_run(self, args): """check that the argument for survey are valid""" run = None - + if not len(args): self.help_multi_run() raise self.InvalidCmd("""multi_run command requires at least one argument for the number of times that it call generate_events command""") - + if args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['parton', 'pythia', 'pgs', 'delphes']: self.help_multi_run() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. 
To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + elif not args[0].isdigit(): self.help_multi_run() @@ -1267,7 +1267,7 @@ def check_multi_run(self, args): #pass nb run to an integer nb_run = args.pop(0) args.insert(0, int(nb_run)) - + return run @@ -1284,7 +1284,7 @@ def check_refine(self, args): self.help_refine() raise self.InvalidCmd('require_precision argument is require for refine cmd') - + if not self.run_name: if self.results.lastrun: self.set_run_name(self.results.lastrun) @@ -1296,17 +1296,17 @@ def check_refine(self, args): else: try: [float(arg) for arg in args] - except ValueError: - self.help_refine() + except ValueError: + self.help_refine() raise self.InvalidCmd('refine arguments are suppose to be number') - + return True - + def check_combine_events(self, arg): """ Check the argument for the combine events command """ - + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] elif not self.run_tag: @@ -1314,53 +1314,53 @@ def check_combine_events(self, arg): else: tag = self.run_tag self.run_tag = tag - + if len(arg) > 1: self.help_combine_events() raise self.InvalidCmd('Too many argument for combine_events command') - + if len(arg) == 1: self.set_run_name(arg[0], self.run_tag, 'parton', True) - + if not self.run_name: if not self.results.lastrun: raise self.InvalidCmd('No run_name currently define. 
Unable to run combine') else: self.set_run_name(self.results.lastrun) - + return True - + def check_pythia(self, args): """Check the argument for pythia command - syntax: pythia [NAME] + syntax: pythia [NAME] Note that other option are already removed at this point """ - + mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia', 'pgs', 'delphes']: self.help_pythia() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') - + if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' raise self.InvalidCmd(error_msg) - - - + + + tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1368,8 +1368,8 @@ def check_pythia(self, args): if self.results.lastrun: args.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): @@ -1388,21 +1388,21 @@ def check_pythia(self, args): files.ln(input_file, os.path.dirname(output_file)) else: misc.gunzip(input_file, keep=True, stdout=output_file) - + args.append(mode) - + def check_pythia8(self, args): """Check the argument for pythia command - syntax: pythia8 [NAME] + syntax: pythia8 [NAME] Note that other option are already removed at this point - """ + """ mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia','pythia8','delphes']: self.help_pythia8() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') @@ -1410,7 +1410,7 @@ def check_pythia8(self, args): if not self.options['pythia8_path']: logger.info('Retry reading configuration file to find pythia8 path') self.set_configuration() - + if not self.options['pythia8_path'] or not \ os.path.exists(pjoin(self.options['pythia8_path'],'bin','pythia8-config')): error_msg = 'No valid pythia8 path set.\n' @@ -1421,7 +1421,7 @@ def check_pythia8(self, args): raise self.InvalidCmd(error_msg) tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1430,11 +1430,11 @@ def check_pythia8(self, args): args.insert(0, self.results.lastrun) else: raise self.InvalidCmd('No run name currently define. '+ - 'Please add this information.') - + 'Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ - not os.path.exists(pjoin(self.me_dir,'Events',args[0], + not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): raise self.InvalidCmd('No events file corresponding to %s run. 
' % args[0]) @@ -1451,9 +1451,9 @@ def check_pythia8(self, args): else: raise self.InvalidCmd('No event file corresponding to %s run. ' % self.run_name) - + args.append(mode) - + def check_remove(self, args): """Check that the remove command is valid""" @@ -1484,33 +1484,33 @@ def check_plot(self, args): madir = self.options['madanalysis_path'] td = self.options['td_path'] - + if not madir or not td: logger.info('Retry to read configuration file to find madanalysis/td') self.set_configuration() madir = self.options['madanalysis_path'] - td = self.options['td_path'] - + td = self.options['td_path'] + if not madir: error_msg = 'No valid MadAnalysis path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) + raise self.InvalidCmd(error_msg) if not td: error_msg = 'No valid td path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') + raise self.InvalidCmd('No run name currently define. Please add this information.') args.append('all') return - + if args[0] not in self._plot_mode: self.set_run_name(args[0], level='plot') del args[0] @@ -1518,45 +1518,45 @@ def check_plot(self, args): args.append('all') elif not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + for arg in args: if arg not in self._plot_mode and arg != self.run_name: self.help_plot() - raise self.InvalidCmd('unknown options %s' % arg) - + raise self.InvalidCmd('unknown options %s' % arg) + def check_syscalc(self, args): """Check the argument for the syscalc command syscalc run_name modes""" scdir = self.options['syscalc_path'] - + if not scdir: logger.info('Retry to read configuration file to find SysCalc') self.set_configuration() scdir = self.options['syscalc_path'] - + if not scdir: error_msg = 'No valid SysCalc path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' error_msg += 'Please note that you need to compile SysCalc first.' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. Please add this information.') args.append('all') return #deal options tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] - + if args[0] not in self._syscalc_mode: self.set_run_name(args[0], tag=tag, level='syscalc') del args[0] @@ -1564,61 +1564,61 @@ def check_syscalc(self, args): args.append('all') elif not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. 
Please add this information.') elif tag and tag != self.run_tag: self.set_run_name(self.run_name, tag=tag, level='syscalc') - + for arg in args: if arg not in self._syscalc_mode and arg != self.run_name: self.help_syscalc() - raise self.InvalidCmd('unknown options %s' % arg) + raise self.InvalidCmd('unknown options %s' % arg) if self.run_card['use_syst'] not in self.true: raise self.InvalidCmd('Run %s does not include ' % self.run_name + \ 'systematics information needed for syscalc.') - - + + def check_pgs(self, arg, no_default=False): """Check the argument for pythia command - syntax is "pgs [NAME]" + syntax is "pgs [NAME]" Note that other option are already remove at this point """ - + # If not pythia-pgs path if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] - - + + if len(arg) == 0 and not self.run_name: if self.results.lastrun: arg.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(arg) == 1 and self.run_name == arg[0]: arg.pop(0) - + if not len(arg) and \ not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): if not no_default: self.help_pgs() raise self.InvalidCmd('''No file file pythia_events.hep currently available Please specify a valid run_name''') - - lock = None + + lock = None if len(arg) == 1: prev_tag = self.set_run_name(arg[0], tag, 'pgs') if not os.path.exists(pjoin(self.me_dir,'Events',self.run_name,'%s_pythia_events.hep.gz' % prev_tag)): @@ -1626,25 +1626,25 @@ def check_pgs(self, arg, no_default=False): else: input_file = pjoin(self.me_dir,'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag) output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep') - lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), + lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), argument=['-c', input_file]) else: - if tag: + if tag: self.run_card['run_tag'] = tag self.set_run_name(self.run_name, tag, 'pgs') - - return lock + + return lock def check_display(self, args): """check the validity of line syntax is "display XXXXX" """ - + if len(args) < 1 or args[0] not in self._display_opts: self.help_display() raise self.InvalidCmd - + if args[0] == 'variable' and len(args) !=2: raise self.InvalidCmd('variable need a variable name') @@ -1654,39 +1654,39 @@ def check_display(self, args): def check_import(self, args): """check the validity of line""" - + if not args: self.help_import() raise self.InvalidCmd('wrong \"import\" format') - + if args[0] != 'command': args.insert(0,'command') - - + + if not len(args) == 2 or not os.path.exists(args[1]): raise self.InvalidCmd('PATH is mandatory for import command\n') - + #=============================================================================== # CompleteForCmd #=============================================================================== class CompleteForCmd(CheckValidForCmd): """ The Series of help 
routine for the MadGraphCmd""" - - + + def complete_banner_run(self, text, line, begidx, endidx, formatting=True): "Complete the banner run command" try: - - + + args = self.split_arg(line[0:begidx], error=False) - + if args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args \ - if a.endswith(os.path.sep)])) - - + if a.endswith(os.path.sep)])) + + if len(args) > 1: # only options are possible tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events' , args[1])) @@ -1697,9 +1697,9 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): else: return self.list_completion(text, tags) return self.list_completion(text, tags +['--name=','-f'], line) - + # First argument - possibilites = {} + possibilites = {} comp = self.path_completion(text, os.path.join('.',*[a for a in args \ if a.endswith(os.path.sep)])) @@ -1711,10 +1711,10 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): run_list = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) run_list = [n.rsplit('/',2)[1] for n in run_list] possibilites['RUN Name'] = self.list_completion(text, run_list) - + return self.deal_multiple_categories(possibilites, formatting) - - + + except Exception as error: print(error) @@ -1732,12 +1732,12 @@ def complete_history(self, text, line, begidx, endidx): if len(args) == 1: return self.path_completion(text) - - def complete_open(self, text, line, begidx, endidx): + + def complete_open(self, text, line, begidx, endidx): """ complete the open command """ args = self.split_arg(line[0:begidx]) - + # Directory continuation if os.path.sep in args[-1] + text: return self.path_completion(text, @@ -1751,10 +1751,10 @@ def complete_open(self, text, line, begidx, endidx): if os.path.isfile(os.path.join(path,'README')): possibility.append('README') if os.path.isdir(os.path.join(path,'Cards')): - possibility += [f for f in os.listdir(os.path.join(path,'Cards')) + 
possibility += [f for f in os.listdir(os.path.join(path,'Cards')) if f.endswith('.dat')] if os.path.isdir(os.path.join(path,'HTML')): - possibility += [f for f in os.listdir(os.path.join(path,'HTML')) + possibility += [f for f in os.listdir(os.path.join(path,'HTML')) if f.endswith('.html') and 'default' not in f] else: possibility.extend(['./','../']) @@ -1763,7 +1763,7 @@ def complete_open(self, text, line, begidx, endidx): if os.path.exists('MG5_debug'): possibility.append('MG5_debug') return self.list_completion(text, possibility) - + def complete_set(self, text, line, begidx, endidx): "Complete the set command" @@ -1784,27 +1784,27 @@ def complete_set(self, text, line, begidx, endidx): elif len(args) >2 and args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args if a.endswith(os.path.sep)]), - only_dirs = True) - + only_dirs = True) + def complete_survey(self, text, line, begidx, endidx): """ Complete the survey command """ - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + return self.list_completion(text, self._run_options, line) - + complete_refine = complete_survey complete_combine_events = complete_survey complite_store = complete_survey complete_generate_events = complete_survey complete_create_gridpack = complete_survey - + def complete_generate_events(self, text, line, begidx, endidx): """ Complete the generate events""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() @@ -1813,17 +1813,17 @@ def complete_generate_events(self, text, line, begidx, endidx): return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) def 
complete_initMadLoop(self, text, line, begidx, endidx): "Complete the initMadLoop command" - + numbers = [str(i) for i in range(10)] opts = ['-f','-r','--nPS='] - + args = self.split_arg(line[0:begidx], error=False) if len(line) >=6 and line[begidx-6:begidx]=='--nPS=': return self.list_completion(text, numbers, line) @@ -1840,18 +1840,18 @@ def complete_launch(self, *args, **opts): def complete_calculate_decay_widths(self, text, line, begidx, endidx): """ Complete the calculate_decay_widths command""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + opts = self._run_options + self._calculate_decay_options return self.list_completion(text, opts, line) - + def complete_display(self, text, line, begidx, endidx): - """ Complete the display command""" - + """ Complete the display command""" + args = self.split_arg(line[0:begidx], error=False) if len(args) >= 2 and args[1] =='results': start = line.find('results') @@ -1860,44 +1860,44 @@ def complete_display(self, text, line, begidx, endidx): def complete_multi_run(self, text, line, begidx, endidx): """complete multi run command""" - + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: data = [str(i) for i in range(0,20)] return self.list_completion(text, data, line) - + if line.endswith('run=') and not text: return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - - - + + + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - + def complete_plot(self, text, line, begidx, endidx): """ Complete 
the plot command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1: return self.list_completion(text, self._plot_mode) else: return self.list_completion(text, self._plot_mode + list(self.results.keys())) - + def complete_syscalc(self, text, line, begidx, endidx, formatting=True): """ Complete the syscalc command """ - + output = {} args = self.split_arg(line[0:begidx], error=False) - + if len(args) <=1: output['RUN_NAME'] = self.list_completion(list(self.results.keys())) output['MODE'] = self.list_completion(text, self._syscalc_mode) @@ -1907,12 +1907,12 @@ def complete_syscalc(self, text, line, begidx, endidx, formatting=True): if run in self.results: tags = ['--tag=%s' % tag['tag'] for tag in self.results[run]] output['options'] += tags - + return self.deal_multiple_categories(output, formatting) - + def complete_remove(self, text, line, begidx, endidx): """Complete the remove command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1 and (text.startswith('--t')): run = args[1] @@ -1932,8 +1932,8 @@ def complete_remove(self, text, line, begidx, endidx): data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1] for n in data] return self.list_completion(text, ['all'] + data) - - + + def complete_shower(self,text, line, begidx, endidx): "Complete the shower command" args = self.split_arg(line[0:begidx], error=False) @@ -1941,7 +1941,7 @@ def complete_shower(self,text, line, begidx, endidx): return self.list_completion(text, self._interfaced_showers) elif len(args)>1 and args[1] in self._interfaced_showers: return getattr(self, 'complete_%s' % text)\ - (text, args[1],line.replace(args[0]+' ',''), + (text, args[1],line.replace(args[0]+' ',''), begidx-len(args[0])-1, endidx-len(args[0])-1) def complete_pythia8(self,text, line, begidx, endidx): @@ -1955,11 +1955,11 @@ def complete_pythia8(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = 
self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_madanalysis5_parton(self,text, line, begidx, endidx): @@ -1978,19 +1978,19 @@ def complete_madanalysis5_parton(self,text, line, begidx, endidx): else: tmp2 = self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) - return tmp1 + tmp2 + return tmp1 + tmp2 elif '--MA5_stdout_lvl=' in line and not any(arg.startswith( '--MA5_stdout_lvl=') for arg in args): - return self.list_completion(text, - ['--MA5_stdout_lvl=%s'%opt for opt in + return self.list_completion(text, + ['--MA5_stdout_lvl=%s'%opt for opt in ['logging.INFO','logging.DEBUG','logging.WARNING', 'logging.CRITICAL','90']], line) else: - return self.list_completion(text, ['-f', + return self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) def complete_pythia(self,text, line, begidx, endidx): - "Complete the pythia command" + "Complete the pythia command" args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: @@ -2001,16 +2001,16 @@ def complete_pythia(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_pgs(self,text, line, begidx, endidx): "Complete the pythia command" - args = self.split_arg(line[0:begidx], error=False) + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: 
#return valid run_name data = misc.glob(pjoin('*', '*_pythia_events.hep.gz'), pjoin(self.me_dir, 'Events')) @@ -2019,23 +2019,23 @@ def complete_pgs(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--tag=' ,'--no_default'], line) - return tmp1 + tmp2 + return tmp1 + tmp2 else: - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--tag=','--no_default'], line) - complete_delphes = complete_pgs - complete_rivet = complete_pgs + complete_delphes = complete_pgs + complete_rivet = complete_pgs #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCmd): - """The command line processor of Mad Graph""" - + """The command line processor of Mad Graph""" + LO = True # Truth values @@ -2063,7 +2063,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm cluster_mode = 0 queue = 'madgraph' nb_core = None - + next_possibility = { 'start': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]', 'calculate_decay_widths [OPTIONS]', @@ -2080,9 +2080,9 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm 'pgs': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'], 'delphes' : ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'] } - + asking_for_run = AskRun - + ############################################################################ def __init__(self, me_dir = None, options={}, *completekey, **stdin): """ add information to the cmd """ @@ -2095,16 +2095,16 @@ def __init__(self, me_dir = None, options={}, *completekey, **stdin): if self.web: os.system('touch %s' % pjoin(self.me_dir,'Online')) - 
self.load_results_db() + self.load_results_db() self.results.def_web_mode(self.web) self.Gdirs = None - + self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) self.configured = 0 # time for reading the card self._options = {} # for compatibility with extended_cmd - - + + def pass_in_web_mode(self): """configure web data""" self.web = True @@ -2113,22 +2113,22 @@ def pass_in_web_mode(self): if os.environ['MADGRAPH_BASE']: self.options['mg5_path'] = pjoin(os.environ['MADGRAPH_BASE'],'MG5') - ############################################################################ + ############################################################################ def check_output_type(self, path): """ Check that the output path is a valid madevent directory """ - + bin_path = os.path.join(path,'bin') if os.path.isfile(os.path.join(bin_path,'generate_events')): return True - else: + else: return False ############################################################################ def set_configuration(self, amcatnlo=False, final=True, **opt): - """assign all configuration variable from file + """assign all configuration variable from file loop over the different config file if config_file not define """ - - super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, + + super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, final=final, **opt) if not final: @@ -2171,24 +2171,24 @@ def set_configuration(self, amcatnlo=False, final=True, **opt): if not os.path.exists(pjoin(path, 'sys_calc')): logger.info("No valid SysCalc path found") continue - # No else since the next line reinitialize the option to the + # No else since the next line reinitialize the option to the #previous value anyway self.options[key] = os.path.realpath(path) continue else: self.options[key] = None - - + + return self.options ############################################################################ - def do_banner_run(self, line): + def do_banner_run(self, line): """Make a run from the banner file""" - + 
args = self.split_arg(line) #check the validity of the arguments - self.check_banner_run(args) - + self.check_banner_run(args) + # Remove previous cards for name in ['delphes_trigger.dat', 'delphes_card.dat', 'pgs_card.dat', 'pythia_card.dat', 'madspin_card.dat', @@ -2197,20 +2197,20 @@ def do_banner_run(self, line): os.remove(pjoin(self.me_dir, 'Cards', name)) except Exception: pass - + banner_mod.split_banner(args[0], self.me_dir, proc_card=False) - + # Check if we want to modify the run if not self.force: ans = self.ask('Do you want to modify the Cards?', 'n', ['y','n']) if ans == 'n': self.force = True - + # Call Generate events self.exec_cmd('generate_events %s %s' % (self.run_name, self.force and '-f' or '')) - - - + + + ############################################################################ def do_display(self, line, output=sys.stdout): """Display current internal status""" @@ -2223,7 +2223,7 @@ def do_display(self, line, output=sys.stdout): #return valid run_name data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1:] for n in data] - + if data: out = {} for name, tag in data: @@ -2235,11 +2235,11 @@ def do_display(self, line, output=sys.stdout): print('the runs available are:') for run_name, tags in out.items(): print(' run: %s' % run_name) - print(' tags: ', end=' ') + print(' tags: ', end=' ') print(', '.join(tags)) else: print('No run detected.') - + elif args[0] == 'options': outstr = " Run Options \n" outstr += " ----------- \n" @@ -2260,8 +2260,8 @@ def do_display(self, line, output=sys.stdout): if value == default: outstr += " %25s \t:\t%s\n" % (key,value) else: - outstr += " %25s \t:\t%s (user set)\n" % (key,value) - outstr += "\n" + outstr += " %25s \t:\t%s (user set)\n" % (key,value) + outstr += "\n" outstr += " Configuration Options \n" outstr += " --------------------- \n" for key, default in self.options_configuration.items(): @@ -2275,15 +2275,15 @@ def do_display(self, line, 
output=sys.stdout): self.do_print_results(' '.join(args[1:])) else: super(MadEventCmd, self).do_display(line, output) - + def do_save(self, line, check=True, to_keep={}): - """Not in help: Save information to file""" + """Not in help: Save information to file""" args = self.split_arg(line) # Check argument validity if check: self.check_save(args) - + if args[0] == 'options': # First look at options which should be put in MG5DIR/input to_define = {} @@ -2295,7 +2295,7 @@ def do_save(self, line, check=True, to_keep={}): for key, default in self.options_madevent.items(): if self.options[key] != self.options_madevent[key]: to_define[key] = self.options[key] - + if '--all' in args: for key, default in self.options_madgraph.items(): if self.options[key] != self.options_madgraph[key]: @@ -2312,12 +2312,12 @@ def do_save(self, line, check=True, to_keep={}): filepath = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basefile = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basedir = self.me_dir - + if to_keep: to_define = to_keep self.write_configuration(filepath, basefile, basedir, to_define) - - + + def do_edit_cards(self, line): @@ -2326,80 +2326,80 @@ def do_edit_cards(self, line): # Check argument's validity mode = self.check_generate_events(args) self.ask_run_configuration(mode) - + return ############################################################################ - + ############################################################################ def do_restart_gridpack(self, line): """ syntax restart_gridpack --precision=1.0 --restart_zero collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) - + # initialize / remove lhapdf mode #self.run_card = 
banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) #self.configure_directory() - + gensym = gen_ximprove.gensym(self) - + min_precision = 1.0 resubmit_zero=False if '--precision=' in line: s = line.index('--precision=') + len('--precision=') arg=line[s:].split(1)[0] min_precision = float(arg) - + if '--restart_zero' in line: resubmit_zero = True - - + + gensym.resubmit(min_precision, resubmit_zero) self.monitor(run_type='All jobs submitted for gridpack', html=True) #will be done during the refine (more precisely in gen_ximprove) cross, error = sum_html.make_all_html_results(self) self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(gensym.run_statistics)) - + #self.exec_cmd('combine_events', postcmd=False) #self.exec_cmd('store_events', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) self.exec_cmd('create_gridpack', postcmd=False) - - - ############################################################################ + + + ############################################################################ ############################################################################ def do_generate_events(self, line): """Main Commands: launch the full chain """ - + self.banner = None self.Gdirs = None - + args = self.split_arg(line) # Check argument's validity mode = self.check_generate_events(args) switch_mode = self.ask_run_configuration(mode, args) if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir), None, 'parton') else: self.set_run_name(args[0], None, 'parton', True) args.pop(0) - + 
self.run_generate_events(switch_mode, args) self.postprocessing() @@ -2420,8 +2420,8 @@ def postprocessing(self): def rivet_postprocessing(self, rivet_config, postprocess_RIVET, postprocess_CONTUR): - # Check number of Rivet jobs to run - run_dirs = [pjoin(self.me_dir, 'Events',run_name) + # Check number of Rivet jobs to run + run_dirs = [pjoin(self.me_dir, 'Events',run_name) for run_name in self.postprocessing_dirs] nb_rivet = len(run_dirs) @@ -2550,10 +2550,10 @@ def wait_monitoring(Idle, Running, Done): wrapper = open(pjoin(self.me_dir, "Analysis", "contur", "run_contur.sh"), "w") wrapper.write(set_env) - + wrapper.write('{0}\n'.format(contur_cmd)) wrapper.close() - + misc.call(["run_contur.sh"], cwd=(pjoin(self.me_dir, "Analysis", "contur"))) logger.info("Contur outputs are stored in {0}".format(pjoin(self.me_dir, "Analysis", "contur","conturPlot"))) @@ -2572,7 +2572,7 @@ def run_generate_events(self, switch_mode, args): self.do_set('run_mode 2') self.do_set('nb_core 1') - if self.run_card['gridpack'] in self.true: + if self.run_card['gridpack'] in self.true: # Running gridpack warmup gridpack_opts=[('accuracy', 0.01), ('points', 2000), @@ -2593,7 +2593,7 @@ def run_generate_events(self, switch_mode, args): # Regular run mode logger.info('Generating %s events with run name %s' % (self.run_card['nevents'], self.run_name)) - + self.exec_cmd('survey %s %s' % (self.run_name,' '.join(args)), postcmd=False) nb_event = self.run_card['nevents'] @@ -2601,7 +2601,7 @@ def run_generate_events(self, switch_mode, args): self.exec_cmd('refine %s' % nb_event, postcmd=False) if not float(self.results.current['cross']): # Zero cross-section. Try to guess why - text = '''Survey return zero cross section. + text = '''Survey return zero cross section. Typical reasons are the following: 1) A massive s-channel particle has a width set to zero. 
2) The pdf are zero for at least one of the initial state particles @@ -2613,17 +2613,17 @@ def run_generate_events(self, switch_mode, args): raise ZeroResult('See https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/FAQ-General-14') else: bypass_run = True - + #we can bypass the following if scan and first result is zero if not bypass_run: self.exec_cmd('refine %s --treshold=%s' % (nb_event,self.run_card['second_refine_treshold']) , postcmd=False) - + self.exec_cmd('combine_events', postcmd=False,printcmd=False) self.print_results_in_shell(self.results.current) if self.run_card['use_syst']: - if self.run_card['systematics_program'] == 'auto': + if self.run_card['systematics_program'] == 'auto': scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): to_use = 'systematics' @@ -2634,26 +2634,26 @@ def run_generate_events(self, switch_mode, args): else: logger.critical('Unvalid options for systematics_program: bypass computation of systematics variations.') to_use = 'none' - + if to_use == 'systematics': if self.run_card['systematics_arguments'] != ['']: self.exec_cmd('systematics %s %s ' % (self.run_name, - ' '.join(self.run_card['systematics_arguments'])), + ' '.join(self.run_card['systematics_arguments'])), postcmd=False, printcmd=False) else: self.exec_cmd('systematics %s --from_card' % self.run_name, - postcmd=False,printcmd=False) + postcmd=False,printcmd=False) elif to_use == 'syscalc': self.run_syscalc('parton') - - - self.create_plot('parton') - self.exec_cmd('store_events', postcmd=False) + + + self.create_plot('parton') + self.exec_cmd('store_events', postcmd=False) if self.run_card['boost_event'].strip() and self.run_card['boost_event'] != 'False': self.boost_events() - - - self.exec_cmd('reweight -from_cards', postcmd=False) + + + self.exec_cmd('reweight -from_cards', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) if self.run_card['time_of_flight']>=0: self.exec_cmd("add_time_of_flight --threshold=%s" % 
self.run_card['time_of_flight'] ,postcmd=False) @@ -2664,43 +2664,43 @@ def run_generate_events(self, switch_mode, args): self.create_root_file(input , output) self.exec_cmd('madanalysis5_parton --no_default', postcmd=False, printcmd=False) - # shower launches pgs/delphes if needed + # shower launches pgs/delphes if needed self.exec_cmd('shower --no_default', postcmd=False, printcmd=False) self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) self.exec_cmd('rivet --no_default', postcmd=False, printcmd=False) self.store_result() - - if self.allow_notification_center: - misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), - '%s: %s +- %s ' % (self.results.current['run_name'], + + if self.allow_notification_center: + misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), + '%s: %s +- %s ' % (self.results.current['run_name'], self.results.current['cross'], self.results.current['error'])) - + def boost_events(self): - + if not self.run_card['boost_event']: return - + if self.run_card['boost_event'].startswith('lambda'): if not isinstance(self, cmd.CmdShell): raise Exception("boost not allowed online") filter = eval(self.run_card['boost_event']) else: raise Exception - + path = [pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')] - + for p in path: if os.path.exists(p): event_path = p break else: raise Exception("fail to find event file for the boost") - - + + lhe = lhe_parser.EventFile(event_path) with misc.TMP_directory() as tmp_dir: output = lhe_parser.EventFile(pjoin(tmp_dir, os.path.basename(event_path)), 'w') @@ -2711,28 +2711,28 @@ def boost_events(self): event.boost(filter) #write this modify event output.write(str(event)) - output.write('\n') + output.write('\n') lhe.close() - 
files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) - - - - - + files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) + + + + + def do_initMadLoop(self,line): - """Compile and run MadLoop for a certain number of PS point so as to + """Compile and run MadLoop for a certain number of PS point so as to initialize MadLoop (setup the zero helicity and loop filter.)""" - + args = line.split() # Check argument's validity options = self.check_initMadLoop(args) - + if not options['force']: self.ask_edit_cards(['MadLoopParams.dat'], mode='fixed', plot=False) self.exec_cmd('treatcards loop --no_MadLoopInit') if options['refresh']: - for filter in misc.glob('*Filter*', + for filter in misc.glob('*Filter*', pjoin(self.me_dir,'SubProcesses','MadLoop5_resources')): logger.debug("Resetting filter '%s'."%os.path.basename(filter)) os.remove(filter) @@ -2753,14 +2753,14 @@ def do_initMadLoop(self,line): def do_launch(self, line, *args, **opt): """Main Commands: exec generate_events for 2>N and calculate_width for 1>N""" - + if self.ninitial == 1: logger.info("Note that since 2.3. 
The launch for 1>N pass in event generation\n"+ " To have the previous behavior use the calculate_decay_widths function") # self.do_calculate_decay_widths(line, *args, **opt) #else: self.do_generate_events(line, *args, **opt) - + def print_results_in_shell(self, data): """Have a nice results prints in the shell, data should be of type: gen_crossxhtml.OneTagResults""" @@ -2770,7 +2770,7 @@ def print_results_in_shell(self, data): if data['run_statistics']: globalstat = sum_html.RunStatistics() - + logger.info(" " ) logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2786,13 +2786,13 @@ def print_results_in_shell(self, data): logger.warning(globalstat.get_warning_text()) logger.info(" ") - + logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) - + total_time = int(sum(_['cumulative_timing'] for _ in data['run_statistics'].values())) if total_time > 0: logger.info(" Cumulative sequential time for this run: %s"%misc.format_time(total_time)) - + if self.ninitial == 1: logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) else: @@ -2810,18 +2810,18 @@ def print_results_in_shell(self, data): if len(split)!=3: continue scale, cross, error = split - cross_sections[float(scale)] = (float(cross), float(error)) + cross_sections[float(scale)] = (float(cross), float(error)) if len(cross_sections)>0: logger.info(' Pythia8 merged cross-sections are:') for scale in sorted(cross_sections.keys()): logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ (scale,cross_sections[scale][0],cross_sections[scale][1])) - + else: if self.ninitial == 1: logger.info(" Matched width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) else: - logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) + logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) logger.info(" 
Nb of events after matching/merging : %d" % int(data['nb_event_pythia'])) if self.run_card['use_syst'] in self.true and \ (int(self.run_card['ickkw'])==1 or self.run_card['ktdurham']>0.0 @@ -2838,9 +2838,9 @@ def print_results_in_file(self, data, path, mode='w', format='full'): data should be of type: gen_crossxhtml.OneTagResults""" if not data: return - + fsock = open(path, mode) - + if data['run_statistics']: logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2851,7 +2851,7 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if format == "full": fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ (data['run_name'],data['tag'], os.path.basename(self.me_dir))) - + if self.ninitial == 1: fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) else: @@ -2861,20 +2861,20 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if self.ninitial == 1: fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) else: - fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) + fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) fsock.write(" \n" ) elif format == "short": if mode == "w": fsock.write("# run_name tag cross error Nb_event cross_after_matching nb_event_after matching\n") - + if data['cross_pythia'] and data['nb_event_pythia']: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s %(cross_pythia)s %(nb_event_pythia)s\n" else: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s\n" fsock.write(text % data) - - ############################################################################ + + ############################################################################ def do_calculate_decay_widths(self, 
line): """Main Commands: launch decay width calculation and automatic inclusion of calculated widths and BRs in the param_card.""" @@ -2887,21 +2887,21 @@ def do_calculate_decay_widths(self, line): self.Gdirs = None if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], reload_card=True) args.pop(0) self.configure_directory() - + # Running gridpack warmup opts=[('accuracy', accuracy), # default 0.01 ('points', 1000), ('iterations',9)] logger.info('Calculating decay widths with run name %s' % self.run_name) - + self.exec_cmd('survey %s %s' % \ (self.run_name, " ".join(['--' + opt + '=' + str(val) for (opt,val) \ @@ -2910,26 +2910,26 @@ def do_calculate_decay_widths(self, line): self.refine_mode = "old" # specify how to combine event self.exec_cmd('combine_events', postcmd=False) self.exec_cmd('store_events', postcmd=False) - + self.collect_decay_widths() self.print_results_in_shell(self.results.current) - self.update_status('calculate_decay_widths done', - level='parton', makehtml=False) + self.update_status('calculate_decay_widths done', + level='parton', makehtml=False) + - ############################################################################ def collect_decay_widths(self): - """ Collect the decay widths and calculate BRs for all particles, and put - in param_card form. + """ Collect the decay widths and calculate BRs for all particles, and put + in param_card form. """ - + particle_dict = {} # store the results run_name = self.run_name # Looping over the Subprocesses for P_path in SubProcesses.get_subP(self.me_dir): ids = SubProcesses.get_subP_ids(P_path) - # due to grouping we need to compute the ratio factor for the + # due to grouping we need to compute the ratio factor for the # ungroup resutls (that we need here). 
Note that initial particles # grouping are not at the same stage as final particle grouping nb_output = len(ids) / (len(set([p[0] for p in ids]))) @@ -2940,30 +2940,30 @@ def collect_decay_widths(self): particle_dict[particles[0]].append([particles[1:], result/nb_output]) except KeyError: particle_dict[particles[0]] = [[particles[1:], result/nb_output]] - + self.update_width_in_param_card(particle_dict, initial = pjoin(self.me_dir, 'Cards', 'param_card.dat'), output=pjoin(self.me_dir, 'Events', run_name, "param_card.dat")) - + @staticmethod def update_width_in_param_card(decay_info, initial=None, output=None): # Open the param_card.dat and insert the calculated decays and BRs - + if not output: output = initial - + param_card_file = open(initial) param_card = param_card_file.read().split('\n') param_card_file.close() decay_lines = [] line_number = 0 - # Read and remove all decays from the param_card + # Read and remove all decays from the param_card while line_number < len(param_card): line = param_card[line_number] if line.lower().startswith('decay'): - # Read decay if particle in decay_info - # DECAY 6 1.455100e+00 + # Read decay if particle in decay_info + # DECAY 6 1.455100e+00 line = param_card.pop(line_number) line = line.split() particle = 0 @@ -2996,7 +2996,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): break line=param_card[line_number] if particle and particle not in decay_info: - # No decays given, only total width + # No decays given, only total width decay_info[particle] = [[[], width]] else: # Not decay line_number += 1 @@ -3004,7 +3004,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): while not param_card[-1] or param_card[-1].startswith('#'): param_card.pop(-1) - # Append calculated and read decays to the param_card + # Append calculated and read decays to the param_card param_card.append("#\n#*************************") param_card.append("# Decay widths *") 
param_card.append("#*************************") @@ -3018,7 +3018,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): param_card.append("# BR NDA ID1 ID2 ...") brs = [[(val[1]/width).real, val[0]] for val in decay_info[key] if val[1]] for val in sorted(brs, reverse=True): - param_card.append(" %e %i %s # %s" % + param_card.append(" %e %i %s # %s" % (val[0].real, len(val[1]), " ".join([str(v) for v in val[1]]), val[0] * width @@ -3031,7 +3031,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): ############################################################################ def do_multi_run(self, line): - + args = self.split_arg(line) # Check argument's validity mode = self.check_multi_run(args) @@ -3047,7 +3047,7 @@ def do_multi_run(self, line): self.check_param_card(path, run=False) #store it locally to avoid relaunch param_card_iterator, self.param_card_iterator = self.param_card_iterator, [] - + crossoversig = 0 inv_sq_err = 0 nb_event = 0 @@ -3055,8 +3055,8 @@ def do_multi_run(self, line): self.nb_refine = 0 self.exec_cmd('generate_events %s_%s -f' % (main_name, i), postcmd=False) # Update collected value - nb_event += int(self.results[self.run_name][-1]['nb_event']) - self.results.add_detail('nb_event', nb_event , run=main_name) + nb_event += int(self.results[self.run_name][-1]['nb_event']) + self.results.add_detail('nb_event', nb_event , run=main_name) cross = self.results[self.run_name][-1]['cross'] error = self.results[self.run_name][-1]['error'] + 1e-99 crossoversig+=cross/error**2 @@ -3070,7 +3070,7 @@ def do_multi_run(self, line): os.mkdir(pjoin(self.me_dir,'Events', self.run_name)) except Exception: pass - os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' + os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' % {'bin': 
self.dirbin, 'event': pjoin(self.me_dir,'Events'), 'name': self.run_name}) @@ -3084,19 +3084,19 @@ def do_multi_run(self, line): self.create_root_file('%s/unweighted_events.lhe' % self.run_name, '%s/unweighted_events.root' % self.run_name) - - path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") + + path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") self.create_plot('parton', path, pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') ) - - if not os.path.exists('%s.gz' % path): + + if not os.path.exists('%s.gz' % path): misc.gzip(path) self.update_status('', level='parton') - self.print_results_in_shell(self.results.current) - + self.print_results_in_shell(self.results.current) + cpath = pjoin(self.me_dir,'Cards','param_card.dat') if param_card_iterator: @@ -3112,21 +3112,21 @@ def do_multi_run(self, line): path = pjoin(self.me_dir, 'Events','scan_%s.txt' % scan_name) logger.info("write all cross-section results in %s" % path, '$MG:BOLD') param_card_iterator.write_summary(path) - - ############################################################################ + + ############################################################################ def do_treatcards(self, line, mode=None, opt=None): """Advanced commands: create .inc files from param_card.dat/run_card.dat""" if not mode and not opt: args = self.split_arg(line) mode, opt = self.check_treatcards(args) - + # To decide whether to refresh MadLoop's helicity filters, it is necessary # to check if the model parameters where modified or not, before doing - # anything else. + # anything else. need_MadLoopFilterUpdate = False - # Just to record what triggered the reinitialization of MadLoop for a + # Just to record what triggered the reinitialization of MadLoop for a # nice debug message. 
type_of_change = '' if not opt['forbid_MadLoopInit'] and self.proc_characteristics['loop_induced'] \ @@ -3137,10 +3137,10 @@ def do_treatcards(self, line, mode=None, opt=None): (os.path.getmtime(paramDat)-os.path.getmtime(paramInc)) > 0.0: need_MadLoopFilterUpdate = True type_of_change = 'model' - + ML_in = pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat') ML_out = pjoin(self.me_dir,"SubProcesses", - "MadLoop5_resources", "MadLoopParams.dat") + "MadLoop5_resources", "MadLoopParams.dat") if (not os.path.isfile(ML_in)) or (not os.path.isfile(ML_out)) or \ (os.path.getmtime(ML_in)-os.path.getmtime(ML_out)) > 0.0: need_MadLoopFilterUpdate = True @@ -3148,7 +3148,7 @@ def do_treatcards(self, line, mode=None, opt=None): #check if no 'Auto' are present in the file self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) - + if mode in ['param', 'all']: model = self.find_model_name() tmp_model = os.path.basename(model) @@ -3160,9 +3160,9 @@ def do_treatcards(self, line, mode=None, opt=None): check_param_card.check_valid_param_card(mg5_param) opt['param_card'] = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') else: - check_param_card.check_valid_param_card(opt['param_card']) - - logger.debug('write compile file for card: %s' % opt['param_card']) + check_param_card.check_valid_param_card(opt['param_card']) + + logger.debug('write compile file for card: %s' % opt['param_card']) param_card = check_param_card.ParamCard(opt['param_card']) outfile = pjoin(opt['output_dir'], 'param_card.inc') ident_card = pjoin(self.me_dir,'Cards','ident_card.dat') @@ -3185,10 +3185,10 @@ def do_treatcards(self, line, mode=None, opt=None): devnull.close() default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat') - need_mp = self.proc_characteristics['loop_induced'] + need_mp = self.proc_characteristics['loop_induced'] param_card.write_inc_file(outfile, ident_card, default, need_mp=need_mp) - - + + if mode in ['run', 'all']: if not hasattr(self, 'run_card'): 
run_card = banner_mod.RunCard(opt['run_card'], path=pjoin(self.me_dir, 'Cards', 'run_card.dat')) @@ -3202,7 +3202,7 @@ def do_treatcards(self, line, mode=None, opt=None): run_card['lpp2'] = 0 run_card['ebeam1'] = 0 run_card['ebeam2'] = 0 - + # Ensure that the bias parameters has all the required input from the # run_card if run_card['bias_module'].lower() not in ['dummy','none']: @@ -3219,7 +3219,7 @@ def do_treatcards(self, line, mode=None, opt=None): mandatory_file,run_card['bias_module'])) misc.copytree(run_card['bias_module'], pjoin(self.me_dir,'Source','BIAS', os.path.basename(run_card['bias_module']))) - + #check expected parameters for the module. default_bias_parameters = {} start, last = False,False @@ -3244,50 +3244,50 @@ def do_treatcards(self, line, mode=None, opt=None): for pair in line.split(','): if not pair.strip(): continue - x,y =pair.split(':') + x,y =pair.split(':') x=x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y elif ':' in line: x,y = line.split(':') x = x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y for key,value in run_card['bias_parameters'].items(): if key not in default_bias_parameters: logger.warning('%s not supported by the bias module. We discard this entry.', key) else: default_bias_parameters[key] = value - run_card['bias_parameters'] = default_bias_parameters - - - # Finally write the include file + run_card['bias_parameters'] = default_bias_parameters + + + # Finally write the include file run_card.write_include_file(opt['output_dir']) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: - self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, + self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat')) # The writing out of MadLoop filter is potentially dangerous # when running in multi-core with a central disk. 
So it is turned - # off here. If these filters were not initialized then they will + # off here. If these filters were not initialized then they will # have to be re-computed at the beginning of each run. if 'WriteOutFilters' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('WriteOutFilters'): logger.info( -"""You chose to have MadLoop writing out filters. +"""You chose to have MadLoop writing out filters. Beware that this can be dangerous for local multicore runs.""") self.MadLoopparam.set('WriteOutFilters',False, changeifuserset=False) - + # The conservative settings below for 'CTModeInit' and 'ZeroThres' # help adress issues for processes like g g > h z, and g g > h g - # where there are some helicity configuration heavily suppressed - # (by several orders of magnitude) so that the helicity filter + # where there are some helicity configuration heavily suppressed + # (by several orders of magnitude) so that the helicity filter # needs high numerical accuracy to correctly handle this spread in # magnitude. Also, because one cannot use the Born as a reference - # scale, it is better to force quadruple precision *for the + # scale, it is better to force quadruple precision *for the # initialization points only*. This avoids numerical accuracy issues # when setting up the helicity filters and does not significantly # slow down the run. @@ -3298,21 +3298,21 @@ def do_treatcards(self, line, mode=None, opt=None): # It is a bit superficial to use the level 2 which tries to numerically # map matching helicities (because of CP symmetry typically) together. -# It is useless in the context of MC over helicities and it can +# It is useless in the context of MC over helicities and it can # potentially make the helicity double checking fail. self.MadLoopparam.set('HelicityFilterLevel',1, changeifuserset=False) # To be on the safe side however, we ask for 4 consecutive matching # helicity filters. 
self.MadLoopparam.set('CheckCycle',4, changeifuserset=False) - + # For now it is tricky to have each channel performing the helicity # double check. What we will end up doing is probably some kind # of new initialization round at the beginning of each launch - # command, to reset the filters. + # command, to reset the filters. self.MadLoopparam.set('DoubleCheckHelicityFilter',False, changeifuserset=False) - + # Thanks to TIR recycling, TIR is typically much faster for Loop-induced # processes when not doing MC over helicities, so that we place OPP last. if not hasattr(self, 'run_card'): @@ -3349,7 +3349,7 @@ def do_treatcards(self, line, mode=None, opt=None): logger.warning( """You chose to also use a lorentz rotation for stability tests (see parameter NRotations_[DP|QP]). Beware that, for optimization purposes, MadEvent uses manual TIR cache clearing which is not compatible - with the lorentz rotation stability test. The number of these rotations to be used will be reset to + with the lorentz rotation stability test. The number of these rotations to be used will be reset to zero by MadLoop. You can avoid this by changing the parameter 'FORCE_ML_HELICITY_SUM' int he matrix.f files to be .TRUE. so that the sum over helicity configurations is performed within MadLoop (in which case the helicity of final state particles cannot be speicfied in the LHE file.""") @@ -3363,15 +3363,15 @@ def do_treatcards(self, line, mode=None, opt=None): # self.MadLoopparam.set('NRotations_DP',0,changeifuserset=False) # Revert to the above to be slightly less robust but twice faster. 
self.MadLoopparam.set('NRotations_DP',1,changeifuserset=False) - self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) - + self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) + # Finally, the stability tests are slightly less reliable for process - # with less or equal than 4 final state particles because the + # with less or equal than 4 final state particles because the # accessible kinematic is very limited (i.e. lorentz rotations don't # shuffle invariants numerics much). In these cases, we therefore # increase the required accuracy to 10^-7. # This is important for getting g g > z z [QCD] working with a - # ptheavy cut as low as 1 GeV. + # ptheavy cut as low as 1 GeV. if self.proc_characteristics['nexternal']<=4: if ('MLStabThres' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('MLStabThres')>1.0e-7): @@ -3381,12 +3381,12 @@ def do_treatcards(self, line, mode=None, opt=None): than four external legs, so this is not recommended (especially not for g g > z z).""") self.MadLoopparam.set('MLStabThres',1.0e-7,changeifuserset=False) else: - self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) + self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) #write the output file self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses","MadLoop5_resources", "MadLoopParams.dat")) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: # Now Update MadLoop filters if necessary (if modifications were made to # the model parameters). 
@@ -3403,12 +3403,12 @@ def do_treatcards(self, line, mode=None, opt=None): elif not opt['forbid_MadLoopInit'] and \ MadLoopInitializer.need_MadLoopInit(self.me_dir): self.exec_cmd('initMadLoop -f') - - ############################################################################ + + ############################################################################ def do_survey(self, line): """Advanced commands: launch survey for the current process """ - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) @@ -3416,7 +3416,7 @@ def do_survey(self, line): if os.path.exists(pjoin(self.me_dir,'error')): os.remove(pjoin(self.me_dir,'error')) - + self.configure_directory() # Save original random number self.random_orig = self.random @@ -3435,9 +3435,9 @@ def do_survey(self, line): P_zero_result = [] # check the number of times where they are no phase-space # File for the loop (for loop induced) - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources')) and cluster.need_transfer(self.options): - tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', + tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', 'MadLoop5_resources.tar.gz'), 'w:gz', dereference=True) tf.add(pjoin(self.me_dir,'SubProcesses','MadLoop5_resources'), arcname='MadLoop5_resources') @@ -3467,7 +3467,7 @@ def do_survey(self, line): except Exception as error: logger.debug(error) pass - + jobs, P_zero_result = ajobcreator.launch() # Check if all or only some fails if P_zero_result: @@ -3481,60 +3481,60 @@ def do_survey(self, line): self.get_Gdir() for P in P_zero_result: self.Gdirs[0][pjoin(self.me_dir,'SubProcesses',P)] = [] - + self.monitor(run_type='All jobs submitted for survey', html=True) if not self.history or 'survey' in self.history[-1] or self.ninitial ==1 or \ self.run_card['gridpack']: #will be done during the refine (more precisely in gen_ximprove) cross, error = self.make_make_all_html_results() 
self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(ajobcreator.run_statistics)) self.update_status('End survey', 'parton', makehtml=False) ############################################################################ def pass_in_difficult_integration_mode(self, rate=1): """be more secure for the integration to not miss it due to strong cut""" - + # improve survey options if default if self.opts['points'] == self._survey_options['points'][1]: self.opts['points'] = (rate+2) * self._survey_options['points'][1] if self.opts['iterations'] == self._survey_options['iterations'][1]: self.opts['iterations'] = 1 + rate + self._survey_options['iterations'][1] if self.opts['accuracy'] == self._survey_options['accuracy'][1]: - self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) - + self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) + # Modify run_config.inc in order to improve the refine conf_path = pjoin(self.me_dir, 'Source','run_config.inc') files.cp(conf_path, conf_path + '.bk') # text = open(conf_path).read() - min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) - + min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) + text = re.sub('''\(min_events = \d+\)''', '(min_events = %i )' % min_evt, text) text = re.sub('''\(max_events = \d+\)''', '(max_events = %i )' % max_evt, text) fsock = open(conf_path, 'w') fsock.write(text) fsock.close() - + # Compile for name in ['../bin/internal/gen_ximprove', 'all']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) - - - ############################################################################ + + + ############################################################################ def 
do_refine(self, line): """Advanced commands: launch survey for the current process """ - devnull = open(os.devnull, 'w') + devnull = open(os.devnull, 'w') self.nb_refine += 1 args = self.split_arg(line) treshold=None - - + + for a in args: if a.startswith('--treshold='): treshold = float(a.split('=',1)[1]) @@ -3548,8 +3548,8 @@ def do_refine(self, line): break # Check argument's validity self.check_refine(args) - - refine_opt = {'err_goal': args[0], 'split_channels': True} + + refine_opt = {'err_goal': args[0], 'split_channels': True} precision = args[0] if len(args) == 2: refine_opt['max_process']= args[1] @@ -3560,15 +3560,15 @@ def do_refine(self, line): # Update random number self.update_random() self.save_random() - + if self.cluster_mode: logger.info('Creating Jobs') self.update_status('Refine results to %s' % precision, level=None) - + self.total_jobs = 0 - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + # cleanning the previous job for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() @@ -3589,14 +3589,14 @@ def do_refine(self, line): level = 5 if value.has_warning(): level = 10 - logger.log(level, + logger.log(level, value.nice_output(str('/'.join([key[0],'G%s'%key[1]]))). replace(' statistics','')) logger.debug(globalstat.nice_output('combined', no_warning=True)) - + if survey_statistics: x_improve.run_statistics = survey_statistics - + x_improve.launch() # create the ajob for the refinment. 
if not self.history or 'refine' not in self.history[-1]: cross, error = x_improve.update_html() #update html results for survey @@ -3610,9 +3610,9 @@ def do_refine(self, line): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) - + if os.path.exists(pjoin(Pdir, 'ajob1')): cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py @@ -3629,7 +3629,7 @@ def do_refine(self, line): ###self.compile(['all'], cwd=Pdir) alljobs = misc.glob('ajob*', Pdir) - + #remove associated results.dat (ensure to not mix with all data) Gre = re.compile("\s*j=(G[\d\.\w]+)") for job in alljobs: @@ -3637,49 +3637,49 @@ def do_refine(self, line): for Gdir in Gdirs: if os.path.exists(pjoin(Pdir, Gdir, 'results.dat')): os.remove(pjoin(Pdir, Gdir,'results.dat')) - - nb_tot = len(alljobs) + + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), - run_type='Refine number %s on %s (%s/%s)' % + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) - self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, html=True) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except Exception: pass - + if isinstance(x_improve, gen_ximprove.gen_ximprove_v4): # the merge of the events.lhe is handle in the x_improve class - # for splitted runs. (and partly in store_events). + # for splitted runs. (and partly in store_events). 
combine_runs.CombineRuns(self.me_dir) self.refine_mode = "old" else: self.refine_mode = "new" - + cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - self.results.add_detail('run_statistics', + self.results.add_detail('run_statistics', dict(self.results.get_detail('run_statistics'))) self.update_status('finish refine', 'parton', makehtml=False) devnull.close() - - ############################################################################ + + ############################################################################ def do_comine_iteration(self, line): """Not in help: Combine a given iteration combine_iteration Pdir Gdir S|R step - S is for survey + S is for survey R is for refine - step is the iteration number (not very critical)""" + step is the iteration number (not very critical)""" self.set_run_name("tmp") self.configure_directory(html_opening=False) @@ -3695,12 +3695,12 @@ def do_comine_iteration(self, line): gensym.combine_iteration(Pdir, Gdir, int(step)) elif mode == "R": refine = gen_ximprove.gen_ximprove_share(self) - refine.combine_iteration(Pdir, Gdir, int(step)) - - + refine.combine_iteration(Pdir, Gdir, int(step)) - - ############################################################################ + + + + ############################################################################ def do_combine_events(self, line): """Advanced commands: Launch combine events""" start=time.time() @@ -3710,11 +3710,11 @@ def do_combine_events(self, line): self.check_combine_events(args) self.update_status('Combining Events', level='parton') - + if self.run_card['gridpack'] and isinstance(self, GridPackCmd): return GridPackCmd.do_combine_events(self, line) - + # Define The Banner tag = self.run_card['run_tag'] # Update the banner with the pythia card @@ -3727,14 +3727,14 @@ def do_combine_events(self, line): self.banner.change_seed(self.random_orig) if not os.path.exists(pjoin(self.me_dir, 
'Events', self.run_name)): os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) - self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, + self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -3751,12 +3751,12 @@ def do_combine_events(self, line): os.remove(pjoin(Gdir, 'events.lhe')) continue - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec'), result.get('xerru'), result.get('axsec') ) - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.run_card['nevents']) @@ -3765,13 +3765,13 @@ def do_combine_events(self, line): AllEvent.add(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() if len(AllEvent) == 0: - nb_event = 0 + nb_event = 0 else: nb_event = AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.run_card['nevents'], @@ -3791,22 +3791,22 @@ def do_combine_events(self, line): os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) - + if self.run_card['bias_module'].lower() not in ['dummy', 'none'] and nb_event: self.correct_bias() elif 
self.run_card['custom_fcts']: self.correct_bias() logger.info("combination of events done in %s s ", time.time()-start) - + self.to_store.append('event') - - ############################################################################ + + ############################################################################ def correct_bias(self): - """check the first event and correct the weight by the bias + """check the first event and correct the weight by the bias and correct the cross-section. - If the event do not have the bias tag it means that the bias is + If the event do not have the bias tag it means that the bias is one modifying the cross-section/shape so we have nothing to do """ @@ -3834,7 +3834,7 @@ def correct_bias(self): output.write('') output.close() lhe.close() - + # MODIFY THE BANNER i.e. INIT BLOCK # ensure information compatible with normalisation choice total_cross = sum(cross[key] for key in cross) @@ -3846,8 +3846,8 @@ def correct_bias(self): elif self.run_card['event_norm'] == 'unity': total_cross = self.results.current['cross'] * total_cross / nb_event for key in cross: - cross[key] *= total_cross / nb_event - + cross[key] *= total_cross / nb_event + bannerfile = lhe_parser.EventFile(pjoin(self.me_dir, 'Events', self.run_name, '.banner.tmp.gz'),'w') banner = banner_mod.Banner(lhe.banner) banner.modify_init_cross(cross) @@ -3862,12 +3862,12 @@ def correct_bias(self): os.remove(lhe.name) os.remove(bannerfile.name) os.remove(output.name) - - + + self.results.current['cross'] = total_cross self.results.current['error'] = 0 - - ############################################################################ + + ############################################################################ def do_store_events(self, line): """Advanced commands: Launch store events""" @@ -3883,16 +3883,16 @@ def do_store_events(self, line): if not os.path.exists(pjoin(self.me_dir, 'Events', run)): os.mkdir(pjoin(self.me_dir, 'Events', run)) if not 
os.path.exists(pjoin(self.me_dir, 'HTML', run)): - os.mkdir(pjoin(self.me_dir, 'HTML', run)) - + os.mkdir(pjoin(self.me_dir, 'HTML', run)) + # 1) Store overall process information #input = pjoin(self.me_dir, 'SubProcesses', 'results.dat') #output = pjoin(self.me_dir, 'SubProcesses', '%s_results.dat' % run) - #files.cp(input, output) + #files.cp(input, output) # 2) Treat the files present in the P directory - # Ensure that the number of events is different of 0 + # Ensure that the number of events is different of 0 if self.results.current['nb_event'] == 0 and not self.run_card['gridpack']: logger.warning("No event detected. No cleaning performed! This should allow to run:\n" + " cd Subprocesses; ../bin/internal/combine_events\n"+ @@ -3910,18 +3910,18 @@ def do_store_events(self, line): # if os.path.exists(pjoin(G_path, 'results.dat')): # input = pjoin(G_path, 'results.dat') # output = pjoin(G_path, '%s_results.dat' % run) - # files.cp(input, output) + # files.cp(input, output) #except Exception: - # continue + # continue # Store log try: if os.path.exists(pjoin(G_path, 'log.txt')): input = pjoin(G_path, 'log.txt') output = pjoin(G_path, '%s_log.txt' % run) - files.mv(input, output) + files.mv(input, output) except Exception: continue - #try: + #try: # # Grid # for name in ['ftn26']: # if os.path.exists(pjoin(G_path, name)): @@ -3930,7 +3930,7 @@ def do_store_events(self, line): # input = pjoin(G_path, name) # output = pjoin(G_path, '%s_%s' % (run,name)) # files.mv(input, output) - # misc.gzip(pjoin(G_path, output), error=None) + # misc.gzip(pjoin(G_path, output), error=None) #except Exception: # continue # Delete ftn25 to ensure reproducible runs @@ -3940,11 +3940,11 @@ def do_store_events(self, line): # 3) Update the index.html self.gen_card_html() - + # 4) Move the Files present in Events directory E_path = pjoin(self.me_dir, 'Events') O_path = pjoin(self.me_dir, 'Events', run) - + # The events file for name in ['events.lhe', 'unweighted_events.lhe']: finput = 
pjoin(E_path, name) @@ -3960,30 +3960,30 @@ def do_store_events(self, line): # os.remove(pjoin(O_path, '%s.gz' % name)) # input = pjoin(E_path, name) ## output = pjoin(O_path, name) - + self.update_status('End Parton', level='parton', makehtml=False) devnull.close() - - - ############################################################################ + + + ############################################################################ def do_create_gridpack(self, line): """Advanced commands: Create gridpack from present run""" self.update_status('Creating gridpack', level='parton') # compile gen_ximprove misc.compile(['../bin/internal/gen_ximprove'], cwd=pjoin(self.me_dir, "Source")) - + Gdir = self.get_Gdir() Pdir = set([os.path.dirname(G) for G in Gdir]) - for P in Pdir: + for P in Pdir: allG = misc.glob('G*', path=P) for G in allG: if pjoin(P, G) not in Gdir: logger.debug('removing %s', pjoin(P,G)) shutil.rmtree(pjoin(P,G)) - - + + args = self.split_arg(line) self.check_combine_events(args) if not self.run_tag: self.run_tag = 'tag_1' @@ -3996,13 +3996,13 @@ def do_create_gridpack(self, line): cwd=self.me_dir) misc.call(['./bin/internal/clean'], cwd=self.me_dir) misc.call(['./bin/internal/make_gridpack'], cwd=self.me_dir) - files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), + files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), pjoin(self.me_dir, '%s_gridpack.tar.gz' % self.run_name)) os.system("sed -i.bak \"s/\s*.true.*=.*GridRun/ .false. 
= GridRun/g\" %s/Cards/grid_card.dat" \ % self.me_dir) self.update_status('gridpack created', level='gridpack') - - ############################################################################ + + ############################################################################ def do_shower(self, line): """launch the shower""" @@ -4010,7 +4010,7 @@ def do_shower(self, line): if len(args)>1 and args[0] in self._interfaced_showers: chosen_showers = [args.pop(0)] elif '--no_default' in line: - # If '--no_default' was specified in the arguments, then only one + # If '--no_default' was specified in the arguments, then only one # shower will be run, depending on which card is present. # but we each of them are called. (each of them check if the file exists) chosen_showers = list(self._interfaced_showers) @@ -4021,9 +4021,9 @@ def do_shower(self, line): shower_priority = ['pythia8','pythia'] chosen_showers = [sorted(chosen_showers,key=lambda sh: shower_priority.index(sh) if sh in shower_priority else len(shower_priority)+1)[0]] - + for shower in chosen_showers: - self.exec_cmd('%s %s'%(shower,' '.join(args)), + self.exec_cmd('%s %s'%(shower,' '.join(args)), postcmd=False, printcmd=False) def do_madanalysis5_parton(self, line): @@ -4039,11 +4039,11 @@ def do_madanalysis5_parton(self, line): def mg5amc_py8_interface_consistency_warning(options): """ Check the consistency of the mg5amc_py8_interface installed with the current MG5 and Pythia8 versions. 
""" - + # All this is only relevant is Pythia8 is interfaced to MG5 if not options['pythia8_path']: return None - + if not options['mg5amc_py8_interface_path']: return \ """ @@ -4053,7 +4053,7 @@ def mg5amc_py8_interface_consistency_warning(options): Consider installing the MG5_aMC-PY8 interface with the following command: MG5_aMC>install mg5amc_py8_interface """ - + mg5amc_py8_interface_path = options['mg5amc_py8_interface_path'] py8_path = options['pythia8_path'] # If the specified interface path is relative, make it absolut w.r.t MGDIR if @@ -4062,7 +4062,7 @@ def mg5amc_py8_interface_consistency_warning(options): mg5amc_py8_interface_path = pjoin(MG5DIR,mg5amc_py8_interface_path) py8_path = pjoin(MG5DIR,py8_path) - # Retrieve all the on-install and current versions + # Retrieve all the on-install and current versions fsock = open(pjoin(mg5amc_py8_interface_path, 'MG5AMC_VERSION_ON_INSTALL')) MG5_version_on_install = fsock.read().replace('\n','') fsock.close() @@ -4074,7 +4074,7 @@ def mg5amc_py8_interface_consistency_warning(options): MG5_curr_version =misc.get_pkg_info()['version'] try: p = subprocess.Popen(['./get_pythia8_version.py',py8_path], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mg5amc_py8_interface_path) (out, err) = p.communicate() out = out.decode(errors='ignore').replace('\n','') @@ -4084,37 +4084,37 @@ def mg5amc_py8_interface_consistency_warning(options): float(out) except: PY8_curr_version = None - + if not MG5_version_on_install is None and not MG5_curr_version is None: if MG5_version_on_install != MG5_curr_version: return \ """ The current version of MG5_aMC (v%s) is different than the one active when - installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). + installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). 
Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(MG5_curr_version, MG5_version_on_install) - + if not PY8_version_on_install is None and not PY8_curr_version is None: if PY8_version_on_install != PY8_curr_version: return \ """ The current version of Pythia8 (v%s) is different than the one active when - installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). + installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(PY8_curr_version,PY8_version_on_install) - + return None def setup_Pythia8RunAndCard(self, PY8_Card, run_type): """ Setup the Pythia8 Run environment and card. In particular all the process and run specific parameters of the card are automatically set here. This function returns the path where HEPMC events will be output, if any.""" - + HepMC_event_output = None tag = self.run_tag - + PY8_Card.subruns[0].systemSet('Beams:LHEF',"unweighted_events.lhe.gz") hepmc_format = PY8_Card['HEPMCoutput:file'].lower() @@ -4185,7 +4185,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): misc.mkfifo(fifo_path) # Use defaultSet not to overwrite the current userSet status PY8_Card.defaultSet('HEPMCoutput:file',fifo_path) - HepMC_event_output=fifo_path + HepMC_event_output=fifo_path elif hepmc_format in ['','/dev/null','None']: logger.warning('User disabled the HepMC output of Pythia8.') HepMC_event_output = None @@ -4206,7 +4206,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): # only if it is not already user_set. 
if PY8_Card['JetMatching:qCut']==-1.0: PY8_Card.MadGraphSet('JetMatching:qCut',1.5*self.run_card['xqcut'], force=True) - + if PY8_Card['JetMatching:qCut']<(1.5*self.run_card['xqcut']): logger.error( 'The MLM merging qCut parameter you chose (%f) is less than '%PY8_Card['JetMatching:qCut']+ @@ -4233,7 +4233,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): if PY8_Card['JetMatching:qCut'] not in qCutList: qCutList.append(PY8_Card['JetMatching:qCut']) PY8_Card.MadGraphSet('SysCalc:qCutList', qCutList, force=True) - + if PY8_Card['SysCalc:qCutList']!='auto': for scale in PY8_Card['SysCalc:qCutList']: @@ -4244,7 +4244,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): "'sys_matchscale' in the run_card) is less than 1.5*xqcut, where xqcut is"+ ' the run_card parameter (=%f)\n'%self.run_card['xqcut']+ 'It would be better/safer to use a larger qCut or a smaller xqcut.') - + # Specific MLM settings # PY8 should not implement the MLM veto since the driver should do it # if merging scale variation is turned on @@ -4294,18 +4294,18 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): CKKW_cut = 'ktdurham' elif self.run_card['ptlund']>0.0 and self.run_card['ktdurham']<=0.0: PY8_Card.subruns[0].MadGraphSet('Merging:doPTLundMerging',True) - CKKW_cut = 'ptlund' + CKKW_cut = 'ptlund' else: raise InvalidCmd("*Either* the 'ptlund' or 'ktdurham' cut in "+\ " the run_card must be turned on to activate CKKW(L) merging"+ " with Pythia8, but *both* cuts cannot be turned on at the same time."+ "\n ptlund=%f, ktdurham=%f."%(self.run_card['ptlund'],self.run_card['ktdurham'])) - + # Automatically set qWeed to the CKKWL cut if not defined by the user. if PY8_Card['SysCalc:qWeed']==-1.0: PY8_Card.MadGraphSet('SysCalc:qWeed',self.run_card[CKKW_cut], force=True) - + # MadGraphSet sets the corresponding value (in system mode) # only if it is not already user_set. 
if PY8_Card['Merging:TMS']==-1.0: @@ -4319,7 +4319,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): 'The CKKWl merging scale you chose (%f) is less than '%PY8_Card['Merging:TMS']+ 'the %s cut specified in the run_card parameter (=%f).\n'%(CKKW_cut,self.run_card[CKKW_cut])+ 'It is incorrect to use a smaller CKKWl scale than the generation-level %s cut!'%CKKW_cut) - + PY8_Card.MadGraphSet('TimeShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:rapidityOrder',False) @@ -4381,7 +4381,7 @@ def do_pythia8(self, line): try: import madgraph - except ImportError: + except ImportError: import internal.histograms as histograms else: import madgraph.various.histograms as histograms @@ -4400,16 +4400,16 @@ def do_pythia8(self, line): self.check_pythia8(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) - self.check_pythia8(args) + self.check_pythia8(args) # Update the banner with the pythia card if not self.banner or len(self.banner) <=1: # Here the level keyword 'pythia' must not be changed to 'pythia8'. self.banner = banner_mod.recover_banner(self.results, 'pythia') - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1], pythia_version=8, banner=self.banner) @@ -4425,7 +4425,7 @@ def do_pythia8(self, line): #"Please use 'event_norm = average' in the run_card to avoid this problem.") - + if not self.options['mg5amc_py8_interface_path'] or not \ os.path.exists(pjoin(self.options['mg5amc_py8_interface_path'], 'MG5aMC_PY8_interface')): @@ -4444,16 +4444,16 @@ def do_pythia8(self, line): # Again here 'pythia' is just a keyword for the simulation level. 
self.update_status('\033[92mRunning Pythia8 [arXiv:1410.3012]\033[0m', 'pythia8') - - tag = self.run_tag + + tag = self.run_tag # Now write Pythia8 card # Start by reading, starting from the default one so that the 'user_set' # tag are correctly set. - PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', + PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', 'pythia8_card_default.dat')) PY8_Card.read(pjoin(self.me_dir, 'Cards', 'pythia8_card.dat'), setter='user') - + run_type = 'default' merged_run_types = ['MLM','CKKW'] if int(self.run_card['ickkw'])==1: @@ -4471,7 +4471,7 @@ def do_pythia8(self, line): cmd_card = StringIO.StringIO() PY8_Card.write(cmd_card,pjoin(self.me_dir,'Cards','pythia8_card_default.dat'), direct_pythia_input=True) - + # Now setup the preamble to make sure that everything will use the locally # installed tools (if present) even if the user did not add it to its # environment variables. @@ -4486,13 +4486,13 @@ def do_pythia8(self, line): preamble = misc.get_HEPTools_location_setter( pjoin(MG5DIR,'HEPTools'),'lib') preamble += "\n unset PYTHIA8DATA\n" - + open(pythia_cmd_card,'w').write("""! ! It is possible to run this card manually with: ! %s %s ! 
"""%(preamble+pythia_main,os.path.basename(pythia_cmd_card))+cmd_card.getvalue()) - + # launch pythia8 pythia_log = pjoin(self.me_dir , 'Events', self.run_name , '%s_pythia8.log' % tag) @@ -4504,13 +4504,13 @@ def do_pythia8(self, line): shell_exe = None if os.path.exists('/usr/bin/env'): shell_exe = '/usr/bin/env %s'%shell - else: + else: shell_exe = misc.which(shell) if not shell_exe: raise self.InvalidCmd('No s hell could be found in your environment.\n'+ "Make sure that either '%s' is in your path or that the"%shell+\ " command '/usr/bin/env %s' exists and returns a valid path."%shell) - + exe_cmd = "#!%s\n%s"%(shell_exe,' '.join( [preamble+pythia_main, os.path.basename(pythia_cmd_card)])) @@ -4528,7 +4528,7 @@ def do_pythia8(self, line): ( os.path.exists(HepMC_event_output) and \ stat.S_ISFIFO(os.stat(HepMC_event_output).st_mode)) startPY8timer = time.time() - + # Information that will be extracted from this PY8 run PY8_extracted_information={ 'sigma_m':None, 'Nacc':None, 'Ntry':None, 'cross_sections':{} } @@ -4556,7 +4556,7 @@ def do_pythia8(self, line): n_cores = max(int(self.options['cluster_size']),1) elif self.options['run_mode']==2: n_cores = max(int(self.cluster.nb_core),1) - + lhe_file_name = os.path.basename(PY8_Card.subruns[0]['Beams:LHEF']) lhe_file = lhe_parser.EventFile(pjoin(self.me_dir,'Events', self.run_name,PY8_Card.subruns[0]['Beams:LHEF'])) @@ -4574,7 +4574,7 @@ def do_pythia8(self, line): if self.options['run_mode']==2: min_n_events_per_job = 100 elif self.options['run_mode']==1: - min_n_events_per_job = 1000 + min_n_events_per_job = 1000 min_n_core = n_events//min_n_events_per_job n_cores = max(min(min_n_core,n_cores),1) @@ -4584,8 +4584,8 @@ def do_pythia8(self, line): logger.info('Follow Pythia8 shower by running the '+ 'following command (in a separate terminal):\n tail -f %s'%pythia_log) - if self.options['run_mode']==2 and self.options['nb_core']>1: - ret_code = self.cluster.launch_and_wait(wrapper_path, + if 
self.options['run_mode']==2 and self.options['nb_core']>1: + ret_code = self.cluster.launch_and_wait(wrapper_path, argument= [], stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events',self.run_name)) else: @@ -4630,10 +4630,10 @@ def do_pythia8(self, line): wrapper = open(wrapper_path,'w') if self.options['cluster_temp_path'] is None: exe_cmd = \ -"""#!%s +"""#!%s ./%s PY8Card.dat >& PY8_log.txt """ - else: + else: exe_cmd = \ """#!%s ln -s ./events_$1.lhe.gz ./events.lhe.gz @@ -4663,21 +4663,21 @@ def do_pythia8(self, line): # Set it as executable st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) - + # Split the .lhe event file, create event partition partition=[n_available_events//n_cores]*n_cores for i in range(n_available_events%n_cores): partition[i] += 1 - + # Splitting according to the total number of events requested by the user # Will be used to determine the number of events to indicate in the PY8 split cards. partition_for_PY8=[n_events//n_cores]*n_cores for i in range(n_events%n_cores): partition_for_PY8[i] += 1 - - logger.info('Splitting .lhe event file for PY8 parallelization...') - n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) - + + logger.info('Splitting .lhe event file for PY8 parallelization...') + n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) + if n_splits!=len(partition): raise MadGraph5Error('Error during lhe file splitting. Expected %d files but obtained %d.' 
%(len(partition),n_splits)) @@ -4690,7 +4690,7 @@ def do_pythia8(self, line): # Add the necessary run content shutil.move(pjoin(parallelization_dir,lhe_file.name+'_%d.lhe.gz'%split_id), pjoin(parallelization_dir,split_files[-1])) - + logger.info('Submitting Pythia8 jobs...') for i, split_file in enumerate(split_files): # We must write a PY8Card tailored for each split so as to correct the normalization @@ -4706,7 +4706,7 @@ def do_pythia8(self, line): split_PY8_Card.write(pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,'PY8Card.dat'), add_missing=False) in_files = [pjoin(parallelization_dir,os.path.basename(pythia_main)), - pjoin(parallelization_dir,'PY8Card_%d.dat'%i), + pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,split_file)] if self.options['cluster_temp_path'] is None: out_files = [] @@ -4718,35 +4718,35 @@ def do_pythia8(self, line): if os.path.basename(in_file)==split_file: ln(in_file,selected_cwd,name='events.lhe.gz') elif os.path.basename(in_file).startswith('PY8Card'): - ln(in_file,selected_cwd,name='PY8Card.dat') + ln(in_file,selected_cwd,name='PY8Card.dat') else: - ln(in_file,selected_cwd) + ln(in_file,selected_cwd) in_files = [] wrapper_path = os.path.basename(wrapper_path) else: out_files = ['split_%d.tar.gz'%i] selected_cwd = parallelization_dir - self.cluster.submit2(wrapper_path, - argument=[str(i)], cwd=selected_cwd, + self.cluster.submit2(wrapper_path, + argument=[str(i)], cwd=selected_cwd, input_files=in_files, output_files=out_files, required_output=out_files) - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return logger.info('Pythia8 shower jobs: %d Idle, %d Running, %d Done [%s]'\ %(Idle, Running, Done, misc.format_time(time.time() - startPY8timer))) self.cluster.wait(parallelization_dir,wait_monitoring) - + logger.info('Merging results from the split PY8 runs...') if self.options['cluster_temp_path']: # Decompressing the output for i, split_file in 
enumerate(split_files): misc.call(['tar','-xzf','split_%d.tar.gz'%i],cwd=parallelization_dir) os.remove(pjoin(parallelization_dir,'split_%d.tar.gz'%i)) - + # Now merge logs pythia_log_file = open(pythia_log,'w') n_added = 0 @@ -4778,7 +4778,7 @@ def wait_monitoring(Idle, Running, Done): if n_added>0: PY8_extracted_information['sigma_m'] /= float(n_added) pythia_log_file.close() - + # djr plots djr_HwU = None n_added = 0 @@ -4845,7 +4845,7 @@ def wait_monitoring(Idle, Running, Done): if not os.path.isfile(hepmc_file): continue all_hepmc_files.append(hepmc_file) - + if len(all_hepmc_files)>0: hepmc_output = pjoin(self.me_dir,'Events',self.run_name,HepMC_event_output) with misc.TMP_directory() as tmp_dir: @@ -4860,8 +4860,8 @@ def wait_monitoring(Idle, Running, Done): break header.close() tail = open(pjoin(tmp_dir,'tail.hepmc'),'w') - n_tail = 0 - + n_tail = 0 + for line in misc.reverse_readline(all_hepmc_files[-1]): if line.startswith('HepMC::'): n_tail += 1 @@ -4871,7 +4871,7 @@ def wait_monitoring(Idle, Running, Done): tail.close() if n_tail>1: raise MadGraph5Error('HEPMC files should only have one trailing command.') - ###################################################################### + ###################################################################### # This is the most efficient way of putting together HEPMC's, *BUT* # # WARNING: NEED TO RENDER THE CODE BELOW SAFE TOWARDS INJECTION # ###################################################################### @@ -4888,12 +4888,12 @@ def wait_monitoring(Idle, Running, Done): elif sys.platform == 'darwin': # sed on MAC has slightly different synthax than on os.system(' '.join(['sed','-i',"''","'%s;$d'"% - (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) - else: - # other UNIX systems + (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) + else: + # other UNIX systems os.system(' '.join(['sed','-i']+["-e '%id'"%(i+1) for i in range(n_head)]+ ["-e '$d'",hepmc_file])) - + os.system(' 
'.join(['cat',pjoin(tmp_dir,'header.hepmc')]+all_hepmc_files+ [pjoin(tmp_dir,'tail.hepmc'),'>',hepmc_output])) @@ -4915,12 +4915,12 @@ def wait_monitoring(Idle, Running, Done): 'Inclusive cross section:' not in '\n'.join(open(pythia_log,'r').readlines()[-20:]): logger.warning('Fail to produce a pythia8 output. More info in \n %s'%pythia_log) return - + # Plot for Pythia8 successful = self.create_plot('Pythia8') if not successful: logger.warning('Failed to produce Pythia8 merging plots.') - + self.to_store.append('pythia8') # Study matched cross-sections @@ -4931,7 +4931,7 @@ def wait_monitoring(Idle, Running, Done): if self.options['run_mode']==0 or (self.options['run_mode']==2 and self.options['nb_core']==1): PY8_extracted_information['sigma_m'],PY8_extracted_information['Nacc'],\ PY8_extracted_information['Ntry'] = self.parse_PY8_log_file( - pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) + pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) else: logger.warning('Pythia8 cross-section could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1. 
YYYYY') @@ -4944,8 +4944,8 @@ def wait_monitoring(Idle, Running, Done): Ntry = PY8_extracted_information['Ntry'] sigma_m = PY8_extracted_information['sigma_m'] # Compute pythia error - error = self.results[self.run_name].return_tag(self.run_tag)['error'] - try: + error = self.results[self.run_name].return_tag(self.run_tag)['error'] + try: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) except ZeroDivisionError: # Cannot compute error @@ -4966,31 +4966,31 @@ def wait_monitoring(Idle, Running, Done): else: logger.warning('Pythia8 merged cross-sections could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1.XXXXX') - PY8_extracted_information['cross_sections'] = {} - + PY8_extracted_information['cross_sections'] = {} + cross_sections = PY8_extracted_information['cross_sections'] if cross_sections: - # Filter the cross_sections specified an keep only the ones + # Filter the cross_sections specified an keep only the ones # with central parameters and a different merging scale a_float_re = '[\+|-]?\d+(\.\d*)?([EeDd][\+|-]?\d+)?' 
central_merging_re = re.compile( '^\s*Weight_MERGING\s*=\s*(?P%s)\s*$'%a_float_re, - re.IGNORECASE) + re.IGNORECASE) cross_sections = dict( (float(central_merging_re.match(xsec).group('merging')),value) - for xsec, value in cross_sections.items() if not + for xsec, value in cross_sections.items() if not central_merging_re.match(xsec) is None) central_scale = PY8_Card['JetMatching:qCut'] if \ int(self.run_card['ickkw'])==1 else PY8_Card['Merging:TMS'] if central_scale in cross_sections: self.results.add_detail('cross_pythia8', cross_sections[central_scale][0]) self.results.add_detail('error_pythia8', cross_sections[central_scale][1]) - + #logger.info('Pythia8 merged cross-sections are:') #for scale in sorted(cross_sections.keys()): # logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ - # (scale,cross_sections[scale][0],cross_sections[scale][1])) - + # (scale,cross_sections[scale][0],cross_sections[scale][1])) + xsecs_file = open(pjoin(self.me_dir,'Events',self.run_name, '%s_merged_xsecs.txt'%tag),'w') if cross_sections: @@ -5003,9 +5003,9 @@ def wait_monitoring(Idle, Running, Done): xsecs_file.write('Cross-sections could not be read from the'+\ "XML node 'xsection' of the .dat file produced by Pythia8.") xsecs_file.close() - + #Update the banner - # We add directly the pythia command card because it has the full + # We add directly the pythia command card because it has the full # information self.banner.add(pythia_cmd_card) @@ -5022,13 +5022,13 @@ def wait_monitoring(Idle, Running, Done): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + def parse_PY8_log_file(self, log_file_path): """ Parse a log file to extract number of event and cross-section. 
""" pythiare = re.compile("Les Houches User Process\(es\)\s*\d+\s*\|\s*(?P\d+)\s*(?P\d+)\s*(?P\d+)\s*\|\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") pythia_xsec_re = re.compile("Inclusive cross section\s*:\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") sigma_m, Nacc, Ntry = None, None, None - for line in misc.BackRead(log_file_path): + for line in misc.BackRead(log_file_path): info = pythiare.search(line) if not info: # Also try to obtain the cross-section and error from the final xsec line of pythia8 log @@ -5058,7 +5058,7 @@ def parse_PY8_log_file(self, log_file_path): raise self.InvalidCmd("Could not find cross-section and event number information "+\ "in Pythia8 log\n '%s'."%log_file_path) - + def extract_cross_sections_from_DJR(self,djr_output): """Extract cross-sections from a djr XML output.""" import xml.dom.minidom as minidom @@ -5075,11 +5075,11 @@ def extract_cross_sections_from_DJR(self,djr_output): [float(xsec.childNodes[0].data.split()[0]), float(xsec.childNodes[0].data.split()[1])]) for xsec in xsections) - + def do_pythia(self, line): """launch pythia""" - - + + # Check argument's validity args = self.split_arg(line) if '--no_default' in args: @@ -5089,12 +5089,12 @@ def do_pythia(self, line): args.remove('--no_default') else: no_default = False - + if not self.run_name: self.check_pythia(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) self.check_pythia(args) @@ -5102,7 +5102,7 @@ def do_pythia(self, line): logger.error('pythia-pgs require event_norm to be on sum. 
Do not run pythia6') return - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1]) if self.options['automatic_html_opening']: @@ -5114,35 +5114,35 @@ def do_pythia(self, line): self.banner = banner_mod.recover_banner(self.results, 'pythia') pythia_src = pjoin(self.options['pythia-pgs_path'],'src') - + self.results.add_detail('run_mode', 'madevent') self.update_status('Running Pythia', 'pythia') try: os.remove(pjoin(self.me_dir,'Events','pythia.done')) except Exception: - pass - + pass + ## LAUNCHING PYTHIA # check that LHAPATH is define. if not re.search(r'^\s*LHAPATH=%s/PDFsets' % pythia_src, - open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), + open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), re.M): f = open(pjoin(self.me_dir,'Cards','pythia_card.dat'),'a') f.write('\n LHAPATH=%s/PDFsets' % pythia_src) f.close() tag = self.run_tag pythia_log = pjoin(self.me_dir, 'Events', self.run_name , '%s_pythia.log' % tag) - #self.cluster.launch_and_wait('../bin/internal/run_pythia', + #self.cluster.launch_and_wait('../bin/internal/run_pythia', # argument= [pythia_src], stdout= pythia_log, # stderr=subprocess.STDOUT, # cwd=pjoin(self.me_dir,'Events')) output_files = ['pythia_events.hep'] if self.run_card['use_syst']: output_files.append('syst.dat') - if self.run_card['ickkw'] == 1: + if self.run_card['ickkw'] == 1: output_files += ['beforeveto.tree', 'xsecs.tree', 'events.tree'] - + os.environ['PDG_MASS_TBL'] = pjoin(pythia_src,'mass_width_2004.mc') self.cluster.launch_and_wait(pjoin(pythia_src, 'pythia'), input_files=[pjoin(self.me_dir, "Events", "unweighted_events.lhe"), @@ -5152,23 +5152,23 @@ def do_pythia(self, line): stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events')) - + os.remove(pjoin(self.me_dir, "Events", "unweighted_events.lhe")) if not 
os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): logger.warning('Fail to produce pythia output. More info in \n %s' % pythia_log) return - + self.to_store.append('pythia') - + # Find the matched cross-section if int(self.run_card['ickkw']): # read the line from the bottom of the file - #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, + #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, # '%s_pythia.log' % tag)) - pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") - for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, + pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") + for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, '%s_pythia.log' % tag)): info = pythiare.search(line) if not info: @@ -5188,16 +5188,16 @@ def do_pythia(self, line): self.results.add_detail('nb_event_pythia', Nacc) #compute pythia error error = self.results[self.run_name].return_tag(self.run_tag)['error'] - if Nacc: + if Nacc: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) else: error_m = 10000 * sigma_m # works both for fixed number of generated events and fixed accepted events self.results.add_detail('error_pythia', error_m) - break + break #pythia_log.close() - + pydir = pjoin(self.options['pythia-pgs_path'], 'src') eradir = self.options['exrootanalysis_path'] madir = self.options['madanalysis_path'] @@ -5216,12 +5216,12 @@ def do_pythia(self, line): # Creating LHE file self.run_hep2lhe(banner_path) - + if int(self.run_card['ickkw']): misc.gzip(pjoin(self.me_dir,'Events','beforeveto.tree'), - stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + - if self.run_card['use_syst'] in self.true: # Calculate syscalc info based on 
syst.dat try: @@ -5233,7 +5233,7 @@ def do_pythia(self, line): # Store syst.dat misc.gzip(pjoin(self.me_dir,'Events', 'syst.dat'), stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_syst.dat.gz')) - + # Store syscalc.dat if os.path.exists(pjoin(self.me_dir, 'Events', 'syscalc.dat')): filename = pjoin(self.me_dir, 'Events' ,self.run_name, @@ -5253,7 +5253,7 @@ def do_pythia(self, line): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + ################################################################################ def do_remove(self, line): @@ -5263,8 +5263,8 @@ def do_remove(self, line): run, tag, mode = self.check_remove(args) if 'banner' in mode: mode.append('all') - - + + if run == 'all': # Check first if they are not a run with a name run. if os.path.exists(pjoin(self.me_dir, 'Events', 'all')): @@ -5280,7 +5280,7 @@ def do_remove(self, line): logger.info(error) pass # run already clear return - + # Check that run exists if not os.path.exists(pjoin(self.me_dir, 'Events', run)): raise self.InvalidCmd('No run \'%s\' detected' % run) @@ -5294,7 +5294,7 @@ def do_remove(self, line): # Found the file to delete - + to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) to_delete += misc.glob('*', pjoin(self.me_dir, 'HTML', run)) # forbid the banner to be removed @@ -5314,7 +5314,7 @@ def do_remove(self, line): if os.path.exists(pjoin(self.me_dir, 'Events', run, 'unweighted_events.lhe.gz')): to_delete.append('unweighted_events.lhe.gz') if os.path.exists(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')): - to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) + to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) if nb_rm != len(to_delete): logger.warning('Be carefull that partonic information are on the point to be removed.') if 'all' in mode: @@ -5327,8 +5327,8 @@ def do_remove(self, line): if 'delphes' not 
in mode: to_delete = [f for f in to_delete if 'delphes' not in f] if 'parton' not in mode: - to_delete = [f for f in to_delete if 'delphes' in f - or 'pgs' in f + to_delete = [f for f in to_delete if 'delphes' in f + or 'pgs' in f or 'pythia' in f] if not self.force and len(to_delete): question = 'Do you want to delete the following files?\n %s' % \ @@ -5336,7 +5336,7 @@ def do_remove(self, line): ans = self.ask(question, 'y', choices=['y','n']) else: ans = 'y' - + if ans == 'y': for file2rm in to_delete: if os.path.exists(pjoin(self.me_dir, 'Events', run, file2rm)): @@ -5374,7 +5374,7 @@ def do_remove(self, line): if ans == 'y': for file2rm in to_delete: os.remove(file2rm) - + if 'banner' in mode: to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) if tag: @@ -5389,8 +5389,8 @@ def do_remove(self, line): return elif any(['banner' not in os.path.basename(p) for p in to_delete]): if to_delete: - raise MadGraph5Error('''Some output still exists for this run. - Please remove those output first. Do for example: + raise MadGraph5Error('''Some output still exists for this run. + Please remove those output first. Do for example: remove %s all banner ''' % run) else: @@ -5400,7 +5400,7 @@ def do_remove(self, line): return else: logger.info('''The banner is not removed. In order to remove it run: - remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) + remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) # update database. 
self.results.clean(mode, run, tag) @@ -5420,7 +5420,7 @@ def do_plot(self, line): logger.info('plot for run %s' % self.run_name) if not self.force: self.ask_edit_cards(['plot_card.dat'], args, plot=True) - + if any([arg in ['all','parton'] for arg in args]): filename = pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe') if os.path.exists(filename+'.gz'): @@ -5438,8 +5438,8 @@ def do_plot(self, line): except Exception: pass else: - logger.info('No valid files for partonic plot') - + logger.info('No valid files for partonic plot') + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_events.lhe' % self.run_tag) @@ -5452,10 +5452,10 @@ def do_plot(self, line): stdout= "%s.gz" % filename) else: logger.info('No valid files for pythia plot') - - + + if any([arg in ['all','pgs'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) @@ -5464,15 +5464,15 @@ def do_plot(self, line): misc.gzip(filename) else: logger.info('No valid files for pgs plot') - + if any([arg in ['all','delphes'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) if os.path.exists(filename): self.create_plot('Delphes') - misc.gzip(filename) + misc.gzip(filename) else: logger.info('No valid files for delphes plot') @@ -5488,9 +5488,9 @@ def do_syscalc(self, line): if self.ninitial == 1: logger.error('SysCalc can\'t be run for decay processes') return - + logger.info('Calculating systematics for run %s' % self.run_name) - + self.ask_edit_cards(['run_card.dat'], args, plot=False) self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) 
if any([arg in ['all','parton'] for arg in args]): @@ -5504,7 +5504,7 @@ def do_syscalc(self, line): stdout="%s.gz" % filename) else: logger.info('No valid files for parton level systematics run.') - + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_syst.dat' % self.run_tag) @@ -5525,17 +5525,17 @@ def do_syscalc(self, line): else: logger.info('No valid files for pythia level') - + def store_result(self): - """ tar the pythia results. This is done when we are quite sure that + """ tar the pythia results. This is done when we are quite sure that the pythia output will not be use anymore """ if not self.run_name: return - + if not self.to_store: - return - + return + tag = self.run_card['run_tag'] self.update_status('storing files of previous run', level=None,\ error=True) @@ -5546,14 +5546,14 @@ def store_result(self): misc.gzip(pjoin(self.me_dir,'Events',self.run_name,"unweighted_events.lhe")) if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) - + if 'pythia' in self.to_store: self.update_status('Storing Pythia files of previous run', level='pythia', error=True) p = pjoin(self.me_dir,'Events') n = self.run_name t = tag self.to_store.remove('pythia') - misc.gzip(pjoin(p,'pythia_events.hep'), + misc.gzip(pjoin(p,'pythia_events.hep'), stdout=pjoin(p, str(n),'%s_pythia_events.hep' % t),forceexternal=True) if 'pythia8' in self.to_store: @@ -5581,26 +5581,26 @@ def store_result(self): os.system("mv " + file_path + hepmc_fileformat + " " + move_hepmc_path) self.update_status('Done', level='pythia',makehtml=False,error=True) - self.results.save() - + self.results.save() + self.to_store = [] - def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, + def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, run_type='', mode=None, **opt): """ """ argument = [str(arg) for arg in argument] if mode is 
None: mode = self.cluster_mode - + # ensure that exe is executable if os.path.exists(exe) and not os.access(exe, os.X_OK): os.system('chmod +x %s ' % exe) elif (cwd and os.path.exists(pjoin(cwd, exe))) and not \ os.access(pjoin(cwd, exe), os.X_OK): os.system('chmod +x %s ' % pjoin(cwd, exe)) - + if mode == 0: - self.update_status((remaining, 1, + self.update_status((remaining, 1, self.total_jobs - remaining -1, run_type), level=None, force=False) start = time.time() #os.system('cd %s; ./%s' % (cwd,exe)) @@ -5613,24 +5613,24 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, elif mode in [1,2]: exename = os.path.basename(exe) # For condor cluster, create the input/output files - if 'ajob' in exename: + if 'ajob' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat','dname.mg', pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) - + output_files = [] required_output = [] - + #Find the correct PDF input file input_files.append(self.get_pdf_input_filename()) - + #Find the correct ajob Gre = re.compile("\s*j=(G[\d\.\w]+)") origre = re.compile("grid_directory=(G[\d\.\w]+)") - try : + try : fsock = open(exe) except Exception: fsock = open(pjoin(cwd,exe)) @@ -5648,21 +5648,21 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if os.path.isdir(pjoin(cwd,G)): input_files.append(G) required_output.append('%s/results.dat' % G) - + if origre.search(text): G_grid = origre.search(text).groups()[0] input_files.append(pjoin(G_grid, 'ftn26')) - + #submitting - self.cluster.submit2(exe, stdout=stdout, cwd=cwd, + self.cluster.submit2(exe, stdout=stdout, cwd=cwd, input_files=input_files, output_files=output_files, required_output=required_output) elif 
'survey' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5671,7 +5671,7 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [] required_output = [] - + #Find the correct ajob suffix = "_%s" % int(float(argument[0])) if suffix == '_0': @@ -5685,12 +5685,12 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if '.' in argument[0]: offset = int(str(argument[0]).split('.')[1]) else: - offset = 0 - + offset = 0 + if offset ==0 or offset == int(float(argument[0])): if os.path.exists(pjoin(cwd, G, 'input_app.txt')): os.remove(pjoin(cwd, G, 'input_app.txt')) - + if os.path.exists(os.path.realpath(pjoin(cwd, G, 'ftn25'))): if offset == 0 or offset == int(float(argument[0])): os.remove(pjoin(cwd, G, 'ftn25')) @@ -5706,16 +5706,16 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, pass #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, required_output=required_output, **opt) elif "refine_splitted.sh" in exename: input_files = ['madevent','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - 
input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5725,25 +5725,25 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [argument[0]] required_output = [] for G in output_files: - required_output.append('%s/results.dat' % G) + required_output.append('%s/results.dat' % G) input_files.append(pjoin(argument[1], "input_app.txt")) input_files.append(pjoin(argument[1], "ftn26")) - + #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, - required_output=required_output, **opt) + required_output=required_output, **opt) + + - - else: self.cluster.submit(exe, argument=argument, stdout=stdout, cwd=cwd, **opt) - + ############################################################################ def find_madevent_mode(self): """Find if Madevent is in Group mode or not""" - + # The strategy is too look in the files Source/run_configs.inc # if we found: ChanPerJob=3 then it's a group mode. 
file_path = pjoin(self.me_dir, 'Source', 'run_config.inc') @@ -5752,11 +5752,11 @@ def find_madevent_mode(self): return 'group' else: return 'v4' - + ############################################################################ def monitor(self, run_type='monitor', mode=None, html=False): """ monitor the progress of running job """ - + starttime = time.time() if mode is None: @@ -5772,8 +5772,8 @@ def monitor(self, run_type='monitor', mode=None, html=False): else: update_status = lambda idle, run, finish: None update_first = None - try: - self.cluster.wait(self.me_dir, update_status, update_first=update_first) + try: + self.cluster.wait(self.me_dir, update_status, update_first=update_first) except Exception as error: logger.info(error) if not self.force: @@ -5788,24 +5788,24 @@ def monitor(self, run_type='monitor', mode=None, html=False): raise except KeyboardInterrupt as error: self.cluster.remove() - raise - - + raise + - ############################################################################ + + ############################################################################ def configure_directory(self, html_opening=True): - """ All action require before any type of run """ + """ All action require before any type of run """ # Basic check assert os.path.exists(pjoin(self.me_dir,'SubProcesses')) # environmental variables to be included in make_opts self.make_opts_var = {} - + #see when the last file was modified time_mod = max([os.path.getmtime(pjoin(self.me_dir,'Cards','run_card.dat')), os.path.getmtime(pjoin(self.me_dir,'Cards','param_card.dat'))]) - + if self.configured >= time_mod and hasattr(self, 'random') and hasattr(self, 'run_card'): #just ensure that cluster specific are correctly handled if self.cluster: @@ -5820,7 +5820,7 @@ def configure_directory(self, html_opening=True): #open only once the web page # Change current working directory self.launching_dir = os.getcwd() - + # Check if we need the MSSM special treatment model = self.find_model_name() 
if model == 'mssm' or model.startswith('mssm-'): @@ -5828,14 +5828,14 @@ def configure_directory(self, html_opening=True): mg5_param = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') check_param_card.convert_to_mg5card(param_card, mg5_param) check_param_card.check_valid_param_card(mg5_param) - + # limit the number of event to 100k self.check_nb_events() # this is in order to avoid conflicts between runs with and without # lhapdf. not needed anymore the makefile handles it automaticallu #misc.compile(['clean4pdf'], cwd = pjoin(self.me_dir, 'Source')) - + self.make_opts_var['pdlabel1'] = '' self.make_opts_var['pdlabel2'] = '' if self.run_card['pdlabel1'] in ['eva', 'iww']: @@ -5866,7 +5866,7 @@ def configure_directory(self, html_opening=True): self.copy_lep_densities(self.run_card['pdlabel'], pjoin(self.me_dir, 'Source')) self.make_opts_var['pdlabel1'] = 'ee' self.make_opts_var['pdlabel2'] = 'ee' - + # set random number if self.run_card['iseed'] != 0: self.random = int(self.run_card['iseed']) @@ -5885,18 +5885,18 @@ def configure_directory(self, html_opening=True): break else: self.random = random.randint(1, 30107) - + #set random seed for python part of the code if self.run_card['python_seed'] == -2: #-2 means same as run_card import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] elif self.run_card['python_seed'] >= 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] if self.run_card['ickkw'] == 2: logger.info('Running with CKKW matching') self.treat_ckkw_matching() @@ -5905,12 +5905,12 @@ def configure_directory(self, html_opening=True): self.update_make_opts(self.run_card) # reset 
list of Gdirectory self.Gdirs = None - + # create param_card.inc and run_card.inc self.do_treatcards('') - + logger.info("compile Source Directory") - + # Compile for name in [ 'all']:#, '../bin/internal/combine_events']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) @@ -5933,7 +5933,7 @@ def configure_directory(self, html_opening=True): os.remove(pjoin(self.me_dir, 'lib','libbias.a')) force_subproc_clean = True - + # Finally compile the bias module as well if self.run_card['bias_module'] not in ['dummy',None]: logger.debug("Compiling the bias module '%s'"%bias_name) @@ -5945,7 +5945,7 @@ def configure_directory(self, html_opening=True): 'INVALID' in str(bias_module_valid).upper(): raise InvalidCmd("The bias module '%s' cannot be used because of:\n%s"% (bias_name,bias_module_valid)) - + self.compile(arg=[], cwd=os.path.join(self.me_dir, 'Source','BIAS',bias_name)) self.proc_characteristics['bias_module']=bias_name # Update the proc_characterstics file @@ -5954,7 +5954,7 @@ def configure_directory(self, html_opening=True): if force_subproc_clean: # Make sure that madevent will be recompiled - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] for nb_proc,subdir in enumerate(subproc): Pdir = pjoin(self.me_dir, 'SubProcesses',subdir.strip()) @@ -5971,20 +5971,20 @@ def configure_directory(self, html_opening=True): ############################################################################ @staticmethod def check_dir(path, default=''): - """check if the directory exists. if so return the path otherwise the + """check if the directory exists. 
if so return the path otherwise the default""" - + if os.path.isdir(path): return path else: return default - + ############################################################################ def get_Gdir(self, Pdir=None, symfact=None): """get the list of Gdirectory if not yet saved.""" - + if hasattr(self, "Gdirs") and self.Gdirs: if self.me_dir in self.Gdirs[0]: if Pdir is None: @@ -6000,8 +6000,8 @@ def get_Gdir(self, Pdir=None, symfact=None): Pdirs = self.get_Pdir() - Gdirs = {self.me_dir:[]} - mfactors = {} + Gdirs = {self.me_dir:[]} + mfactors = {} for P in Pdirs: Gdirs[P] = [] #for the next line do not use P, since in readonly mode it might not have symfact @@ -6012,7 +6012,7 @@ def get_Gdir(self, Pdir=None, symfact=None): mfactors[pjoin(P, "G%s" % tag)] = mfactor self.Gdirs = (Gdirs, mfactors) return self.get_Gdir(Pdir, symfact=symfact) - + ############################################################################ def set_run_name(self, name, tag=None, level='parton', reload_card=False, allow_new_tag=True): @@ -6030,8 +6030,8 @@ def get_last_tag(self, level): tagRun = self.results[self.run_name][i] if tagRun.pythia or tagRun.shower or tagRun.pythia8 : return tagRun['tag'] - - + + # when are we force to change the tag new_run:previous run requiring changes upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','madanalysis5_hadron','madanalysis5_parton', 'rivet'], 'pythia': ['pythia','pgs','delphes','madanalysis5_hadron'], @@ -6044,7 +6044,7 @@ def get_last_tag(self, level): 'syscalc':[], 'rivet':['rivet']} - if name == self.run_name: + if name == self.run_name: if reload_card: run_card = pjoin(self.me_dir, 'Cards','run_card.dat') self.run_card = banner_mod.RunCard(run_card) @@ -6064,13 +6064,13 @@ def get_last_tag(self, level): break return get_last_tag(self, level) - + # save/clean previous run if self.run_name: self.store_result() # store new name self.run_name = name - + new_tag = False # First call for this run -> set the banner self.banner = 
banner_mod.recover_banner(self.results, level, name) @@ -6079,8 +6079,8 @@ def get_last_tag(self, level): else: # Read run_card run_card = pjoin(self.me_dir, 'Cards','run_card.dat') - self.run_card = banner_mod.RunCard(run_card) - + self.run_card = banner_mod.RunCard(run_card) + if tag: self.run_card['run_tag'] = tag new_tag = True @@ -6093,7 +6093,7 @@ def get_last_tag(self, level): self.results.update('add run %s' % name, 'all', makehtml=False) else: for tag in upgrade_tag[level]: - + if getattr(self.results[self.run_name][-1], tag): # LEVEL is already define in the last tag -> need to switch tag tag = self.get_available_tag() @@ -6103,8 +6103,8 @@ def get_last_tag(self, level): if not new_tag: # We can add the results to the current run tag = self.results[self.run_name][-1]['tag'] - self.run_card['run_tag'] = tag # ensure that run_tag is correct - + self.run_card['run_tag'] = tag # ensure that run_tag is correct + if allow_new_tag and (name in self.results and not new_tag): self.results.def_current(self.run_name) else: @@ -6113,15 +6113,15 @@ def get_last_tag(self, level): self.run_tag = self.run_card['run_tag'] return get_last_tag(self, level) - - - + + + ############################################################################ def check_nb_events(self): - """Find the number of event in the run_card, and check that this is not + """Find the number of event in the run_card, and check that this is not too large""" - + nb_event = int(self.run_card['nevents']) if nb_event > 1000000: logger.warning("Attempting to generate more than 1M events") @@ -6133,20 +6133,20 @@ def check_nb_events(self): return - - ############################################################################ + + ############################################################################ def update_random(self): """ change random number""" - + self.random += 3 if self.random > 30081*30081: # can't use too big random number raise MadGraph5Error('Random seed too large ' + str(self.random) + ' 
> 30081*30081') - if self.run_card['python_seed'] == -2: + if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.random) + random.seed(self.random) random.mg_seedset = self.random - + ############################################################################ def save_random(self): """save random number in appropirate file""" @@ -6155,14 +6155,14 @@ def save_random(self): fsock.writelines('r=%s\n' % self.random) def do_quit(self, *args, **opts): - + return common_run.CommonRunCmd.do_quit(self, *args, **opts) #return CmdExtended.do_quit(self, *args, **opts) - + ############################################################################ def treat_CKKW_matching(self): """check for ckkw""" - + lpp1 = self.run_card['lpp1'] lpp2 = self.run_card['lpp2'] e1 = self.run_card['ebeam1'] @@ -6170,19 +6170,19 @@ def treat_CKKW_matching(self): pd = self.run_card['pdlabel'] lha = self.run_card['lhaid'] xq = self.run_card['xqcut'] - translation = {'e1': e1, 'e2':e2, 'pd':pd, + translation = {'e1': e1, 'e2':e2, 'pd':pd, 'lha':lha, 'xq':xq} if lpp1 or lpp2: - # Remove ':s from pd + # Remove ':s from pd if pd.startswith("'"): pd = pd[1:] if pd.endswith("'"): - pd = pd[:-1] + pd = pd[:-1] if xq >2 or xq ==2: xq = 2 - + # find data file if pd == "lhapdf": issudfile = 'lib/issudgrid-%(e1)s-%(e2)s-%(pd)s-%(lha)s-%(xq)s.dat.gz' @@ -6192,9 +6192,9 @@ def treat_CKKW_matching(self): issudfile = pjoin(self.webbin, issudfile % translation) else: issudfile = pjoin(self.me_dir, issudfile % translation) - + logger.info('Sudakov grid file: %s' % issudfile) - + # check that filepath exists if os.path.exists(issudfile): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') @@ -6203,20 +6203,20 @@ def treat_CKKW_matching(self): msg = 'No sudakov grid file for parameter choice. Start to generate it. 
This might take a while' logger.info(msg) self.update_status('GENERATE SUDAKOV GRID', level='parton') - + for i in range(-2,6): - self.cluster.submit('%s/gensudgrid ' % self.dirbin, + self.cluster.submit('%s/gensudgrid ' % self.dirbin, argument = ['%d'%i], - cwd=self.me_dir, + cwd=self.me_dir, stdout=open(pjoin(self.me_dir, 'gensudgrid%s.log' % i),'w')) self.monitor() for i in range(-2,6): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') os.system('cat %s/gensudgrid%s.log >> %s' % (self.me_dir, path)) misc.gzip(path, stdout=issudfile) - + ############################################################################ - def create_root_file(self, input='unweighted_events.lhe', + def create_root_file(self, input='unweighted_events.lhe', output='unweighted_events.root' ): """create the LHE root file """ self.update_status('Creating root files', level='parton') @@ -6233,14 +6233,14 @@ def create_root_file(self, input='unweighted_events.lhe', totar = False torm = True input = input[:-3] - + try: - misc.call(['%s/ExRootLHEFConverter' % eradir, + misc.call(['%s/ExRootLHEFConverter' % eradir, input, output], cwd=pjoin(self.me_dir, 'Events')) except Exception: logger.warning('fail to produce Root output [problem with ExRootAnalysis]') - + if totar: if os.path.exists('%s.gz' % input): try: @@ -6251,13 +6251,13 @@ def create_root_file(self, input='unweighted_events.lhe', misc.gzip(input) if torm: os.remove(input) - + def run_syscalc(self, mode='parton', event_path=None, output=None): - """create the syscalc output""" + """create the syscalc output""" if self.run_card['use_syst'] not in self.true: return - + scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): return @@ -6265,12 +6265,12 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): if self.run_card['event_norm'] != 'sum': logger.critical('SysCalc works only when event_norm is on \'sum\'.') return - logger.info('running SysCalc on mode %s' % mode) - + logger.info('running 
SysCalc on mode %s' % mode) + # Restore the old default for SysCalc+PY6 if self.run_card['sys_matchscale']=='auto': self.run_card['sys_matchscale'] = "30 50" - + # Check that all pdfset are correctly installed lhaid = [self.run_card.get_lhapdf_id()] if '&&' in self.run_card['sys_pdf']: @@ -6285,20 +6285,20 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.debug(str(error)) logger.warning('Systematic computation requires lhapdf to run. Bypass SysCalc') return - + # Copy all the relevant PDF sets [self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid] - + to_syscalc={'sys_scalefact': self.run_card['sys_scalefact'], 'sys_alpsfact': self.run_card['sys_alpsfact'], 'sys_matchscale': self.run_card['sys_matchscale'], 'sys_scalecorrelation': self.run_card['sys_scalecorrelation'], 'sys_pdf': self.run_card['sys_pdf']} - - tag = self.run_card['run_tag'] + + tag = self.run_card['run_tag'] card = pjoin(self.me_dir, 'bin','internal', 'syscalc_card.dat') template = open(pjoin(self.me_dir, 'bin','internal', 'syscalc_template.dat')).read() - + if '&&' in to_syscalc['sys_pdf']: to_syscalc['sys_pdf'] = to_syscalc['sys_pdf'].split('#',1)[0].replace('&&',' \n ') else: @@ -6311,8 +6311,8 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): new.append(d) else: new[-1] += ' %s' % d - to_syscalc['sys_pdf'] = '\n'.join(new) - + to_syscalc['sys_pdf'] = '\n'.join(new) + if to_syscalc['sys_pdf'].lower() in ['', 'f', 'false', 'none', '.false.']: to_syscalc['sys_pdf'] = '' if to_syscalc['sys_alpsfact'].lower() in ['', 'f', 'false', 'none','.false.']: @@ -6320,17 +6320,17 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): - + # check if the scalecorrelation parameter is define: if not 'sys_scalecorrelation' in self.run_card: self.run_card['sys_scalecorrelation'] = -1 open(card,'w').write(template % self.run_card) - + if not os.path.exists(card): return False - - + + event_dir = pjoin(self.me_dir, 'Events') if not 
event_path: @@ -6353,19 +6353,19 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): raise SysCalcError('qcut value for sys_matchscale lower than qcut in pythia_card. Bypass syscalc') if float(value) < xqcut: raise SysCalcError('qcut value for sys_matchscale lower than xqcut in run_card. Bypass syscalc') - - + + event_path = pjoin(event_dir,'syst.dat') output = pjoin(event_dir, 'syscalc.dat') else: raise self.InvalidCmd('Invalid mode %s' % mode) - + if not os.path.exists(event_path): if os.path.exists(event_path+'.gz'): misc.gunzip(event_path+'.gz') else: raise SysCalcError('Events file %s does not exits' % event_path) - + self.update_status('Calculating systematics for %s level' % mode, level = mode.lower()) try: proc = misc.call([os.path.join(scdir, 'sys_calc'), @@ -6374,7 +6374,7 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): stderr = subprocess.STDOUT, cwd=event_dir) # Wait 5 s to make sure file is finished writing - time.sleep(5) + time.sleep(5) except OSError as error: logger.error('fail to run syscalc: %s. Please check that SysCalc is correctly installed.' % error) else: @@ -6382,11 +6382,11 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.warning('SysCalc Failed. Please read the associate log to see the reason. 
Did you install the associate PDF set?') elif mode == 'parton': files.mv(output, event_path) - + self.update_status('End syscalc for %s level' % mode, level = mode.lower(), makehtml=False) - - return True + + return True action_switcher = AskRun @@ -6399,23 +6399,23 @@ def ask_run_configuration(self, mode=None, args=[]): passing_cmd.append('reweight=ON') if '-M' in args or '--madspin' in args: passing_cmd.append('madspin=ON') - + switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, mode=mode, line_args=args, force=self.force, first_cmd=passing_cmd, return_instance=True) # - self.switch = switch # store the value of the switch for plugin purpose + self.switch = switch # store the value of the switch for plugin purpose if 'dynamical' in switch: mode = 'auto' - + # Now that we know in which mode we are check that all the card #exists (copy default if needed) - + cards = ['param_card.dat', 'run_card.dat'] if switch['shower'] == 'Pythia6': cards.append('pythia_card.dat') if switch['shower'] == 'Pythia8': - cards.append('pythia8_card.dat') + cards.append('pythia8_card.dat') if switch['detector'] in ['PGS','DELPHES+PGS']: cards.append('pgs_card.dat') if switch['detector'] in ['Delphes', 'DELPHES+PGS']: @@ -6438,29 +6438,29 @@ def ask_run_configuration(self, mode=None, args=[]): cards.append('rivet_card.dat') self.keep_cards(cards) - + first_cmd = cmd_switch.get_cardcmd() - + if os.path.isfile(pjoin(self.me_dir,'Cards','MadLoopParams.dat')): cards.append('MadLoopParams.dat') - + if self.force: self.check_param_card(pjoin(self.me_dir,'Cards','param_card.dat' )) return switch - + if 'dynamical' in switch and switch['dynamical']: self.ask_edit_cards(cards, plot=False, mode='auto', first_cmd=first_cmd) else: self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) return switch - + ############################################################################ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None): """Ask the 
question when launching pythia""" - + pythia_suffix = '' if pythia_version==6 else '%d'%pythia_version - + available_mode = ['0', '1'] if pythia_version==6: available_mode.append('2') @@ -6485,10 +6485,10 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = self.ask(question, '0', options) elif not mode: mode = 'auto' - + if mode.isdigit(): mode = name[mode] - + auto = False if mode == 'auto': auto = True @@ -6497,7 +6497,7 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = 'pgs' elif os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')): mode = 'delphes' - else: + else: mode = 'pythia%s'%pythia_suffix logger.info('Will run in mode %s' % mode) # Now that we know in which mode we are check that all the card @@ -6513,15 +6513,15 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) cards.append('delphes_trigger.dat') self.keep_cards(cards, ignore=['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat', 'plot_card.dat']) - + if self.force: return mode - + if not banner: banner = self.banner - + if auto: - self.ask_edit_cards(cards, from_banner=['param', 'run'], + self.ask_edit_cards(cards, from_banner=['param', 'run'], mode='auto', plot=(pythia_version==6), banner=banner ) else: @@ -6529,12 +6529,12 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) plot=(pythia_version==6), banner=banner) return mode - + #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmdShell(MadEventCmd, cmd.CmdShell): - """The command line processor of MadGraph""" + """The command line processor of MadGraph""" @@ -6548,11 +6548,11 @@ class SubProcesses(object): @classmethod def clean(cls): cls.name_to_pdg = {} - + @staticmethod def get_subP(me_dir): """return the list of Subprocesses""" - + out = [] for 
line in open(pjoin(me_dir,'SubProcesses', 'subproc.mg')): if not line: @@ -6560,9 +6560,9 @@ def get_subP(me_dir): name = line.strip() if os.path.exists(pjoin(me_dir, 'SubProcesses', name)): out.append(pjoin(me_dir, 'SubProcesses', name)) - + return out - + @staticmethod @@ -6623,9 +6623,9 @@ def get_subP_ids(path): particles = re.search("/([\d,-]+)/", line) all_ids.append([int(p) for p in particles.group(1).split(',')]) return all_ids - - -#=============================================================================== + + +#=============================================================================== class GridPackCmd(MadEventCmd): """The command for the gridpack --Those are not suppose to be use interactively--""" @@ -6639,7 +6639,7 @@ def __init__(self, me_dir = None, nb_event=0, seed=0, gran=-1, *completekey, **s self.random = seed self.random_orig = self.random self.granularity = gran - + self.options['automatic_html_opening'] = False #write the grid_card.dat on disk self.nb_event = int(nb_event) @@ -6680,7 +6680,7 @@ def write_RunWeb(self, me_dir): def write_gridcard(self, nb_event, seed, gran): """write the grid_card.dat file at appropriate location""" - + # first try to write grid_card within the gridpack. 
print("WRITE GRIDCARD", self.me_dir) if self.readonly: @@ -6689,35 +6689,35 @@ def write_gridcard(self, nb_event, seed, gran): fsock = open('grid_card.dat','w') else: fsock = open(pjoin(self.me_dir, 'Cards', 'grid_card.dat'),'w') - + gridpackcard = banner_mod.GridpackCard() gridpackcard['GridRun'] = True gridpackcard['gevents'] = nb_event gridpackcard['gseed'] = seed gridpackcard['ngran'] = gran - + gridpackcard.write(fsock) ############################################################################ def get_Pdir(self): """get the list of Pdirectory if not yet saved.""" - + if hasattr(self, "Pdirs"): if self.me_dir in self.Pdirs[0]: return self.Pdirs - + if not self.readonly: - self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) + self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] else: - self.Pdirs = [l.strip() - for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + self.Pdirs = [l.strip() + for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] + return self.Pdirs - + def prepare_local_dir(self): """create the P directory structure in the local directory""" - + if not self.readonly: os.chdir(self.me_dir) else: @@ -6726,7 +6726,7 @@ def prepare_local_dir(self): os.mkdir(p) files.cp(pjoin(self.me_dir,'SubProcesses',p,'symfact.dat'), pjoin(p, 'symfact.dat')) - + def launch(self, nb_event, seed): """ launch the generation for the grid """ @@ -6742,13 +6742,13 @@ def launch(self, nb_event, seed): if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(seed) + random.seed(seed) random.mg_seedset = seed elif self.run_card['python_seed'] > 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] # 2) Run the refine for the grid 
self.update_status('Generating Events', level=None) #misc.call([pjoin(self.me_dir,'bin','refine4grid'), @@ -6767,70 +6767,70 @@ def launch(self, nb_event, seed): self.exec_cmd('decay_events -from_cards', postcmd=False) elif self.run_card['use_syst'] and self.run_card['systematics_program'] == 'systematics': self.options['nb_core'] = 1 - self.exec_cmd('systematics %s --from_card' % + self.exec_cmd('systematics %s --from_card' % pjoin('Events', self.run_name, 'unweighted_events.lhe.gz'), postcmd=False,printcmd=False) - + def refine4grid(self, nb_event): """Special refine for gridpack run.""" self.nb_refine += 1 - + precision = nb_event self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) - + # initialize / remove lhapdf mode # self.configure_directory() # All this has been done before self.cluster_mode = 0 # force single machine # Store seed in randinit file, to be read by ranmar.f self.save_random() - + self.update_status('Refine results to %s' % precision, level=None) logger.info("Using random number seed offset = %s" % self.random) refine_opt = {'err_goal': nb_event, 'split_channels': False, - 'ngran':self.granularity, 'readonly': self.readonly} + 'ngran':self.granularity, 'readonly': self.readonly} x_improve = gen_ximprove.gen_ximprove_gridpack(self, refine_opt) x_improve.launch() # create the ajob for the refinment and run those! - self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack - - + self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack + + #bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) #print 'run combine!!!' 
#combine_runs.CombineRuns(self.me_dir) - + return #update html output Presults = sum_html.collect_result(self) cross, error = Presults.xsec, Presults.xerru self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + #self.update_status('finish refine', 'parton', makehtml=False) #devnull.close() - - - + + + return self.total_jobs = 0 - subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if + subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if P.startswith('P') and os.path.isdir(pjoin(self.me_dir,'SubProcesses', P))] devnull = open(os.devnull, 'w') for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) # clean previous run for match in misc.glob('*ajob*', Pdir): if os.path.basename(match)[:4] in ['ajob', 'wait', 'run.', 'done']: os.remove(pjoin(Pdir, match)) - + logfile = pjoin(Pdir, 'gen_ximprove.log') misc.call([pjoin(bindir, 'gen_ximprove')], @@ -6840,40 +6840,40 @@ def refine4grid(self, nb_event): if os.path.exists(pjoin(Pdir, 'ajob1')): alljobs = misc.glob('ajob*', Pdir) - nb_tot = len(alljobs) + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) if os.path.exists(pjoin(self.me_dir,'error')): self.monitor(html=True) raise MadEventError('Error detected in dir %s: %s' % \ (Pdir, open(pjoin(self.me_dir,'error')).read())) - self.monitor(run_type='All job submitted for refine number %s' % + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except 
Exception: pass - + bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) combine_runs.CombineRuns(self.me_dir) - + #update html output cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + self.update_status('finish refine', 'parton', makehtml=False) devnull.close() def do_combine_events(self, line): - """Advanced commands: Launch combine events""" + """Advanced commands: Launch combine events""" if self.readonly: outdir = 'Events' @@ -6895,17 +6895,17 @@ def do_combine_events(self, line): self.banner.add_generation_info(self.results.current['cross'], self.run_card['nevents']) if not hasattr(self, 'random_orig'): self.random_orig = 0 self.banner.change_seed(self.random_orig) - - + + if not os.path.exists(pjoin(outdir, self.run_name)): os.mkdir(pjoin(outdir, self.run_name)) - self.banner.write(pjoin(outdir, self.run_name, + self.banner.write(pjoin(outdir, self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -6915,7 +6915,7 @@ def do_combine_events(self, line): if os.path.exists(pjoin(Gdir, 'events.lhe')): result = sum_html.OneResult('') result.read_results(pjoin(Gdir, 'results.dat')) - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec')*gscalefact[Gdir], result.get('xerru')*gscalefact[Gdir], result.get('axsec')*gscalefact[Gdir] @@ -6924,7 +6924,7 @@ def do_combine_events(self, line): sum_xsec += result.get('xsec')*gscalefact[Gdir] sum_xerru.append(result.get('xerru')*gscalefact[Gdir]) sum_axsec += result.get('axsec')*gscalefact[Gdir] - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(outdir, 
self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.nb_event) @@ -6933,26 +6933,26 @@ def do_combine_events(self, line): AllEvent.add(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() - + self.banner.add_generation_info(sum_xsec, self.nb_event) nb_event = AllEvent.unweight(pjoin(outdir, self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.nb_event, log_level=logging.DEBUG, normalization=self.run_card['event_norm'], proc_charac=self.proc_characteristic) - - + + if partials: for i in range(partials): try: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) self.banner.add_generation_info(sum_xsec, nb_event) if self.run_card['bias_module'].lower() not in ['dummy', 'none']: @@ -6961,7 +6961,7 @@ def do_combine_events(self, line): class MadLoopInitializer(object): """ A container class for the various methods for initializing MadLoop. It is - placed in MadEventInterface because it is used by Madevent for loop-induced + placed in MadEventInterface because it is used by Madevent for loop-induced simulations. 
""" @staticmethod @@ -6974,7 +6974,7 @@ def make_and_run(dir_name,checkRam=False): if os.path.isfile(pjoin(dir_name,'check')): os.remove(pjoin(dir_name,'check')) os.remove(pjoin(dir_name,'check_sa.o')) - os.remove(pjoin(dir_name,'loop_matrix.o')) + os.remove(pjoin(dir_name,'loop_matrix.o')) # Now run make devnull = open(os.devnull, 'w') start=time.time() @@ -6996,7 +6996,7 @@ def make_and_run(dir_name,checkRam=False): stdout=devnull, stderr=devnull, close_fds=True) try: ptimer.execute() - #poll as often as possible; otherwise the subprocess might + #poll as often as possible; otherwise the subprocess might # "sneak" in some extra memory usage while you aren't looking # Accuracy of .2 seconds is enough for the timing. while ptimer.poll(): @@ -7028,7 +7028,7 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, If mu_r > 0.0, then the renormalization constant value will be hardcoded directly in check_sa.f, if is is 0 it will be set to Sqrt(s) and if it is < 0.0 the value in the param_card.dat is used. - If the split_orders target (i.e. the target squared coupling orders for + If the split_orders target (i.e. the target squared coupling orders for the computation) is != -1, it will be changed in check_sa.f via the subroutine CALL SET_COUPLINGORDERS_TARGET(split_orders).""" @@ -7043,12 +7043,12 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, file_path = pjoin(directories[0],'check_sa.f') if not os.path.isfile(file_path): raise MadGraph5Error('Could not find the location of check_sa.f'+\ - ' from the specified path %s.'%str(file_path)) + ' from the specified path %s.'%str(file_path)) file = open(file_path, 'r') check_sa = file.read() file.close() - + file = open(file_path, 'w') check_sa = re.sub(r"READPS = \S+\)","READPS = %s)"%('.TRUE.' 
if read_ps \ else '.FALSE.'), check_sa) @@ -7064,42 +7064,42 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, (("%.17e"%mu_r).replace('e','d')),check_sa) elif mu_r < 0.0: check_sa = re.sub(r"MU_R=SQRTS","",check_sa) - + if split_orders > 0: check_sa = re.sub(r"SET_COUPLINGORDERS_TARGET\(-?\d+\)", - "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) - + "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) + file.write(check_sa) file.close() - @staticmethod + @staticmethod def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ req_files = ['HelFilter.dat','LoopFilter.dat'], attempts = [4,15]): - """ Run the initialization of the process in 'run_dir' with success + """ Run the initialization of the process in 'run_dir' with success characterized by the creation of the files req_files in this directory. The directory containing the driving source code 'check_sa.f'. - The list attempt gives the successive number of PS points the + The list attempt gives the successive number of PS points the initialization should be tried with before calling it failed. Returns the number of PS points which were necessary for the init. Notice at least run_dir or SubProc_dir must be provided. A negative attempt number given in input means that quadprec will be forced for initialization.""" - + # If the user does not want detailed info, then set the dictionary # to a dummy one. 
if infos is None: infos={} - + if SubProc_dir is None and run_dir is None: raise MadGraph5Error('At least one of [SubProc_dir,run_dir] must'+\ ' be provided in run_initialization.') - + # If the user does not specify where is check_sa.f, then it is assumed # to be one levels above run_dir if SubProc_dir is None: SubProc_dir = os.path.abspath(pjoin(run_dir,os.pardir)) - + if run_dir is None: directories =[ dir for dir in misc.glob('P[0-9]*', SubProc_dir) if os.path.isdir(dir) ] @@ -7109,7 +7109,7 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find a valid running directory'+\ ' in %s.'%str(SubProc_dir)) - # Use the presence of the file born_matrix.f to decide if it is a + # Use the presence of the file born_matrix.f to decide if it is a # loop-induced process or not. It's not crucial, but just that because # of the dynamic adjustment of the ref scale used for deciding what are # the zero contributions, more points are neeeded for loop-induced. @@ -7128,9 +7128,9 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ %MLCardPath) else: - MLCard = banner_mod.MadLoopParam(MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) MLCard_orig = banner_mod.MadLoopParam(MLCard) - + # Make sure that LoopFilter really is needed. 
if not MLCard['UseLoopFilter']: try: @@ -7153,11 +7153,11 @@ def need_init(): proc_prefix+fname)) for fname in my_req_files]) or \ not os.path.isfile(pjoin(run_dir,'check')) or \ not os.access(pjoin(run_dir,'check'), os.X_OK) - + # Check if this is a process without born by checking the presence of the # file born_matrix.f is_loop_induced = os.path.exists(pjoin(run_dir,'born_matrix.f')) - + # For loop induced processes, always attempt quadruple precision if # double precision attempts fail and the user didn't specify himself # quadruple precision initializations attempts @@ -7166,11 +7166,11 @@ def need_init(): use_quad_prec = 1 curr_attempt = 1 - MLCard.set('WriteOutFilters',True) - + MLCard.set('WriteOutFilters',True) + while to_attempt!=[] and need_init(): curr_attempt = to_attempt.pop() - # if the attempt is a negative number it means we must force + # if the attempt is a negative number it means we must force # quadruple precision at initialization time if curr_attempt < 0: use_quad_prec = -1 @@ -7183,11 +7183,11 @@ def need_init(): MLCard.set('ZeroThres',1e-9) # Plus one because the filter are written on the next PS point after curr_attempt = abs(curr_attempt+1) - MLCard.set('MaxAttempts',curr_attempt) + MLCard.set('MaxAttempts',curr_attempt) MLCard.write(pjoin(SubProc_dir,'MadLoopParams.dat')) # initialization is performed. 
- MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, + MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, npoints = curr_attempt) compile_time, run_time, ram_usage = \ MadLoopInitializer.make_and_run(run_dir) @@ -7200,7 +7200,7 @@ def need_init(): infos['Process_compilation']==None: infos['Process_compilation'] = compile_time infos['Initialization'] = run_time - + MLCard_orig.write(pjoin(SubProc_dir,'MadLoopParams.dat')) if need_init(): return None @@ -7219,8 +7219,8 @@ def need_init(ML_resources_path, proc_prefix, r_files): MLCardPath = pjoin(proc_dir,'SubProcesses','MadLoopParams.dat') if not os.path.isfile(MLCardPath): raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ - %MLCardPath) - MLCard = banner_mod.MadLoopParam(MLCardPath) + %MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) req_files = ['HelFilter.dat','LoopFilter.dat'] # Make sure that LoopFilter really is needed. @@ -7234,9 +7234,9 @@ def need_init(ML_resources_path, proc_prefix, r_files): req_files.remove('HelFilter.dat') except ValueError: pass - + for v_folder in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)): + '%s*'%subproc_prefix)): # Make sure it is a valid MadLoop directory if not os.path.isdir(v_folder) or not os.path.isfile(\ pjoin(v_folder,'loop_matrix.f')): @@ -7247,7 +7247,7 @@ def need_init(ML_resources_path, proc_prefix, r_files): if need_init(pjoin(proc_dir,'SubProcesses','MadLoop5_resources'), proc_prefix, req_files): return True - + return False @staticmethod @@ -7265,7 +7265,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['treatCardsLoopNoInit'], cwd=pjoin(proc_dir,'Source')) else: interface.do_treatcards('all --no_MadLoopInit') - + # First make sure that IREGI and CUTTOOLS are compiled if needed if os.path.exists(pjoin(proc_dir,'Source','CutTools')): misc.compile(arg=['libcuttools'],cwd=pjoin(proc_dir,'Source')) @@ -7273,8 +7273,8 @@ def init_MadLoop(proc_dir, 
n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['libiregi'],cwd=pjoin(proc_dir,'Source')) # Then make sure DHELAS and MODEL are compiled misc.compile(arg=['libmodel'],cwd=pjoin(proc_dir,'Source')) - misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) - + misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) + # Now initialize the MadLoop outputs logger.info('Initializing MadLoop loop-induced matrix elements '+\ '(this can take some time)...') @@ -7283,7 +7283,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, if MG_options: if interface and hasattr(interface, 'cluster') and isinstance(interface.cluster, cluster.MultiCore): mcore = interface.cluster - else: + else: mcore = cluster.MultiCore(**MG_options) else: mcore = cluster.onecore @@ -7294,10 +7294,10 @@ def run_initialization_wrapper(run_dir, infos, attempts): run_dir=run_dir, infos=infos) else: n_PS = MadLoopInitializer.run_initialization( - run_dir=run_dir, infos=infos, attempts=attempts) + run_dir=run_dir, infos=infos, attempts=attempts) infos['nPS'] = n_PS return 0 - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return @@ -7307,21 +7307,21 @@ def wait_monitoring(Idle, Running, Done): init_info = {} # List all virtual folders while making sure they are valid MadLoop folders VirtualFolders = [f for f in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)) if (os.path.isdir(f) or + '%s*'%subproc_prefix)) if (os.path.isdir(f) or os.path.isfile(pjoin(f,'loop_matrix.f')))] logger.debug("Now Initializing MadLoop matrix element in %d folder%s:"%\ (len(VirtualFolders),'s' if len(VirtualFolders)>1 else '')) - logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in + logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in VirtualFolders)) for v_folder in VirtualFolders: init_info[v_folder] = {} - + # We try all multiples of n_PS from 1 to max_mult, first in DP and then # in QP before 
giving up, or use default values if n_PS is None. max_mult = 3 if n_PS is None: # Then use the default list of number of PS points to try - mcore.submit(run_initialization_wrapper, + mcore.submit(run_initialization_wrapper, [pjoin(v_folder), init_info[v_folder], None]) else: # Use specific set of PS points @@ -7348,8 +7348,8 @@ def wait_monitoring(Idle, Running, Done): '%d PS points (%s), in %.3g(compil.) + %.3g(init.) secs.'%( abs(init['nPS']),'DP' if init['nPS']>0 else 'QP', init['Process_compilation'],init['Initialization'])) - - logger.info('MadLoop initialization finished.') + + logger.info('MadLoop initialization finished.') AskforEditCard = common_run.AskforEditCard @@ -7364,16 +7364,16 @@ def wait_monitoring(Idle, Running, Done): import os import optparse - # Get the directory of the script real path (bin) - # and add it to the current PYTHONPATH + # Get the directory of the script real path (bin) + # and add it to the current PYTHONPATH #root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))) sys.path.insert(0, root_path) - class MyOptParser(optparse.OptionParser): + class MyOptParser(optparse.OptionParser): class InvalidOption(Exception): pass def error(self, msg=''): raise MyOptParser.InvalidOption(msg) - # Write out nice usage message if called with -h or --help + # Write out nice usage message if called with -h or --help usage = "usage: %prog [options] [FILE] " parser = MyOptParser(usage=usage) parser.add_option("-l", "--logging", default='INFO', @@ -7384,7 +7384,7 @@ def error(self, msg=''): help='force to launch debug mode') parser_error = '' done = False - + for i in range(len(sys.argv)-1): try: (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i]) @@ -7394,7 +7394,7 @@ def error(self, msg=''): else: args += sys.argv[len(sys.argv)-i:] if not done: - # raise correct error: + # raise correct error: try: (options, args) = parser.parse_args() except MyOptParser.InvalidOption as error: @@ -7407,8 +7407,8 @@ 
def error(self, msg=''): import subprocess import logging import logging.config - # Set logging level according to the logging level given by options - #logging.basicConfig(level=vars(logging)[options.logging]) + # Set logging level according to the logging level given by options + #logging.basicConfig(level=vars(logging)[options.logging]) import internal import internal.coloring_logging # internal.file = XXX/bin/internal/__init__.py @@ -7431,13 +7431,13 @@ def error(self, msg=''): raise pass - # Call the cmd interface main loop + # Call the cmd interface main loop try: if args: # a single command is provided if '--web' in args: - i = args.index('--web') - args.pop(i) + i = args.index('--web') + args.pop(i) cmd_line = MadEventCmd(me_dir, force_run=True) else: cmd_line = MadEventCmdShell(me_dir, force_run=True) @@ -7457,13 +7457,13 @@ def error(self, msg=''): pass - - - - - - - - + + + + + + + + diff --git a/epochX/cudacpp/gg_ttg.mad/src/cudacpp_src.mk b/epochX/cudacpp/gg_ttg.mad/src/cudacpp_src.mk index d4cc628aec..b4e446bc45 100644 --- a/epochX/cudacpp/gg_ttg.mad/src/cudacpp_src.mk +++ b/epochX/cudacpp/gg_ttg.mad/src/cudacpp_src.mk @@ -1,12 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) is used in the Subprocess and src directories - -THISMK = $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
#------------------------------------------------------------------------------- @@ -16,165 +11,24 @@ SHELL := /bin/bash #------------------------------------------------------------------------------- -#=== Configure common compiler flags for CUDA and C++ - -INCFLAGS = -I. -OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here - -#------------------------------------------------------------------------------- - #=== Configure the C++ compiler -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) $(USE_NVTX) -fPIC -Wall -Wshadow -Wextra +include ../Source/make_opts + +MG_CXXFLAGS += -fPIC -I. $(USE_NVTX) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS+= -ffast-math # see issue #117 +MG_CXXFLAGS += -ffast-math # see issue #117 endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY # Note: AR, CXX and FC are implicitly defined if not set externally # See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html ###RANLIB = ranlib -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -LDFLAGS = -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -LDFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler (note: NVCC is already exported including ccache) - -###$(info NVCC=$(NVCC)) - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ builds (note: NVCC is already exported including ccache) - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif - 
-#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for CUDA and C++ - -# Assuming uname is available, detect if architecture is PowerPC -UNAME_P := $(shell uname -p) - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # BUILD ERROR IF THIS ADDED IN SRC?! -else - ###AR=gcc-ar # needed by -flto - ###RANLIB=gcc-ranlib # needed by -flto - ###CXXFLAGS+= -flto # NB: build error from src/Makefile unless gcc-ar and gcc-ranlib are used - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -#------------------------------------------------------------------------------- - #=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the build flags appropriate to OMPFLAGS ###$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword 
registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -###$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -###$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -###$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -###$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - CXXFLAGS += -DMGONGPU_HAS_NO_CURAND -else ifneq ($(RNDGEN),hasCurand) - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- @@ -182,28 +36,18 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -###$(info Current directory is $(shell pwd)) -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIRREL = ../lib/$(BUILDDIR) - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR=1 is set)) -else - override BUILDDIR = . - override LIBDIRREL = ../lib - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) -endif -######$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) +# Build directory: +BUILDDIR := build.$(DIRTAG) +LIBDIRREL := ../lib/$(BUILDDIR) # Workaround for Mac #375 (I did not manage to fix rpath with @executable_path): use absolute paths for LIBDIR # (NB: this is quite ugly because it creates the directory if it does not exist - to avoid removing src by mistake) -UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Darwin) override LIBDIR = $(shell mkdir -p $(LIBDIRREL); cd $(LIBDIRREL); pwd) ifeq ($(wildcard $(LIBDIR)),) @@ -223,55 +67,35 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -# Target (and build options): debug -debug: OPTFLAGS = -g -O0 -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! 
-name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ #------------------------------------------------------------------------------- cxx_objects=$(addprefix $(BUILDDIR)/, Parameters_sm.o read_slha.o) -ifneq ($(NVCC),) +ifeq ($(AVX),cuda) +COMPILER=$(NVCC) cu_objects=$(addprefix $(BUILDDIR)/, Parameters_sm_cu.o) +else +COMPILER=$(CXX) +cu_objects= endif # Target (and build rules): common (src) library -ifneq ($(NVCC),) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) $(cu_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(NVCC) -shared -o $@ $(cxx_objects) $(cu_objects) $(LDFLAGS) -else -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(CXX) -shared -o $@ $(cxx_objects) $(LDFLAGS) -endif + mkdir -p $(LIBDIR) + $(COMPILER) -shared -o $@ $(cxx_objects) $(cu_objects) $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- @@ -279,19 +103,7 @@ endif .PHONY: clean clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) -else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe -endif - -cleanall: - @echo - $(MAKE) clean -f $(THISMK) - @echo - rm -rf $(LIBDIR)/build.* - rm -rf build.* + $(RM) -f ../lib/build.*/*.so + $(RM) -rf build.* #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttg.mad/src/mgOnGpuCxtypes.h b/epochX/cudacpp/gg_ttg.mad/src/mgOnGpuCxtypes.h index ca9a9f00c0..3290d314d6 100644 --- a/epochX/cudacpp/gg_ttg.mad/src/mgOnGpuCxtypes.h +++ b/epochX/cudacpp/gg_ttg.mad/src/mgOnGpuCxtypes.h @@ -21,10 +21,14 @@ // Complex type in cuda: thrust or cucomplex or cxsmpl #ifdef __CUDACC__ #if 
defined MGONGPU_CUCXTYPE_THRUST +#ifdef __CLANG__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" // for icpx2021/clang13 (https://stackoverflow.com/a/15864661) +#endif #include +#ifdef __CLANG__ #pragma clang diagnostic pop +#endif #elif defined MGONGPU_CUCXTYPE_CUCOMPLEX #include #elif not defined MGONGPU_CUCXTYPE_CXSMPL diff --git a/epochX/cudacpp/gg_ttg.sa/src/cudacpp_src.mk b/epochX/cudacpp/gg_ttg.sa/src/cudacpp_src.mk index d4cc628aec..c757875347 100644 --- a/epochX/cudacpp/gg_ttg.sa/src/cudacpp_src.mk +++ b/epochX/cudacpp/gg_ttg.sa/src/cudacpp_src.mk @@ -1,7 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. +# Further modified by: J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. #=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) #=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) 
is used in the Subprocess and src directories @@ -95,50 +95,52 @@ CXXFLAGS += $(OMPFLAGS) # Set the build flags appropriate to each AVX choice (example: "make AVX=none") # [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] # [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) +ifeq ($(NVCC),) + $(info AVX=$(AVX)) + ifeq ($(UNAME_P),ppc64le) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) + endif + else ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' 
are supported on ARM for the moment) + endif + else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + ifeq ($(AVX),none) + override AVXFLAGS = -march=x86-64 # no SIMD (see #588) + else ifeq ($(AVX),sse4) + override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # 
AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif endif + # For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? + CXXFLAGS+= $(AVXFLAGS) endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? -CXXFLAGS+= $(AVXFLAGS) # Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") ###$(info FPTYPE=$(FPTYPE)) @@ -182,11 +184,19 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +ifneq ($(NVCC),) + override DIRTAG = cuda_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +else + override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +endif # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +ifneq ($(NVCC),) + override TAG = cuda_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +else + override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +endif # Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 ###$(info Current directory is $(shell pwd)) @@ -223,35 +233,21 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so # Target (and build options): debug debug: OPTFLAGS = -g -O0 
debug: all.$(TAG) -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) - #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ @@ -278,20 +274,61 @@ endif # Target: clean the builds .PHONY: clean +BUILD_DIRS := $(wildcard build.*) +NUM_BUILD_DIRS := $(words $(BUILD_DIRS)) + clean: ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) +ifeq ($(NUM_BUILD_DIRS),1) + $(info USEBUILDDIR=1, only one src build directory found.) + rm -rf ../lib/$(BUILD_DIRS) + rm -rf $(BUILD_DIRS) +else ifeq ($(NUM_BUILD_DIRS),0) + $(error USEBUILDDIR=1, but no src build directories are found.) else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe + $(error Multiple src BUILDDIR's found! Use 'cleannone', 'cleansse4', 'cleanavx2', 'clean512y','clean512z', 'cleancuda' or 'cleanall'.) +endif +else + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + rm -f $(BUILDDIR)/*.o $(BUILDDIR)/*.exe endif cleanall: @echo - $(MAKE) clean -f $(THISMK) + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + rm -f $(BUILDDIR)/*.o $(BUILDDIR)/*.exe @echo - rm -rf $(LIBDIR)/build.* + rm -rf ../lib/build.* rm -rf build.* +# Target: clean different builds + +cleannone: + rm -rf ../lib/build.none_* + rm -rf build.none_* + +cleansse4: + rm -rf ../lib/build.sse4_* + rm -rf build.sse4_* + +cleanavx2: + rm -rf ../lib/build.avx2_* + rm -rf build.avx2_* + +clean512y: + rm -rf ../lib/build.512y_* + rm -rf build.512y_* + +clean512z: + rm -rf ../lib/build.512z_* + rm -rf build.512z_* + +cleancuda: + rm -rf ../lib/build.cuda_* + rm -rf build.cuda_* + +cleandir: + rm -f ./*.o ./*.exe + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttgg.mad/CODEGEN_mad_gg_ttgg_log.txt b/epochX/cudacpp/gg_ttgg.mad/CODEGEN_mad_gg_ttgg_log.txt index ee3d38dfb1..7a61f69d2f 100644 --- a/epochX/cudacpp/gg_ttgg.mad/CODEGEN_mad_gg_ttgg_log.txt +++ 
b/epochX/cudacpp/gg_ttgg.mad/CODEGEN_mad_gg_ttgg_log.txt @@ -52,7 +52,7 @@ Note that you can still compile and run aMC@NLO with the built-in PDFs Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt import /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttgg.mg The import format was not given, so we guess it as command set stdout_level DEBUG @@ -62,7 +62,7 @@ generate g g > t t~ g g No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.00561833381652832  +DEBUG: model prefixing takes 0.005710124969482422  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. INFO: Trying coupling order WEIGHTED<=4: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ g g WEIGHTED<=4 @1 INFO: Process has 123 diagrams -1 processes with 123 diagrams generated in 0.154 s +1 processes with 123 diagrams generated in 0.156 s Total: 1 processes with 123 diagrams output madevent ../TMPOUT/CODEGEN_mad_gg_ttgg --hel_recycling=False --vector_size=32 --me_exporter=standalone_cudacpp Load PLUGIN.CUDACPP_OUTPUT @@ -175,7 +175,7 @@ INFO: Generating Helas calls for process: g g > t t~ g g WEIGHTED<=4 @1 INFO: Processing color information for process: g g > t t~ g g @1 INFO: Creating files in directory P1_gg_ttxgg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -190,15 +190,15 @@ INFO: Created files CPPProcess.h and CPPProcess.cc in directory ./. DEBUG: vector, subproc_group,self.opt['vector_size'] =  32 True 32 [export_v4.py at line 1872]  INFO: Generating Feynman diagrams for Process: g g > t t~ g g WEIGHTED<=4 @1 INFO: Finding symmetric diagrams for subprocess group gg_ttxgg -Generated helas calls for 1 subprocesses (123 diagrams) in 0.420 s -Wrote files for 222 helas calls in 0.679 s +Generated helas calls for 1 subprocesses (123 diagrams) in 0.418 s +Wrote files for 222 helas calls in 0.683 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 5 routines in 0.323 s +ALOHA: aloha creates 5 routines in 0.327 s DEBUG: Entering PLUGIN_ProcessExporter.convert_model (create the model) [output.py at line 202]  ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines @@ -206,7 +206,7 @@ ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 10 routines in 0.314 s +ALOHA: aloha creates 10 routines in 0.309 s VVV1 VVV1 FFV1 @@ -233,12 +233,14 @@ save configuration file to /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CO INFO: Use Fortran compiler gfortran INFO: Use c++ compiler g++ INFO: Generate web pages +DEBUG: standardise /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttgg/Source/make_opts (fix f2py3 and sort make_opts_variables) before applying patch.common DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttgg; patch -p4 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common patching file Source/genps.inc 
+patching file Source/make_opts patching file Source/makefile patching file SubProcesses/makefile +patching file bin/internal/banner.py patching file bin/internal/gen_ximprove.py -Hunk #1 succeeded at 391 (offset 6 lines). patching file bin/internal/madevent_interface.py DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttgg/SubProcesses/P1_gg_ttxgg; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f @@ -255,9 +257,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m3.226s -user 0m2.976s -sys 0m0.232s +real 0m3.244s +user 0m3.007s +sys 0m0.230s ************************************************************ * * * W E L C O M E to * @@ -283,7 +285,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttgg/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt treatcards run quit INFO: @@ -313,7 +315,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttgg/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". 
Set another one in ./input/mg5_configuration.txt treatcards param quit INFO: diff --git a/epochX/cudacpp/gg_ttgg.mad/Source/make_opts b/epochX/cudacpp/gg_ttgg.mad/Source/make_opts index e4b87ee6ad..435bed0dc7 100644 --- a/epochX/cudacpp/gg_ttgg.mad/Source/make_opts +++ b/epochX/cudacpp/gg_ttgg.mad/Source/make_opts @@ -1,7 +1,7 @@ DEFAULT_CPP_COMPILER=g++ DEFAULT_F2PY_COMPILER=f2py3 DEFAULT_F_COMPILER=gfortran -GLOBAL_FLAG=-O3 -ffast-math -fbounds-check +GLOBAL_FLAG=-O3 -ffast-math MACFLAG= MG5AMC_VERSION=SpecifiedByMG5aMCAtRunTime PYTHIA8_PATH=NotInstalled @@ -13,31 +13,53 @@ BIASLIBDIR=../../../lib/ BIASLIBRARY=libbias.$(libext) # Rest of the makefile -ifeq ($(origin FFLAGS),undefined) -FFLAGS= -w -fPIC -#FFLAGS+= -g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none -endif -FFLAGS += $(GLOBAL_FLAG) +#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) + +# Detect O/S kernel (Linux, Darwin...) +UNAME_S := $(shell uname -s) + +# Detect architecture (x86_64, ppc64le...) +UNAME_P := $(shell uname -p) + +#------------------------------------------------------------------------------- # REMOVE MACFLAG IF NOT ON MAC OR FOR F2PY -UNAME := $(shell uname -s) ifdef f2pymode MACFLAG= else -ifneq ($(UNAME), Darwin) +ifneq ($(UNAME_S), Darwin) MACFLAG= endif endif +############################################################ +# Default compiler flags +# To change optimisation level, override these as follows: +# make CXXFLAGS="-O0 -g" +# or export them as environment variables +# For debugging Fortran, one could e.g. 
use: +# FCFLAGS="-g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none" +############################################################ +FCFLAGS ?= $(GLOBAL_FLAG) -fbounds-check +CXXFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG +NVCCFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG -use_fast_math -lineinfo +LDFLAGS ?= $(STDLIB) -ifeq ($(origin CXXFLAGS),undefined) -CXXFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +ifneq ($(FFLAGS),) +# Madgraph used to use FFLAGS, so the user probably tries to change the flags specifically for madgraph: +FCFLAGS = $(FFLAGS) endif -ifeq ($(origin CFLAGS),undefined) -CFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +# Madgraph-specific flags: +WARNFLAGS = -Wall -Wshadow -Wextra +ifeq (,$(findstring -std=,$(CXXFLAGS))) +CXXSTANDARD= -std=c++17 endif +MG_FCFLAGS += -fPIC -w +MG_CXXFLAGS += -fPIC $(CXXSTANDARD) $(WARNFLAGS) $(MACFLAG) +MG_NVCCFLAGS += -fPIC $(CXXSTANDARD) --forward-unknown-to-host-compiler $(WARNFLAGS) +MG_LDFLAGS += $(MACFLAG) # Set FC unless it's defined by an environment variable ifeq ($(origin FC),default) @@ -49,45 +71,40 @@ endif # Increase the number of allowed charcters in a Fortran line ifeq ($(FC), ftn) -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler else VERS="$(shell $(FC) --version | grep ifort -i)" ifeq ($(VERS), "") -FFLAGS+= -ffixed-line-length-132 +MG_FCFLAGS += -ffixed-line-length-132 else -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler endif endif -UNAME := $(shell uname -s) -ifeq ($(origin LDFLAGS), undefined) -LDFLAGS=$(STDLIB) $(MACFLAG) -endif - # Options: dynamic, lhapdf # Option dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) dylibext=dylib else dylibext=so endif ifdef dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) libext=dylib -FFLAGS+= -fno-common -LDFLAGS += -bundle +MG_FCFLAGS += -fno-common +MG_LDFLAGS += -bundle define CREATELIB $(FC) -dynamiclib 
-undefined dynamic_lookup -o $(1) $(2) endef else libext=so -FFLAGS+= -fPIC -LDFLAGS += -shared +MG_FCFLAGS += -fPIC +MG_LDFLAGS += -shared define CREATELIB -$(FC) $(FFLAGS) $(LDFLAGS) -o $(1) $(2) +$(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MG_LDFLAGS) $(LDFLAGS) -o $(1) $(2) endef endif else @@ -101,17 +118,9 @@ endif # Option lhapdf ifneq ($(lhapdf),) -CXXFLAGS += $(shell $(lhapdf) --cppflags) +MG_CXXFLAGS += $(shell $(lhapdf) --cppflags) alfas_functions=alfas_functions_lhapdf llhapdf+= $(shell $(lhapdf) --cflags --libs) -lLHAPDF -# check if we need to activate c++11 (for lhapdf6.2) -ifeq ($(origin CXX),default) -ifeq ($lhapdfversion$lhapdfsubversion,62) -CXX=$(DEFAULT_CPP_COMPILER) -std=c++11 -else -CXX=$(DEFAULT_CPP_COMPILER) -endif -endif else alfas_functions=alfas_functions llhapdf= @@ -120,4 +129,207 @@ endif # Helper function to check MG5 version define CHECK_MG5AMC_VERSION python -c 'import re; from distutils.version import StrictVersion; print StrictVersion("$(MG5AMC_VERSION)") >= StrictVersion("$(1)") if re.match("^[\d\.]+$$","$(MG5AMC_VERSION)") else True;' -endef \ No newline at end of file +endef + +#------------------------------------------------------------------------------- + +# Set special cases for non-gcc/clang builds +# AVX below gets overridden from outside in architecture-specific builds +AVX ?= none +# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] +# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] +$(info AVX=$(AVX)) +ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + endif +else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for 
clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif +endif + +# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? +MG_CXXFLAGS+= $(AVXFLAGS) + +#------------------------------------------------------------------------------- + +#=== Configure the CUDA compiler if available + +# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) +# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below +ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside + $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") + override CUDA_HOME=disabled +endif + +# If CUDA_HOME is not set, try to set it from the location of nvcc +ifndef CUDA_HOME + CUDA_HOME = $(patsubst %bin/nvcc,%,$(shell which nvcc 2>/dev/null)) + $(info CUDA_HOME="$(CUDA_HOME)") +endif + +# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists +ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) + NVCC = $(CUDA_HOME)/bin/nvcc + USE_NVTX ?=-DUSE_NVTX + # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html + # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ + # Default: use compute capability 70 (Volta architecture), and embed PTX to support later architectures, too. + # Set MADGRAPH_CUDA_ARCHITECTURE to the desired value to change the default. + # Build for multiple architectures using a space-separated list, e.g. 
MADGRAPH_CUDA_ARCHITECTURE="70 80" + MADGRAPH_CUDA_ARCHITECTURE ?= 70 + # Generate PTX for the first architecture: + CUARCHFLAGS := --generate-code arch=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)),code=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)) + # Generate device code for all architectures: + CUARCHFLAGS += $(foreach arch,$(MADGRAPH_CUDA_ARCHITECTURE), --generate-code arch=compute_$(arch),code=sm_$(arch)) + + CUINC = -I$(CUDA_HOME)/include/ + CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! + MG_LDFLAGS += $(CURANDLIBFLAGS) + MG_NVCCFLAGS += $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) + +else ifeq ($(AVX),cuda) + $(error nvcc is not visible in PATH. Either add it to PATH or export CUDA_HOME to compile with cuda) + ifeq ($(AVX),cuda) + $(error Cannot compile for cuda without NVCC) + endif +endif + +# Set the host C++ compiler for nvcc via "-ccbin " +# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." is not supported) +MG_NVCCFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) + +# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) +ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) +MG_NVCCFLAGS += -allow-unsupported-compiler +endif + +#------------------------------------------------------------------------------- + +#=== Configure ccache for C++ and CUDA builds + +# Enable ccache if USECCACHE=1 +ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) + override CXX:=ccache $(CXX) +endif + +ifneq ($(NVCC),) + ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) + override NVCC:=ccache $(NVCC) + endif +endif + +#------------------------------------------------------------------------------- + +#=== Configure PowerPC-specific compiler flags for C++ and CUDA + +# PowerPC-specific CXX / CUDA compiler flags (being reviewed) +ifeq ($(UNAME_P),ppc64le) + MG_CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none 
and sse4 + MG_NVCCFLAGS+= -Xcompiler -mno-float128 + + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + endif +endif + +#------------------------------------------------------------------------------- +#=== Apple-specific compiler/linker options + +# Add -std=c++17 explicitly to avoid build errors on macOS +# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" +ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) +MG_CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 +endif + +ifeq ($(UNAME_S),Darwin) +STDLIB = -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) +MG_LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" +else +MG_LDFLAGS += -Xlinker --no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +endif + +#------------------------------------------------------------------------------- + +#=== C++/CUDA-specific flags for floating-point types and random generators to use + +# Set the default FPTYPE (floating point type) choice +FPTYPE ?= m + +# Set the default HELINL (inline helicities?) choice +HELINL ?= 0 + +# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice +HRDCOD ?= 0 + +# Set the default RNDGEN (random number generator) choice +ifeq ($(NVCC),) + RNDGEN ?= hasNoCurand +else + RNDGEN ?= hasCurand +endif + +# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so sub-makes don't go back to the defaults +export AVX +export AVXFLAGS +export FPTYPE +export HELINL +export HRDCOD +export RNDGEN + +#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN + +# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") +# $(info FPTYPE=$(FPTYPE)) +ifeq ($(FPTYPE),d) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE +else ifeq ($(FPTYPE),f) + COMMONFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT +else ifeq ($(FPTYPE),m) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT +else + $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) +endif + +# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") +# $(info HELINL=$(HELINL)) +ifeq ($(HELINL),1) + COMMONFLAGS += -DMGONGPU_INLINE_HELAMPS +else ifneq ($(HELINL),0) + $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") +# $(info HRDCOD=$(HRDCOD)) +ifeq ($(HRDCOD),1) + COMMONFLAGS += -DMGONGPU_HARDCODE_PARAM +else ifneq ($(HRDCOD),0) + $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") +$(info RNDGEN=$(RNDGEN)) +ifeq ($(RNDGEN),hasNoCurand) + override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND + override CURANDLIBFLAGS = +else ifeq ($(RNDGEN),hasCurand) + CXXFLAGSCURAND = $(CUINC) +else + $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) +endif + +MG_CXXFLAGS += $(COMMONFLAGS) +MG_NVCCFLAGS += $(COMMONFLAGS) + 
+#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttgg.mad/Source/makefile b/epochX/cudacpp/gg_ttgg.mad/Source/makefile index 00c73099a0..407b1b753e 100644 --- a/epochX/cudacpp/gg_ttgg.mad/Source/makefile +++ b/epochX/cudacpp/gg_ttgg.mad/Source/makefile @@ -10,8 +10,8 @@ include make_opts # Source files -PROCESS= hfill.o matrix.o myamp.o -DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o +PROCESS = hfill.o matrix.o myamp.o +DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o HBOOK = hfill.o hcurve.o hbook1.o hbook2.o GENERIC = $(alfas_functions).o transpole.o invarients.o hfill.o pawgraphs.o ran1.o \ rw_events.o rw_routines.o kin_functions.o open_file.o basecode.o setrun.o \ @@ -22,7 +22,7 @@ GENSUDGRID = gensudgrid.o is-sud.o setrun_gen.o rw_routines.o open_file.o # Locally compiled libraries -LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) +LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) # Binaries @@ -32,6 +32,9 @@ BINARIES = $(BINDIR)gen_ximprove $(BINDIR)gensudgrid $(BINDIR)combine_runs all: $(LIBRARIES) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(LIBDIR)libbias.$(libext) +%.o: %.f *.inc + $(FC) -I. $(MG_FCFLAGS) $(FCFLAGS) -c $< -o $@ + # Libraries $(LIBDIR)libdsample.$(libext): $(DSAMPLE) @@ -39,36 +42,35 @@ $(LIBDIR)libdsample.$(libext): $(DSAMPLE) $(LIBDIR)libgeneric.$(libext): $(GENERIC) $(call CREATELIB, $@, $^) $(LIBDIR)libdhelas.$(libext): DHELAS - cd DHELAS; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libpdf.$(libext): PDF make_opts - cd PDF; make; cd .. 
+ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" ifneq (,$(filter edff chff, $(pdlabel1) $(pdlabel2))) $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make ; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" else $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make -f makefile_dummy; cd ../../ -endif + $(MAKE) -C $< -f makefile_dummy FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" +endif $(LIBDIR)libcernlib.$(libext): CERNLIB - cd CERNLIB; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" # The bias library is here the dummy by default; compilation of other ones specified in the run_card will be done by MG5aMC directly. $(LIBDIR)libbias.$(libext): BIAS/dummy - cd BIAS/dummy; make; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libmodel.$(libext): MODEL param_card.inc - cd MODEL; make + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" param_card.inc: ../Cards/param_card.dat ../bin/madevent treatcards param + touch $@ # madevent doesn't update the time stamp if there's nothing to do -$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o - $(FC) $(LDFLAGS) -o $@ $^ -#$(BINDIR)combine_events: $(COMBINE) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) run_card.inc $(LIBDIR)libbias.$(libext) -# $(FC) -o $@ $(COMBINE) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC $(llhapdf) $(LDFLAGS) -lbias +$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o + $(FC) $(MG_LDFLAGS) $(LDFLAGS) -o $@ $^ $(BINDIR)gensudgrid: $(GENSUDGRID) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libcernlib.$(libext) - $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(LDFLAGS) + $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) 
-lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(MG_LDFLAGS) $(LDFLAGS) # Dependencies @@ -85,6 +87,7 @@ rw_events.o: rw_events.f run_config.inc run_card.inc: ../Cards/run_card.dat ../bin/madevent treatcards run + touch $@ # madevent doesn't update the time stamp if there's nothing to do clean4pdf: rm -f ../lib/libpdf.$(libext) @@ -120,7 +123,7 @@ $(LIBDIR)libiregi.a: $(IREGIDIR) cd $(IREGIDIR); make ln -sf ../Source/$(IREGIDIR)libiregi.a $(LIBDIR)libiregi.a -cleanSource: +clean: $(RM) *.o $(LIBRARIES) $(BINARIES) cd PDF; make clean; cd .. cd PDF/gammaUPC; make clean; cd ../../ @@ -132,11 +135,3 @@ cleanSource: cd BIAS/ptj_bias; make clean; cd ../.. if [ -d $(CUTTOOLSDIR) ]; then cd $(CUTTOOLSDIR); make clean; cd ..; fi if [ -d $(IREGIDIR) ]; then cd $(IREGIDIR); make clean; cd ..; fi - -clean: cleanSource - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make clean; cd -; done; - -cleanavx: - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; -cleanall: cleanSource # THIS IS THE ONE - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; diff --git a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/Bridge.h b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/Bridge.h index bf8b5e024d..c263f39a62 100644 --- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/Bridge.h +++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/Bridge.h @@ -236,7 +236,7 @@ namespace mg5amcCpu #ifdef __CUDACC__ if( ( m_nevt < s_gputhreadsmin ) || ( m_nevt % s_gputhreadsmin != 0 ) ) throw std::runtime_error( "Bridge constructor: nevt should be a multiple of " + std::to_string( s_gputhreadsmin ) ); - while( m_nevt != m_gpublocks * m_gputhreads ) + while( m_nevt != static_cast( m_gpublocks * m_gputhreads ) ) { m_gputhreads /= 2; if( m_gputhreads < s_gputhreadsmin ) @@ -266,7 +266,7 @@ namespace mg5amcCpu template void Bridge::set_gpugrid( const int gpublocks, const int gputhreads ) { - if( m_nevt != gpublocks * gputhreads ) + if( m_nevt != static_cast( gpublocks * gputhreads ) ) throw 
std::runtime_error( "Bridge: gpublocks*gputhreads must equal m_nevt in set_gpugrid" ); m_gpublocks = gpublocks; m_gputhreads = gputhreads; diff --git a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/MadgraphTest.h b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/MadgraphTest.h index ef40624c88..b0f2250c25 100644 --- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/MadgraphTest.h +++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/MadgraphTest.h @@ -199,10 +199,6 @@ class MadgraphTest : public testing::TestWithParam } }; -// Since we link both the CPU-only and GPU tests into the same executable, we prevent -// a multiply defined symbol by only compiling this in the non-CUDA phase: -#ifndef __CUDACC__ - /// Compare momenta and matrix elements. /// This uses an implementation of TestDriverBase to run a madgraph workflow, /// and compares momenta and matrix elements with a reference file. @@ -307,6 +303,4 @@ TEST_P( MadgraphTest, CompareMomentaAndME ) } } -#endif // __CUDACC__ - #endif /* MADGRAPHTEST_H_ */ diff --git a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/MatrixElementKernels.cc b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/MatrixElementKernels.cc index 74b5239ebf..2d6f27cd5d 100644 --- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/MatrixElementKernels.cc +++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/MatrixElementKernels.cc @@ -196,6 +196,9 @@ namespace mg5amcGpu void MatrixElementKernelDevice::setGrid( const int gpublocks, const int gputhreads ) { + m_gpublocks = gpublocks; + m_gputhreads = gputhreads; + if( m_gpublocks == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gpublocks must be > 0 in setGrid" ); if( m_gputhreads == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gputhreads must be > 0 in setGrid" ); if( this->nevt() != m_gpublocks * m_gputhreads ) throw std::runtime_error( "MatrixElementKernelDevice: nevt mismatch in setGrid" ); diff --git a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg/check_sa.cc 
b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg/check_sa.cc +++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg/counters.cc b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg/counters.cc +++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk index 509307506b..a522ddb335 100644 --- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk @@ -1,56 +1,41 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: use ':=' to ensure that the value of CUDACPP_MAKEFILE is not modified further down after including make_opts -#=== NB: use 'override' to ensure that the value can not be modified from the outside -override CUDACPP_MAKEFILE := $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) -###$(info CUDACPP_MAKEFILE='$(CUDACPP_MAKEFILE)') - -#=== NB: different names (e.g. 
cudacpp.mk and cudacpp_src.mk) are used in the Subprocess and src directories -override CUDACPP_SRC_MAKEFILE = cudacpp_src.mk - -#------------------------------------------------------------------------------- - -#=== Use bash in the Makefile (https://www.gnu.org/software/make/manual/html_node/Choosing-the-Shell.html) - -SHELL := /bin/bash - -#------------------------------------------------------------------------------- - -#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) - -# Detect O/S kernel (Linux, Darwin...) -UNAME_S := $(shell uname -s) -###$(info UNAME_S='$(UNAME_S)') - -# Detect architecture (x86_64, ppc64le...) -UNAME_P := $(shell uname -p) -###$(info UNAME_P='$(UNAME_P)') - -#------------------------------------------------------------------------------- - -#=== Include the common MG5aMC Makefile options - -# OM: this is crucial for MG5aMC flag consistency/documentation -# AV: temporarely comment this out because it breaks cudacpp builds -ifneq ($(wildcard ../../Source/make_opts),) -include ../../Source/make_opts -endif +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
+ +# This makefile extends the Fortran makefile called "makefile" + +CUDACPP_SRC_MAKEFILE = cudacpp_src.mk + +# Self-invocation with adapted flags: +cppnative: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=native AVXFLAGS="-march=native" cppbuild +cppnone: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=none AVXFLAGS= cppbuild +cppsse4: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=sse4 AVXFLAGS=-march=nehalem cppbuild +cppavx2: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=avx2 AVXFLAGS=-march=haswell cppbuild +cppavx512y: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512y AVXFLAGS="-march=skylake-avx512 -mprefer-vector-width=256" cppbuild +cppavx512z: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512z AVXFLAGS="-march=skylake-avx512 -DMGONGPU_PVW512" cppbuild +cuda: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=cuda cudabuild #------------------------------------------------------------------------------- #=== Configure common compiler flags for C++ and CUDA +# NB: The base flags are defined in the fortran "makefile" + +# Include directories +INCFLAGS = -I. -I../../src -INCFLAGS = -I. 
-OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here +MG_CXXFLAGS += $(INCFLAGS) +MG_NVCCFLAGS += $(INCFLAGS) # Dependency on src directory -MG5AMC_COMMONLIB = mg5amc_common -LIBFLAGS = -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -INCFLAGS += -I../../src +MG5AMC_COMMONLIB = mg5amc_common # Compiler-specific googletest build directory (#125 and #738) ifneq ($(shell $(CXX) --version | grep '^Intel(R) oneAPI DPC++/C++ Compiler'),) @@ -99,356 +84,42 @@ endif #------------------------------------------------------------------------------- -#=== Configure the C++ compiler - -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) -Wall -Wshadow -Wextra -ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS += -ffast-math # see issue #117 -endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY - -# Optionally add debug flags to display the full list of flags (eg on Darwin) -###CXXFLAGS+= -v - -# Note: AR, CXX and FC are implicitly defined if not set externally -# See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html - -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler - -# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) -# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below -ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside - $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") - override CUDA_HOME=disabled -endif - -# If CUDA_HOME is not set, try to set it from the location of nvcc -ifndef CUDA_HOME - CUDA_HOME = $(patsubst 
%bin/nvcc,%,$(shell which nvcc 2>/dev/null)) - $(warning CUDA_HOME was not set: using "$(CUDA_HOME)") -endif - -# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists -ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) - NVCC = $(CUDA_HOME)/bin/nvcc - USE_NVTX ?=-DUSE_NVTX - # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html - # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ - # Default: use compute capability 70 for V100 (CERN lxbatch, CERN itscrd, Juwels Cluster). - # Embed device code for 70, and PTX for 70+. - # Export MADGRAPH_CUDA_ARCHITECTURE (comma-separated list) to use another value or list of values (see #533). - # Examples: use 60 for P100 (Piz Daint), 80 for A100 (Juwels Booster, NVidia raplab/Curiosity). - MADGRAPH_CUDA_ARCHITECTURE ?= 70 - ###CUARCHFLAGS = -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=compute_$(MADGRAPH_CUDA_ARCHITECTURE) -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=sm_$(MADGRAPH_CUDA_ARCHITECTURE) # Older implementation (AV): go back to this one for multi-GPU support #533 - ###CUARCHFLAGS = --gpu-architecture=compute_$(MADGRAPH_CUDA_ARCHITECTURE) --gpu-code=sm_$(MADGRAPH_CUDA_ARCHITECTURE),compute_$(MADGRAPH_CUDA_ARCHITECTURE) # Newer implementation (SH): cannot use this as-is for multi-GPU support #533 - comma:=, - CUARCHFLAGS = $(foreach arch,$(subst $(comma), ,$(MADGRAPH_CUDA_ARCHITECTURE)),-gencode arch=compute_$(arch),code=compute_$(arch) -gencode arch=compute_$(arch),code=sm_$(arch)) - CUINC = -I$(CUDA_HOME)/include/ - ifeq ($(RNDGEN),hasNoCurand) - CURANDLIBFLAGS= - else - CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! 
- endif - CUOPTFLAGS = -lineinfo - CUFLAGS = $(foreach opt, $(OPTFLAGS), -Xcompiler $(opt)) $(CUOPTFLAGS) $(INCFLAGS) $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) -use_fast_math - ###CUFLAGS += -Xcompiler -Wall -Xcompiler -Wextra -Xcompiler -Wshadow - ###NVCC_VERSION = $(shell $(NVCC) --version | grep 'Cuda compilation tools' | cut -d' ' -f5 | cut -d, -f1) - CUFLAGS += -std=c++17 # need CUDA >= 11.2 (see #333): this is enforced in mgOnGpuConfig.h - # Without -maxrregcount: baseline throughput: 6.5E8 (16384 32 12) up to 7.3E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 160 # improves throughput: 6.9E8 (16384 32 12) up to 7.7E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 128 # improves throughput: 7.3E8 (16384 32 12) up to 7.6E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 96 # degrades throughput: 4.1E8 (16384 32 12) up to 4.5E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 64 # degrades throughput: 1.7E8 (16384 32 12) flat at 1.7E8 (65536 128 12) -else ifneq ($(origin REQUIRE_CUDA),undefined) - # If REQUIRE_CUDA is set but no cuda is found, stop here (e.g. for CI tests on GPU #443) - $(error No cuda installation found (set CUDA_HOME or make nvcc visible in PATH)) -else - # No cuda. Switch cuda compilation off and go to common random numbers in C++ - $(warning CUDA_HOME is not set or is invalid: export CUDA_HOME to compile with cuda) - override NVCC= - override USE_NVTX= - override CUINC= - override CURANDLIBFLAGS= -endif -export NVCC -export CUFLAGS - -# Set the host C++ compiler for nvcc via "-ccbin " -# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." 
is not supported) -CUFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) - -# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) -ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) -CUFLAGS += -allow-unsupported-compiler -endif - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ and CUDA builds - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif -ifneq ($(NVCC),) - ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) - override NVCC:=ccache $(NVCC) - endif -endif - -#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for C++ and CUDA - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # would increase to none=4.08-4.12E6, sse4=4.99-5.03E6! -else - ###CXXFLAGS+= -flto # also on Intel this would increase throughputs by a factor 2 to 4... - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -# PowerPC-specific CUDA compiler flags (to be reviewed!) 
-ifeq ($(UNAME_P),ppc64le) - CUFLAGS+= -Xcompiler -mno-float128 -endif - -#------------------------------------------------------------------------------- - #=== Configure defaults and check if user-defined choices exist for OMPFLAGS, AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the default OMPFLAGS choice -ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on Intel (was ok without nvcc but not ok with nvcc before #578) -else ifneq ($(shell $(CXX) --version | egrep '^(clang)'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on clang (was not ok without or with nvcc before #578) -###else ifneq ($(shell $(CXX) --version | egrep '^(Apple clang)'),) # AV for Mac (Apple clang compiler) -else ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) +OMPFLAGS ?= -fopenmp +ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) override OMPFLAGS = # AV disable OpenMP MT on Apple clang (builds fail in the CI #578) -###override OMPFLAGS = -fopenmp # OM reenable OpenMP MT on Apple clang? 
(AV Oct 2023: this still fails in the CI) -else -override OMPFLAGS = -fopenmp # enable OpenMP MT by default on all other platforms -###override OMPFLAGS = # disable OpenMP MT on all other platforms (default before #575) -endif - -# Set the default AVX (vectorization) choice -ifeq ($(AVX),) - ifeq ($(UNAME_P),ppc64le) - ###override AVX = none - override AVX = sse4 - else ifeq ($(UNAME_P),arm) - ###override AVX = none - override AVX = sse4 - else ifeq ($(wildcard /proc/cpuinfo),) - override AVX = none - $(warning Using AVX='$(AVX)' because host SIMD features cannot be read from /proc/cpuinfo) - else ifeq ($(shell grep -m1 -c avx512vl /proc/cpuinfo)$(shell $(CXX) --version | grep ^clang),1) - override AVX = 512y - ###$(info Using AVX='$(AVX)' as no user input exists) - else - override AVX = avx2 - ifneq ($(shell grep -m1 -c avx512vl /proc/cpuinfo),1) - $(warning Using AVX='$(AVX)' because host does not support avx512vl) - else - $(warning Using AVX='$(AVX)' because this is faster than avx512vl for clang) - endif - endif -else - ###$(info Using AVX='$(AVX)' according to user input) -endif - -# Set the default FPTYPE (floating point type) choice -ifeq ($(FPTYPE),) - override FPTYPE = d -endif - -# Set the default HELINL (inline helicities?) choice -ifeq ($(HELINL),) - override HELINL = 0 -endif - -# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice -ifeq ($(HRDCOD),) - override HRDCOD = 0 -endif - -# Set the default RNDGEN (random number generator) choice -ifeq ($(RNDGEN),) - ifeq ($(NVCC),) - override RNDGEN = hasNoCurand - else ifeq ($(RNDGEN),) - override RNDGEN = hasCurand - endif endif -# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so that it is not necessary to pass them to the src Makefile too -export AVX -export FPTYPE -export HELINL -export HRDCOD -export RNDGEN +# Export here, so sub makes don't fall back to the defaults: export OMPFLAGS -#------------------------------------------------------------------------------- - -#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN - -# Set the build flags appropriate to OMPFLAGS -$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override 
AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS - CUFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM - CUFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND -else ifeq ($(RNDGEN),hasCurand) - override CXXFLAGSCURAND = -else - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- #=== Configure build directories and build lockfiles === -# Build directory "short" tag (defines target and path to the optional build directory) -# (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) - -# Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) -# (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) - -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIR = ../../lib/$(BUILDDIR) - override LIBDIRRPATH = '$$ORIGIN/../$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is set = 1)) -else - override BUILDDIR = . - override LIBDIR = ../../lib - override LIBDIRRPATH = '$$ORIGIN/$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) +# Build directory "short" tag (defines target and path to the build directory) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +CUDACPP_BUILDDIR = build.$(DIRTAG) +CUDACPP_LIBDIR := ../../lib/$(CUDACPP_BUILDDIR) +LIBDIRRPATH := '$$ORIGIN:$$ORIGIN/../$(CUDACPP_LIBDIR)' +ifneq ($(AVX),) + $(info Building CUDACPP in CUDACPP_BUILDDIR=$(CUDACPP_BUILDDIR). 
Libs in $(CUDACPP_LIBDIR)) endif -###override INCDIR = ../../include -###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) -# On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH +# On Linux, set rpath to CUDACPP_LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables or shared libraries ($ORIGIN on Linux) -# On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary +# On Darwin, building libraries with absolute paths in CUDACPP_LIBDIR makes this unnecessary ifeq ($(UNAME_S),Darwin) override CXXLIBFLAGSRPATH = override CULIBFLAGSRPATH = - override CXXLIBFLAGSRPATH2 = - override CULIBFLAGSRPATH2 = else # RPATH to cuda/cpp libs when linking executables override CXXLIBFLAGSRPATH = -Wl,-rpath,$(LIBDIRRPATH) override CULIBFLAGSRPATH = -Xlinker -rpath,$(LIBDIRRPATH) - # RPATH to common lib when linking cuda/cpp libs - override CXXLIBFLAGSRPATH2 = -Wl,-rpath,'$$ORIGIN' - override CULIBFLAGSRPATH2 = -Xlinker -rpath,'$$ORIGIN' endif # Setting LD_LIBRARY_PATH or DYLD_LIBRARY_PATH in the RUNTIME is no longer necessary (neither on Linux nor on Mac) @@ -458,107 +129,68 @@ override RUNTIME = #=== Makefile TARGETS and build rules below #=============================================================================== -cxx_main=$(BUILDDIR)/check.exe -fcxx_main=$(BUILDDIR)/fcheck.exe +cxx_main=$(CUDACPP_BUILDDIR)/check.exe +fcxx_main=$(CUDACPP_BUILDDIR)/fcheck.exe -ifneq ($(NVCC),) -cu_main=$(BUILDDIR)/gcheck.exe -fcu_main=$(BUILDDIR)/fgcheck.exe -else -cu_main= -fcu_main= -endif - -testmain=$(BUILDDIR)/runTest.exe +cu_main=$(CUDACPP_BUILDDIR)/gcheck.exe +fcu_main=$(CUDACPP_BUILDDIR)/fgcheck.exe ifneq ($(GTESTLIBS),) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) $(testmain) -else -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) 
+testmain=$(CUDACPP_BUILDDIR)/runTest.exe +cutestmain=$(CUDACPP_BUILDDIR)/runTest_cuda.exe endif -# Target (and build options): debug -MAKEDEBUG= -debug: OPTFLAGS = -g -O0 -debug: CUOPTFLAGS = -G -debug: MAKEDEBUG := debug -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -$(BUILDDIR)/.build.$(TAG): - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @if [ "$(oldtagsb)" != "" ]; then echo "Cannot build for tag=$(TAG) as old builds exist for other tags:"; echo " $(oldtagsb)"; echo "Please run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @touch $(BUILDDIR)/.build.$(TAG) +cppbuild: $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(cxx_main) $(fcxx_main) $(testmain) +cudabuild: $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(cu_main) $(fcu_main) $(cutestmain) # Generic target and build rules: objects from CUDA compilation -ifneq ($(NVCC),) -$(BUILDDIR)/%.o : %.cu *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cu *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c $< -o $@ -$(BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ -endif +$(CUDACPP_BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ # Generic target and build rules: objects from C++ compilation # (NB do not include CUINC here! 
add it only for NVTX or curand #679) -$(BUILDDIR)/%.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Apply special build flags only to CrossSectionKernel.cc and gCrossSectionKernel.cu (no fast math, see #117 and #516) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS := $(filter-out -ffast-math,$(CXXFLAGS)) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math +$(CUDACPP_BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math ifneq ($(NVCC),) -$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -fno-fast-math +$(CUDACPP_BUILDDIR)/gCrossSectionKernels.o: NVCCFLAGS += -Xcompiler -fno-fast-math endif endif # Apply special build flags only to check_sa.o and gcheck_sa.o (NVTX in timermap.h, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) -$(BUILDDIR)/gcheck_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) # Apply special build flags only to check_sa and CurandRandomNumberKernel (curand headers, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gcheck_sa.o: CUFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gCurandRandomNumberKernel.o: CUFLAGS += $(CXXFLAGSCURAND) -ifeq ($(RNDGEN),hasCurand) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CUINC) -endif +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) 
+$(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) + # Avoid "warning: builtin __has_trivial_... is deprecated; use __is_trivially_... instead" in nvcc with icx2023 (#592) ifneq ($(shell $(CXX) --version | egrep '^(Intel)'),) ifneq ($(NVCC),) -CUFLAGS += -Xcompiler -Wno-deprecated-builtins +MG_NVCCFLAGS += -Xcompiler -Wno-deprecated-builtins endif endif -# Avoid clang warning "overriding '-ffp-contract=fast' option with '-ffp-contract=on'" (#516) -# This patch does remove the warning, but I prefer to keep it disabled for the moment... -###ifneq ($(shell $(CXX) --version | egrep '^(clang|Apple clang|Intel)'),) -###$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -Wno-overriding-t-option -###ifneq ($(NVCC),) -###$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -Wno-overriding-t-option -###endif -###endif - #### Apply special build flags only to CPPProcess.cc (-flto) ###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += -flto -#### Apply special build flags only to CPPProcess.cc (AVXFLAGS) -###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += $(AVXFLAGS) - #------------------------------------------------------------------------------- -# Target (and build rules): common (src) library -commonlib : $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc $(BUILDDIR)/.build.$(TAG) - $(MAKE) -C ../../src $(MAKEDEBUG) -f $(CUDACPP_SRC_MAKEFILE) +$(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc + $(MAKE) AVX=$(AVX) AVXFLAGS="$(AVXFLAGS)" -C ../../src -f $(CUDACPP_SRC_MAKEFILE) #------------------------------------------------------------------------------- @@ -566,162 +198,123 @@ processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') ###$(info processid_short=$(processid_short)) MG5AMC_CXXLIB = mg5amc_$(processid_short)_cpp -cxx_objects_lib=$(BUILDDIR)/CPPProcess.o $(BUILDDIR)/MatrixElementKernels.o $(BUILDDIR)/BridgeKernels.o $(BUILDDIR)/CrossSectionKernels.o 
-cxx_objects_exe=$(BUILDDIR)/CommonRandomNumberKernel.o $(BUILDDIR)/RamboSamplingKernels.o +cxx_objects_lib=$(CUDACPP_BUILDDIR)/CPPProcess.o $(CUDACPP_BUILDDIR)/MatrixElementKernels.o $(CUDACPP_BUILDDIR)/BridgeKernels.o $(CUDACPP_BUILDDIR)/CrossSectionKernels.o +cxx_objects_exe=$(CUDACPP_BUILDDIR)/CommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/RamboSamplingKernels.o -ifneq ($(NVCC),) MG5AMC_CULIB = mg5amc_$(processid_short)_cuda -cu_objects_lib=$(BUILDDIR)/gCPPProcess.o $(BUILDDIR)/gMatrixElementKernels.o $(BUILDDIR)/gBridgeKernels.o $(BUILDDIR)/gCrossSectionKernels.o -cu_objects_exe=$(BUILDDIR)/gCommonRandomNumberKernel.o $(BUILDDIR)/gRamboSamplingKernels.o -endif +cu_objects_lib=$(CUDACPP_BUILDDIR)/gCPPProcess.o $(CUDACPP_BUILDDIR)/gMatrixElementKernels.o $(CUDACPP_BUILDDIR)/gBridgeKernels.o $(CUDACPP_BUILDDIR)/gCrossSectionKernels.o +cu_objects_exe=$(CUDACPP_BUILDDIR)/gCommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/gRamboSamplingKernels.o # Target (and build rules): C++ and CUDA shared libraries -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) - $(CXX) -shared -o $@ $(cxx_objects_lib) $(CXXLIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) - -ifneq ($(NVCC),) -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) - $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -endif +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) + $(CXX) -shared -o $@ 
$(cxx_objects_lib) $(CXXLIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) $(MG_LDFLAGS) $(LDFLAGS) -#------------------------------------------------------------------------------- - -# Target (and build rules): Fortran include files -###$(INCDIR)/%.inc : ../%.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### \cp $< $@ +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) + $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) #------------------------------------------------------------------------------- # Target (and build rules): C++ and CUDA standalone executables -$(cxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cxx_main): $(BUILDDIR)/check_sa.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o - $(CXX) -o $@ $(BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o $(CURANDLIBFLAGS) -ifneq ($(NVCC),) +$(cxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(cxx_main): $(CUDACPP_BUILDDIR)/check_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) + ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(cu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(cu_main): LIBFLAGS 
+= -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(cu_main): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(cu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cu_main): $(BUILDDIR)/gcheck_sa.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o - $(NVCC) -o $@ $(BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o $(CURANDLIBFLAGS) +$(cu_main): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif +$(cu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(cu_main): $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- - -# Generic target and build rules: objects from Fortran compilation -$(BUILDDIR)/%.o : %.f *.inc - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(FC) -I. -c $< -o $@ - -# Generic target and build rules: objects from Fortran compilation -###$(BUILDDIR)/%.o : %.f *.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi -### $(FC) -I. -I$(INCDIR) -c $< -o $@ - -# Target (and build rules): Fortran standalone executables -###$(BUILDDIR)/fcheck_sa.o : $(INCDIR)/fbridge.inc +# Check executables: ifeq ($(UNAME_S),Darwin) -$(fcxx_main): LIBFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 +$(fcxx_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif -$(fcxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcxx_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) - $(CXX) -o $@ $(BUILDDIR)/fcheck_sa.o $(OMPFLAGS) $(BUILDDIR)/fsampler.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) +$(fcxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(fcxx_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(cxx_objects_exe) $(OMPFLAGS) $(CUDACPP_BUILDDIR)/fsampler.o -lgfortran -L$(CUDACPP_LIBDIR) $(MG_LDFLAGS) $(LDFLAGS) -ifneq ($(NVCC),) ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(fcu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(fcu_main): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(fcu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(fcu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') endif ifeq ($(UNAME_S),Darwin) -$(fcu_main): LIBFLAGS += -L$(shell dirname 
$(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 -endif -$(fcu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcu_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) - $(NVCC) -o $@ $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) +$(fcu_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif +$(fcu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(fcu_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(cu_objects_exe) -lgfortran $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- # Target (and build rules): test objects and test executable -$(BUILDDIR)/testxxx.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx_cu.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx_cu.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -endif +$(testmain) $(cutestmain): $(GTESTLIBS) +$(testmain) $(cutestmain): INCFLAGS += $(GTESTINC) +$(testmain) $(cutestmain): MG_LDFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main -$(BUILDDIR)/testmisc.o: $(GTESTLIBS) 
-$(BUILDDIR)/testmisc.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(CUDACPP_BUILDDIR)/testxxx.o $(CUDACPP_BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) testxxx_cc_ref.txt +$(testmain): $(CUDACPP_BUILDDIR)/testxxx.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions +$(cutestmain): $(CUDACPP_BUILDDIR)/testxxx_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testmisc_cu.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests -endif -$(BUILDDIR)/runTest.o: $(GTESTLIBS) -$(BUILDDIR)/runTest.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/runTest.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/runTest.o +$(CUDACPP_BUILDDIR)/testmisc.o $(CUDACPP_BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/testmisc.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(cutestmain): $(CUDACPP_BUILDDIR)/testmisc_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests + + +$(CUDACPP_BUILDDIR)/runTest.o $(CUDACPP_BUILDDIR)/runTest_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/runTest.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/runTest.o +$(cutestmain): $(CUDACPP_BUILDDIR)/runTest_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/runTest_cu.o + -ifneq ($(NVCC),) -$(BUILDDIR)/runTest_cu.o: $(GTESTLIBS) -$(BUILDDIR)/runTest_cu.o: INCFLAGS += $(GTESTINC) ifneq ($(shell $(CXX) --version | grep ^Intel),) 
-$(testmain): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(testmain): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cutestmain): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cutestmain): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(testmain): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(testmain): $(BUILDDIR)/runTest_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/runTest_cu.o +$(cutestmain): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif -$(testmain): $(GTESTLIBS) -$(testmain): INCFLAGS += $(GTESTINC) -$(testmain): LIBFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main ifneq ($(OMPFLAGS),) ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -$(testmain): LIBFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) +$(testmain): MG_LDFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -$(testmain): LIBFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 +$(testmain): MG_LDFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 ###else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) ###$(testmain): LIBFLAGS += ???? 
# OMP is not supported yet by cudacpp for Apple clang (see #578 and #604) else -$(testmain): LIBFLAGS += -lgomp +$(testmain): MG_LDFLAGS += -lgomp endif endif -ifeq ($(NVCC),) # link only runTest.o -$(testmain): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) - $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -ldl -pthread $(LIBFLAGS) -else # link both runTest.o and runTest_cu.o -$(testmain): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) - $(NVCC) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) -ldl $(LIBFLAGS) -lcuda -endif +$(testmain): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(testmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) + $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -pthread $(MG_LDFLAGS) $(LDFLAGS) + +$(cutestmain): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cutestmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) + $(NVCC) -o $@ $(cu_objects_lib) $(cu_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -lcuda $(MG_LDFLAGS) $(LDFLAGS) # Use target gtestlibs to build only googletest ifneq ($(GTESTLIBS),) @@ -731,72 +324,15 @@ endif # Use flock (Linux only, no Mac) to allow 'make -j' if googletest has not yet been downloaded https://stackoverflow.com/a/32666215 $(GTESTLIBS): ifneq ($(shell which flock 2>/dev/null),) - @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - flock $(BUILDDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) + flock $(TESTDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) else if [ -d $(TESTDIR) ]; then $(MAKE) -C $(TESTDIR); fi endif #------------------------------------------------------------------------------- -# Target: build all targets in all AVX modes (each AVX mode in a separate build directory) -# Split the avxall target into five separate targets to allow parallel 'make -j avxall' builds -# (Hack: add a fbridge.inc dependency to avxall, to ensure it is only copied once for all AVX modes) -avxnone: - @echo - $(MAKE) USEBUILDDIR=1 AVX=none -f $(CUDACPP_MAKEFILE) - -avxsse4: - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 -f $(CUDACPP_MAKEFILE) - -avxavx2: - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 -f $(CUDACPP_MAKEFILE) - -avx512y: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y -f $(CUDACPP_MAKEFILE) - -avx512z: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z -f $(CUDACPP_MAKEFILE) - -ifeq ($(UNAME_P),ppc64le) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else ifeq ($(UNAME_P),arm) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 avxavx2 avx512y avx512z -avxall: avxnone avxsse4 avxavx2 avx512y avx512z -endif - -#------------------------------------------------------------------------------- - -# Target: clean the builds -.PHONY: clean - -clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(BUILDDIR) -else - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe - rm -f $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(LIBDIR)/lib$(MG5AMC_CULIB).so -endif - $(MAKE) -C ../../src clean -f $(CUDACPP_SRC_MAKEFILE) -### rm -rf $(INCDIR) - -cleanall: - @echo - $(MAKE) USEBUILDDIR=0 clean -f $(CUDACPP_MAKEFILE) - @echo - $(MAKE) USEBUILDDIR=0 -C ../../src cleanall -f $(CUDACPP_SRC_MAKEFILE) - rm -rf build.* - # Target: clean the builds as well as the gtest installation(s) 
-distclean: cleanall +distclean: clean cleansrc ifneq ($(wildcard $(TESTDIRCOMMON)),) $(MAKE) -C $(TESTDIRCOMMON) clean endif @@ -848,50 +384,55 @@ endif #------------------------------------------------------------------------------- -# Target: check (run the C++ test executable) +# Target: check/gcheck (run the C++ test executable) # [NB THIS IS WHAT IS USED IN THE GITHUB CI!] -ifneq ($(NVCC),) -check: runTest cmpFcheck cmpFGcheck -else check: runTest cmpFcheck -endif +gcheck: + $(MAKE) AVX=cuda runTest cmpFGcheck # Target: runTest (run the C++ test executable runTest.exe) -runTest: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/runTest.exe +ifneq ($(AVX),cuda) +runTest: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest.exe +else +runTest: cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest_cuda.exe +endif + # Target: runCheck (run the C++ standalone executable check.exe, with a small number of events) -runCheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/check.exe -p 2 32 2 +runCheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe -p 2 32 2 # Target: runGcheck (run the CUDA standalone executable gcheck.exe, with a small number of events) -runGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/gcheck.exe -p 2 32 2 +runGcheck: AVX=cuda +runGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe -p 2 32 2 # Target: runFcheck (run the Fortran standalone executable - with C++ MEs - fcheck.exe, with a small number of events) -runFcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 +runFcheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 # Target: runFGcheck (run the Fortran standalone executable - with CUDA MEs - fgcheck.exe, with a small number of events) -runFGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 +runFGcheck: AVX=cuda +runFGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 # Target: cmpFcheck (compare ME results from the C++ and Fortran with C++ MEs standalone executables, with 
a small number of events) -cmpFcheck: all.$(TAG) +cmpFcheck: cppbuild @echo - @echo "$(BUILDDIR)/check.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi # Target: cmpFGcheck (compare ME results from the CUDA and Fortran with CUDA MEs standalone executables, with a small number of events) -cmpFGcheck: all.$(TAG) +cmpFGcheck: AVX=cuda +cmpFGcheck: + $(MAKE) AVX=cuda cudabuild @echo - @echo "$(BUILDDIR)/gcheck.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fgcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi -# Target: memcheck (run the CUDA standalone executable gcheck.exe with a small number of events through cuda-memcheck) -memcheck: all.$(TAG) - $(RUNTIME) $(CUDA_HOME)/bin/cuda-memcheck --check-api-memory-access yes --check-deprecated-instr yes --check-device-heap yes --demangle full --language c --leak-check full --racecheck-report all --report-api-errors all --show-backtrace yes --tool memcheck --track-unused-memory yes $(BUILDDIR)/gcheck.exe -p 2 32 2 - -#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/makefile b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/makefile index d572486c2e..b69917ee1f 100644 --- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/makefile +++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/makefile @@ -1,27 +1,30 @@ SHELL := /bin/bash -include ../../Source/make_opts -FFLAGS+= -w +# Include general setup +OPTIONS_MAKEFILE := ../../Source/make_opts +include $(OPTIONS_MAKEFILE) # Enable the C preprocessor https://gcc.gnu.org/onlinedocs/gfortran/Preprocessing-Options.html -FFLAGS+= -cpp +MG_FCFLAGS += -cpp +MG_CXXFLAGS += -I. -# Compile counters with -O3 as in the cudacpp makefile (avoid being "unfair" to Fortran #740) -CXXFLAGS = -O3 -Wall -Wshadow -Wextra +all: help cppnative + +# Target if user does not specify target +help: + $(info No target specified.) 
+ $(info Viable targets are 'cppnative' (default), 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z' and 'cuda') + $(info Or 'cppall' for all C++ targets) + $(info Or 'ALL' for all C++ and cuda targets) -# Add -std=c++17 explicitly to avoid build errors on macOS -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 -endif -# Enable ccache if USECCACHE=1 +# Enable ccache for C++ if USECCACHE=1 (do not enable it for Fortran since it is not supported for Fortran) ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) override CXX:=ccache $(CXX) endif -ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) - override FC:=ccache $(FC) -endif +###ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) +### override FC:=ccache $(FC) +###endif # Load additional dependencies of the bias module, if present ifeq (,$(wildcard ../bias_dependencies)) @@ -46,34 +49,25 @@ else MADLOOP_LIB = endif -LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias - -processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') -CUDACPP_MAKEFILE=cudacpp.mk -# NB1 Using ":=" below instead of "=" is much faster (it only runs the subprocess once instead of many times) -# NB2 Use '|&' in CUDACPP_BUILDDIR to avoid confusing errors about googletest #507 -# NB3 Do not add a comment inlined "CUDACPP_BUILDDIR=$(shell ...) # comment" as otherwise a trailing space is included... -# NB4 The variables relevant to the cudacpp Makefile must be explicitly passed to $(shell...) 
-CUDACPP_MAKEENV:=$(shell echo '$(.VARIABLES)' | tr " " "\n" | egrep "(USEBUILDDIR|AVX|FPTYPE|HELINL|HRDCOD)") -###$(info CUDACPP_MAKEENV=$(CUDACPP_MAKEENV)) -###$(info $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))")) -CUDACPP_BUILDDIR:=$(shell $(MAKE) $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))") -f $(CUDACPP_MAKEFILE) -pn 2>&1 | awk '/Building/{print $$3}' | sed s/BUILDDIR=//) -ifeq ($(CUDACPP_BUILDDIR),) -$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) -else -$(info CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)') -endif -CUDACPP_COMMONLIB=mg5amc_common -CUDACPP_CXXLIB=mg5amc_$(processid_short)_cpp -CUDACPP_CULIB=mg5amc_$(processid_short)_cuda - +LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias LIBS = $(LIBDIR)libbias.$(libext) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(MADLOOP_LIB) $(LOOP_LIBS) ifneq ("$(wildcard ../../Source/RUNNING)","") LINKLIBS += -lrunning - LIBS += $(LIBDIR)librunning.$(libext) + LIBS += $(LIBDIR)librunning.$(libext) endif +SOURCEDIR_GUARD:=../../Source/.timestamp_guard +# We use $(SOURCEDIR_GUARD) to figure out if Source is out of date. The Source makefile doesn't correctly +# update all files, so we need a proxy that is updated every time we run "$(MAKE) -C ../../Source". 
+$(SOURCEDIR_GUARD) ../../Source/discretesampler.mod &: ../../Source/*.f ../../Cards/param_card.dat ../../Cards/run_card.dat +ifneq ($(shell which flock 2>/dev/null),) + flock ../../Source/.lock -c "$(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD)" +else + $(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD) +endif + +$(LIBS): $(SOURCEDIR_GUARD) # Source files @@ -91,82 +85,83 @@ PROCESS= myamp.o genps.o unwgt.o setcuts.o get_color.o \ DSIG=driver.o $(patsubst %.f, %.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) DSIG_cudacpp=driver_cudacpp.o $(patsubst %.f, %_cudacpp.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) -SYMMETRY = symmetry.o idenparts.o +SYMMETRY = symmetry.o idenparts.o -# Binaries +# cudacpp targets: +CUDACPP_MAKEFILE := cudacpp.mk +ifneq (,$(wildcard $(CUDACPP_MAKEFILE))) +include $(CUDACPP_MAKEFILE) +endif -ifeq ($(UNAME),Darwin) -LDFLAGS += -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) -LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" -else -LDFLAGS += -Wl,--no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +ifeq ($(CUDACPP_BUILDDIR),) +$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) 
endif +CUDACPP_COMMONLIB=mg5amc_common +CUDACPP_CXXLIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so +CUDACPP_CULIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so -all: $(PROG)_fortran $(CUDACPP_BUILDDIR)/$(PROG)_cpp # also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (#503) +# Set up OpenMP if supported +OMPFLAGS ?= -fopenmp ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp LINKLIBS += -liomp5 # see #578 LINKLIBS += -lintlc # undefined reference to `_intel_fast_memcpy' else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -override OMPFLAGS = -fopenmp $(CUDACPP_BUILDDIR)/$(PROG)_cpp: LINKLIBS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -override OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang -else -override OMPFLAGS = -fopenmp +OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang endif -$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o - $(FC) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) - -$(LIBS): .libs -.libs: ../../Cards/param_card.dat ../../Cards/run_card.dat - cd ../../Source; make - touch $@ +# Binaries -$(CUDACPP_BUILDDIR)/.cudacpplibs: - $(MAKE) -f $(CUDACPP_MAKEFILE) - touch $@ +$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) # On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables ($ORIGIN on Linux) # On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary -ifeq ($(UNAME_S),Darwin) - override LIBFLAGSRPATH = -else ifeq ($(USEBUILDDIR),1) 
- override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' -else - override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/$(LIBDIR)' +ifneq ($(UNAME_S),Darwin) + LIBFLAGSRPATH := -Wl,-rpath,'$$ORIGIN:$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' endif -.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link +.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link madevent_cppnone_link madevent_cppsse4_link madevent_cppavx2_link madevent_cpp512y_link madevent_cpp512z_link clean cleanall cleansrc madevent_fortran_link: $(PROG)_fortran rm -f $(PROG) ln -s $(PROG)_fortran $(PROG) -madevent_cpp_link: $(CUDACPP_BUILDDIR)/$(PROG)_cpp - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) +madevent_cppnone_link: AVX=none +madevent_cppnone_link: cppnone + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -madevent_cuda_link: $(CUDACPP_BUILDDIR)/$(PROG)_cuda - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) +madevent_cppavx2_link: AVX=avx2 +madevent_cppavx2_link: cppavx2 + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512y_link: AVX=512y +madevent_cpp512y_link: cppavx512y + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512z_link: AVX=512z +madevent_cpp512z_link: cppavx512z + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -# Building $(PROG)_cpp also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (improved patch for cpp-only builds #503) -$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o $(CUDACPP_BUILDDIR)/.cudacpplibs - $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CXXLIB) $(LIBFLAGSRPATH) $(LDFLAGS) - if [ -f $(LIBDIR)/$(CUDACPP_BUILDDIR)/lib$(CUDACPP_CULIB).* ]; then $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROCESS) $(DSIG_cudacpp) 
auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CULIB) $(LIBFLAGSRPATH) $(LDFLAGS); fi +madevent_cuda_link: AVX=cuda +madevent_cuda_link: cuda + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) -$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(CUDACPP_BUILDDIR)/$(PROG)_cpp +$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(LIBS) $(CUDACPP_CXXLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) + +$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(LIBS) $(CUDACPP_CULIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) counters.o: counters.cc timer.h - $(CXX) $(CXXFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ ompnumthreads.o: ompnumthreads.cc ompnumthreads.h - $(CXX) -I. $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) $(OMPFLAGS) @@ -174,27 +169,14 @@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) gensym: $(SYMMETRY) configs.inc $(LIBS) $(FC) -o gensym $(SYMMETRY) -L$(LIBDIR) $(LINKLIBS) $(LDFLAGS) -###ifeq (,$(wildcard fbridge.inc)) # Pointless: fbridge.inc always exists as this is the cudacpp-modified makefile! 
-###$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat -### cd ../../Source/MODEL; make -### -###$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat -### cd ../../Source; make -### -###$(LIBDIR)libpdf.$(libext): -### cd ../../Source/PDF; make -### -###$(LIBDIR)libgammaUPC.$(libext): -### cd ../../Source/PDF/gammaUPC; make -###endif # Add source so that the compiler finds the DiscreteSampler module. $(MATRIX): %.o: %.f - $(FC) $(FFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC -%.o: %.f - $(FC) $(FFLAGS) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC + $(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC +%.o $(CUDACPP_BUILDDIR)/%.o: %.f + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -I../../Source/ -I../../Source/PDF/gammaUPC -c $< -o $@ %_cudacpp.o: %.f - $(FC) $(FFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ # Dependencies @@ -215,60 +197,42 @@ unwgt.o: genps.inc nexternal.inc symswap.inc cluster.inc run.inc message.inc \ initcluster.o: message.inc # Extra dependencies on discretesampler.mod +../../Source/discretesampler.mod: ../../Source/DiscreteSampler.f -auto_dsig.o: .libs -driver.o: .libs -driver_cudacpp.o: .libs -$(MATRIX): .libs -genps.o: .libs +auto_dsig.o: ../../Source/discretesampler.mod +driver.o: ../../Source/discretesampler.mod +driver_cudacpp.o: ../../Source/discretesampler.mod +$(MATRIX): ../../Source/discretesampler.mod +genps.o: ../../Source/discretesampler.mod # Cudacpp avxall targets -UNAME_P := $(shell uname -p) ifeq ($(UNAME_P),ppc64le) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else ifeq ($(UNAME_P),arm) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else -avxall: avxnone avxsse4 avxavx2 avx512y avx512z +cppall: cppnative cppnone cppsse4 cppavx2 cppavx512y cppavx512z endif -avxnone: $(PROG)_fortran $(DSIG_cudacpp) - @echo 
- $(MAKE) USEBUILDDIR=1 AVX=none - -avxsse4: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 - -avxavx2: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 - -avx512y: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y - -avx512z: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z - -###endif - -# Clean (NB: 'make clean' in Source calls 'make clean' in all P*) +ALL: cppall cuda -clean: # Clean builds: fortran in this Pn; cudacpp executables for one AVX in this Pn - $(RM) *.o gensym $(PROG) $(PROG)_fortran $(PROG)_forhel $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(CUDACPP_BUILDDIR)/$(PROG)_cuda +# Clean all architecture-specific builds: +clean: + $(RM) *.o gensym $(PROG) $(PROG)_* + $(RM) -rf build.*/*{.o,.so,.exe,.dylib,madevent_*} + @for dir in build.*; do if [ -z "$$(ls -A $${dir})" ]; then rm -r $${dir}; else echo "Not cleaning $${dir}; not empty"; fi; done -cleanavxs: clean # Clean builds: fortran in this Pn; cudacpp for all AVX in this Pn and in src - $(MAKE) -f $(CUDACPP_MAKEFILE) cleanall - rm -f $(CUDACPP_BUILDDIR)/.cudacpplibs - rm -f .libs +cleanall: cleansrc + for PROCESS in ../P[0-9]*; do $(MAKE) -C $${PROCESS} clean; done -cleanall: # Clean builds: fortran in all P* and in Source; cudacpp for all AVX in all P* and in src - make -C ../../Source cleanall - rm -rf $(LIBDIR)libbias.$(libext) - rm -f ../../Source/*.mod ../../Source/*/*.mod +# Clean one architecture-specific build +clean%: + $(RM) -r build.$*_* -distclean: cleanall # Clean all fortran and cudacpp builds as well as the googletest installation - $(MAKE) -f $(CUDACPP_MAKEFILE) distclean +# Clean common source directories (interferes with other P*) +cleansrc: + make -C ../../Source clean + $(RM) -f $(SOURCEDIR_GUARD) ../../Source/{*.mod,.lock} ../../Source/*/*.mod + $(RM) -r $(LIBDIR)libbias.$(libext) + if [ -d ../../src ]; then $(MAKE) -C ../../src -f cudacpp_src.mk clean; fi diff --git 
a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/runTest.cc b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/runTest.cc index d4a760a71b..6c77775fb2 100644 --- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/runTest.cc +++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/runTest.cc @@ -243,18 +243,20 @@ struct CUDATest : public CUDA_CPU_TestBase // Use two levels of macros to force stringification at the right level // (see https://gcc.gnu.org/onlinedocs/gcc-3.0.1/cpp_3.html#SEC17 and https://stackoverflow.com/a/3419392) // Google macro is in https://github.com/google/googletest/blob/master/googletest/include/gtest/gtest-param-test.h +/* clang-format off */ #define TESTID_CPU( s ) s##_CPU #define XTESTID_CPU( s ) TESTID_CPU( s ) #define MG_INSTANTIATE_TEST_SUITE_CPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); #define TESTID_GPU( s ) s##_GPU #define XTESTID_GPU( s ) TESTID_GPU( s ) #define MG_INSTANTIATE_TEST_SUITE_GPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); +/* clang-format on */ #ifdef __CUDACC__ MG_INSTANTIATE_TEST_SUITE_GPU( XTESTID_GPU( MG_EPOCH_PROCESS_ID ), MadgraphTest ); diff --git a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/testxxx.cc b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/testxxx.cc index 3361fe5aa9..1d315f6d75 100644 --- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/testxxx.cc +++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/testxxx.cc @@ -40,7 +40,7 @@ namespace mg5amcCpu { std::string FPEhandlerMessage = "unknown"; int FPEhandlerIevt = -1; - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { 
#ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU): '" << FPEhandlerMessage << "' ievt=" << FPEhandlerIevt << std::endl; @@ -71,11 +71,10 @@ TEST( XTESTID( MG_EPOCH_PROCESS_ID ), testxxx ) constexpr bool testEvents = !dumpEvents; // run the test? constexpr fptype toleranceXXXs = std::is_same::value ? 1.E-15 : 1.E-5; // Constant parameters - constexpr int neppM = MemoryAccessMomenta::neppM; // AOSOA layout constexpr int np4 = CPPProcess::np4; - const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') - assert( nevt % neppM == 0 ); // nevt must be a multiple of neppM - assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV + const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') + assert( nevt % MemoryAccessMomenta::neppM == 0 ); // nevt must be a multiple of neppM + assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV // Fill in the input momenta #ifdef __CUDACC__ mg5amcGpu::PinnedHostBufferMomenta hstMomenta( nevt ); // AOSOA[npagM][npar=4][np4=4][neppM] diff --git a/epochX/cudacpp/gg_ttgg.mad/bin/internal/banner.py b/epochX/cudacpp/gg_ttgg.mad/bin/internal/banner.py index bd1517985f..b408679c2f 100755 --- a/epochX/cudacpp/gg_ttgg.mad/bin/internal/banner.py +++ b/epochX/cudacpp/gg_ttgg.mad/bin/internal/banner.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. 
# # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,7 +53,7 @@ MADEVENT = False import madgraph.various.misc as misc import madgraph.iolibs.file_writers as file_writers - import madgraph.iolibs.files as files + import madgraph.iolibs.files as files import models.check_param_card as param_card_reader from madgraph import MG5DIR, MadGraph5Error, InvalidCmd @@ -80,36 +80,36 @@ class Banner(dict): 'mgproccard': 'MGProcCard', 'mgruncard': 'MGRunCard', 'ma5card_parton' : 'MA5Card_parton', - 'ma5card_hadron' : 'MA5Card_hadron', + 'ma5card_hadron' : 'MA5Card_hadron', 'mggenerationinfo': 'MGGenerationInfo', 'mgpythiacard': 'MGPythiaCard', 'mgpgscard': 'MGPGSCard', 'mgdelphescard': 'MGDelphesCard', 'mgdelphestrigger': 'MGDelphesTrigger', 'mgshowercard': 'MGShowerCard' } - + forbid_cdata = ['initrwgt'] - + def __init__(self, banner_path=None): """ """ if isinstance(banner_path, Banner): dict.__init__(self, banner_path) self.lhe_version = banner_path.lhe_version - return + return else: dict.__init__(self) - + #Look at the version if MADEVENT: self['mgversion'] = '#%s\n' % open(pjoin(MEDIR, 'MGMEVersion.txt')).read() else: info = misc.get_pkg_info() self['mgversion'] = info['version']+'\n' - + self.lhe_version = None - + if banner_path: self.read_banner(banner_path) @@ -123,7 +123,7 @@ def __init__(self, banner_path=None): 'mgruncard':'run_card.dat', 'mgpythiacard':'pythia_card.dat', 'mgpgscard' : 'pgs_card.dat', - 'mgdelphescard':'delphes_card.dat', + 'mgdelphescard':'delphes_card.dat', 'mgdelphestrigger':'delphes_trigger.dat', 'mg5proccard':'proc_card_mg5.dat', 'mgproccard': 'proc_card.dat', @@ -137,10 +137,10 @@ def __init__(self, banner_path=None): 'mgshowercard':'shower_card.dat', 'pythia8':'pythia8_card.dat', 'ma5card_parton':'madanalysis5_parton_card.dat', - 'ma5card_hadron':'madanalysis5_hadron_card.dat', + 'ma5card_hadron':'madanalysis5_hadron_card.dat', 'run_settings':'' } - + def read_banner(self, input_path): """read a banner""" 
@@ -151,7 +151,7 @@ def read_banner(self, input_path): def split_iter(string): return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL)) input_path = split_iter(input_path) - + text = '' store = False for line in input_path: @@ -170,13 +170,13 @@ def split_iter(string): text += line else: text += '%s%s' % (line, '\n') - - #reaching end of the banner in a event file avoid to read full file + + #reaching end of the banner in a event file avoid to read full file if "
" in line: break elif "" in line: break - + def __getattribute__(self, attr): """allow auto-build for the run_card/param_card/... """ try: @@ -187,23 +187,23 @@ def __getattribute__(self, attr): return self.charge_card(attr) - + def change_lhe_version(self, version): """change the lhe version associate to the banner""" - + version = float(version) if version < 3: version = 1 elif version > 3: raise Exception("Not Supported version") self.lhe_version = version - + def get_cross(self, witherror=False): """return the cross-section of the file""" if "init" not in self: raise Exception - + text = self["init"].split('\n') cross = 0 error = 0 @@ -217,13 +217,13 @@ def get_cross(self, witherror=False): return cross else: return cross, math.sqrt(error) - + def scale_init_cross(self, ratio): """modify the init information with the associate scale""" assert "init" in self - + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -231,29 +231,29 @@ def scale_init_cross(self, ratio): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break pid = int(pid) - + line = " %+13.7e %+13.7e %+13.7e %i" % \ (ratio*float(xsec), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + def get_pdg_beam(self): """return the pdg of each beam""" - + assert "init" in self - + all_lines = self["init"].split('\n') pdg1,pdg2,_ = all_lines[0].split(None, 2) return int(pdg1), int(pdg2) - + def load_basic(self, medir): """ Load the proc_card /param_card and run_card """ - + self.add(pjoin(medir,'Cards', 'param_card.dat')) self.add(pjoin(medir,'Cards', 'run_card.dat')) if os.path.exists(pjoin(medir, 'SubProcesses', 'procdef_mg5.dat')): @@ -261,29 +261,29 @@ def load_basic(self, medir): self.add(pjoin(medir,'Cards', 'proc_card_mg5.dat')) else: self.add(pjoin(medir,'Cards', 'proc_card.dat')) - + def change_seed(self, 
seed): """Change the seed value in the banner""" # 0 = iseed p = re.compile(r'''^\s*\d+\s*=\s*iseed''', re.M) new_seed_str = " %s = iseed" % seed self['mgruncard'] = p.sub(new_seed_str, self['mgruncard']) - + def add_generation_info(self, cross, nb_event): """add info on MGGeneration""" - + text = """ # Number of Events : %s # Integrated weight (pb) : %s """ % (nb_event, cross) self['MGGenerationInfo'] = text - + ############################################################################ # SPLIT BANNER ############################################################################ def split(self, me_dir, proc_card=True): """write the banner in the Cards directory. - proc_card argument is present to avoid the overwrite of proc_card + proc_card argument is present to avoid the overwrite of proc_card information""" for tag, text in self.items(): @@ -305,37 +305,37 @@ def check_pid(self, pid2label): """special routine removing width/mass of particles not present in the model This is usefull in case of loop model card, when we want to use the non loop model.""" - + if not hasattr(self, 'param_card'): self.charge_card('slha') - + for tag in ['mass', 'decay']: block = self.param_card.get(tag) for data in block: pid = data.lhacode[0] - if pid not in list(pid2label.keys()): + if pid not in list(pid2label.keys()): block.remove((pid,)) def get_lha_strategy(self): """get the lha_strategy: how the weight have to be handle by the shower""" - + if not self["init"]: raise Exception("No init block define") - + data = self["init"].split('\n')[0].split() if len(data) != 10: misc.sprint(len(data), self['init']) raise Exception("init block has a wrong format") return int(float(data[-2])) - + def set_lha_strategy(self, value): """set the lha_strategy: how the weight have to be handle by the shower""" - + if not (-4 <= int(value) <= 4): six.reraise(Exception, "wrong value for lha_strategy", value) if not self["init"]: raise Exception("No init block define") - + all_lines = 
self["init"].split('\n') data = all_lines[0].split() if len(data) != 10: @@ -351,13 +351,13 @@ def modify_init_cross(self, cross, allow_zero=False): assert isinstance(cross, dict) # assert "all" in cross assert "init" in self - + cross = dict(cross) for key in cross.keys(): if isinstance(key, str) and key.isdigit() and int(key) not in cross: cross[int(key)] = cross[key] - - + + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -365,7 +365,7 @@ def modify_init_cross(self, cross, allow_zero=False): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break @@ -383,23 +383,23 @@ def modify_init_cross(self, cross, allow_zero=False): (float(cross[pid]), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + ############################################################################ # WRITE BANNER ############################################################################ def write(self, output_path, close_tag=True, exclude=[]): """write the banner""" - + if isinstance(output_path, str): ff = open(output_path, 'w') else: ff = output_path - + if MADEVENT: header = open(pjoin(MEDIR, 'Source', 'banner_header.txt')).read() else: header = open(pjoin(MG5DIR,'Template', 'LO', 'Source', 'banner_header.txt')).read() - + if not self.lhe_version: self.lhe_version = self.get('run_card', 'lhe_version', default=1.0) if float(self.lhe_version) < 3: @@ -412,7 +412,7 @@ def write(self, output_path, close_tag=True, exclude=[]): for tag in [t for t in self.ordered_items if t in list(self.keys())]+ \ [t for t in self.keys() if t not in self.ordered_items]: - if tag in ['init'] or tag in exclude: + if tag in ['init'] or tag in exclude: continue capitalized_tag = self.capitalized_items[tag] if tag in self.capitalized_items else tag start_data, stop_data = '', '' @@ -422,19 +422,19 @@ def write(self, 
output_path, close_tag=True, exclude=[]): stop_data = ']]>\n' out = '<%(tag)s>%(start_data)s\n%(text)s\n%(stop_data)s\n' % \ {'tag':capitalized_tag, 'text':self[tag].strip(), - 'start_data': start_data, 'stop_data':stop_data} + 'start_data': start_data, 'stop_data':stop_data} try: ff.write(out) except: ff.write(out.encode('utf-8')) - - + + if not '/header' in exclude: out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) if 'init' in self and not 'init' in exclude: text = self['init'] @@ -444,22 +444,22 @@ def write(self, output_path, close_tag=True, exclude=[]): ff.write(out) except: ff.write(out.encode('utf-8')) - + if close_tag: - out = '\n' + out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) return ff - - + + ############################################################################ # BANNER ############################################################################ def add(self, path, tag=None): """Add the content of the file to the banner""" - + if not tag: card_name = os.path.basename(path) if 'param_card' in card_name: @@ -505,33 +505,33 @@ def add_text(self, tag, text): if tag == 'param_card': tag = 'slha' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' - + self[tag.lower()] = text - - + + def charge_card(self, tag): """Build the python object associated to the card""" - + if tag in ['param_card', 'param']: tag = 'slha' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag - + if tag == 
'slha': param_card = self[tag].split('\n') self.param_card = param_card_reader.ParamCard(param_card) @@ -544,56 +544,56 @@ def charge_card(self, tag): self.proc_card = ProcCard(proc_card) return self.proc_card elif tag =='mgshowercard': - shower_content = self[tag] + shower_content = self[tag] if MADEVENT: import internal.shower_card as shower_card else: import madgraph.various.shower_card as shower_card self.shower_card = shower_card.ShowerCard(shower_content, True) - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.shower_card.testing = False return self.shower_card elif tag =='foanalyse': - analyse_content = self[tag] + analyse_content = self[tag] if MADEVENT: import internal.FO_analyse_card as FO_analyse_card else: import madgraph.various.FO_analyse_card as FO_analyse_card - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True) self.FOanalyse_card.testing = False return self.FOanalyse_card - + def get_detail(self, tag, *arg, **opt): """return a specific """ - + if tag in ['param_card', 'param']: tag = 'slha' attr_tag = 'param_card' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], '%s not recognized' % tag - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) + 
self.charge_card(attr_tag) card = getattr(self, attr_tag) if len(arg) == 0: @@ -613,7 +613,7 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 2 and tag == 'slha': try: return card[arg[0]].get(arg[1:]) @@ -621,15 +621,15 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 0: return card else: raise Exception("Unknow command") - + #convenient alias get = get_detail - + def set(self, tag, *args): """modify one of the cards""" @@ -637,27 +637,27 @@ def set(self, tag, *args): tag = 'slha' attr_tag = 'param_card' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized' - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) - + self.charge_card(attr_tag) + card = getattr(self, attr_tag) if len(args) ==2: if tag == 'mg5proccard': @@ -666,20 +666,20 @@ def set(self, tag, *args): card[args[0]] = args[1] else: card[args[:-1]] = args[-1] - - + + @misc.multiple_try() def add_to_file(self, path, seed=None, out=None): """Add the banner to a file and change the associate seed in the banner""" if seed is not None: self.set("run_card", "iseed", seed) - + if not out: path_out = "%s.tmp" % path else: path_out = out - + ff = self.write(path_out, close_tag=False, exclude=['MGGenerationInfo', '/header', 'init']) ff.write("## END BANNER##\n") @@ -698,44 +698,44 @@ def add_to_file(self, path, seed=None, out=None): files.mv(path_out, path) - + def 
split_banner(banner_path, me_dir, proc_card=True): """a simple way to split a banner""" - + banner = Banner(banner_path) banner.split(me_dir, proc_card) - + def recover_banner(results_object, level, run=None, tag=None): """as input we receive a gen_crossxhtml.AllResults object. This define the current banner and load it """ - + if not run: - try: - _run = results_object.current['run_name'] - _tag = results_object.current['tag'] + try: + _run = results_object.current['run_name'] + _tag = results_object.current['tag'] except Exception: return Banner() else: _run = run if not tag: - try: - _tag = results_object[run].tags[-1] + try: + _tag = results_object[run].tags[-1] except Exception as error: if os.path.exists( pjoin(results_object.path,'Events','%s_banner.txt' % (run))): tag = None else: - return Banner() + return Banner() else: _tag = tag - - path = results_object.path - if tag: + + path = results_object.path + if tag: banner_path = pjoin(path,'Events',run,'%s_%s_banner.txt' % (run, tag)) else: banner_path = pjoin(results_object.path,'Events','%s_banner.txt' % (run)) - + if not os.path.exists(banner_path): if level != "parton" and tag != _tag: return recover_banner(results_object, level, _run, results_object[_run].tags[0]) @@ -754,12 +754,12 @@ def recover_banner(results_object, level, run=None, tag=None): return Banner(lhe.banner) # security if the banner was remove (or program canceled before created it) - return Banner() - + return Banner() + banner = Banner(banner_path) - - - + + + if level == 'pythia': if 'mgpythiacard' in banner: del banner['mgpythiacard'] @@ -768,13 +768,13 @@ def recover_banner(results_object, level, run=None, tag=None): if tag in banner: del banner[tag] return banner - + class InvalidRunCard(InvalidCmd): pass class ProcCard(list): """Basic Proccard object""" - + history_header = \ '#************************************************************\n' + \ '#* MadGraph5_aMC@NLO *\n' + \ @@ -798,10 +798,10 @@ class ProcCard(list): '#* run as 
./bin/mg5_aMC filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - - - - + + + + def __init__(self, init=None): """ initialize a basic proc_card""" self.info = {'model': 'sm', 'generate':None, @@ -810,13 +810,13 @@ def __init__(self, init=None): if init: self.read(init) - + def read(self, init): """read the proc_card and save the information""" - + if isinstance(init, str): #path to file init = open(init, 'r') - + store_line = '' for line in init: line = line.rstrip() @@ -828,28 +828,28 @@ def read(self, init): store_line = "" if store_line: raise Exception("WRONG CARD FORMAT") - - + + def move_to_last(self, cmd): """move an element to the last history.""" for line in self[:]: if line.startswith(cmd): self.remove(line) list.append(self, line) - + def append(self, line): """"add a line in the proc_card perform automatically cleaning""" - + line = line.strip() cmds = line.split() if len(cmds) == 0: return - + list.append(self, line) - + # command type: cmd = cmds[0] - + if cmd == 'output': # Remove previous outputs from history self.clean(allow_for_removal = ['output'], keep_switch=True, @@ -875,7 +875,7 @@ def append(self, line): elif cmds[1] == 'proc_v4': #full cleaning self[:] = [] - + def clean(self, to_keep=['set','add','load'], remove_bef_last=None, @@ -884,13 +884,13 @@ def clean(self, to_keep=['set','add','load'], keep_switch=False): """Remove command in arguments from history. All command before the last occurrence of 'remove_bef_last' - (including it) will be removed (but if another options tells the opposite). + (including it) will be removed (but if another options tells the opposite). 'to_keep' is a set of line to always keep. - 'to_remove' is a set of line to always remove (don't care about remove_bef_ + 'to_remove' is a set of line to always remove (don't care about remove_bef_ status but keep_switch acts.). 
- if 'allow_for_removal' is define only the command in that list can be + if 'allow_for_removal' is define only the command in that list can be remove of the history for older command that remove_bef_lb1. all parameter - present in to_remove are always remove even if they are not part of this + present in to_remove are always remove even if they are not part of this list. keep_switch force to keep the statement remove_bef_??? which changes starts the removal mode. @@ -900,8 +900,8 @@ def clean(self, to_keep=['set','add','load'], if __debug__ and allow_for_removal: for arg in to_keep: assert arg not in allow_for_removal - - + + nline = -1 removal = False #looping backward @@ -912,7 +912,7 @@ def clean(self, to_keep=['set','add','load'], if not removal and remove_bef_last: if self[nline].startswith(remove_bef_last): removal = True - switch = True + switch = True # if this is the switch and is protected pass to the next element if switch and keep_switch: @@ -923,12 +923,12 @@ def clean(self, to_keep=['set','add','load'], if any([self[nline].startswith(arg) for arg in to_remove]): self.pop(nline) continue - + # Only if removal mode is active! 
if removal: if allow_for_removal: # Only a subset of command can be removed - if any([self[nline].startswith(arg) + if any([self[nline].startswith(arg) for arg in allow_for_removal]): self.pop(nline) continue @@ -936,10 +936,10 @@ def clean(self, to_keep=['set','add','load'], # All command have to be remove but protected self.pop(nline) continue - + # update the counter to pass to the next element nline -= 1 - + def get(self, tag, default=None): if isinstance(tag, int): list.__getattr__(self, tag) @@ -954,32 +954,32 @@ def get(self, tag, default=None): except ValueError: name, content = line[7:].split(None,1) out.append((name, content)) - return out + return out else: return self.info[tag] - + def write(self, path): """write the proc_card to a given path""" - + fsock = open(path, 'w') fsock.write(self.history_header) for line in self: while len(line) > 70: - sub, line = line[:70]+"\\" , line[70:] + sub, line = line[:70]+"\\" , line[70:] fsock.write(sub+"\n") else: fsock.write(line+"\n") - -class InvalidCardEdition(InvalidCmd): pass - + +class InvalidCardEdition(InvalidCmd): pass + class ConfigFile(dict): """ a class for storing/dealing with input file. - """ + """ def __init__(self, finput=None, **opt): """initialize a new instance. input can be an instance of MadLoopParam, - a file, a path to a file, or simply Nothing""" - + a file, a path to a file, or simply Nothing""" + if isinstance(finput, self.__class__): dict.__init__(self) for key in finput.__dict__: @@ -989,7 +989,7 @@ def __init__(self, finput=None, **opt): return else: dict.__init__(self) - + # Initialize it with all the default value self.user_set = set() self.auto_set = set() @@ -1000,15 +1000,15 @@ def __init__(self, finput=None, **opt): self.comments = {} # comment associated to parameters. can be display via help message # store the valid options for a given parameter. 
self.allowed_value = {} - + self.default_setup() self.plugin_input(finput) - + # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, **opt) - + @@ -1028,7 +1028,7 @@ def __add__(self, other): base = self.__class__(self) #base = copy.copy(self) base.update((key.lower(),value) for key, value in other.items()) - + return base def __radd__(self, other): @@ -1036,26 +1036,26 @@ def __radd__(self, other): new = copy.copy(other) new.update((key, value) for key, value in self.items()) return new - + def __contains__(self, key): return dict.__contains__(self, key.lower()) def __iter__(self): - + for name in super(ConfigFile, self).__iter__(): yield self.lower_to_case[name.lower()] - - + + #iter = super(ConfigFile, self).__iter__() #misc.sprint(iter) #return (self.lower_to_case[name] for name in iter) - + def keys(self): return [name for name in self] - + def items(self): return [(name,self[name]) for name in self] - + @staticmethod def warn(text, level, raiseerror=False): """convenient proxy to raiseerror/print warning""" @@ -1071,11 +1071,11 @@ def warn(text, level, raiseerror=False): log = lambda t: logger.log(level, t) elif level: log = level - + return log(text) def post_set(self, name, value, change_userdefine, raiseerror): - + if value is None: value = self[name] @@ -1087,25 +1087,25 @@ def post_set(self, name, value, change_userdefine, raiseerror): return getattr(self, 'post_set_%s' % name)(value, change_userdefine, raiseerror) else: raise - + def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): """set the attribute and set correctly the type if the value is a string. change_userdefine on True if we have to add the parameter in user_set """ - + if not len(self): #Should never happen but when deepcopy/pickle self.__init__() - + name = name.strip() - lower_name = name.lower() - + lower_name = name.lower() + # 0. 
check if this parameter is a system only one if change_userdefine and lower_name in self.system_only: text='%s is a private entry which can not be modify by the user. Keep value at %s' % (name,self[name]) self.warn(text, 'critical', raiseerror) return - + #1. check if the parameter is set to auto -> pass it to special if lower_name in self: targettype = type(dict.__getitem__(self, lower_name)) @@ -1115,22 +1115,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): self.user_set.remove(lower_name) #keep old value. self.post_set(lower_name, 'auto', change_userdefine, raiseerror) - return + return elif lower_name in self.auto_set: self.auto_set.remove(lower_name) - + # 2. Find the type of the attribute that we want if lower_name in self.list_parameter: targettype = self.list_parameter[lower_name] - - - + + + if isinstance(value, str): # split for each comma/space value = value.strip() if value.startswith('[') and value.endswith(']'): value = value[1:-1] - #do not perform split within a " or ' block + #do not perform split within a " or ' block data = re.split(r"((? bad input dropped.append(val) - + if not new_values: text= "value '%s' for entry '%s' is not valid. Preserving previous value: '%s'.\n" \ % (value, name, self[lower_name]) text += "allowed values are any list composed of the following entries: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) - return self.warn(text, 'warning', raiseerror) - elif dropped: + return self.warn(text, 'warning', raiseerror) + elif dropped: text = "some value for entry '%s' are not valid. 
Invalid items are: '%s'.\n" \ % (name, dropped) text += "value will be set to %s" % new_values - text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) + text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) self.warn(text, 'warning') values = new_values # make the assignment - dict.__setitem__(self, lower_name, values) + dict.__setitem__(self, lower_name, values) if change_userdefine: self.user_set.add(lower_name) #check for specific action - return self.post_set(lower_name, None, change_userdefine, raiseerror) + return self.post_set(lower_name, None, change_userdefine, raiseerror) elif lower_name in self.dict_parameter: - targettype = self.dict_parameter[lower_name] + targettype = self.dict_parameter[lower_name] full_reset = True #check if we just update the current dict or not - + if isinstance(value, str): value = value.strip() # allowed entry: @@ -1209,7 +1209,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): # name , value => just add the entry # name value => just add the entry # {name1:value1, name2:value2} => full reset - + # split for each comma/space if value.startswith('{') and value.endswith('}'): new_value = {} @@ -1219,23 +1219,23 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): x, y = pair.split(':') x, y = x.strip(), y.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] new_value[x] = y value = new_value elif ',' in value: x,y = value.split(',') value = {x.strip():y.strip()} full_reset = False - + elif ':' in value: x,y = value.split(':') value = {x.strip():y.strip()} - full_reset = False + full_reset = False else: x,y = value.split() value = {x:y} - full_reset = False - + full_reset = False + if isinstance(value, dict): for key in value: value[key] = self.format_variable(value[key], targettype, name=name) @@ -1248,7 +1248,7 @@ def __setitem__(self, 
name, value, change_userdefine=False,raiseerror=False): if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - elif name in self: + elif name in self: targettype = type(self[name]) else: logger.debug('Trying to add argument %s in %s. ' % (name, self.__class__.__name__) +\ @@ -1256,22 +1256,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): suggestions = [k for k in self.keys() if k.startswith(name[0].lower())] if len(suggestions)>0: logger.debug("Did you mean one of the following: %s"%suggestions) - self.add_param(lower_name, self.format_variable(UnknownType(value), + self.add_param(lower_name, self.format_variable(UnknownType(value), UnknownType, name)) self.lower_to_case[lower_name] = name if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - + value = self.format_variable(value, targettype, name=name) #check that the value is allowed: if lower_name in self.allowed_value and '*' not in self.allowed_value[lower_name]: valid = False allowed = self.allowed_value[lower_name] - + # check if the current value is allowed or not (set valid to True) if value in allowed: - valid=True + valid=True elif isinstance(value, str): value = value.lower().strip() allowed = [str(v).lower() for v in allowed] @@ -1279,7 +1279,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): i = allowed.index(value) value = self.allowed_value[lower_name][i] valid=True - + if not valid: # act if not valid: text = "value '%s' for entry '%s' is not valid. 
Preserving previous value: '%s'.\n" \ @@ -1303,7 +1303,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, if __debug__: if lower_name in self: raise Exception("Duplicate case for %s in %s" % (name,self.__class__)) - + dict.__setitem__(self, lower_name, value) self.lower_to_case[lower_name] = name if isinstance(value, list): @@ -1318,12 +1318,12 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, elif isinstance(value, dict): allvalues = list(value.values()) if any([type(allvalues[0]) != type(v) for v in allvalues]): - raise Exception("All entry should have the same type") - self.dict_parameter[lower_name] = type(allvalues[0]) + raise Exception("All entry should have the same type") + self.dict_parameter[lower_name] = type(allvalues[0]) if '__type__' in value: del value['__type__'] dict.__setitem__(self, lower_name, value) - + if allowed and allowed != ['*']: self.allowed_value[lower_name] = allowed if lower_name in self.list_parameter: @@ -1333,8 +1333,8 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, assert value in allowed or '*' in allowed #elif isinstance(value, bool) and allowed != ['*']: # self.allowed_value[name] = [True, False] - - + + if system: self.system_only.add(lower_name) if comment: @@ -1342,7 +1342,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, def do_help(self, name): """return a minimal help for the parameter""" - + out = "## Information on parameter %s from class %s\n" % (name, self.__class__.__name__) if name.lower() in self: out += "## current value: %s (parameter should be of type %s)\n" % (self[name], type(self[name])) @@ -1351,7 +1351,7 @@ def do_help(self, name): else: out += "## Unknown for this class\n" if name.lower() in self.user_set: - out += "## This value is considered as being set by the user\n" + out += "## This value is considered as being set by the user\n" else: out += "## This value is considered as 
being set by the system\n" if name.lower() in self.allowed_value: @@ -1359,17 +1359,17 @@ def do_help(self, name): out += "Allowed value are: %s\n" % ','.join([str(p) for p in self.allowed_value[name.lower()]]) else: out += "Suggested value are : %s\n " % ','.join([str(p) for p in self.allowed_value[name.lower()] if p!='*']) - + logger.info(out) return out @staticmethod def guess_type_from_value(value): "try to guess the type of the string --do not use eval as it might not be safe" - + if not isinstance(value, str): return str(value.__class__.__name__) - + #use ast.literal_eval to be safe since value is untrusted # add a timeout to mitigate infinite loop, memory stack attack with misc.stdchannel_redirected(sys.stdout, os.devnull): @@ -1388,7 +1388,7 @@ def guess_type_from_value(value): @staticmethod def format_variable(value, targettype, name="unknown"): """assign the value to the attribute for the given format""" - + if isinstance(targettype, str): if targettype in ['str', 'int', 'float', 'bool']: targettype = eval(targettype) @@ -1412,7 +1412,7 @@ def format_variable(value, targettype, name="unknown"): (name, type(value), targettype, value)) else: raise InvalidCmd("Wrong input type for %s found %s and expecting %s for value %s" %\ - (name, type(value), targettype, value)) + (name, type(value), targettype, value)) else: if targettype != UnknownType: value = value.strip() @@ -1441,8 +1441,8 @@ def format_variable(value, targettype, name="unknown"): value = int(value) elif value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} - value =int(value[:-1]) * convert[value[-1]] - elif '/' in value or '*' in value: + value =int(value[:-1]) * convert[value[-1]] + elif '/' in value or '*' in value: try: split = re.split('(\*|/)',value) v = float(split[0]) @@ -1461,7 +1461,7 @@ def format_variable(value, targettype, name="unknown"): try: value = float(value.replace('d','e')) except ValueError: - raise InvalidCmd("%s can not be mapped to an 
integer" % value) + raise InvalidCmd("%s can not be mapped to an integer" % value) try: new_value = int(value) except ValueError: @@ -1471,7 +1471,7 @@ def format_variable(value, targettype, name="unknown"): value = new_value else: raise InvalidCmd("incorect input: %s need an integer for %s" % (value,name)) - + elif targettype == float: if value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} @@ -1496,33 +1496,33 @@ def format_variable(value, targettype, name="unknown"): value = v else: raise InvalidCmd("type %s is not handle by the card" % targettype) - + return value - - + + def __getitem__(self, name): - + lower_name = name.lower() if __debug__: if lower_name not in self: if lower_name in [key.lower() for key in self] : raise Exception("Some key are not lower case %s. Invalid use of the class!"\ % [key for key in self if key.lower() != key]) - + if lower_name in self.auto_set: return 'auto' - + return dict.__getitem__(self, name.lower()) - + get = __getitem__ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): """convenient way to change attribute. changeifuserset=False means that the value is NOT change is the value is not on default. - user=True, means that the value will be marked as modified by the user - (potentially preventing future change to the value) + user=True, means that the value will be marked as modified by the user + (potentially preventing future change to the value) """ # changeifuserset=False -> we need to check if the user force a value. 
@@ -1530,8 +1530,8 @@ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): if name.lower() in self.user_set: #value modified by the user -> do nothing return - self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) - + self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) + class RivetCard(ConfigFile): @@ -1706,7 +1706,7 @@ def setRelevantParamCard(self, f_params, f_relparams): yexec_dict = {} yexec_line = exec_line + "yaxis_relvar = " + self['yaxis_relvar'] exec(yexec_line, locals(), yexec_dict) - if self['yaxis_label'] == "": + if self['yaxis_label'] == "": self['yaxis_label'] = "yaxis_relvar" f_relparams.write("{0} = {1}\n".format(self['yaxis_label'], yexec_dict['yaxis_relvar'])) else: @@ -1715,11 +1715,11 @@ def setRelevantParamCard(self, f_params, f_relparams): class ProcCharacteristic(ConfigFile): """A class to handle information which are passed from MadGraph to the madevent - interface.""" - + interface.""" + def default_setup(self): """initialize the directory to the default value""" - + self.add_param('loop_induced', False) self.add_param('has_isr', False) self.add_param('has_fsr', False) @@ -1735,16 +1735,16 @@ def default_setup(self): self.add_param('pdg_initial1', [0]) self.add_param('pdg_initial2', [0]) self.add_param('splitting_types',[], typelist=str) - self.add_param('perturbation_order', [], typelist=str) - self.add_param('limitations', [], typelist=str) - self.add_param('hel_recycling', False) + self.add_param('perturbation_order', [], typelist=str) + self.add_param('limitations', [], typelist=str) + self.add_param('hel_recycling', False) self.add_param('single_color', True) - self.add_param('nlo_mixed_expansion', True) + self.add_param('nlo_mixed_expansion', True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if 
isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1752,49 +1752,49 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: if '#' in line: line = line.split('#',1)[0] if not line: continue - + if '=' in line: key, value = line.split('=',1) self[key.strip()] = value - + def write(self, outputpath): """write the file""" template ="# Information about the process #\n" template +="#########################################\n" - + fsock = open(outputpath, 'w') fsock.write(template) - + for key, value in self.items(): fsock.write(" %s = %s \n" % (key, value)) - - fsock.close() - + + fsock.close() + class GridpackCard(ConfigFile): """an object for the GridpackCard""" - + def default_setup(self): """default value for the GridpackCard""" - + self.add_param("GridRun", True) self.add_param("gevents", 2500) self.add_param("gseed", 1) - self.add_param("ngran", -1) - + self.add_param("ngran", -1) + def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1802,7 +1802,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -1812,19 +1812,19 @@ def read(self, finput): self[line[1].strip()] = line[0].replace('\'','').strip() def write(self, output_file, template=None): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'grid_card_default.dat') else: template = pjoin(MEDIR, 'Cards', 'grid_card_default.dat') - + text = 
"" - for line in open(template,'r'): + for line in open(template,'r'): nline = line.split('#')[0] nline = nline.split('!')[0] comment = line[len(nline):] @@ -1832,19 +1832,19 @@ def write(self, output_file, template=None): if len(nline) != 2: text += line elif nline[1].strip() in self: - text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) + text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) else: logger.info('Adding missing parameter %s to current run_card (with default value)' % nline[1].strip()) - text += line - + text += line + if isinstance(output_file, str): fsock = open(output_file,'w') else: fsock = output_file - + fsock.write(text) fsock.close() - + class PY8Card(ConfigFile): """ Implements the Pythia8 card.""" @@ -1868,7 +1868,7 @@ def add_default_subruns(self, type): def default_setup(self): """ Sets up the list of available PY8 parameters.""" - + # Visible parameters # ================== self.add_param("Main:numberOfEvents", -1) @@ -1877,11 +1877,11 @@ def default_setup(self): self.add_param("JetMatching:qCut", -1.0, always_write_to_card=False) self.add_param("JetMatching:doShowerKt",False,always_write_to_card=False) # -1 means that it is automatically set. - self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) + self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) # for CKKWL merging self.add_param("Merging:TMS", -1.0, always_write_to_card=False) self.add_param("Merging:Process", '', always_write_to_card=False) - # -1 means that it is automatically set. + # -1 means that it is automatically set. self.add_param("Merging:nJetMax", -1, always_write_to_card=False) # for both merging, chose whether to also consider different merging # scale values for the extra weights related to scale and PDF variations. 
@@ -1918,10 +1918,10 @@ def default_setup(self): comment='This allows to turn on/off hadronization alltogether.') self.add_param("partonlevel:mpi", True, hidden=True, always_write_to_card=False, comment='This allows to turn on/off MPI alltogether.') - self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, + self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, always_write_to_card=False, comment='This parameter is automatically set to True by MG5aMC when doing MLM merging with PY8.') - + # for MLM merging self.add_param("JetMatching:merge", False, hidden=True, always_write_to_card=False, comment='Specifiy if we are merging sample of different multiplicity.') @@ -1931,9 +1931,9 @@ def default_setup(self): comment='Value of the merging scale below which one does not even write the HepMC event.') self.add_param("JetMatching:doVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. in SysCalc).') - self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) + self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) self.add_param("JetMatching:setMad", False, hidden=True, always_write_to_card=False, - comment='Specify one must read inputs from the MadGraph banner.') + comment='Specify one must read inputs from the MadGraph banner.') self.add_param("JetMatching:coneRadius", 1.0, hidden=True, always_write_to_card=False) self.add_param("JetMatching:nQmatch",4,hidden=True, always_write_to_card=False) # for CKKWL merging (common with UMEPS, UNLOPS) @@ -1946,7 +1946,7 @@ def default_setup(self): self.add_param("Merging:applyVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. 
in SysCalc).') self.add_param("Merging:includeWeightInXsection", True, hidden=True, always_write_to_card=False, - comment='If turned off, then the option belows forces PY8 to keep the original weight.') + comment='If turned off, then the option belows forces PY8 to keep the original weight.') self.add_param("Merging:muRen", 91.188, hidden=True, always_write_to_card=False, comment='Set renormalization scales of the 2->2 process.') self.add_param("Merging:muFacInME", 91.188, hidden=True, always_write_to_card=False, @@ -1958,7 +1958,7 @@ def default_setup(self): # To be added in subruns for CKKWL self.add_param("Merging:mayRemoveDecayProducts", False, hidden=True, always_write_to_card=False) self.add_param("Merging:doKTMerging", False, hidden=True, always_write_to_card=False) - self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) + self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) self.add_param("Merging:doPTLundMerging", False, hidden=True, always_write_to_card=False) # Special Pythia8 paremeters useful to simplify the shower. @@ -1975,33 +1975,33 @@ def default_setup(self): # Add parameters controlling the subruns execution flow. # These parameters should not be part of PY8SubRun daughter. self.add_default_subruns('parameters') - + def __init__(self, *args, **opts): - # Parameters which are not printed in the card unless they are - # 'user_set' or 'system_set' or part of the + # Parameters which are not printed in the card unless they are + # 'user_set' or 'system_set' or part of the # self.hidden_params_to_always_print set. self.hidden_param = [] self.hidden_params_to_always_write = set() self.visible_params_to_always_write = set() # List of parameters that should never be written out given the current context. self.params_to_never_write = set() - + # Parameters which have been set by the system (i.e. 
MG5 itself during # the regular course of the shower interface) self.system_set = set() - + # Add attributes controlling the subruns execution flow. # These attributes should not be part of PY8SubRun daughter. self.add_default_subruns('attributes') - - # Parameters which have been set by the + + # Parameters which have been set by the super(PY8Card, self).__init__(*args, **opts) - def add_param(self, name, value, hidden=False, always_write_to_card=True, + def add_param(self, name, value, hidden=False, always_write_to_card=True, comment=None): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. The option 'hidden' decides whether the parameter should be visible to the user. The option 'always_write_to_card' decides whether it should @@ -2017,7 +2017,7 @@ def add_param(self, name, value, hidden=False, always_write_to_card=True, self.hidden_params_to_always_write.add(name) else: if always_write_to_card: - self.visible_params_to_always_write.add(name) + self.visible_params_to_always_write.add(name) if not comment is None: if not isinstance(comment, str): raise MadGraph5Error("Option 'comment' must be a string, not"+\ @@ -2036,7 +2036,7 @@ def add_subrun(self, py8_subrun): self.subruns[py8_subrun['Main:subrun']] = py8_subrun if not 'LHEFInputs:nSubruns' in self.user_set: self['LHEFInputs:nSubruns'] = max(self.subruns.keys()) - + def userSet(self, name, value, **opts): """Set an attribute of this card, following a user_request""" self.__setitem__(name, value, change_userdefine=True, **opts) @@ -2044,10 +2044,10 @@ def userSet(self, name, value, **opts): self.system_set.remove(name.lower()) def vetoParamWriteOut(self, name): - """ Forbid the writeout of a specific parameter of this card when the + """ Forbid the writeout of a specific parameter of this card when the "write" function will be invoked.""" 
self.params_to_never_write.add(name.lower()) - + def systemSet(self, name, value, **opts): """Set an attribute of this card, independently of a specific user request and only if not already user_set.""" @@ -2058,7 +2058,7 @@ def systemSet(self, name, value, **opts): if force or name.lower() not in self.user_set: self.__setitem__(name, value, change_userdefine=False, **opts) self.system_set.add(name.lower()) - + def MadGraphSet(self, name, value, **opts): """ Sets a card attribute, but only if it is absent or not already user_set.""" @@ -2068,18 +2068,18 @@ def MadGraphSet(self, name, value, **opts): force = False if name.lower() not in self or (force or name.lower() not in self.user_set): self.__setitem__(name, value, change_userdefine=False, **opts) - self.system_set.add(name.lower()) - + self.system_set.add(name.lower()) + def defaultSet(self, name, value, **opts): self.__setitem__(name, value, change_userdefine=False, **opts) - + @staticmethod def pythia8_formatting(value, formatv=None): """format the variable into pythia8 card convention. 
The type is detected by default""" if not formatv: if isinstance(value,UnknownType): - formatv = 'unknown' + formatv = 'unknown' elif isinstance(value, bool): formatv = 'bool' elif isinstance(value, int): @@ -2095,7 +2095,7 @@ def pythia8_formatting(value, formatv=None): formatv = 'str' else: assert formatv - + if formatv == 'unknown': # No formatting then return str(value) @@ -2116,7 +2116,7 @@ def pythia8_formatting(value, formatv=None): elif formatv == 'float': return '%.10e' % float(value) elif formatv == 'shortfloat': - return '%.3f' % float(value) + return '%.3f' % float(value) elif formatv == 'str': return "%s" % value elif formatv == 'list': @@ -2124,9 +2124,9 @@ def pythia8_formatting(value, formatv=None): return ','.join([PY8Card.pythia8_formatting(arg, 'shortfloat') for arg in value]) else: return ','.join([PY8Card.pythia8_formatting(arg) for arg in value]) - - def write(self, output_file, template, read_subrun=False, + + def write(self, output_file, template, read_subrun=False, print_only_visible=False, direct_pythia_input=False, add_missing=True): """ Write the card to output_file using a specific template. 
> 'print_only_visible' specifies whether or not the hidden parameters @@ -2143,28 +2143,28 @@ def write(self, output_file, template, read_subrun=False, or p.lower() in self.user_set] # Filter against list of parameters vetoed for write-out visible_param = [p for p in visible_param if p.lower() not in self.params_to_never_write] - + # Now the hidden param which must be written out if print_only_visible: hidden_output_param = [] else: hidden_output_param = [p for p in self if p.lower() in self.hidden_param and not p.lower() in self.user_set and - (p.lower() in self.hidden_params_to_always_write or + (p.lower() in self.hidden_params_to_always_write or p.lower() in self.system_set)] # Filter against list of parameters vetoed for write-out hidden_output_param = [p for p in hidden_output_param if p not in self.params_to_never_write] - + if print_only_visible: subruns = [] else: if not read_subrun: subruns = sorted(self.subruns.keys()) - + # Store the subruns to write in a dictionary, with its ID in key # and the corresponding stringstream in value subruns_to_write = {} - + # Sort these parameters nicely so as to put together parameters # belonging to the same group (i.e. prefix before the ':' in their name). def group_params(params): @@ -2191,7 +2191,7 @@ def group_params(params): # First dump in a temporary_output (might need to have a second pass # at the very end to update 'LHEFInputs:nSubruns') output = StringIO.StringIO() - + # Setup template from which to read if isinstance(template, str): if os.path.isfile(template): @@ -2199,7 +2199,7 @@ def group_params(params): elif '\n' in template: tmpl = StringIO.StringIO(template) else: - raise Exception("File input '%s' not found." % file_input) + raise Exception("File input '%s' not found." 
% file_input) elif template is None: # Then use a dummy empty StringIO, hence skipping the reading tmpl = StringIO.StringIO() @@ -2257,8 +2257,8 @@ def group_params(params): # Remove all of its variables (so that nothing is overwritten) DummySubrun.clear() DummySubrun.write(subruns_to_write[int(value)], - tmpl, read_subrun=True, - print_only_visible=print_only_visible, + tmpl, read_subrun=True, + print_only_visible=print_only_visible, direct_pythia_input=direct_pythia_input) logger.info('Adding new unknown subrun with ID %d.'% @@ -2267,7 +2267,7 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - + # Change parameters which must be output if param in visible_param: new_value = PY8Card.pythia8_formatting(self[param]) @@ -2286,10 +2286,10 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - - # Substitute the value. + + # Substitute the value. # If it is directly the pytia input, then don't write the param if it - # is not in the list of visible_params_to_always_write and was + # is not in the list of visible_params_to_always_write and was # not user_set or system_set if ((not direct_pythia_input) or (param.lower() in self.visible_params_to_always_write) or @@ -2304,16 +2304,16 @@ def group_params(params): output.write(template%(param_entry, value_entry.replace(value,new_value))) - + # Proceed to next line last_pos = tmpl.tell() line = tmpl.readline() - + # If add_missing is False, make sure to empty the list of remaining parameters if not add_missing: visible_param = [] hidden_output_param = [] - + # Now output the missing parameters. Warn about visible ones. if len(visible_param)>0 and not template is None: output.write( @@ -2343,12 +2343,12 @@ def group_params(params): """%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else '')) for param in hidden_output_param: if param.lower() in self.comments: - comment = '\n'.join('! %s'%c for c in + comment = '\n'.join('! 
%s'%c for c in self.comments[param.lower()].split('\n')) output.write(comment+'\n') output.write('%s=%s\n'%(param,PY8Card.pythia8_formatting(self[param]))) - - # Don't close the file if we were reading a subrun, but simply write + + # Don't close the file if we were reading a subrun, but simply write # output and return now if read_subrun: output_file.write(output.getvalue()) @@ -2382,12 +2382,12 @@ def group_params(params): out.close() else: output_file.write(output.getvalue()) - + def read(self, file_input, read_subrun=False, setter='default'): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file. - The setter option choses the authority that sets potential - modified/new parameters. It can be either: + The setter option choses the authority that sets potential + modified/new parameters. It can be either: 'default' or 'user' or 'system'""" if isinstance(file_input, str): if "\n" in file_input: @@ -2423,8 +2423,8 @@ def read(self, file_input, read_subrun=False, setter='default'): raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%\ line) if '!' in value: - value,_ = value.split('!',1) - + value,_ = value.split('!',1) + # Read a subrun if detected: if param=='Main:subrun': if read_subrun: @@ -2451,7 +2451,7 @@ def read(self, file_input, read_subrun=False, setter='default'): last_pos = finput.tell() line = finput.readline() continue - + # Read parameter. The case of a parameter not defined in the card is # handled directly in ConfigFile. @@ -2478,7 +2478,7 @@ def add_default_subruns(self, type): def __init__(self, *args, **opts): """ Initialize a subrun """ - + # Force user to set it manually. 
subrunID = -1 if 'subrun_id' in opts: @@ -2489,7 +2489,7 @@ def __init__(self, *args, **opts): def default_setup(self): """Sets up the list of available PY8SubRun parameters.""" - + # Add all default PY8Card parameters super(PY8SubRun, self).default_setup() # Make sure they are all hidden @@ -2501,33 +2501,33 @@ def default_setup(self): self.add_param("Main:subrun", -1) self.add_param("Beams:LHEF", "events.lhe.gz") - + class RunBlock(object): """ Class for a series of parameter in the run_card that can be either visible or hidden. - name: allow to set in the default run_card $name to set where that + name: allow to set in the default run_card $name to set where that block need to be inserted template_on: information to include is block is active template_off: information to include is block is not active on_fields/off_fields: paramater associated to the block - can be specify but are otherwise automatically but + can be specify but are otherwise automatically but otherwise determined from the template. - + function: status(self,run_card) -> return which template need to be used check_validity(self, runcard) -> sanity check - create_default_for_process(self, run_card, proc_characteristic, - history, proc_def) + create_default_for_process(self, run_card, proc_characteristic, + history, proc_def) post_set_XXXX(card, value, change_userdefine, raiseerror) -> fct called when XXXXX is set post_set(card, value, change_userdefine, raiseerror, **opt) -> fct called when a parameter is changed - -> no access to parameter name + -> no access to parameter name -> not called if post_set_XXXX is defined """ - - + + def __init__(self, name, template_on, template_off, on_fields=False, off_fields=False): self.name = name @@ -2550,7 +2550,7 @@ def fields(self): def find_fields_from_template(template): """ return the list of fields from a template. 
checking line like %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ - + return re.findall(r"^\s*%\((.*)\)s\s*=\s*\1", template, re.M) def get_template(self, card): @@ -2565,7 +2565,7 @@ def get_unused_template(self, card): if self.status(card): return self.template_off else: - return self.template_on + return self.template_on def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -2594,20 +2594,20 @@ def manage_parameters(self, card, written, to_write): written.add(name) if name in to_write: to_write.remove(name) - + def check_validity(self, runcard): """run self consistency check here --avoid to use runcard[''] = xxx here since it can trigger post_set function""" return def create_default_for_process(self, run_card, proc_characteristic, history, proc_def): - return + return # @staticmethod # def post_set(card, value, change_userdefine, raiseerror, **opt): # """default action to run when a parameter of the block is defined. # Here we do not know which parameter is modified. if this is needed. 
# then one need to define post_set_XXXXX(card, value, change_userdefine, raiseerror) -# and then only that function is used +# and then only that function is used # """ # # if 'pdlabel' in card.user_set: @@ -2621,7 +2621,7 @@ class RunCard(ConfigFile): blocks = [] parameter_in_block = {} - allowed_lep_densities = {} + allowed_lep_densities = {} default_include_file = 'run_card.inc' default_autodef_file = 'run.inc' donewarning = [] @@ -2637,7 +2637,7 @@ def plugin_input(self, finput): curr_dir = os.path.dirname(os.path.dirname(finput.name)) elif isinstance(finput, str): curr_dir = os.path.dirname(os.path.dirname(finput)) - + if curr_dir: if os.path.exists(pjoin(curr_dir, 'bin', 'internal', 'plugin_run_card')): # expected format {} passing everything as optional argument @@ -2646,7 +2646,7 @@ def plugin_input(self, finput): continue opts = dict(eval(line)) self.add_param(**opts) - + @classmethod def fill_post_set_from_blocks(cls): """set the post_set function for any parameter defined in a run_block""" @@ -2659,8 +2659,8 @@ def fill_post_set_from_blocks(cls): elif hasattr(block, 'post_set'): setattr(cls, 'post_set_%s' % parameter, block.post_set) cls.parameter_in_block[parameter] = block - - + + def __new__(cls, finput=None, **opt): cls.fill_post_set_from_blocks() @@ -2718,9 +2718,9 @@ def __new__(cls, finput=None, **opt): return super(RunCard, cls).__new__(cls, finput, **opt) def __init__(self, *args, **opts): - + # The following parameter are updated in the defaultsetup stage. - + #parameter for which no warning should be raised if not define self.hidden_param = [] # in which include file the parameer should be written @@ -2739,11 +2739,11 @@ def __init__(self, *args, **opts): self.cuts_parameter = {} # parameter added where legacy requires an older value. 
self.system_default = {} - + self.display_block = [] # set some block to be displayed self.fct_mod = {} # {param: (fct_pointer, *argument, **opts)} - self.cut_class = {} + self.cut_class = {} self.warned=False @@ -2776,11 +2776,11 @@ def get_lepton_densities(cls): else: cls.allowed_lep_densities[identity].append(name) - def add_param(self, name, value, fortran_name=None, include=True, + def add_param(self, name, value, fortran_name=None, include=True, hidden=False, legacy=False, cut=False, system=False, sys_default=None, autodef=False, fct_mod=None, **opts): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. fortran_name: defines what is the associate name in the f77 code include: defines if we have to put the value in the include file @@ -2795,7 +2795,7 @@ def add_param(self, name, value, fortran_name=None, include=True, fct_mod: defines a function to run if the parameter is modify in the include file options of **opts: - allowed: list of valid options. '*' means anything else should be allowed. - empty list means anything possible as well. + empty list means anything possible as well. 
- comment: add comment for writing/help - typelist: type of the list if default is empty """ @@ -2823,9 +2823,9 @@ def add_param(self, name, value, fortran_name=None, include=True, self.fct_mod[name] = fct_mod def read(self, finput, consistency=True, unknown_warning=True, **opt): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -2836,7 +2836,7 @@ def read(self, finput, consistency=True, unknown_warning=True, **opt): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -2864,8 +2864,8 @@ def add_unknown_entry(self, name, value, unknow_warning): This is based on the guess_entry_fromname for the various syntax providing input. This then call add_param accordingly. - This function does not returns anything. - """ + This function does not returns anything. + """ if name == "dsqrt_q2fact1" and not self.LO: raise InvalidRunCard("Looks like you passed a LO run_card for a NLO run. Please correct") @@ -2903,7 +2903,7 @@ def add_unknown_entry(self, name, value, unknow_warning): " The type was assigned to %s. 
\n"+\ " The definition of that variable will %sbe automatically added to fortran file %s\n"+\ " The value of that variable will %sbe passed to the fortran code via fortran file %s",\ - name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, + name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, "" if opts.get('autodef', False) else "not", "" if opts.get('autodef', False) in [True,False] else opts.get('autodef'), "" if opts.get('include', True) else "not", "" if opts.get('include', True) in [True,False] else opts.get('include')) RunCard.donewarning.append(name) @@ -2923,19 +2923,19 @@ def valid_line(self, line, tmp): return False elif line.strip().startswith('%'): parameter = line[line.find('(')+1:line.find(')')] - + try: cond = self.cuts_parameter[parameter] except KeyError: return True - - + + if template_options.get(cond, default) or cond is True: return True else: - return False + return False else: - return True + return True def reset_simd(self, old_value, new_value, name, *args, **opts): @@ -2946,28 +2946,28 @@ def make_clean(self,old_value, new_value, name, dir): raise Exception('pass make clean for ', dir) def make_Ptouch(self,old_value, new_value, name, reset): - raise Exception('pass Ptouch for ', reset) - + raise Exception('pass Ptouch for ', reset) + def write(self, output_file, template=None, python_template=False, write_hidden=False, template_options=None, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" - to_write = set(self.user_set) + to_write = set(self.user_set) written = set() if not template: raise Exception if not template_options: template_options = collections.defaultdict(str) - + if python_template: text = open(template,'r').read() - text = text.split('\n') + text = text.split('\n') # remove if templating - text = [l if not l.startswith('#IF') else 
l[l.find(')# ')+2:] + text = [l if not l.startswith('#IF') else l[l.find(')# ')+2:] for l in text if self.valid_line(l, template_options)] text ='\n'.join(text) - + if python_template and not to_write: import string if self.blocks: @@ -2981,14 +2981,14 @@ def write(self, output_file, template=None, python_template=False, if not self.list_parameter: text = text % self else: - data = dict((key.lower(),value) for key, value in self.items()) + data = dict((key.lower(),value) for key, value in self.items()) for name in self.list_parameter: if self.list_parameter[name] != str: data[name] = ', '.join(str(v) for v in data[name]) else: data[name] = "['%s']" % "', '".join(str(v) for v in data[name]) text = text % data - else: + else: text = "" for line in open(template,'r'): nline = line.split('#')[0] @@ -3005,11 +3005,11 @@ def write(self, output_file, template=None, python_template=False, this_group = this_group[0] text += this_group.get_template(self) % self this_group.manage_parameters(self, written, to_write) - + elif len(nline) != 2: text += line elif nline[1].strip() in self: - + name = nline[1].strip().lower() value = self[name] if name in self.list_parameter: @@ -3026,15 +3026,15 @@ def write(self, output_file, template=None, python_template=False, else: endline = '' text += ' %s\t= %s %s%s' % (value, name, comment, endline) - written.add(name) + written.add(name) if name in to_write: to_write.remove(name) else: logger.info('Adding missing parameter %s to current %s (with default value)', (name, self.filename)) - written.add(name) - text += line + written.add(name) + text += line for b in self.blocks: if b.status(self): @@ -3057,7 +3057,7 @@ def write(self, output_file, template=None, python_template=False, else: #partial writting -> add only what is needed to_add = [] - for line in b.get_template(self).split('\n'): + for line in b.get_template(self).split('\n'): nline = line.split('#')[0] nline = nline.split('!')[0] nline = nline.split('=') @@ -3072,8 +3072,8 @@ 
def write(self, output_file, template=None, python_template=False, continue #already include before else: to_add.append(line % {nline[1].strip():value, name:value}) - written.add(name) - + written.add(name) + if name in to_write: to_write.remove(name) else: @@ -3095,13 +3095,13 @@ def write(self, output_file, template=None, python_template=False, text += '\n'.join(to_add) if to_write or write_hidden: - text+="""#********************************************************************* + text+="""#********************************************************************* # Additional hidden parameters #********************************************************************* -""" +""" if write_hidden: # - # do not write hidden parameter not hidden for this template + # do not write hidden parameter not hidden for this template # if python_template: written = written.union(set(re.findall('\%\((\w*)\)s', open(template,'r').read(), re.M))) @@ -3129,7 +3129,7 @@ def get_last_value_include(self, output_dir): if inc file does not exist we will return the current value (i.e. set has no change) """ - #remember that + #remember that # default_include_file is a class variable # self.includepath is on the form include_path : [list of param ] out = {} @@ -3165,7 +3165,7 @@ def get_value_from_include(self, path, list_of_params, output_dir): with open(pjoin(output_dir,path), 'r') as fsock: text = fsock.read() - + for name in list_of_params: misc.sprint(name, name in self.fortran_name) misc.sprint(self.fortran_name[name] if name in self.fortran_name[name] else name) @@ -3191,11 +3191,11 @@ def get_value_from_include(self, path, list_of_params, output_dir): misc.sprint(self.fortran_name) misc.sprint(text) raise Exception - return out + return out def get_default(self, name, default=None, log_level=None): - """return self[name] if exist otherwise default. log control if we + """return self[name] if exist otherwise default. 
log control if we put a warning or not if we use the default value""" lower_name = name.lower() @@ -3216,13 +3216,13 @@ def get_default(self, name, default=None, log_level=None): log_level = 20 if not default: default = dict.__getitem__(self, name.lower()) - + logger.log(log_level, '%s missed argument %s. Takes default: %s' % (self.filename, name, default)) self[name] = default return default else: - return self[name] + return self[name] def mod_inc_pdlabel(self, value): """flag pdlabel has 'dressed' if one of the special lepton PDF with beamstralung. @@ -3237,16 +3237,16 @@ def edit_dummy_fct_from_file(self, filelist, outdir): filelist is a list of input files (given by the user) containing a series of function to be placed in replacement of standard (typically dummy) functions of the code. - This use LO/NLO class attribute that defines which function name need to - be placed in which file. + This use LO/NLO class attribute that defines which function name need to + be placed in which file. First time this is used, a backup of the original file is done in order to - recover if the user remove some of those files. + recover if the user remove some of those files. The function present in the file are determined automatically via regular expression. and only that function is replaced in the associated file. 
- function in the filelist starting with user_ will also be include within the + function in the filelist starting with user_ will also be include within the dummy_fct.f file """ @@ -3269,7 +3269,7 @@ def edit_dummy_fct_from_file(self, filelist, outdir): fsock = file_writers.FortranWriter(tmp,'w') function_text = fsock.remove_routine(text, fct) fsock.close() - test = open(tmp,'r').read() + test = open(tmp,'r').read() if fct not in self.dummy_fct_file: if fct.startswith('user_'): self.dummy_fct_file[fct] = self.dummy_fct_file['user_'] @@ -3315,22 +3315,22 @@ def guess_entry_fromname(self, name, value): - vartype: type of the variable - name: name of the variable (stripped from metadata) - options: additional options for the add_param - rules: - - if name starts with str_, int_, float_, bool_, list_, dict_ then + rules: + - if name starts with str_, int_, float_, bool_, list_, dict_ then - vartype is set accordingly - name is strip accordingly - otherwise guessed from value (which is string) - if name contains min/max - vartype is set to float - options has an added {'cut':True} - - suffixes like + - suffixes like - will be removed from named - will be added in options (for add_param) as {'cut':True} see add_param documentation for the list of supported options - if include is on False set autodef to False (i.e. 
enforce it False for future change) """ - # local function + # local function def update_typelist(value, name, opts): """convert a string to a list and update opts to keep track of the type """ value = value.strip() @@ -3358,7 +3358,7 @@ def update_typelist(value, name, opts): opts[key] = val name = name.replace("<%s=%s>" %(key,val), '') - # get vartype + # get vartype # first check that name does not force it supported_type = ["str", "float", "int", "bool", "list", "dict"] if "_" in name and name.split("_")[0].lower() in supported_type: @@ -3406,13 +3406,13 @@ def f77_formatting(value, formatv=None): value = str(value).lower() else: assert formatv - + if formatv == 'bool': if str(value) in ['1','T','.true.','True']: return '.true.' else: return '.false.' - + elif formatv == 'int': try: return str(int(value)) @@ -3422,12 +3422,12 @@ def f77_formatting(value, formatv=None): return str(int(fl)) else: raise - + elif formatv == 'float': if isinstance(value, str): value = value.replace('d','e') return ('%.10e' % float(value)).replace('e','d') - + elif formatv == 'str': # Check if it is a list if value.strip().startswith('[') and value.strip().endswith(']'): @@ -3437,20 +3437,20 @@ def f77_formatting(value, formatv=None): enumerate(elements)] else: return "'%s'" % value - - + + def check_validity(self, log_level=30): """check that parameter missing in the card are set to the expected value""" for name, value in self.system_default.items(): self.set(name, value, changeifuserset=False) - + for name in self.includepath[False]: to_bypass = self.hidden_param + list(self.legacy_parameter.keys()) if name not in to_bypass: - self.get_default(name, log_level=log_level) + self.get_default(name, log_level=log_level) for name in self.legacy_parameter: if self[name] != self.legacy_parameter[name]: @@ -3458,28 +3458,28 @@ def check_validity(self, log_level=30): for block in self.blocks: block.check_validity(self) - + def update_system_parameter_for_include(self): - """update hidden 
system only parameter for the correct writtin in the + """update hidden system only parameter for the correct writtin in the include""" return - + def write_include_file(self, output_dir, output_file=None): """Write the various include file in output_dir. The entry True of self.includepath will be written in run_card.inc The entry False will not be written anywhere output_file allows testing by providing stream. - This also call the function to add variable definition for the - variable with autodef=True (handle by write_autodef function) + This also call the function to add variable definition for the + variable with autodef=True (handle by write_autodef function) """ - + # ensure that all parameter are coherent and fix those if needed self.check_validity() - + #ensusre that system only parameter are correctly set self.update_system_parameter_for_include() @@ -3490,10 +3490,10 @@ def write_include_file(self, output_dir, output_file=None): self.write_autodef(output_dir, output_file=None) # check/fix status of customised functions self.edit_dummy_fct_from_file(self["custom_fcts"], os.path.dirname(output_dir)) - + for incname in self.includepath: self.write_one_include_file(output_dir, incname, output_file) - + for name,value in value_in_old_include.items(): if value != self[name]: self.fct_mod[name][0](value, self[name], name, *self.fct_mod[name][1],**self.fct_mod[name][2]) @@ -3515,13 +3515,13 @@ def write_one_include_file(self, output_dir, incname, output_file=None): fsock = file_writers.FortranWriter(pjoin(output_dir,pathinc+'.tmp')) - for key in self.includepath[incname]: + for key in self.includepath[incname]: #define the fortran name if key in self.fortran_name: fortran_name = self.fortran_name[key] else: fortran_name = key - + if incname in self.include_as_parameter: fsock.writelines('INTEGER %s\n' % fortran_name) #get the value with warning if the user didn't set it @@ -3534,7 +3534,7 @@ def write_one_include_file(self, output_dir, incname, 
output_file=None): # in case of a list, add the length of the list as 0th # element in fortran. Only in case of integer or float # list (not for bool nor string) - targettype = self.list_parameter[key] + targettype = self.list_parameter[key] if targettype is bool: pass elif targettype is int: @@ -3550,7 +3550,7 @@ def write_one_include_file(self, output_dir, incname, output_file=None): elif isinstance(value, dict): for fortran_name, onevalue in value.items(): line = '%s = %s \n' % (fortran_name, self.f77_formatting(onevalue)) - fsock.writelines(line) + fsock.writelines(line) elif isinstance(incname,str) and 'compile' in incname: if incname in self.include_as_parameter: line = 'PARAMETER (%s=%s)' %( fortran_name, value) @@ -3585,7 +3585,7 @@ def write_autodef(self, output_dir, output_file=None): filetocheck = dict(self.definition_path) if True not in self.definition_path: filetocheck[True] = [] - + for incname in filetocheck: if incname is True: @@ -3598,7 +3598,7 @@ def write_autodef(self, output_dir, output_file=None): if output_file: fsock = output_file input = fsock.getvalue() - + else: input = open(pjoin(output_dir,pathinc),'r').read() # do not define fsock here since we might not need to overwrite it @@ -3608,7 +3608,7 @@ def write_autodef(self, output_dir, output_file=None): previous = re.findall(re_pat, input, re.M) # now check which one needed to be added (and remove those identicaly defined) to_add = [] - for key in filetocheck[incname]: + for key in filetocheck[incname]: curr_type = self[key].__class__.__name__ length = "" if curr_type in [list, "list"]: @@ -3640,10 +3640,10 @@ def write_autodef(self, output_dir, output_file=None): fsock.truncate(0) fsock.seek(0) - # remove outdated lines + # remove outdated lines lines = input.split('\n') if previous: - out = [line for line in lines if not re.search(re_pat, line, re.M) or + out = [line for line in lines if not re.search(re_pat, line, re.M) or re.search(re_pat, line, re.M).groups() not in previous] else: 
out = lines @@ -3662,7 +3662,7 @@ def write_autodef(self, output_dir, output_file=None): stop = out.index('C STOP USER COMMON BLOCK') out = out[:start]+ out[stop+1:] #add new common-block - if self.definition_path[incname]: + if self.definition_path[incname]: out.append("C START USER COMMON BLOCK") if isinstance(pathinc , str): filename = os.path.basename(pathinc).split('.',1)[0] @@ -3675,10 +3675,10 @@ def write_autodef(self, output_dir, output_file=None): filename = filename.upper() out.append(" COMMON/USER_CUSTOM_%s/%s" %(filename,','.join( self.definition_path[incname]))) out.append('C STOP USER COMMON BLOCK') - + if not output_file: fsock.writelines(out) - fsock.close() + fsock.close() else: # for iotest out = ["%s\n" %l for l in out] @@ -3702,7 +3702,7 @@ def get_idbmup(lpp): def get_banner_init_information(self): """return a dictionary with the information needed to write the first line of the block of the lhe file.""" - + output = {} output["idbmup1"] = self.get_idbmup(self['lpp1']) output["idbmup2"] = self.get_idbmup(self['lpp2']) @@ -3713,7 +3713,7 @@ def get_banner_init_information(self): output["pdfsup1"] = self.get_pdf_id(self["pdlabel"]) output["pdfsup2"] = self.get_pdf_id(self["pdlabel"]) return output - + def get_pdf_id(self, pdf): if pdf == "lhapdf": lhaid = self["lhaid"] @@ -3721,19 +3721,19 @@ def get_pdf_id(self, pdf): return lhaid[0] else: return lhaid - else: + else: try: return {'none': 0, 'iww': 0, 'eva':0, 'edff':0, 'chff':0, 'cteq6_m':10000,'cteq6_l':10041,'cteq6l1':10042, 'nn23lo':246800,'nn23lo1':247000,'nn23nlo':244800 - }[pdf] + }[pdf] except: - return 0 - + return 0 + def get_lhapdf_id(self): return self.get_pdf_id(self['pdlabel']) - def remove_all_cut(self): + def remove_all_cut(self): """remove all the cut""" for name in self.cuts_parameter: @@ -3749,7 +3749,7 @@ def remove_all_cut(self): elif 'eta' in name: self[name] = -1 else: - self[name] = 0 + self[name] = 0 
################################################################################################ ### Define various template subpart for the LO Run_card @@ -3767,11 +3767,11 @@ def remove_all_cut(self): %(nb_proton1)s = nb_proton1 # number of proton for the first beam %(nb_neutron1)s = nb_neutron1 # number of neutron for the first beam %(mass_ion1)s = mass_ion1 # mass of the heavy ion (first beam) -# Note that seting differently the two beams only work if you use +# Note that seting differently the two beams only work if you use # group_subprocess=False when generating your matrix-element %(nb_proton2)s = nb_proton2 # number of proton for the second beam %(nb_neutron2)s = nb_neutron2 # number of neutron for the second beam - %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) + %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ template_off = "# To see heavy ion options: type \"update ion_pdf\"" @@ -3834,11 +3834,11 @@ def remove_all_cut(self): # Frame for polarization ------------------------------------------------------------------------------------ template_on = \ """#********************************************************************* -# Frame where to evaluate the matrix-element (not the cut!) for polarization +# Frame where to evaluate the matrix-element (not the cut!) for polarization #********************************************************************* %(me_frame)s = me_frame ! list of particles to sum-up to define the rest-frame ! in which to evaluate the matrix-element - ! [1,2] means the partonic center of mass + ! 
[1,2] means the partonic center of mass """ template_off = "" frame_block = RunBlock('frame', template_on=template_on, template_off=template_off) @@ -3891,7 +3891,7 @@ def remove_all_cut(self): # CONTROL The extra running scale (not QCD) * # Such running is NOT include in systematics computation * #*********************************************************************** - %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale + %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale %(mue_ref_fixed)s = mue_ref_fixed ! scale to use if fixed scale mode %(mue_over_ref)s = mue_over_ref ! ratio to mur if dynamical scale """ @@ -3908,10 +3908,10 @@ def remove_all_cut(self): %(tmin_for_channel)s = tmin_for_channel ! limit the non-singular reach of --some-- channel of integration related to T-channel diagram (value between -1 and 0), -1 is no impact %(survey_splitting)s = survey_splitting ! for loop-induced control how many core are used at survey for the computation of a single iteration. %(survey_nchannel_per_job)s = survey_nchannel_per_job ! control how many Channel are integrated inside a single job on cluster/multicore - %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) + %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) +#********************************************************************* +# Compilation flag. #********************************************************************* -# Compilation flag. -#********************************************************************* %(global_flag)s = global_flag ! fortran optimization flag use for the all code. %(aloha_flag)s = aloha_flag ! fortran optimization flag for aloha function. Suggestions: '-ffast-math' %(matrix_flag)s = matrix_flag ! fortran optimization flag for matrix.f function. 
Suggestions: '-O3' @@ -3948,7 +3948,7 @@ def check_validity(self, card): if card['pdlabel'] != card['pdlabel1']: dict.__setitem__(card, 'pdlabel', card['pdlabel1']) elif card['pdlabel1'] in sum(card.allowed_lep_densities.values(),[]): - raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") + raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel2'] in sum(card.allowed_lep_densities.values(),[]): raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel1'] == 'none': @@ -3962,7 +3962,7 @@ def check_validity(self, card): dict.__setitem__(card, 'pdlabel2', card['pdlabel']) if abs(card['lpp1']) == 1 == abs(card['lpp2']) and card['pdlabel1'] != card['pdlabel2']: - raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") + raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -4028,7 +4028,7 @@ def post_set(card, value, change_userdefine, raiseerror, name='unknown', **opt): if name == 'fixed_fac_scale2' and 'fixed_fac_scale1' not in card.user_set: dict.__setitem__(card, 'fixed_fac_scale1', card['fixed_fac_scale']) if name == 'fixed_fac_scale1' and 'fixed_fac_scale2' not in card.user_set: - dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) + dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) def status(self, card): @@ -4061,32 +4061,32 @@ def status(self, card): class RunCardLO(RunCard): """an object to handle in a nice way the run_card information""" - + blocks = [heavy_ion_block, beam_pol_block, syscalc_block, ecut_block, frame_block, eva_scale_block, mlm_block, ckkw_block, psoptim_block, pdlabel_block, fixedfacscale, running_block] dummy_fct_file = {"dummy_cuts": 
pjoin("SubProcesses","dummy_fct.f"), "get_dummy_x1": pjoin("SubProcesses","dummy_fct.f"), - "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), + "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), "dummy_boostframe": pjoin("SubProcesses","dummy_fct.f"), "user_dynamical_scale": pjoin("SubProcesses","dummy_fct.f"), "bias_wgt_custom": pjoin("SubProcesses","dummy_fct.f"), "user_": pjoin("SubProcesses","dummy_fct.f") # all function starting by user will be added to that file } - + include_as_parameter = ['vector.inc'] if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_lo.dat") - + def default_setup(self): """default value for the run_card.dat""" - + self.add_param("run_tag", "tag_1", include=False) self.add_param("gridpack", False) self.add_param("time_of_flight", -1.0, include=False) - self.add_param("nevents", 10000) + self.add_param("nevents", 10000) self.add_param("iseed", 0) self.add_param("python_seed", -2, include=False, hidden=True, comment="controlling python seed [handling in particular the final unweighting].\n -1 means use default from random module.\n -2 means set to same value as iseed") self.add_param("lpp1", 1, fortran_name="lpp(1)", allowed=[-1,1,0,2,3,9,-2,-3,4,-4], @@ -4106,7 +4106,7 @@ def default_setup(self): self.add_param('nb_neutron1', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(1)", comment='For heavy ion physics nb of neutron in the ion (for both beam but if group_subprocess was False)') self.add_param('nb_neutron2', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(2)", - comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') + comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') self.add_param('mass_ion1', -1.0, hidden=True, fortran_name="mass_ion(1)", allowed=[-1,0, 0.938, 207.9766521*0.938, 0.000511, 0.105, '*'], comment='For heavy ion physics mass in GeV of the ion (of beam 1)') 
@@ -4133,11 +4133,11 @@ def default_setup(self): self.add_param("mue_over_ref", 1.0, hidden=True, comment='ratio mu_other/mu for dynamical scale') self.add_param("ievo_eva",0,hidden=True, allowed=[0,1],fortran_name="ievo_eva", comment='eva: 0 for EW pdf muf evolution by q^2; 1 for evo by pT^2') - + # Bias module options self.add_param("bias_module", 'None', include=False, hidden=True) self.add_param('bias_parameters', {'__type__':1.0}, include='BIAS/bias.inc', hidden=True) - + #matching self.add_param("scalefact", 1.0) self.add_param("ickkw", 0, allowed=[0,1], hidden=True, comment="\'0\' for standard fixed order computation.\n\'1\' for MLM merging activates alphas and pdf re-weighting according to a kt clustering of the QCD radiation.") @@ -4221,7 +4221,7 @@ def default_setup(self): self.add_param("mmaa", 0.0, cut='aa') self.add_param("mmll", 0.0, cut='ll') self.add_param("mmjjmax", -1.0, cut='jj') - self.add_param("mmbbmax", -1.0, cut='bb') + self.add_param("mmbbmax", -1.0, cut='bb') self.add_param("mmaamax", -1.0, cut='aa') self.add_param("mmllmax", -1.0, cut='ll') self.add_param("mmnl", 0.0, cut='LL') @@ -4231,9 +4231,9 @@ def default_setup(self): self.add_param("ptllmax", -1.0, cut='ll') self.add_param("xptj", 0.0, cut='jj') self.add_param("xptb", 0.0, cut='bb') - self.add_param("xpta", 0.0, cut='aa') + self.add_param("xpta", 0.0, cut='aa') self.add_param("xptl", 0.0, cut='ll') - # ordered pt jet + # ordered pt jet self.add_param("ptj1min", 0.0, cut='jj') self.add_param("ptj1max", -1.0, cut='jj') self.add_param("ptj2min", 0.0, cut='jj') @@ -4241,7 +4241,7 @@ def default_setup(self): self.add_param("ptj3min", 0.0, cut='jjj') self.add_param("ptj3max", -1.0, cut='jjj') self.add_param("ptj4min", 0.0, cut='j'*4) - self.add_param("ptj4max", -1.0, cut='j'*4) + self.add_param("ptj4max", -1.0, cut='j'*4) self.add_param("cutuse", 0, cut='jj') # ordered pt lepton self.add_param("ptl1min", 0.0, cut='l'*2) @@ -4249,7 +4249,7 @@ def default_setup(self): 
self.add_param("ptl2min", 0.0, cut='l'*2) self.add_param("ptl2max", -1.0, cut='l'*2) self.add_param("ptl3min", 0.0, cut='l'*3) - self.add_param("ptl3max", -1.0, cut='l'*3) + self.add_param("ptl3max", -1.0, cut='l'*3) self.add_param("ptl4min", 0.0, cut='l'*4) self.add_param("ptl4max", -1.0, cut='l'*4) # Ht sum of jets @@ -4257,7 +4257,7 @@ def default_setup(self): self.add_param("htjmax", -1.0, cut='j'*2) self.add_param("ihtmin", 0.0, cut='J'*2) self.add_param("ihtmax", -1.0, cut='J'*2) - self.add_param("ht2min", 0.0, cut='J'*3) + self.add_param("ht2min", 0.0, cut='J'*3) self.add_param("ht3min", 0.0, cut='J'*3) self.add_param("ht4min", 0.0, cut='J'*4) self.add_param("ht2max", -1.0, cut='J'*3) @@ -4267,7 +4267,7 @@ def default_setup(self): self.add_param("ptgmin", 0.0, cut='aj') self.add_param("r0gamma", 0.4, hidden=True) self.add_param("xn", 1.0, hidden=True) - self.add_param("epsgamma", 1.0, hidden=True) + self.add_param("epsgamma", 1.0, hidden=True) self.add_param("isoem", True, hidden=True) self.add_param("xetamin", 0.0, cut='jj') self.add_param("deltaeta", 0.0, cut='j'*2) @@ -4280,7 +4280,7 @@ def default_setup(self): self.add_param("use_syst", True) self.add_param('systematics_program', 'systematics', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics, syscalc') self.add_param('systematics_arguments', ['--mur=0.5,1,2', '--muf=0.5,1,2', '--pdf=errorset'], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. 
Look at the help of the systematics function for more details.') - + self.add_param("sys_scalefact", "0.5 1 2", include=False, hidden=True) self.add_param("sys_alpsfact", "None", include=False, hidden=True) self.add_param("sys_matchscale", "auto", include=False, hidden=True) @@ -4315,8 +4315,8 @@ def default_setup(self): self.add_param('aloha_flag', '', include=False, hidden=True, comment='global fortran compilation flag, suggestion: -ffast-math', fct_mod=(self.make_clean, ('Source/DHELAS'),{})) self.add_param('matrix_flag', '', include=False, hidden=True, comment='fortran compilation flag for the matrix-element files, suggestion -O3', - fct_mod=(self.make_Ptouch, ('matrix'),{})) - self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', + fct_mod=(self.make_Ptouch, ('matrix'),{})) + self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', fortran_name='VECSIZE_MEMMAX', fct_mod=(self.reset_simd,(),{})) # parameter allowing to define simple cut via the pdg @@ -4329,24 +4329,24 @@ def default_setup(self): self.add_param('eta_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False) - + self.add_param('pdg_cut',[0], system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], system=True) # store pt min self.add_param('ptmax4pdg',[-1.], system=True) self.add_param('Emin4pdg',[0.], system=True) # store pt min - self.add_param('Emax4pdg',[-1.], system=True) + self.add_param('Emax4pdg',[-1.], system=True) self.add_param('etamin4pdg',[0.], system=True) # store pt min - self.add_param('etamax4pdg',[-1.], system=True) + self.add_param('etamax4pdg',[-1.], system=True) self.add_param('mxxmin4pdg',[-1.], system=True) self.add_param('mxxpart_antipart', [False], system=True) - - - + + + def check_validity(self): """ """ 
- + super(RunCardLO, self).check_validity() - + #Make sure that nhel is only either 0 (i.e. no MC over hel) or #1 (MC over hel with importance sampling). In particular, it can #no longer be > 1. @@ -4357,12 +4357,12 @@ def check_validity(self): "not %s." % self['nhel']) if int(self['maxjetflavor']) > 6: raise InvalidRunCard('maxjetflavor should be lower than 5! (6 is partly supported)') - + if len(self['pdgs_for_merging_cut']) > 1000: raise InvalidRunCard("The number of elements in "+\ "'pdgs_for_merging_cut' should not exceed 1000.") - + # some cut need to be deactivated in presence of isolation if self['ptgmin'] > 0: if self['pta'] > 0: @@ -4370,18 +4370,18 @@ def check_validity(self): self['pta'] = 0.0 if self['draj'] > 0: logger.warning('draj cut discarded since photon isolation is used') - self['draj'] = 0.0 - - # special treatment for gridpack use the gseed instead of the iseed + self['draj'] = 0.0 + + # special treatment for gridpack use the gseed instead of the iseed if self['gridrun']: self['iseed'] = self['gseed'] - + #Some parameter need to be fixed when using syscalc #if self['use_syst']: # if self['scalefact'] != 1.0: # logger.warning('Since use_syst=T, changing the value of \'scalefact\' to 1') # self['scalefact'] = 1.0 - + # CKKW Treatment if self['ickkw'] > 0: if self['ickkw'] != 1: @@ -4399,7 +4399,7 @@ def check_validity(self): raise InvalidRunCard('maxjetflavor at 6 is NOT supported for matching!') if self['ickkw'] == 2: # add warning if ckkw selected but the associate parameter are empty - self.get_default('highestmult', log_level=20) + self.get_default('highestmult', log_level=20) self.get_default('issgridfile', 'issudgrid.dat', log_level=20) if self['xqcut'] > 0: if self['ickkw'] == 0: @@ -4412,13 +4412,13 @@ def check_validity(self): if self['drjl'] != 0: if 'drjl' in self.user_set: logger.warning('Since icckw>0, changing the value of \'drjl\' to 0') - self['drjl'] = 0 - if not self['auto_ptj_mjj']: + self['drjl'] = 0 + if not 
self['auto_ptj_mjj']: if self['mmjj'] > self['xqcut']: logger.warning('mmjj > xqcut (and auto_ptj_mjj = F). MMJJ set to 0') - self['mmjj'] = 0.0 - - # check validity of the pdf set + self['mmjj'] = 0.0 + + # check validity of the pdf set # note that pdlabel is automatically set to lhapdf if pdlabel1 or pdlabel2 is set to lhapdf if self['pdlabel'] == 'lhapdf': #add warning if lhaid not define @@ -4426,7 +4426,7 @@ def check_validity(self): mod = False for i in [1,2]: - lpp = 'lpp%i' %i + lpp = 'lpp%i' %i pdlabelX = 'pdlabel%i' % i if self[lpp] == 0: # nopdf if self[pdlabelX] != 'none': @@ -4459,12 +4459,12 @@ def check_validity(self): raise InvalidRunCard( "Heavy ion mode is only supported for lpp1=1/2") if self['lpp2'] not in [1,2]: if self['nb_proton2'] !=1 or self['nb_neutron2'] !=0: - raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") + raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") # check that fixed_fac_scale(1/2) is setting as expected # if lpp=2/3/4 -> default is that beam in fixed scale - # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are + # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are # check that both fixed_fac_scale1/2 are defined together # ensure that fixed_fac_scale1 and fixed_fac_scale2 are setup as needed if 'fixed_fac_scale1' in self.user_set: @@ -4475,13 +4475,13 @@ def check_validity(self): elif 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale1 are defined but not fixed_fac_scale2. The value of fixed_fac_scale2 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale2'] = self['fixed_fac_scale'] - elif self['lpp2'] !=0: + elif self['lpp2'] !=0: raise Exception('fixed_fac_scale2 not defined while fixed_fac_scale1 is. Please fix your run_card.') elif 'fixed_fac_scale2' in self.user_set: if 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale2 are defined but not fixed_fac_scale1. 
The value of fixed_fac_scale1 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale1'] = self['fixed_fac_scale'] - elif self['lpp1'] !=0: + elif self['lpp1'] !=0: raise Exception('fixed_fac_scale1 not defined while fixed_fac_scale2 is. Please fix your run_card.') else: if 'fixed_fac_scale' in self.user_set: @@ -4500,12 +4500,12 @@ def check_validity(self): logger.warning('fixed_fac_scale1 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale1']) logger.warning('fixed_fac_scale2 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale2']) - # check if lpp = + # check if lpp = if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]): for i in [1,2]: if abs(self['lpp%s' % i ]) in [3,4] and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: logger.warning("Vector boson from lepton PDF is using fixed scale value of muf [dsqrt_q2fact%s]. Looks like you kept the default value (Mz). Is this really the cut-off that you want to use?" % i) - + if abs(self['lpp%s' % i ]) == 2 and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: if self['pdlabel'] in ['edff','chff']: logger.warning("Since 3.5.0 exclusive photon-photon processes in ultraperipheral proton and nuclear collisions from gamma-UPC (arXiv:2207.03012) will ignore the factorisation scale.") @@ -4515,10 +4515,10 @@ def check_validity(self): if six.PY2 and self['hel_recycling']: self['hel_recycling'] = False - logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. + logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. In general this optimization speeds up the computation by a factor of two.""") - + # check that ebeam is bigger than the associated mass. 
for i in [1,2]: if self['lpp%s' % i ] not in [1,2]: @@ -4529,13 +4529,13 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") - elif self['ebeam%i' % i] < self['mass_ion%i' % i]: + raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + elif self['ebeam%i' % i] < self['mass_ion%i' % i]: if self['ebeam%i' %i] == 0: logger.warning("At rest ion mode set: Energy beam set to %s" % self['mass_ion%i' % i]) self.set('ebeam%i' %i, self['mass_ion%i' % i]) - - + + # check the tmin_for_channel is negative if self['tmin_for_channel'] == 0: raise InvalidRunCard('tmin_for_channel can not be set to 0.') @@ -4543,15 +4543,15 @@ def check_validity(self): logger.warning('tmin_for_channel should be negative. Will be using -%f instead' % self['tmin_for_channel']) self.set('tmin_for_channel', -self['tmin_for_channel']) - + def update_system_parameter_for_include(self): """system parameter need to be setupe""" - + # polarization self['frame_id'] = sum(2**(n) for n in self['me_frame']) - + # set the pdg_for_cut fortran parameter - pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + + pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + list(self['e_min_pdg'].keys()) +list(self['e_max_pdg'].keys()) + list(self['eta_min_pdg'].keys()) +list(self['eta_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys()) + list(self['mxx_only_part_antipart'].keys())) @@ -4559,15 +4559,15 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different pdgs are allowed for pdg specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. 
Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative pdg code') - - + + if any(pdg in pdg_to_cut for pdg in [1,2,3,4,5,21,22,11,13,15]): raise Exception("Can not use PDG related cut for light quark/b quark/lepton/gluon/photon") - + if pdg_to_cut: self['pdg_cut'] = list(pdg_to_cut) self['ptmin4pdg'] = [] @@ -4595,7 +4595,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -4605,11 +4605,11 @@ def update_system_parameter_for_include(self): self['ptmax4pdg'] = [-1.] self['Emax4pdg'] = [-1.] self['etamax4pdg'] =[-1.] - self['mxxmin4pdg'] =[0.] + self['mxxmin4pdg'] =[0.] self['mxxpart_antipart'] = [False] - - - + + + def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules process 1->N all cut set on off. 
@@ -4626,7 +4626,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if proc_characteristic['loop_induced']: self['nhel'] = 1 self['pdgs_for_merging_cut'] = proc_characteristic['colored_pdgs'] - + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() @@ -4636,7 +4636,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): # check for beam_id beam_id = set() beam_id_split = [set(), set()] - for proc in proc_def: + for proc in proc_def: for oneproc in proc: for i,leg in enumerate(oneproc['legs']): if not leg['state']: @@ -4654,20 +4654,20 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): maxjetflavor = max([4]+[abs(i) for i in beam_id if -7< i < 7]) self['maxjetflavor'] = maxjetflavor self['asrwgtflavor'] = maxjetflavor - + if any(i in beam_id for i in [1,-1,2,-2,3,-3,4,-4,5,-5,21,22]): # check for e p collision if any(id in beam_id for id in [11,-11,13,-13]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [11,-11,13,-13]): - self['lpp1'] = 0 - self['lpp2'] = 1 - self['ebeam1'] = '1k' - self['ebeam2'] = '6500' + self['lpp1'] = 0 + self['lpp2'] = 1 + self['ebeam1'] = '1k' + self['ebeam2'] = '6500' else: - self['lpp1'] = 1 - self['lpp2'] = 0 - self['ebeam1'] = '6500' + self['lpp1'] = 1 + self['lpp2'] = 0 + self['ebeam1'] = '6500' self['ebeam2'] = '1k' # UPC for p p collision @@ -4677,7 +4677,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam1'] = '6500' self['ebeam2'] = '6500' self['pdlabel'] = 'edff' - + elif any(id in beam_id for id in [11,-11,13,-13]): self['lpp1'] = 0 self['lpp2'] = 0 @@ -4688,7 +4688,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('ecut') self.display_block.append('beam_pol') - + # check for possibility of eva eva_in_b1 = any(i in beam_id_split[0] for i in [23,24,-24]) #,12,-12,14,-14]) @@ -4701,10 
+4701,10 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['nhel'] = 1 self['pdlabel'] = 'eva' self['fixed_fac_scale'] = True - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') elif eva_in_b1: - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') self['pdlabel1'] = 'eva' self['fixed_fac_scale1'] = True self['nhel'] = 1 @@ -4724,7 +4724,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['pdlabel2'] = 'eva' self['fixed_fac_scale2'] = True self['nhel'] = 1 - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') for i in beam_id_split[0]: if abs(i) == 11: self['lpp1'] = math.copysign(3,i) @@ -4740,34 +4740,34 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if any(i in beam_id for i in [22,23,24,-24,12,-12,14,-14]): self.display_block.append('eva_scale') - # automatic polarisation of the beam if neutrino beam + # automatic polarisation of the beam if neutrino beam if any(id in beam_id for id in [12,-12,14,-14,16,-16]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [12,14,16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = -100 if not all(id in [12,14,16] for id in beam_id_split[0]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1]. %s') elif any(id in beam_id_split[0] for id in [-12,-14,-16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[0]): - logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1].') + logger.warning('Issue with default beam setup of neutrino in the run_card. 
Please check it up [polbeam1].') if any(id in beam_id_split[1] for id in [12,14,16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = -100 if not all(id in [12,14,16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') elif any(id in beam_id_split[1] for id in [-12,-14,-16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') - + # Check if need matching min_particle = 99 max_particle = 0 @@ -4798,12 +4798,12 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - + break + if matching: self['ickkw'] = 1 self['xqcut'] = 30 - #self['use_syst'] = False + #self['use_syst'] = False self['drjj'] = 0 self['drjl'] = 0 self['sys_alpsfact'] = "0.5 1 2" @@ -4811,8 +4811,8 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('mlm') self.display_block.append('ckkw') self['dynamical_scale_choice'] = -1 - - + + # For interference module, the systematics are wrong. 
# automatically set use_syst=F and set systematics_program=none no_systematics = False @@ -4826,14 +4826,14 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): continue break - + if interference or no_systematics: self['use_syst'] = False self['systematics_program'] = 'none' if interference: self['dynamical_scale_choice'] = 3 self['sde_strategy'] = 2 - + # set default integration strategy # interference case is already handle above # here pick strategy 2 if only one QCD color flow @@ -4852,7 +4852,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if pure_lepton and proton_initial: self['sde_strategy'] = 1 else: - # check if multi-jet j + # check if multi-jet j is_multijet = True for proc in proc_def: if any(abs(j.get('id')) not in jet_id for j in proc[0]['legs']): @@ -4860,7 +4860,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): break if is_multijet: self['sde_strategy'] = 2 - + # if polarization is used, set the choice of the frame in the run_card # But only if polarization is used for massive particles for plist in proc_def: @@ -4870,7 +4870,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): model = proc.get('model') particle = model.get_particle(l.get('id')) if particle.get('mass').lower() != 'zero': - self.display_block.append('frame') + self.display_block.append('frame') break else: continue @@ -4894,15 +4894,15 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): proc = proc_list[0] if proc['forbidden_onsh_s_channels']: self['sde_strategy'] = 1 - + if 'fix_scale' in proc_characteristic['limitations']: self['fixed_ren_scale'] = 1 self['fixed_fac_scale'] = 1 if self['ickkw'] == 1: logger.critical("MLM matching/merging not compatible with the model! 
You need to use another method to remove the double counting!") self['ickkw'] = 0 - - # define class of particles present to hide all the cuts associated to + + # define class of particles present to hide all the cuts associated to # not present class cut_class = collections.defaultdict(int) for proc in proc_def: @@ -4925,41 +4925,41 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): one_proc_cut['L'] += 1 elif abs(pdg) in [12,14,16]: one_proc_cut['n'] += 1 - one_proc_cut['L'] += 1 + one_proc_cut['L'] += 1 elif str(oneproc.get('model').get_particle(pdg)['mass']) != 'ZERO': one_proc_cut['H'] += 1 - + for key, nb in one_proc_cut.items(): cut_class[key] = max(cut_class[key], nb) self.cut_class = dict(cut_class) self.cut_class[''] = True #avoid empty - + # If model has running functionality add the additional parameter model = proc_def[0][0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Read file input/default_run_card_lo.dat # This has to be LAST !! 
if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'run_card.dat') python_template = True else: template = pjoin(MEDIR, 'Cards', 'run_card_default.dat') python_template = False - + hid_lines = {'default':True}#collections.defaultdict(itertools.repeat(True).next) if isinstance(output_file, str): @@ -4975,9 +4975,9 @@ def write(self, output_file, template=None, python_template=False, hid_lines[k1+k2] = True super(RunCardLO, self).write(output_file, template=template, - python_template=python_template, + python_template=python_template, template_options=hid_lines, - **opt) + **opt) class InvalidMadAnalysis5Card(InvalidCmd): @@ -4986,19 +4986,19 @@ class InvalidMadAnalysis5Card(InvalidCmd): class MadAnalysis5Card(dict): """ A class to store a MadAnalysis5 card. 
Very basic since it is basically free format.""" - + _MG5aMC_escape_tag = '@MG5aMC' - + _default_hadron_inputs = ['*.hepmc', '*.hep', '*.stdhep', '*.lhco','*.root'] _default_parton_inputs = ['*.lhe'] _skip_analysis = False - + @classmethod def events_can_be_reconstructed(cls, file_path): """ Checks from the type of an event file whether it can be reconstructed or not.""" return not (file_path.endswith('.lhco') or file_path.endswith('.lhco.gz') or \ file_path.endswith('.root') or file_path.endswith('.root.gz')) - + @classmethod def empty_analysis(cls): """ A method returning the structure of an empty analysis """ @@ -5012,7 +5012,7 @@ def empty_reconstruction(cls): 'reco_output':'lhe'} def default_setup(self): - """define the default value""" + """define the default value""" self['mode'] = 'parton' self['inputs'] = [] # None is the default stdout level, it will be set automatically by MG5aMC @@ -5025,8 +5025,8 @@ def default_setup(self): # of this class and some other property could be added to this dictionary # in the future. self['analyses'] = {} - # The recasting structure contains on set of commands and one set of - # card lines. + # The recasting structure contains on set of commands and one set of + # card lines. 
self['recasting'] = {'commands':[],'card':[]} # Add the default trivial reconstruction to use an lhco input # This is just for the structure @@ -5035,7 +5035,7 @@ def default_setup(self): 'root_input': MadAnalysis5Card.empty_reconstruction()} self['reconstruction']['lhco_input']['reco_output']='lhco' - self['reconstruction']['root_input']['reco_output']='root' + self['reconstruction']['root_input']['reco_output']='root' # Specify in which order the analysis/recasting were specified self['order'] = [] @@ -5049,7 +5049,7 @@ def __init__(self, finput=None,mode=None): return else: dict.__init__(self) - + # Initialize it with all the default value self.default_setup() if not mode is None: @@ -5058,15 +5058,15 @@ def __init__(self, finput=None,mode=None): # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, mode=mode) - + def read(self, input, mode=None): """ Read an MA5 card""" - + if mode not in [None,'parton','hadron']: raise MadGraph5Error('A MadAnalysis5Card can be read online the modes'+ "'parton' or 'hadron'") card_mode = mode - + if isinstance(input, (file, StringIO.StringIO)): input_stream = input elif isinstance(input, str): @@ -5099,10 +5099,10 @@ def read(self, input, mode=None): except ValueError: option = line[len(self._MG5aMC_escape_tag):] option = option.strip() - + if option=='inputs': self['inputs'].extend([v.strip() for v in value.split(',')]) - + elif option == 'skip_analysis': self._skip_analysis = True @@ -5118,7 +5118,7 @@ def read(self, input, mode=None): except: raise InvalidMadAnalysis5Card( "MA5 output level specification '%s' is incorrect."%str(value)) - + elif option=='analysis_name': current_type = 'analyses' current_name = value @@ -5127,7 +5127,7 @@ def read(self, input, mode=None): "Analysis '%s' already defined in MadAnalysis5 card"%current_name) else: self[current_type][current_name] = MadAnalysis5Card.empty_analysis() - + elif option=='set_reconstructions': try: reconstructions = 
eval(value) @@ -5142,7 +5142,7 @@ def read(self, input, mode=None): "analysis in a MadAnalysis5 card.") self[current_type][current_name]['reconstructions']=reconstructions continue - + elif option=='reconstruction_name': current_type = 'reconstruction' current_name = value @@ -5161,7 +5161,7 @@ def read(self, input, mode=None): raise InvalidMadAnalysis5Card( "Option '%s' can only take the values 'lhe' or 'root'"%option) self['reconstruction'][current_name]['reco_output'] = value.lower() - + elif option.startswith('recasting'): current_type = 'recasting' try: @@ -5171,11 +5171,11 @@ def read(self, input, mode=None): if len(self['recasting'][current_name])>0: raise InvalidMadAnalysis5Card( "Only one recasting can be defined in MadAnalysis5 hadron card") - + else: raise InvalidMadAnalysis5Card( "Unreckognized MG5aMC instruction in MadAnalysis5 card: '%s'"%option) - + if option in ['analysis_name','reconstruction_name'] or \ option.startswith('recasting'): self['order'].append((current_type,current_name)) @@ -5209,7 +5209,7 @@ def read(self, input, mode=None): self['inputs'] = self._default_hadron_inputs else: self['inputs'] = self._default_parton_inputs - + # Make sure at least one reconstruction is specified for each hadron # level analysis and that it exists. if self['mode']=='hadron': @@ -5221,7 +5221,7 @@ def read(self, input, mode=None): analysis['reconstructions']): raise InvalidMadAnalysis5Card('A reconstructions specified in'+\ " analysis '%s' is not defined."%analysis_name) - + def write(self, output): """ Write an MA5 card.""" @@ -5232,7 +5232,7 @@ def write(self, output): else: raise MadGraph5Error('Incorrect input for the write function of'+\ ' the MadAnalysis5Card card. 
Received argument type is: %s'%str(type(output))) - + output_lines = [] if self._skip_analysis: output_lines.append('%s skip_analysis'%self._MG5aMC_escape_tag) @@ -5240,11 +5240,11 @@ def write(self, output): if not self['stdout_lvl'] is None: output_lines.append('%s stdout_lvl=%s'%(self._MG5aMC_escape_tag,self['stdout_lvl'])) for definition_type, name in self['order']: - + if definition_type=='analyses': output_lines.append('%s analysis_name = %s'%(self._MG5aMC_escape_tag,name)) output_lines.append('%s set_reconstructions = %s'%(self._MG5aMC_escape_tag, - str(self['analyses'][name]['reconstructions']))) + str(self['analyses'][name]['reconstructions']))) elif definition_type=='reconstruction': output_lines.append('%s reconstruction_name = %s'%(self._MG5aMC_escape_tag,name)) elif definition_type=='recasting': @@ -5254,23 +5254,23 @@ def write(self, output): output_lines.extend(self[definition_type][name]) elif definition_type in ['reconstruction']: output_lines.append('%s reco_output = %s'%(self._MG5aMC_escape_tag, - self[definition_type][name]['reco_output'])) + self[definition_type][name]['reco_output'])) output_lines.extend(self[definition_type][name]['commands']) elif definition_type in ['analyses']: - output_lines.extend(self[definition_type][name]['commands']) - + output_lines.extend(self[definition_type][name]['commands']) + output_stream.write('\n'.join(output_lines)) - + return - - def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, + + def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, UFO_model_path=None, run_tag=''): - """ Returns a list of tuples ('AnalysisTag',['commands']) specifying - the commands of the MadAnalysis runs required from this card. - At parton-level, the number of such commands is the number of analysis + """ Returns a list of tuples ('AnalysisTag',['commands']) specifying + the commands of the MadAnalysis runs required from this card. 
+ At parton-level, the number of such commands is the number of analysis asked for. In the future, the idea is that the entire card can be processed in one go from MA5 directly.""" - + if isinstance(inputs_arg, list): inputs = inputs_arg elif isinstance(inputs_arg, str): @@ -5278,21 +5278,21 @@ def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, else: raise MadGraph5Error("The function 'get_MA5_cmds' can only take "+\ " a string or a list for the argument 'inputs_arg'") - + if len(inputs)==0: raise MadGraph5Error("The function 'get_MA5_cmds' must have "+\ " at least one input specified'") - + if run_dir_path is None: run_dir_path = os.path.dirname(inputs_arg) - + cmds_list = [] - + UFO_load = [] # first import the UFO if provided if UFO_model_path: UFO_load.append('import %s'%UFO_model_path) - + def get_import(input, type=None): """ Generates the MA5 import commands for that event file. """ dataset_name = os.path.basename(input).split('.')[0] @@ -5304,7 +5304,7 @@ def get_import(input, type=None): if not type is None: res.append('set %s.type = %s'%(dataset_name, type)) return res - + fifo_status = {'warned_fifo':False,'fifo_used_up':False} def warn_fifo(input): if not input.endswith('.fifo'): @@ -5317,7 +5317,7 @@ def warn_fifo(input): logger.warning('Only the first MA5 analysis/reconstructions can be run on a fifo. Subsequent runs will skip fifo inputs.') fifo_status['warned_fifo'] = True return True - + # Then the event file(s) input(s) inputs_load = [] for input in inputs: @@ -5325,16 +5325,16 @@ def warn_fifo(input): if len(inputs) > 1: inputs_load.append('set main.stacking_method = superimpose') - + submit_command = 'submit %s'%submit_folder+'_%s' - + # Keep track of the reconstruction outpus in the MA5 workflow # Keys are reconstruction names and values are .lhe.gz reco file paths. 
# We put by default already the lhco/root ones present reconstruction_outputs = { - 'lhco_input':[f for f in inputs if + 'lhco_input':[f for f in inputs if f.endswith('.lhco') or f.endswith('.lhco.gz')], - 'root_input':[f for f in inputs if + 'root_input':[f for f in inputs if f.endswith('.root') or f.endswith('.root.gz')]} # If a recasting card has to be written out, chose here its path @@ -5343,7 +5343,7 @@ def warn_fifo(input): # Make sure to only run over one analysis over each fifo. for definition_type, name in self['order']: - if definition_type == 'reconstruction': + if definition_type == 'reconstruction': analysis_cmds = list(self['reconstruction'][name]['commands']) reco_outputs = [] for i_input, input in enumerate(inputs): @@ -5365,8 +5365,8 @@ def warn_fifo(input): analysis_cmds.append( submit_command%('reco_%s_%d'%(name,i_input+1))) analysis_cmds.append('remove reco_events') - - reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) + + reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) for rec_out in reco_outputs] if len(reco_outputs)>0: cmds_list.append(('_reco_%s'%name,analysis_cmds)) @@ -5386,7 +5386,7 @@ def warn_fifo(input): analysis_cmds = ['set main.mode = parton'] else: analysis_cmds = [] - analysis_cmds.extend(sum([get_import(rec_out) for + analysis_cmds.extend(sum([get_import(rec_out) for rec_out in reconstruction_outputs[reco]],[])) analysis_cmds.extend(self['analyses'][name]['commands']) analysis_cmds.append(submit_command%('%s_%s'%(name,reco))) @@ -5427,12 +5427,12 @@ def warn_fifo(input): %(mue_ref_fixed)s = mue_ref_fixed ! 
scale to use if fixed scale mode """ running_block_nlo = RunBlock('RUNNING', template_on=template_on, template_off="") - + class RunCardNLO(RunCard): """A class object for the run_card for a (aMC@)NLO pocess""" - + LO = False - + blocks = [running_block_nlo] dummy_fct_file = {"dummy_cuts": pjoin("SubProcesses","dummy_fct.f"), @@ -5443,11 +5443,11 @@ class RunCardNLO(RunCard): if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_nlo.dat") - - + + def default_setup(self): """define the default value""" - + self.add_param('run_tag', 'tag_1', include=False) self.add_param('nevents', 10000) self.add_param('req_acc', -1.0, include=False) @@ -5455,27 +5455,27 @@ def default_setup(self): self.add_param("time_of_flight", -1.0, include=False) self.add_param('event_norm', 'average') #FO parameter - self.add_param('req_acc_fo', 0.01, include=False) + self.add_param('req_acc_fo', 0.01, include=False) self.add_param('npoints_fo_grid', 5000, include=False) self.add_param('niters_fo_grid', 4, include=False) - self.add_param('npoints_fo', 10000, include=False) + self.add_param('npoints_fo', 10000, include=False) self.add_param('niters_fo', 6, include=False) #seed and collider self.add_param('iseed', 0) - self.add_param('lpp1', 1, fortran_name='lpp(1)') - self.add_param('lpp2', 1, fortran_name='lpp(2)') + self.add_param('lpp1', 1, fortran_name='lpp(1)') + self.add_param('lpp2', 1, fortran_name='lpp(2)') self.add_param('ebeam1', 6500.0, fortran_name='ebeam(1)') - self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') + self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') self.add_param('pdlabel', 'nn23nlo', allowed=['lhapdf', 'emela', 'cteq6_m','cteq6_d','cteq6_l','cteq6l1', 'nn23lo','nn23lo1','nn23nlo','ct14q00','ct14q07','ct14q14','ct14q21'] +\ - sum(self.allowed_lep_densities.values(),[]) ) + sum(self.allowed_lep_densities.values(),[]) ) self.add_param('lhaid', [244600],fortran_name='lhaPDFid') self.add_param('pdfscheme', 0) # whether to include or not 
photon-initiated processes in lepton collisions self.add_param('photons_from_lepton', True) self.add_param('lhapdfsetname', ['internal_use_only'], system=True) - # stuff for lepton collisions - # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set - # whether the current PDF set has or not beamstrahlung + # stuff for lepton collisions + # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set + # whether the current PDF set has or not beamstrahlung self.add_param('has_bstrahl', False, system=True) # renormalisation scheme of alpha self.add_param('alphascheme', 0, system=True) @@ -5486,31 +5486,31 @@ def default_setup(self): # w contribution included or not in the running of alpha self.add_param('w_run', 1, system=True) #shower and scale - self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') + self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') self.add_param('shower_scale_factor',1.0) self.add_param('mcatnlo_delta', False) self.add_param('fixed_ren_scale', False) self.add_param('fixed_fac_scale', False) self.add_param('fixed_extra_scale', True, hidden=True, system=True) # set system since running from Ellis-Sexton scale not implemented - self.add_param('mur_ref_fixed', 91.118) + self.add_param('mur_ref_fixed', 91.118) self.add_param('muf1_ref_fixed', -1.0, hidden=True) - self.add_param('muf_ref_fixed', 91.118) + self.add_param('muf_ref_fixed', 91.118) self.add_param('muf2_ref_fixed', -1.0, hidden=True) - self.add_param('mue_ref_fixed', 91.118, hidden=True) - self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', + self.add_param('mue_ref_fixed', 91.118, hidden=True) + self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', allowed = [-2,-1,0,1,2,3,10], comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n 
'3' is HT/2, '0' allows to use the user_hook definition (need to be defined via custom_fct entry) ") self.add_param('fixed_qes_scale', False, hidden=True) self.add_param('qes_ref_fixed', -1.0, hidden=True) self.add_param('mur_over_ref', 1.0) - self.add_param('muf_over_ref', 1.0) - self.add_param('muf1_over_ref', -1.0, hidden=True) + self.add_param('muf_over_ref', 1.0) + self.add_param('muf1_over_ref', -1.0, hidden=True) self.add_param('muf2_over_ref', -1.0, hidden=True) self.add_param('mue_over_ref', 1.0, hidden=True, system=True) # forbid the user to modigy due to incorrect handling of the Ellis-Sexton scale self.add_param('qes_over_ref', -1.0, hidden=True) self.add_param('reweight_scale', [True], fortran_name='lscalevar') - self.add_param('rw_rscale_down', -1.0, hidden=True) + self.add_param('rw_rscale_down', -1.0, hidden=True) self.add_param('rw_rscale_up', -1.0, hidden=True) - self.add_param('rw_fscale_down', -1.0, hidden=True) + self.add_param('rw_fscale_down', -1.0, hidden=True) self.add_param('rw_fscale_up', -1.0, hidden=True) self.add_param('rw_rscale', [1.0,2.0,0.5], fortran_name='scalevarR') self.add_param('rw_fscale', [1.0,2.0,0.5], fortran_name='scalevarF') @@ -5523,60 +5523,60 @@ def default_setup(self): #technical self.add_param('folding', [1,1,1], include=False) - + #merging self.add_param('ickkw', 0, allowed=[-1,0,3,4], comment=" - 0: No merging\n - 3: FxFx Merging : http://amcatnlo.cern.ch/FxFx_merging.htm\n - 4: UNLOPS merging (No interface within MG5aMC)\n - -1: NNLL+NLO jet-veto computation. 
See arxiv:1412.8408 [hep-ph]") self.add_param('bwcutoff', 15.0) - #cuts + #cuts self.add_param('jetalgo', 1.0) - self.add_param('jetradius', 0.7) + self.add_param('jetradius', 0.7) self.add_param('ptj', 10.0 , cut=True) - self.add_param('etaj', -1.0, cut=True) - self.add_param('gamma_is_j', True) + self.add_param('etaj', -1.0, cut=True) + self.add_param('gamma_is_j', True) self.add_param('ptl', 0.0, cut=True) - self.add_param('etal', -1.0, cut=True) + self.add_param('etal', -1.0, cut=True) self.add_param('drll', 0.0, cut=True) - self.add_param('drll_sf', 0.0, cut=True) + self.add_param('drll_sf', 0.0, cut=True) self.add_param('mll', 0.0, cut=True) - self.add_param('mll_sf', 30.0, cut=True) - self.add_param('rphreco', 0.1) - self.add_param('etaphreco', -1.0) - self.add_param('lepphreco', True) - self.add_param('quarkphreco', True) + self.add_param('mll_sf', 30.0, cut=True) + self.add_param('rphreco', 0.1) + self.add_param('etaphreco', -1.0) + self.add_param('lepphreco', True) + self.add_param('quarkphreco', True) self.add_param('ptgmin', 20.0, cut=True) - self.add_param('etagamma', -1.0) + self.add_param('etagamma', -1.0) self.add_param('r0gamma', 0.4) - self.add_param('xn', 1.0) + self.add_param('xn', 1.0) self.add_param('epsgamma', 1.0) - self.add_param('isoem', True) + self.add_param('isoem', True) self.add_param('maxjetflavor', 4, hidden=True) - self.add_param('pineappl', False) + self.add_param('pineappl', False) self.add_param('lhe_version', 3, hidden=True, include=False) - + # customization self.add_param("custom_fcts",[],typelist="str", include=False, comment="list of files containing function that overwritte dummy function of the code (like adding cuts/...)") #internal variable related to FO_analyse_card self.add_param('FO_LHE_weight_ratio',1e-3, hidden=True, system=True) - self.add_param('FO_LHE_postprocessing',['grouping','random'], + self.add_param('FO_LHE_postprocessing',['grouping','random'], hidden=True, system=True, include=False) - + # parameter 
allowing to define simple cut via the pdg self.add_param('pt_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('pt_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True) - + #hidden parameter that are transfer to the fortran code self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min self.add_param('ptmax4pdg',[-1.], hidden=True, system=True) self.add_param('mxxmin4pdg',[0.], hidden=True, system=True) self.add_param('mxxpart_antipart', [False], hidden=True, system=True) - + def check_validity(self): """check the validity of the various input""" - + super(RunCardNLO, self).check_validity() # for lepton-lepton collisions, ignore 'pdlabel' and 'lhaid' @@ -5588,12 +5588,12 @@ def check_validity(self): # for dressed lepton collisions, check that the lhaid is a valid one if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]) + ['emela']: raise InvalidRunCard('pdlabel %s not allowed for dressed-lepton collisions' % self['pdlabel']) - + elif self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' self['reweight_pdf']=[False] logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') - + if self['lpp1'] == 0 == self['lpp2']: if self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' @@ -5601,8 +5601,8 @@ def check_validity(self): logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') # For FxFx merging, make sure that the following parameters are set correctly: - if self['ickkw'] == 3: - # 1. Renormalization and factorization (and ellis-sexton scales) are not fixed + if self['ickkw'] == 3: + # 1. 
Renormalization and factorization (and ellis-sexton scales) are not fixed scales=['fixed_ren_scale','fixed_fac_scale','fixed_QES_scale'] for scale in scales: if self[scale]: @@ -5615,7 +5615,7 @@ def check_validity(self): self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency in FxFx merging, dynamical_scale_choice has been set to -1 (default)''' ,'$MG:BOLD') - + # 2. Use kT algorithm for jets with pseudo-code size R=1.0 jetparams=['jetradius','jetalgo'] for jetparam in jetparams: @@ -5628,8 +5628,8 @@ def check_validity(self): self["dynamical_scale_choice"] = [-1] self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency with the jet veto, the scale which will be used is ptj. dynamical_scale_choice will be set at -1.''' - ,'$MG:BOLD') - + ,'$MG:BOLD') + # For interface to PINEAPPL, need to use LHAPDF and reweighting to get scale uncertainties if self['pineappl'] and self['pdlabel'].lower() != 'lhapdf': raise InvalidRunCard('PineAPPL generation only possible with the use of LHAPDF') @@ -5661,7 +5661,7 @@ def check_validity(self): if (self['rw_fscale_down'] != -1.0 and ['rw_fscale_down'] not in self['rw_fscale']) or\ (self['rw_fscale_up'] != -1.0 and ['rw_fscale_up'] not in self['rw_fscale']): self['rw_fscale']=[1.0,self['rw_fscale_up'],self['rw_fscale_down']] - + # PDF reweighting check if any(self['reweight_pdf']): # check that we use lhapdf if reweighting is ON @@ -5672,7 +5672,7 @@ def check_validity(self): if self['pdlabel'] != "lhapdf": self['reweight_pdf']=[self['reweight_pdf'][0]] self['lhaid']=[self['lhaid'][0]] - + # make sure set have reweight_scale and dyn_scale_choice of length 1 when fixed scales: if self['fixed_ren_scale'] and self['fixed_fac_scale']: self['reweight_scale']=[self['reweight_scale'][0]] @@ -5685,7 +5685,7 @@ def check_validity(self): self['reweight_pdf']=self['reweight_pdf']*len(self['lhaid']) logger.warning("Setting 'reweight_pdf' for all 'lhaid' to %s" % self['reweight_pdf'][0]) 
if len(self['reweight_scale']) == 1 and len(self['dynamical_scale_choice']) != 1: - self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) + self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) logger.warning("Setting 'reweight_scale' for all 'dynamical_scale_choice' to %s" % self['reweight_pdf'][0]) # Check that there are no identical elements in lhaid or dynamical_scale_choice @@ -5693,7 +5693,7 @@ def check_validity(self): raise InvalidRunCard("'lhaid' has two or more identical entries. They have to be all different for the code to work correctly.") if len(self['dynamical_scale_choice']) != len(set(self['dynamical_scale_choice'])): raise InvalidRunCard("'dynamical_scale_choice' has two or more identical entries. They have to be all different for the code to work correctly.") - + # Check that lenght of lists are consistent if len(self['reweight_pdf']) != len(self['lhaid']): raise InvalidRunCard("'reweight_pdf' and 'lhaid' lists should have the same length") @@ -5730,7 +5730,7 @@ def check_validity(self): if len(self['folding']) != 3: raise InvalidRunCard("'folding' should contain exactly three integers") for ifold in self['folding']: - if ifold not in [1,2,4,8]: + if ifold not in [1,2,4,8]: raise InvalidRunCard("The three 'folding' parameters should be equal to 1, 2, 4, or 8.") # Check MC@NLO-Delta if self['mcatnlo_delta'] and not self['parton_shower'].lower() == 'pythia8': @@ -5746,11 +5746,11 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938 GeV") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + raise InvalidRunCard("Energy for beam %i lower than proton mass. 
Please fix this") def update_system_parameter_for_include(self): - + # set the pdg_for_cut fortran parameter pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys())+ list(self['mxx_only_part_antipart'].keys())) @@ -5758,12 +5758,12 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different PDGs are allowed for PDG specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative PDG codes') - - + + if any(pdg in pdg_to_cut for pdg in [21,22,11,13,15]+ list(range(self['maxjetflavor']+1))): # Note that this will double check in the fortran code raise Exception("Can not use PDG related cuts for massless SM particles/leptons") @@ -5790,7 +5790,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -5800,12 +5800,12 @@ def update_system_parameter_for_include(self): self['mxxpart_antipart'] = [False] def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', 'run_card.dat') python_template = True else: @@ -5818,7 +5818,7 @@ def 
write(self, output_file, template=None, python_template=False, **opt): def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules - e+ e- beam -> lpp:0 ebeam:500 + e+ e- beam -> lpp:0 ebeam:500 p p beam -> set maxjetflavor automatically process with tagged photons -> gamma_is_j = false process without QED splittings -> gamma_is_j = false, recombination = false @@ -5844,19 +5844,19 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam2'] = 500 else: self['lpp1'] = 0 - self['lpp2'] = 0 - + self['lpp2'] = 0 + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() # check for tagged photons tagged_particles = set() - + # If model has running functionality add the additional parameter model = proc_def[0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Check if need matching min_particle = 99 @@ -5885,7 +5885,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: idsmin = [l['id'] for l in procmin['legs']] break - + for procmax in proc_def: if len(procmax['legs']) != max_particle: continue @@ -5901,9 +5901,9 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - - if matching: + break + + if matching: self['ickkw'] = 3 self['fixed_ren_scale'] = False self["fixed_fac_scale"] = False @@ -5911,17 +5911,17 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self["jetalgo"] = 1 self["jetradius"] = 1 self["parton_shower"] = "PYTHIA8" - + # Read file input/default_run_card_nlo.dat # This has to be LAST !! if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + class MadLoopParam(ConfigFile): """ a class for storing/dealing with the file MadLoopParam.dat contains a parser to read it, facilities to write a new file,... 
""" - + _ID_reduction_tool_map = {1:'CutTools', 2:'PJFry++', 3:'IREGI', @@ -5929,10 +5929,10 @@ class MadLoopParam(ConfigFile): 5:'Samurai', 6:'Ninja', 7:'COLLIER'} - + def default_setup(self): """initialize the directory to the default value""" - + self.add_param("MLReductionLib", "6|7|1") self.add_param("IREGIMODE", 2) self.add_param("IREGIRECY", True) @@ -5954,7 +5954,7 @@ def default_setup(self): self.add_param("HelicityFilterLevel", 2) self.add_param("LoopInitStartOver", False) self.add_param("HelInitStartOver", False) - self.add_param("UseQPIntegrandForNinja", True) + self.add_param("UseQPIntegrandForNinja", True) self.add_param("UseQPIntegrandForCutTools", True) self.add_param("COLLIERMode", 1) self.add_param("COLLIERComputeUVpoles", True) @@ -5966,9 +5966,9 @@ def default_setup(self): self.add_param("COLLIERUseInternalStabilityTest",True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -5976,7 +5976,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % input) - + previous_line= '' for line in finput: if previous_line.startswith('#'): @@ -5985,20 +5985,20 @@ def read(self, finput): if len(value) and value[0] not in ['#', '!']: self.__setitem__(name, value, change_userdefine=True) previous_line = line - - + + def write(self, outputpath, template=None,commentdefault=False): - + if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', + template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', 'Cards', 'MadLoopParams.dat') else: template = pjoin(MEDIR, 'Cards', 'MadLoopParams_default.dat') fsock = open(template, 'r') template = fsock.readlines() fsock.close() - + if isinstance(outputpath, str): output = open(outputpath, 'w') else: @@ -6019,7 
+6019,7 @@ def f77format(value): return value else: raise Exception("Can not format input %s" % type(value)) - + name = '' done = set() for line in template: @@ -6034,12 +6034,12 @@ def f77format(value): elif line.startswith('#'): name = line[1:].split()[0] output.write(line) - - - - - -class eMELA_info(ConfigFile): + + + + + +class eMELA_info(ConfigFile): """ a class for eMELA (LHAPDF-like) info files """ path = '' @@ -6053,7 +6053,7 @@ def __init__(self, finput, me_dir): def read(self, finput): - if isinstance(finput, file): + if isinstance(finput, file): lines = finput.open().read().split('\n') self.path = finput.name else: @@ -6066,7 +6066,7 @@ def read(self, finput): k, v = l.split(':', 1) # ignore further occurrences of : try: self[k.strip()] = eval(v) - except (NameError, SyntaxError): + except (NameError, SyntaxError): self[k.strip()] = v def default_setup(self): @@ -6091,7 +6091,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): +"powers of alpha should be reweighted a posteriori") - logger.info('Updating variables according to %s' % self.path) + logger.info('Updating variables according to %s' % self.path) # Flavours in the running of alpha nd, nu, nl = self['eMELA_ActiveFlavoursAlpha'] self.log_and_update(banner, 'run_card', 'ndnq_run', nd) @@ -6130,8 +6130,8 @@ def update_epdf_emela_variables(self, banner, uvscheme): logger.warning('Cannot treat the following renormalisation schemes for ME and PDFs: %d, %d' \ % (uvscheme, uvscheme_pdf)) - # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref - # also check that the com energy is equal to qref, otherwise print a + # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref + # also check that the com energy is equal to qref, otherwise print a # warning if uvscheme_pdf == 1: qref = self['eMELA_AlphaQref'] @@ -6144,23 +6144,23 @@ def update_epdf_emela_variables(self, banner, uvscheme): # LL / NLL PDF (0/1) pdforder = self['eMELA_PerturbativeOrder'] - # pdfscheme = 
0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) + # pdfscheme = 0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) # 4->mixed (leptonic); 5-> nobeta (leptonic); 6->delta (leptonic) # if LL, use nobeta scheme unless LEGACYLLPDF > 0 if pdforder == 0: if 'eMELA_LEGACYLLPDF' not in self.keys() or self['eMELA_LEGACYLLPDF'] in [-1, 0]: self.log_and_update(banner, 'run_card', 'pdfscheme', 5) - elif self['eMELA_LEGACYLLPDF'] == 1: + elif self['eMELA_LEGACYLLPDF'] == 1: # mixed self.log_and_update(banner, 'run_card', 'pdfscheme', 4) - elif self['eMELA_LEGACYLLPDF'] == 2: + elif self['eMELA_LEGACYLLPDF'] == 2: # eta self.log_and_update(banner, 'run_card', 'pdfscheme', 2) - elif self['eMELA_LEGACYLLPDF'] == 3: + elif self['eMELA_LEGACYLLPDF'] == 3: # beta self.log_and_update(banner, 'run_card', 'pdfscheme', 3) elif pdforder == 1: - # for NLL, use eMELA_FactorisationSchemeInt = 0/1 + # for NLL, use eMELA_FactorisationSchemeInt = 0/1 # for delta/MSbar if self['eMELA_FactorisationSchemeInt'] == 0: # MSbar @@ -6177,7 +6177,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): - + def log_and_update(self, banner, card, par, v): """update the card parameter par to value v diff --git a/epochX/cudacpp/gg_ttgg.mad/bin/internal/gen_ximprove.py b/epochX/cudacpp/gg_ttgg.mad/bin/internal/gen_ximprove.py index 5fd170d18d..cc842aa50f 100755 --- a/epochX/cudacpp/gg_ttgg.mad/bin/internal/gen_ximprove.py +++ b/epochX/cudacpp/gg_ttgg.mad/bin/internal/gen_ximprove.py @@ -2,18 +2,18 @@ # # Copyright (c) 2014 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. 
# -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch # ################################################################################ """ A python file to replace the fortran script gen_ximprove. - This script analyses the result of the survey/ previous refine and + This script analyses the result of the survey/ previous refine and creates the jobs for the following script. """ from __future__ import division @@ -66,77 +66,77 @@ class gensym(object): """a class to call the fortran gensym executable and handle it's output in order to create the various job that are needed for the survey""" - + #convenient shortcut for the formatting of variable @ staticmethod def format_variable(*args): return bannermod.ConfigFile.format_variable(*args) - + combining_job = 2 # number of channel by ajob - splitted_grid = False + splitted_grid = False min_iterations = 3 mode= "survey" - + def __init__(self, cmd, opt=None): - + try: super(gensym, self).__init__(cmd, opt) except TypeError: pass - - # Run statistics, a dictionary of RunStatistics(), with + + # Run statistics, a dictionary of RunStatistics(), with self.run_statistics = {} - + self.cmd = cmd self.run_card = cmd.run_card self.me_dir = cmd.me_dir - - + + # dictionary to keep track of the precision when combining iteration self.cross = collections.defaultdict(int) self.abscross = collections.defaultdict(int) self.sigma = collections.defaultdict(int) self.chi2 = collections.defaultdict(int) - + self.splitted_grid = False if self.cmd.proc_characteristics['loop_induced']: nexternal = self.cmd.proc_characteristics['nexternal'] self.splitted_grid = max(2, (nexternal-2)**2) if hasattr(self.cmd, "opts") and self.cmd.opts['accuracy'] == 0.1: self.cmd.opts['accuracy'] = 0.02 - + if isinstance(cmd.cluster, cluster.MultiCore) and 
self.splitted_grid > 1: self.splitted_grid = int(cmd.cluster.nb_core**0.5) if self.splitted_grid == 1 and cmd.cluster.nb_core >1: self.splitted_grid = 2 - + #if the user defines it in the run_card: if self.run_card['survey_splitting'] != -1: self.splitted_grid = self.run_card['survey_splitting'] if self.run_card['survey_nchannel_per_job'] != 1 and 'survey_nchannel_per_job' in self.run_card.user_set: - self.combining_job = self.run_card['survey_nchannel_per_job'] + self.combining_job = self.run_card['survey_nchannel_per_job'] elif self.run_card['hard_survey'] > 1: self.combining_job = 1 - - + + self.splitted_Pdir = {} self.splitted_for_dir = lambda x,y: self.splitted_grid self.combining_job_for_Pdir = lambda x: self.combining_job self.lastoffset = {} - + done_warning_zero_coupling = False def get_helicity(self, to_submit=True, clean=True): """launch a single call to madevent to get the list of non zero helicity""" - - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc P_zero_result = [] nb_tot_proc = len(subproc) - job_list = {} - - + job_list = {} + + for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.' 
% \ (nb_proc+1,nb_tot_proc), level=None) @@ -154,7 +154,7 @@ def get_helicity(self, to_submit=True, clean=True): p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts - + (stdout, _) = p.communicate(''.encode()) stdout = stdout.decode('ascii',errors='ignore') if stdout: @@ -166,11 +166,11 @@ def get_helicity(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir, 'error')): os.remove(pjoin(self.me_dir, 'error')) continue # bypass bad process - + self.cmd.compile(['madevent_forhel'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'madevent_forhel')): - raise Exception('Error make madevent_forhel not successful') - + raise Exception('Error make madevent_forhel not successful') + if not os.path.exists(pjoin(Pdir, 'Hel')): os.mkdir(pjoin(Pdir, 'Hel')) ff = open(pjoin(Pdir, 'Hel', 'input_app.txt'),'w') @@ -180,15 +180,15 @@ def get_helicity(self, to_submit=True, clean=True): try: os.remove(pjoin(Pdir, 'Hel','results.dat')) except Exception: - pass + pass # Launch gensym - p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, + p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=pjoin(Pdir,'Hel'), shell=True) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(" ".encode()) stdout = stdout.decode('ascii',errors='ignore') if os.path.exists(pjoin(self.me_dir, 'error')): - raise Exception(pjoin(self.me_dir,'error')) + raise Exception(pjoin(self.me_dir,'error')) # note a continue is not enough here, we have in top to link # the matrixX_optim.f to matrixX_orig.f to let the code to work # after this error. 
@@ -203,7 +203,7 @@ def get_helicity(self, to_submit=True, clean=True): zero_gc = list() all_zampperhel = set() all_bad_amps_perhel = set() - + for line in stdout.splitlines(): if "=" not in line and ":" not in line: continue @@ -229,22 +229,22 @@ def get_helicity(self, to_submit=True, clean=True): "%s\n" % (' '.join(zero_gc)) +\ "This will slow down the computation. Please consider using restricted model:\n" +\ "https://answers.launchpad.net/mg5amcnlo/+faq/2312") - - + + all_good_hels = collections.defaultdict(list) for me_index, hel in all_hel: - all_good_hels[me_index].append(int(hel)) - + all_good_hels[me_index].append(int(hel)) + #print(all_hel) if self.run_card['hel_zeroamp']: all_bad_amps = collections.defaultdict(list) for me_index, amp in all_zamp: all_bad_amps[me_index].append(int(amp)) - + all_bad_amps_perhel = collections.defaultdict(list) for me_index, hel, amp in all_zampperhel: - all_bad_amps_perhel[me_index].append((int(hel),int(amp))) - + all_bad_amps_perhel[me_index].append((int(hel),int(amp))) + elif all_zamp: nb_zero = sum(int(a[1]) for a in all_zamp) if zero_gc: @@ -254,7 +254,7 @@ def get_helicity(self, to_submit=True, clean=True): else: logger.warning("The optimization detected that you have %i zero matrix-element for this SubProcess: %s.\n" % nb_zero +\ "This part can optimize if you set the flag hel_zeroamp to True in the run_card.") - + #check if we need to do something and write associate information" data = [all_hel, all_zamp, all_bad_amps_perhel] if not self.run_card['hel_zeroamp']: @@ -266,14 +266,14 @@ def get_helicity(self, to_submit=True, clean=True): old_data = open(pjoin(Pdir,'Hel','selection')).read() if old_data == data: continue - - + + with open(pjoin(Pdir,'Hel','selection'),'w') as fsock: - fsock.write(data) - - + fsock.write(data) + + for matrix_file in misc.glob('matrix*orig.f', Pdir): - + split_file = matrix_file.split('/') me_index = split_file[-1][len('matrix'):-len('_orig.f')] @@ -289,11 +289,11 @@ def 
get_helicity(self, to_submit=True, clean=True): #good_hels = sorted(list(good_hels)) good_hels = [str(x) for x in sorted(all_good_hels[me_index])] if self.run_card['hel_zeroamp']: - + bad_amps = [str(x) for x in sorted(all_bad_amps[me_index])] bad_amps_perhel = [x for x in sorted(all_bad_amps_perhel[me_index])] else: - bad_amps = [] + bad_amps = [] bad_amps_perhel = [] if __debug__: mtext = open(matrix_file).read() @@ -310,7 +310,7 @@ def get_helicity(self, to_submit=True, clean=True): recycler.set_input(matrix_file) recycler.set_output(out_file) - recycler.set_template(templ_file) + recycler.set_template(templ_file) recycler.generate_output_file() del recycler @@ -321,19 +321,19 @@ def get_helicity(self, to_submit=True, clean=True): return {}, P_zero_result - + def launch(self, to_submit=True, clean=True): """ """ if not hasattr(self, 'subproc'): - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc - + P_zero_result = [] # check the number of times where they are no phase-space - + nb_tot_proc = len(subproc) - job_list = {} + job_list = {} for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.
(previous processes already running)' % \ (nb_proc+1,nb_tot_proc), level=None) @@ -341,7 +341,7 @@ def launch(self, to_submit=True, clean=True): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) logger.info(' %s ' % subdir) - + # clean previous run if clean: for match in misc.glob('*ajob*', Pdir): @@ -349,17 +349,17 @@ def launch(self, to_submit=True, clean=True): os.remove(match) for match in misc.glob('G*', Pdir): if os.path.exists(pjoin(match,'results.dat')): - os.remove(pjoin(match, 'results.dat')) + os.remove(pjoin(match, 'results.dat')) if os.path.exists(pjoin(match, 'ftn25')): - os.remove(pjoin(match, 'ftn25')) - + os.remove(pjoin(match, 'ftn25')) + #compile gensym self.cmd.compile(['gensym'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'gensym')): - raise Exception('Error make gensym not successful') - + raise Exception('Error make gensym not successful') + # Launch gensym - p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, + p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(''.encode()) @@ -367,8 +367,8 @@ def launch(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir,'error')): files.mv(pjoin(self.me_dir,'error'), pjoin(Pdir,'ajob.no_ps.log')) P_zero_result.append(subdir) - continue - + continue + jobs = stdout.split() job_list[Pdir] = jobs try: @@ -386,8 +386,8 @@ def launch(self, to_submit=True, clean=True): continue else: if done: - raise Exception('Parsing error in gensym: %s' % stdout) - job_list[Pdir] = l.split() + raise Exception('Parsing error in gensym: %s' % stdout) + job_list[Pdir] = l.split() done = True if not done: raise Exception('Parsing error in gensym: %s' % stdout) @@ -408,16 +408,16 @@ def launch(self, to_submit=True, clean=True): if to_submit: self.submit_to_cluster(job_list) job_list = {} - + return job_list, P_zero_result - + def resubmit(self, 
min_precision=1.0, resubmit_zero=False): """collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - + job_list, P_zero_result = self.launch(to_submit=False, clean=False) - + for P , jobs in dict(job_list).items(): misc.sprint(jobs) to_resub = [] @@ -434,7 +434,7 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): elif max(one_result.xerru, one_result.xerrc)/one_result.xsec > min_precision: to_resub.append(job) else: - to_resub.append(job) + to_resub.append(job) if to_resub: for G in to_resub: try: @@ -442,19 +442,19 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): except Exception as error: misc.sprint(error) pass - misc.sprint(to_resub) + misc.sprint(to_resub) self.submit_to_cluster({P: to_resub}) - - - - - - - - - - - + + + + + + + + + + + def submit_to_cluster(self, job_list): """ """ @@ -467,7 +467,7 @@ def submit_to_cluster(self, job_list): nexternal = self.cmd.proc_characteristics['nexternal'] current = open(pjoin(path, "nexternal.inc")).read() ext = re.search(r"PARAMETER \(NEXTERNAL=(\d+)\)", current).group(1) - + if self.run_card['job_strategy'] == 2: self.splitted_grid = 2 if nexternal == int(ext): @@ -498,18 +498,18 @@ def submit_to_cluster(self, job_list): return self.submit_to_cluster_no_splitting(job_list) else: return self.submit_to_cluster_splitted(job_list) - - + + def submit_to_cluster_no_splitting(self, job_list): """submit the survey without the parralelization. 
This is the old mode which is still usefull in single core""" - - # write the template file for the parameter file + + # write the template file for the parameter file self.write_parameter(parralelization=False, Pdirs=list(job_list.keys())) - - + + # launch the job with the appropriate grouping - for Pdir, jobs in job_list.items(): + for Pdir, jobs in job_list.items(): jobs = list(jobs) i=0 while jobs: @@ -518,16 +518,16 @@ def submit_to_cluster_no_splitting(self, job_list): for _ in range(self.combining_job_for_Pdir(Pdir)): if jobs: to_submit.append(jobs.pop(0)) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=to_submit, cwd=pjoin(self.me_dir,'SubProcesses' , Pdir)) - + def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): """prepare the input_file for submitting the channel""" - + if 'SubProcesses' not in Pdir: Pdir = pjoin(self.me_dir, 'SubProcesses', Pdir) @@ -535,8 +535,8 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): self.splitted_Pdir[(Pdir, G)] = int(nb_job) - # 1. write the new input_app.txt - run_card = self.cmd.run_card + # 1. write the new input_app.txt + run_card = self.cmd.run_card options = {'event' : submit_ps, 'maxiter': 1, 'miniter': 1, @@ -545,29 +545,29 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): else run_card['nhel'], 'gridmode': -2, 'channel' : G - } - + } + Gdir = pjoin(Pdir, 'G%s' % G) - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + # 2. check that ftn25 exists. - assert os.path.exists(pjoin(Gdir, "ftn25")) - - + assert os.path.exists(pjoin(Gdir, "ftn25")) + + # 3. 
Submit the new jobs #call back function - packet = cluster.Packet((Pdir, G, step+1), + packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, (Pdir, G, step+1)) - + if step ==0: - self.lastoffset[(Pdir, G)] = 0 - - # resubmit the new jobs + self.lastoffset[(Pdir, G)] = 0 + + # resubmit the new jobs for i in range(int(nb_job)): name = "G%s_%s" % (G,i+1) self.lastoffset[(Pdir, G)] += 1 - offset = self.lastoffset[(Pdir, G)] + offset = self.lastoffset[(Pdir, G)] self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'refine_splitted.sh'), argument=[name, 'G%s'%G, offset], cwd= Pdir, @@ -575,9 +575,9 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): def submit_to_cluster_splitted(self, job_list): - """ submit the version of the survey with splitted grid creation - """ - + """ submit the version of the survey with splitted grid creation + """ + #if self.splitted_grid <= 1: # return self.submit_to_cluster_no_splitting(job_list) @@ -592,7 +592,7 @@ def submit_to_cluster_splitted(self, job_list): for job in jobs: packet = cluster.Packet((Pdir, job, 1), self.combine_iteration, (Pdir, job, 1)) - for i in range(self.splitted_for_dir(Pdir, job)): + for i in range(self.splitted_for_dir(Pdir, job)): self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[i+1, job], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), @@ -601,15 +601,15 @@ def submit_to_cluster_splitted(self, job_list): def combine_iteration(self, Pdir, G, step): grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - - # Compute the number of events used for this run. + + # Compute the number of events used for this run. nb_events = grid_calculator.target_evt Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) - + # 4. 
make the submission of the next iteration # Three cases - less than 3 iteration -> continue # - more than 3 and less than 5 -> check error @@ -627,15 +627,15 @@ def combine_iteration(self, Pdir, G, step): need_submit = False else: need_submit = True - + elif step >= self.cmd.opts['iterations']: need_submit = False elif self.cmd.opts['accuracy'] < 0: #check for luminosity raise Exception("Not Implemented") elif self.abscross[(Pdir,G)] == 0: - need_submit = False - else: + need_submit = False + else: across = self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) tot_across = self.get_current_axsec() if across == 0: @@ -646,20 +646,20 @@ def combine_iteration(self, Pdir, G, step): need_submit = True else: need_submit = False - - + + if cross: grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_events,mode=self.mode, conservative_factor=5.0) - - xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) - if float(cross)!=0.0 and float(error)!=0.0 else 8) + + xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) + if float(cross)!=0.0 and float(error)!=0.0 else 8) if need_submit: message = "%%s/G%%s is at %%%s +- %%.3g pb. 
Now submitting iteration #%s."%(xsec_format, step+1) logger.info(message%\ - (os.path.basename(Pdir), G, float(cross), + (os.path.basename(Pdir), G, float(cross), float(error)*float(cross))) self.resubmit_survey(Pdir,G, Gdirs, step) elif cross: @@ -670,26 +670,26 @@ def combine_iteration(self, Pdir, G, step): newGpath = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(newGpath): os.mkdir(newGpath) - + # copy the new grid: - files.cp(pjoin(Gdirs[0], 'ftn25'), + files.cp(pjoin(Gdirs[0], 'ftn25'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, 'ftn26')) - + # copy the events fsock = open(pjoin(newGpath, 'events.lhe'), 'w') for Gdir in Gdirs: - fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) - + fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) + # copy one log - files.cp(pjoin(Gdirs[0], 'log.txt'), + files.cp(pjoin(Gdirs[0], 'log.txt'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G)) - - + + # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) else: logger.info("Survey finished for %s/G%s [0 cross]", os.path.basename(Pdir),G) - + Gdir = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(Gdir): os.mkdir(Gdir) @@ -697,21 +697,21 @@ def combine_iteration(self, Pdir, G, step): files.cp(pjoin(Gdirs[0], 'log.txt'), Gdir) # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) - + return 0 def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): """ exclude_sub_jobs is to remove some of the subjobs if a numerical issue is detected in one of them. Warning is issue when this occurs. """ - + # 1. 
create an object to combine the grid information and fill it grid_calculator = combine_grid.grid_information(self.run_card['nhel']) - + for i in range(self.splitted_for_dir(Pdir, G)): if i in exclude_sub_jobs: continue - path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) fsock = misc.mult_try_open(pjoin(path, 'results.dat')) one_result = grid_calculator.add_results_information(fsock) fsock.close() @@ -723,9 +723,9 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): fsock.close() os.remove(pjoin(path, 'results.dat')) #os.remove(pjoin(path, 'grid_information')) - - - + + + #2. combine the information about the total crossection / error # start by keep the interation in memory cross, across, sigma = grid_calculator.get_cross_section() @@ -736,12 +736,12 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): if maxwgt: nunwgt = grid_calculator.get_nunwgt(maxwgt) # Make sure not to apply the security below during the first step of the - # survey. Also, disregard channels with a contribution relative to the + # survey. Also, disregard channels with a contribution relative to the # total cross-section smaller than 1e-8 since in this case it is unlikely # that this channel will need more than 1 event anyway. 
apply_instability_security = False rel_contrib = 0.0 - if (self.__class__ != gensym or step > 1): + if (self.__class__ != gensym or step > 1): Pdir_across = 0.0 Gdir_across = 0.0 for (mPdir,mG) in self.abscross.keys(): @@ -750,7 +750,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): (self.sigma[(mPdir,mG)]+1e-99)) if mG == G: Gdir_across += (self.abscross[(mPdir,mG)]/ - (self.sigma[(mPdir,mG)]+1e-99)) + (self.sigma[(mPdir,mG)]+1e-99)) rel_contrib = abs(Gdir_across/(Pdir_across+1e-99)) if rel_contrib > (1.0e-8) and \ nunwgt < 2 and len(grid_calculator.results) > 1: @@ -770,14 +770,14 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): exclude_sub_jobs = list(exclude_sub_jobs) exclude_sub_jobs.append(th_maxwgt[-1][1]) grid_calculator.results.run_statistics['skipped_subchannel'] += 1 - + # Add some monitoring of the problematic events - gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) + gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) if os.path.isfile(pjoin(gPath,'events.lhe')): lhe_file = lhe_parser.EventFile(pjoin(gPath,'events.lhe')) discardedPath = pjoin(Pdir,'DiscardedUnstableEvents') if not os.path.exists(discardedPath): - os.mkdir(discardedPath) + os.mkdir(discardedPath) if os.path.isdir(discardedPath): # Keep only the event with a maximum weight, as it surely # is the problematic one. 
@@ -790,10 +790,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): lhe_file.close() evtRecord.write(pjoin(gPath,'events.lhe').read()) evtRecord.close() - + return self.combine_grid(Pdir, G, step, exclude_sub_jobs) - + if across !=0: if sigma != 0: self.cross[(Pdir,G)] += cross**3/sigma**2 @@ -814,10 +814,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): self.chi2[(Pdir,G)] = 0 cross = self.cross[(Pdir,G)] error = 0 - + else: error = 0 - + grid_calculator.results.compute_values(update_statistics=True) if (str(os.path.basename(Pdir)), G) in self.run_statistics: self.run_statistics[(str(os.path.basename(Pdir)), G)]\ @@ -825,8 +825,8 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): else: self.run_statistics[(str(os.path.basename(Pdir)), G)] = \ grid_calculator.results.run_statistics - - self.warnings_from_statistics(G, grid_calculator.results.run_statistics) + + self.warnings_from_statistics(G, grid_calculator.results.run_statistics) stats_msg = grid_calculator.results.run_statistics.nice_output( '/'.join([os.path.basename(Pdir),'G%s'%G])) @@ -836,7 +836,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): # Clean up grid_information to avoid border effects in case of a crash for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) - try: + try: os.remove(pjoin(path, 'grid_information')) except OSError as oneerror: if oneerror.errno != 2: @@ -850,7 +850,7 @@ def warnings_from_statistics(self,G,stats): return EPS_fraction = float(stats['exceptional_points'])/stats['n_madloop_calls'] - + msg = "Channel %s has encountered a fraction of %.3g\n"+ \ "of numerically unstable loop matrix element computations\n"+\ "(which could not be rescued using quadruple precision).\n"+\ @@ -861,16 +861,16 @@ def warnings_from_statistics(self,G,stats): elif EPS_fraction > 0.01: logger.critical((msg%(G,EPS_fraction)).replace('might', 'can')) raise Exception((msg%(G,EPS_fraction)).replace('might', 'can')) 
- + def get_current_axsec(self): - + across = 0 for (Pdir,G) in self.abscross: across += self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) return across - + def write_results(self, grid_calculator, cross, error, Pdir, G, step): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -888,7 +888,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step): maxwgt = grid_calculator.get_max_wgt() nunwgt = grid_calculator.get_nunwgt() luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -897,20 +897,20 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s %s 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross), fstr(maxwgt)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - + def resubmit_survey(self, Pdir, G, Gdirs, step): """submit the next iteration of the survey""" # 1. write the new input_app.txt to double the number of points - run_card = self.cmd.run_card + run_card = self.cmd.run_card options = {'event' : 2**(step) * self.cmd.opts['points'] / self.splitted_grid, 'maxiter': 1, 'miniter': 1, @@ -919,18 +919,18 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): else run_card['nhel'], 'gridmode': -2, 'channel' : '' - } - + } + if int(options['helicity']) == 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + for Gdir in Gdirs: - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + + #2. 
resubmit the new jobs packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, \ - (Pdir, G, step+1)) + (Pdir, G, step+1)) nb_step = len(Gdirs) * (step+1) for i,subdir in enumerate(Gdirs): subdir = subdir.rsplit('_',1)[1] @@ -938,34 +938,34 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): offset = nb_step+i+1 offset=str(offset) tag = "%s.%s" % (subdir, offset) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[tag, G], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), packet_member=packet) - + def write_parameter_file(self, path, options): """ """ - + template =""" %(event)s %(maxiter)s %(miniter)s !Number of events and max and min iterations %(accuracy)s !Accuracy %(gridmode)s !Grid Adjustment 0=none, 2=adjust 1 !Suppress Amplitude 1=yes %(helicity)s !Helicity Sum/event 0=exact - %(channel)s """ + %(channel)s """ options['event'] = int(options['event']) open(path, 'w').write(template % options) - - + + def write_parameter(self, parralelization, Pdirs=None): """Write the parameter of the survey run""" run_card = self.cmd.run_card - + options = {'event' : self.cmd.opts['points'], 'maxiter': self.cmd.opts['iterations'], 'miniter': self.min_iterations, @@ -975,36 +975,36 @@ def write_parameter(self, parralelization, Pdirs=None): 'gridmode': 2, 'channel': '' } - + if int(options['helicity'])== 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + if parralelization: options['gridmode'] = -2 options['maxiter'] = 1 #this is automatic in dsample anyway options['miniter'] = 1 #this is automatic in dsample anyway options['event'] /= self.splitted_grid - + if not Pdirs: Pdirs = self.subproc - + for Pdir in Pdirs: - path =pjoin(Pdir, 'input_app.txt') + path =pjoin(Pdir, 'input_app.txt') self.write_parameter_file(path, options) - - -class gen_ximprove(object): - - + + +class gen_ximprove(object): + + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the 
number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job @@ -1022,7 +1022,7 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_gridpack) elif cls.force_class == 'loop_induced': return super(gen_ximprove, cls).__new__(gen_ximprove_share) - + if cmd.proc_characteristics['loop_induced']: return super(gen_ximprove, cls).__new__(gen_ximprove_share) elif gen_ximprove.format_variable(cmd.run_card['gridpack'], bool): @@ -1031,31 +1031,31 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_share) else: return super(gen_ximprove, cls).__new__(gen_ximprove_v4) - - + + def __init__(self, cmd, opt=None): - + try: super(gen_ximprove, self).__init__(cmd, opt) except TypeError: pass - + self.run_statistics = {} self.cmd = cmd self.run_card = cmd.run_card run_card = self.run_card self.me_dir = cmd.me_dir - + #extract from the run_card the information that we need. 
self.gridpack = run_card['gridpack'] self.nhel = run_card['nhel'] if "nhel_refine" in run_card: self.nhel = run_card["nhel_refine"] - + if self.run_card['refine_evt_by_job'] != -1: self.max_request_event = run_card['refine_evt_by_job'] - - + + # Default option for the run self.gen_events = True self.parralel = False @@ -1066,7 +1066,7 @@ def __init__(self, cmd, opt=None): # parameter for the gridpack run self.nreq = 2000 self.iseed = 4321 - + # placeholder for information self.results = 0 #updated in launch/update_html @@ -1074,16 +1074,16 @@ def __init__(self, cmd, opt=None): self.configure(opt) elif isinstance(opt, bannermod.GridpackCard): self.configure_gridpack(opt) - + def __call__(self): return self.launch() - + def launch(self): - """running """ - + """running """ + #start the run self.handle_seed() - self.results = sum_html.collect_result(self.cmd, + self.results = sum_html.collect_result(self.cmd, main_dir=pjoin(self.cmd.me_dir,'SubProcesses')) #main_dir is for gridpack readonly mode if self.gen_events: # We run to provide a given number of events @@ -1095,15 +1095,15 @@ def launch(self): def configure(self, opt): """Defines some parameter of the run""" - + for key, value in opt.items(): if key in self.__dict__: targettype = type(getattr(self, key)) setattr(self, key, self.format_variable(value, targettype, key)) else: raise Exception('%s not define' % key) - - + + # special treatment always do outside the loop to avoid side effect if 'err_goal' in opt: if self.err_goal < 1: @@ -1113,24 +1113,24 @@ def configure(self, opt): logger.info("Generating %s unweighted events." 
% self.err_goal) self.gen_events = True self.err_goal = self.err_goal * self.gen_events_security # security - + def handle_seed(self): """not needed but for gridpack --which is not handle here for the moment""" return - - + + def find_job_for_event(self): """return the list of channel that need to be improved""" - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) - - goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 + + goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) - all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) - + all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) + to_refine = [] for C in all_channels: if C.get('axsec') == 0: @@ -1141,61 +1141,61 @@ def find_job_for_event(self): elif C.get('xerr') > max(C.get('axsec'), (1/(100*math.sqrt(self.err_goal)))*all_channels[-1].get('axsec')): to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 
'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru + - class gen_ximprove_v4(gen_ximprove): - + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + super(gen_ximprove_v4, self).__init__(cmd, opt) - + if cmd.opts['accuracy'] < cmd._survey_options['accuracy'][1]: self.increase_precision(cmd._survey_options['accuracy'][1]/cmd.opts['accuracy']) @@ -1203,7 +1203,7 @@ def reset_multijob(self): for path in misc.glob(pjoin('*', '*','multijob.dat'), pjoin(self.me_dir, 'SubProcesses')): open(path,'w').write('0\n') - + def write_multijob(self, Channel, nb_split): """ """ if nb_split <=1: @@ -1211,7 +1211,7 @@ def write_multijob(self, Channel, nb_split): f = open(pjoin(self.me_dir, 'SubProcesses', Channel.get('name'), 'multijob.dat'), 'w') f.write('%i\n' % nb_split) f.close() - + def increase_precision(self, rate=3): #misc.sprint(rate) if rate < 3: @@ -1222,25 +1222,25 @@ def increase_precision(self, rate=3): rate = rate -2 self.max_event_in_iter = int((rate+1) * 10000) self.min_events = int(rate+2) * 2500 - self.gen_events_security = 1 + 0.1 * (rate+2) - + 
self.gen_events_security = 1 + 0.1 * (rate+2) + if int(self.nhel) == 1: self.min_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//3) self.max_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//2) - - + + alphabet = "abcdefghijklmnopqrstuvwxyz" def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() #reset the potential multijob of previous run self.reset_multijob() - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. @@ -1257,17 +1257,17 @@ def get_job_for_event(self): else: for i in range(len(to_refine) //3): new_order.append(to_refine[i]) - new_order.append(to_refine[-2*i-1]) + new_order.append(to_refine[-2*i-1]) new_order.append(to_refine[-2*i-2]) if len(to_refine) % 3 == 1: - new_order.append(to_refine[i+1]) + new_order.append(to_refine[i+1]) elif len(to_refine) % 3 == 2: - new_order.append(to_refine[i+2]) + new_order.append(to_refine[i+2]) #ensure that the reordering is done nicely assert set([id(C) for C in to_refine]) == set([id(C) for C in new_order]) - to_refine = new_order - - + to_refine = new_order + + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target @@ -1279,7 +1279,7 @@ def get_job_for_event(self): nb_split = self.max_splitting nb_split=max(1, nb_split) - + #2. estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1296,21 +1296,21 @@ def get_job_for_event(self): nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. 
Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + # write the multi-job information self.write_multijob(C, nb_split) - + packet = cluster.Packet((C.parent_name, C.name), combine_runs.CombineRuns, (pjoin(self.me_dir, 'SubProcesses', C.parent_name)), {"subproc": C.name, "nb_split":nb_split}) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1321,7 +1321,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': packet, + 'packet': packet, } if nb_split == 1: @@ -1334,19 +1334,19 @@ def get_job_for_event(self): if self.keep_grid_for_refine: new_info['base_directory'] = info['directory'] jobs.append(new_info) - - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def create_ajob(self, template, jobs, write_dir=None): """create the ajob""" - + if not jobs: return if not write_dir: write_dir = pjoin(self.me_dir, 'SubProcesses') - + #filter the job according to their SubProcess directory # no mix submition P2job= collections.defaultdict(list) for j in jobs: @@ -1355,11 +1355,11 @@ def create_ajob(self, template, jobs, write_dir=None): for P in P2job.values(): self.create_ajob(template, P, write_dir) return - - + + #Here we can assume that all job are for the same directory. 
path = pjoin(write_dir, jobs[0]['P_dir']) - + template_text = open(template, 'r').read() # special treatment if needed to combine the script # computes how many submition miss one job @@ -1384,8 +1384,8 @@ def create_ajob(self, template, jobs, write_dir=None): skip1=0 combining_job =1 nb_sub = len(jobs) - - + + nb_use = 0 for i in range(nb_sub): script_number = i+1 @@ -1404,14 +1404,14 @@ def create_ajob(self, template, jobs, write_dir=None): info["base_directory"] = "./" fsock.write(template_text % info) nb_use += nb_job - + fsock.close() return script_number def get_job_for_precision(self): """create the ajob to achieve a give precision on the total cross-section""" - + assert self.err_goal <=1 xtot = abs(self.results.xsec) logger.info("Working on precision: %s %%" %(100*self.err_goal)) @@ -1428,46 +1428,46 @@ def get_job_for_precision(self): rerr *=rerr if not len(to_refine): return - - # change limit since most don't contribute + + # change limit since most don't contribute limit = math.sqrt((self.err_goal * xtot)**2 - rerr/math.sqrt(len(to_refine))) for C in to_refine[:]: cerr = C.mfactor*(C.xerru + len(to_refine)*C.xerrc) if cerr < limit: to_refine.remove(C) - + # all the channel are now selected. create the channel information logger.info('need to improve %s channels' % len(to_refine)) - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. # loop over the channel to refine for C in to_refine: - + #1. 
Determine how many events we need in each iteration yerr = C.mfactor*(C.xerru+len(to_refine)*C.xerrc) nevents = 0.2*C.nevents*(yerr/limit)**2 - + nb_split = int((nevents*(C.nunwgt/C.nevents)/self.max_request_event/ (2**self.min_iter-1))**(2/3)) nb_split = max(nb_split, 1) - # **(2/3) to slow down the increase in number of jobs + # **(2/3) to slow down the increase in number of jobs if nb_split > self.max_splitting: nb_split = self.max_splitting - + if nb_split >1: nevents = nevents / nb_split self.write_multijob(C, nb_split) # forbid too low/too large value nevents = min(self.min_event_in_iter, max(self.max_event_in_iter, nevents)) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1487,38 +1487,38 @@ def get_job_for_precision(self): new_info['offset'] = i+1 new_info['directory'] += self.alphabet[i % 26] + str((i+1)//26) jobs.append(new_info) - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 
'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru @@ -1528,27 +1528,27 @@ class gen_ximprove_v4_nogridupdate(gen_ximprove_v4): # some hardcoded value which impact the generation gen_events_security = 1.1 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 400 # split jobs if a channel if it needs more than that + max_request_event = 400 # split jobs if a channel if it needs more than that max_event_in_iter = 500 min_event_in_iter = 250 - max_splitting = 260 # maximum duplication of a given channel - min_iter = 2 + max_splitting = 260 # maximum duplication of a given channel + min_iter = 2 max_iter = 6 keep_grid_for_refine = True - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + gen_ximprove.__init__(cmd, opt) - + if cmd.proc_characteristics['loopinduced'] and \ cmd.proc_characteristics['nexternal'] > 2: self.increase_parralelization(cmd.proc_characteristics['nexternal']) - + def increase_parralelization(self, nexternal): - self.max_splitting = 1000 - + self.max_splitting = 1000 + if self.run_card['refine_evt_by_job'] != -1: pass elif nexternal == 3: @@ -1563,27 +1563,27 @@ def increase_parralelization(self, nexternal): class gen_ximprove_share(gen_ximprove, gensym): """Doing the refine in multicore. Each core handle a couple of PS point.""" - nb_ps_by_job = 2000 + nb_ps_by_job = 2000 mode = "refine" gen_events_security = 1.15 # Note the real security is lower since we stop the jobs if they are at 96% # of this target. 
def __init__(self, *args, **opts): - + super(gen_ximprove_share, self).__init__(*args, **opts) self.generated_events = {} self.splitted_for_dir = lambda x,y : self.splitted_Pdir[(x,y)] - + def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - + goal_lum, to_refine = self.find_job_for_event() self.goal_lum = goal_lum - + # loop over the channel to refine to find the number of PS point to launch total_ps_points = 0 channel_to_ps_point = [] @@ -1593,7 +1593,7 @@ def get_job_for_event(self): os.remove(pjoin(self.me_dir, "SubProcesses",C.parent_name, C.name, "events.lhe")) except: pass - + #1. Compute the number of points are needed to reach target needed_event = goal_lum*C.get('axsec') if needed_event == 0: @@ -1609,18 +1609,18 @@ def get_job_for_event(self): nb_split = 1 if nb_split > self.max_splitting: nb_split = self.max_splitting - nevents = self.max_event_in_iter * self.max_splitting + nevents = self.max_event_in_iter * self.max_splitting else: nevents = self.max_event_in_iter * nb_split if nevents > self.max_splitting*self.max_event_in_iter: logger.warning("Channel %s/%s has a very low efficiency of unweighting. 
Might not be possible to reach target" % \ (C.name, C.parent_name)) - nevents = self.max_event_in_iter * self.max_splitting - - total_ps_points += nevents - channel_to_ps_point.append((C, nevents)) - + nevents = self.max_event_in_iter * self.max_splitting + + total_ps_points += nevents + channel_to_ps_point.append((C, nevents)) + if self.cmd.options["run_mode"] == 1: if self.cmd.options["cluster_size"]: nb_ps_by_job = total_ps_points /int(self.cmd.options["cluster_size"]) @@ -1634,7 +1634,7 @@ def get_job_for_event(self): nb_ps_by_job = total_ps_points / self.cmd.options["nb_core"] else: nb_ps_by_job = self.nb_ps_by_job - + nb_ps_by_job = int(max(nb_ps_by_job, 500)) for C, nevents in channel_to_ps_point: @@ -1648,20 +1648,20 @@ def get_job_for_event(self): self.create_resubmit_one_iter(C.parent_name, C.name[1:], submit_ps, nb_job, step=0) needed_event = goal_lum*C.get('xsec') logger.debug("%s/%s : need %s event. Need %s split job of %s points", C.parent_name, C.name, needed_event, nb_job, submit_ps) - - + + def combine_iteration(self, Pdir, G, step): - + grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - + # collect all the generated_event Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) assert len(grid_calculator.results) == len(Gdirs) == self.splitted_for_dir(Pdir, G) - - + + # Check how many events are going to be kept after un-weighting. needed_event = cross * self.goal_lum if needed_event == 0: @@ -1671,19 +1671,19 @@ def combine_iteration(self, Pdir, G, step): if self.err_goal >=1: if needed_event > self.gen_events_security * self.err_goal: needed_event = int(self.gen_events_security * self.err_goal) - + if (Pdir, G) in self.generated_events: old_nunwgt, old_maxwgt = self.generated_events[(Pdir, G)] else: old_nunwgt, old_maxwgt = 0, 0 - + if old_nunwgt == 0 and os.path.exists(pjoin(Pdir,"G%s" % G, "events.lhe")): # possible for second refine. 
lhe = lhe_parser.EventFile(pjoin(Pdir,"G%s" % G, "events.lhe")) old_nunwgt = lhe.unweight(None, trunc_error=0.005, log_level=0) old_maxwgt = lhe.max_wgt - - + + maxwgt = max(grid_calculator.get_max_wgt(), old_maxwgt) new_evt = grid_calculator.get_nunwgt(maxwgt) @@ -1695,35 +1695,35 @@ def combine_iteration(self, Pdir, G, step): one_iter_nb_event = max(grid_calculator.get_nunwgt(),1) drop_previous_iteration = False # compare the number of events to generate if we discard the previous iteration - n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) n_target_combined = (needed_event-nunwgt) / efficiency if n_target_one_iter < n_target_combined: # the last iteration alone has more event that the combine iteration. - # it is therefore interesting to drop previous iteration. + # it is therefore interesting to drop previous iteration. drop_previous_iteration = True nunwgt = one_iter_nb_event maxwgt = grid_calculator.get_max_wgt() new_evt = nunwgt - efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) - + efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + try: if drop_previous_iteration: raise IOError output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'a') except IOError: output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'w') - + misc.call(["cat"] + [pjoin(d, "events.lhe") for d in Gdirs], stdout=output_file) output_file.close() # For large number of iteration. check the number of event by doing the # real unweighting. 
- if nunwgt < 0.6 * needed_event and step > self.min_iter: + if nunwgt < 0.6 * needed_event and step > self.min_iter: lhe = lhe_parser.EventFile(output_file.name) old_nunwgt =nunwgt nunwgt = lhe.unweight(None, trunc_error=0.01, log_level=0) - - + + self.generated_events[(Pdir, G)] = (nunwgt, maxwgt) # misc.sprint("Adding %s event to %s. Currently at %s" % (new_evt, G, nunwgt)) @@ -1742,21 +1742,21 @@ def combine_iteration(self, Pdir, G, step): nevents = grid_calculator.results[0].nevents if nevents == 0: # possible if some integral returns 0 nevents = max(g.nevents for g in grid_calculator.results) - + need_ps_point = (needed_event - nunwgt)/(efficiency+1e-99) - need_job = need_ps_point // nevents + 1 - + need_job = need_ps_point // nevents + 1 + if step < self.min_iter: # This is normal but check if we are on the good track - job_at_first_iter = nb_split_before/2**(step-1) + job_at_first_iter = nb_split_before/2**(step-1) expected_total_job = job_at_first_iter * (2**self.min_iter-1) done_job = job_at_first_iter * (2**step-1) expected_remaining_job = expected_total_job - done_job - logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) + logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) # increase if needed but not too much need_job = min(need_job, expected_remaining_job*1.25) - + nb_job = (need_job-0.5)//(2**(self.min_iter-step)-1) + 1 nb_job = max(1, nb_job) grid_calculator.write_grid_for_submission(Pdir,G, @@ -1768,7 +1768,7 @@ def combine_iteration(self, Pdir, G, step): nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) #self.create_job(Pdir, G, nb_job, nevents, step) - + elif step < self.max_iter: if step + 1 == self.max_iter: need_job = 1.20 * need_job # avoid to have just too few event. 
@@ -1777,21 +1777,21 @@ def combine_iteration(self, Pdir, G, step): grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_job*nevents ,mode=self.mode, conservative_factor=self.max_iter) - - + + logger.info("%s/G%s is at %i/%i ('%.2g%%') event. Resubmit %i job at iteration %i." \ % (os.path.basename(Pdir), G, int(nunwgt),int(needed_event)+1, (float(nunwgt)/needed_event)*100.0 if needed_event>0.0 else 0.0, nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) - - + + return 0 - - + + def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -1807,7 +1807,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency nevents = nunwgt # make the unweighting to compute the number of events: luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -1816,23 +1816,23 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s 0.0 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - - - + + + class gen_ximprove_gridpack(gen_ximprove_v4): - - min_iter = 1 + + min_iter = 1 max_iter = 13 - max_request_event = 1e12 # split jobs if a channel if it needs more than that + max_request_event = 1e12 # split jobs if a channel if it needs more than that max_event_in_iter = 4000 min_event_in_iter = 500 combining_job = sys.maxsize @@ -1844,7 +1844,7 @@ def __new__(cls, *args, **opts): return super(gen_ximprove_gridpack, cls).__new__(cls, *args, **opts) def __init__(self, *args, **opts): - + self.ngran = -1 self.gscalefact = {} self.readonly = False 
@@ -1855,23 +1855,23 @@ def __init__(self, *args, **opts): self.readonly = opts['readonly'] super(gen_ximprove_gridpack,self).__init__(*args, **opts) if self.ngran == -1: - self.ngran = 1 - + self.ngran = 1 + def find_job_for_event(self): """return the list of channel that need to be improved""" import random - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) self.gscalefact = {} - + xtot = self.results.axsec - goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 + goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 # logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) all_channels.sort(key=lambda x : x.get('luminosity'), reverse=True) - + to_refine = [] for C in all_channels: tag = C.get('name') @@ -1885,27 +1885,27 @@ def find_job_for_event(self): #need to generate events logger.debug('request events for ', C.get('name'), 'cross=', C.get('axsec'), 'needed events = ', goal_lum * C.get('axsec')) - to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + to_refine.append(C) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. - + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target needed_event = max(goal_lum*C.get('axsec'), self.ngran) nb_split = 1 - + #2. 
estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1920,13 +1920,13 @@ def get_job_for_event(self): # forbid too low/too large value nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': os.path.basename(C.parent_name), + 'P_dir': os.path.basename(C.parent_name), 'offset': 1, # need to be change for splitted job 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'nevents': nevents, #int(nevents*self.gen_events_security)+1, @@ -1938,7 +1938,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': None, + 'packet': None, } if self.readonly: @@ -1946,11 +1946,11 @@ def get_job_for_event(self): info['base_directory'] = basedir jobs.append(info) - - write_dir = '.' if self.readonly else None - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) - + + write_dir = '.' if self.readonly else None + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) + done = [] for j in jobs: if j['P_dir'] in done: @@ -1967,22 +1967,22 @@ def get_job_for_event(self): write_dir = '.' 
if self.readonly else pjoin(self.me_dir, 'SubProcesses') self.check_events(goal_lum, to_refine, jobs, write_dir) - + def check_events(self, goal_lum, to_refine, jobs, Sdir): """check that we get the number of requested events if not resubmit.""" - + new_jobs = [] - + for C, job_info in zip(to_refine, jobs): - P = job_info['P_dir'] + P = job_info['P_dir'] G = job_info['channel'] axsec = C.get('axsec') - requested_events= job_info['requested_event'] - + requested_events= job_info['requested_event'] + new_results = sum_html.OneResult((P,G)) new_results.read_results(pjoin(Sdir,P, 'G%s'%G, 'results.dat')) - + # need to resubmit? if new_results.get('nunwgt') < requested_events: pwd = pjoin(os.getcwd(),job_info['P_dir'],'G%s'%G) if self.readonly else \ @@ -1992,10 +1992,10 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): job_info['offset'] += 1 new_jobs.append(job_info) files.mv(pjoin(pwd, 'events.lhe'), pjoin(pwd, 'events.lhe.previous')) - + if new_jobs: - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) + done = [] for j in new_jobs: if j['P_dir'] in done: @@ -2015,9 +2015,9 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): files.put_at_end(pjoin(pwd, 'events.lhe'),pjoin(pwd, 'events.lhe.previous')) return self.check_events(goal_lum, to_refine, new_jobs, Sdir) - - - - + + + + diff --git a/epochX/cudacpp/gg_ttgg.mad/bin/internal/madevent_interface.py b/epochX/cudacpp/gg_ttgg.mad/bin/internal/madevent_interface.py index cb6bf4ca57..8abba3f33f 100755 --- a/epochX/cudacpp/gg_ttgg.mad/bin/internal/madevent_interface.py +++ b/epochX/cudacpp/gg_ttgg.mad/bin/internal/madevent_interface.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # 
automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,10 +53,10 @@ # Special logger for the Cmd Interface logger = logging.getLogger('madevent.stdout') # -> stdout logger_stderr = logging.getLogger('madevent.stderr') # ->stderr - + try: import madgraph -except ImportError as error: +except ImportError as error: # import from madevent directory MADEVENT = True import internal.extended_cmd as cmd @@ -92,7 +92,7 @@ import madgraph.various.lhe_parser as lhe_parser # import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code import models.check_param_card as check_param_card - from madgraph.iolibs.files import ln + from madgraph.iolibs.files import ln from madgraph import InvalidCmd, MadGraph5Error, MG5DIR, ReadWrite @@ -113,10 +113,10 @@ class CmdExtended(common_run.CommonRunCmd): next_possibility = { 'start': [], } - + debug_output = 'ME5_debug' error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n' - error_debug += 'More information is found in \'%(debug)s\'.\n' + error_debug += 'More information is found in \'%(debug)s\'.\n' error_debug += 'Please attach this file to your report.' 
config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n' @@ -124,18 +124,18 @@ class CmdExtended(common_run.CommonRunCmd): keyboard_stop_msg = """stopping all operation in order to quit MadGraph5_aMC@NLO please enter exit""" - + # Define the Error InvalidCmd = InvalidCmd ConfigurationError = MadGraph5Error def __init__(self, me_dir, options, *arg, **opt): """Init history and line continuation""" - + # Tag allowing/forbiding question self.force = False - - # If possible, build an info line with current version number + + # If possible, build an info line with current version number # and date, from the VERSION text file info = misc.get_pkg_info() info_line = "" @@ -150,7 +150,7 @@ def __init__(self, me_dir, options, *arg, **opt): else: version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip() info_line = "#* VERSION %s %s *\n" % \ - (version, (24 - len(version)) * ' ') + (version, (24 - len(version)) * ' ') # Create a header for the history file. # Remember to fill in time at writeout time! 
@@ -177,7 +177,7 @@ def __init__(self, me_dir, options, *arg, **opt): '#* run as ./bin/madevent.py filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - + if info_line: info_line = info_line[1:] @@ -203,11 +203,11 @@ def __init__(self, me_dir, options, *arg, **opt): "* *\n" + \ "************************************************************") super(CmdExtended, self).__init__(me_dir, options, *arg, **opt) - + def get_history_header(self): - """return the history header""" + """return the history header""" return self.history_header % misc.get_time_info() - + def stop_on_keyboard_stop(self): """action to perform to close nicely on a keyboard interupt""" try: @@ -219,20 +219,20 @@ def stop_on_keyboard_stop(self): self.add_error_log_in_html(KeyboardInterrupt) except: pass - + def postcmd(self, stop, line): """ Update the status of the run for finishing interactive command """ - - stop = super(CmdExtended, self).postcmd(stop, line) + + stop = super(CmdExtended, self).postcmd(stop, line) # relaxing the tag forbidding question self.force = False - + if not self.use_rawinput: return stop - + if self.results and not self.results.current: return stop - + arg = line.split() if len(arg) == 0: return stop @@ -240,41 +240,41 @@ def postcmd(self, stop, line): return stop if isinstance(self.results.status, str) and self.results.status == 'Stop by the user': self.update_status('%s Stop by the user' % arg[0], level=None, error=True) - return stop + return stop elif not self.results.status: return stop elif str(arg[0]) in ['exit','quit','EOF']: return stop - + try: - self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], + self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], level=None, error=True) except Exception: misc.sprint('update_status fails') pass - - + + def nice_user_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() - return cmd.Cmd.nice_user_error(self, error, line) - + return cmd.Cmd.nice_user_error(self, error, line) + def nice_config_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() stop = cmd.Cmd.nice_config_error(self, error, line) - - + + try: debug_file = open(self.debug_output, 'a') debug_file.write(open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat'))) debug_file.close() except: - pass + pass return stop - + def nice_error_handling(self, error, line): """If a ME run is currently running add a link in the html output""" @@ -294,7 +294,7 @@ def nice_error_handling(self, error, line): proc_card = pjoin(self.me_dir,'Cards','proc_card_mg5.dat') if os.path.exists(proc_card): self.banner.add(proc_card) - + out_dir = pjoin(self.me_dir, 'Events', self.run_name) if not os.path.isdir(out_dir): os.mkdir(out_dir) @@ -307,7 +307,7 @@ def nice_error_handling(self, error, line): else: pass else: - self.add_error_log_in_html() + self.add_error_log_in_html() stop = cmd.Cmd.nice_error_handling(self, error, line) try: debug_file = open(self.debug_output, 'a') @@ -316,14 +316,14 @@ def nice_error_handling(self, error, line): except: pass return stop - - + + #=============================================================================== # HelpToCmd #=============================================================================== class HelpToCmd(object): """ The Series of help routine for the MadEventCmd""" - + def help_pythia(self): logger.info("syntax: pythia [RUN] [--run_options]") logger.info("-- run pythia on RUN (current one by default)") @@ -352,29 +352,29 @@ def help_banner_run(self): logger.info(" Path should be the path of a valid banner.") 
logger.info(" RUN should be the name of a run of the current directory") self.run_options_help([('-f','answer all question by default'), - ('--name=X', 'Define the name associated with the new run')]) - + ('--name=X', 'Define the name associated with the new run')]) + def help_open(self): logger.info("syntax: open FILE ") logger.info("-- open a file with the appropriate editor.") logger.info(' If FILE belongs to index.html, param_card.dat, run_card.dat') logger.info(' the path to the last created/used directory is used') logger.info(' The program used to open those files can be chosen in the') - logger.info(' configuration file ./input/mg5_configuration.txt') - - + logger.info(' configuration file ./input/mg5_configuration.txt') + + def run_options_help(self, data): if data: logger.info('-- local options:') for name, info in data: logger.info(' %s : %s' % (name, info)) - + logger.info("-- session options:") - logger.info(" Note that those options will be kept for the current session") + logger.info(" Note that those options will be kept for the current session") logger.info(" --cluster : Submit to the cluster. 
Current cluster: %s" % self.options['cluster_type']) logger.info(" --multicore : Run in multi-core configuration") logger.info(" --nb_core=X : limit the number of core to use to X.") - + def help_generate_events(self): logger.info("syntax: generate_events [run_name] [options]",) @@ -398,16 +398,16 @@ def help_initMadLoop(self): logger.info(" -f : Bypass the edition of MadLoopParams.dat.",'$MG:color:BLUE') logger.info(" -r : Refresh of the existing filters (erasing them if already present).",'$MG:color:BLUE') logger.info(" --nPS= : Specify how many phase-space points should be tried to set up the filters.",'$MG:color:BLUE') - + def help_calculate_decay_widths(self): - + if self.ninitial != 1: logger.warning("This command is only valid for processes of type A > B C.") logger.warning("This command can not be run in current context.") logger.warning("") - + logger.info("syntax: calculate_decay_widths [run_name] [options])") logger.info("-- Calculate decay widths and enter widths and BRs in param_card") logger.info(" for a series of processes of type A > B C ...") @@ -428,8 +428,8 @@ def help_survey(self): logger.info("-- evaluate the different channel associate to the process") self.run_options_help([("--" + key,value[-1]) for (key,value) in \ self._survey_options.items()]) - - + + def help_restart_gridpack(self): logger.info("syntax: restart_gridpack --precision= --restart_zero") @@ -439,14 +439,14 @@ def help_launch(self): logger.info("syntax: launch [run_name] [options])") logger.info(" --alias for either generate_events/calculate_decay_widths") logger.info(" depending of the number of particles in the initial state.") - + if self.ninitial == 1: logger.info("For this directory this is equivalent to calculate_decay_widths") self.help_calculate_decay_widths() else: logger.info("For this directory this is equivalent to $generate_events") self.help_generate_events() - + def help_refine(self): logger.info("syntax: refine require_precision [max_channel] [--run_options]") 
logger.info("-- refine the LAST run to achieve a given precision.") @@ -454,14 +454,14 @@ def help_refine(self): logger.info(' or the required relative error') logger.info(' max_channel:[5] maximal number of channel per job') self.run_options_help([]) - + def help_combine_events(self): """ """ logger.info("syntax: combine_events [run_name] [--tag=tag_name] [--run_options]") logger.info("-- Combine the last run in order to write the number of events") logger.info(" asked in the run_card.") self.run_options_help([]) - + def help_store_events(self): """ """ logger.info("syntax: store_events [--run_options]") @@ -481,7 +481,7 @@ def help_import(self): logger.info("syntax: import command PATH") logger.info("-- Execute the command present in the file") self.run_options_help([]) - + def help_syscalc(self): logger.info("syntax: syscalc [RUN] [%s] [-f | --tag=]" % '|'.join(self._plot_mode)) logger.info("-- calculate systematics information for the RUN (current run by default)") @@ -506,18 +506,18 @@ class AskRun(cmd.ControlSwitch): ('madspin', 'Decay onshell particles'), ('reweight', 'Add weights to events for new hypp.') ] - + def __init__(self, question, line_args=[], mode=None, force=False, *args, **opt): - + self.check_available_module(opt['mother_interface'].options) self.me_dir = opt['mother_interface'].me_dir super(AskRun,self).__init__(self.to_control, opt['mother_interface'], *args, **opt) - - + + def check_available_module(self, options): - + self.available_module = set() if options['pythia-pgs_path']: self.available_module.add('PY6') @@ -540,32 +540,32 @@ def check_available_module(self, options): self.available_module.add('Rivet') else: logger.warning("Rivet program installed but no parton shower with hepmc output detected.\n Please install pythia8") - + if not MADEVENT or ('mg5_path' in options and options['mg5_path']): self.available_module.add('MadSpin') if misc.has_f2py() or options['f2py_compiler']: self.available_module.add('reweight') -# old mode to 
activate the shower +# old mode to activate the shower def ans_parton(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if value is None: self.set_all_off() else: logger.warning('Invalid command: parton=%s' % value) - - + + # -# HANDLING SHOWER +# HANDLING SHOWER # def get_allowed_shower(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_shower'): return self.allowed_shower - + self.allowed_shower = [] if 'PY6' in self.available_module: self.allowed_shower.append('Pythia6') @@ -574,9 +574,9 @@ def get_allowed_shower(self): if self.allowed_shower: self.allowed_shower.append('OFF') return self.allowed_shower - + def set_default_shower(self): - + if 'PY6' in self.available_module and\ os.path.exists(pjoin(self.me_dir,'Cards','pythia_card.dat')): self.switch['shower'] = 'Pythia6' @@ -590,10 +590,10 @@ def set_default_shower(self): def check_value_shower(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_shower(): return True - + value =value.lower() if value in ['py6','p6','pythia_6'] and 'PY6' in self.available_module: return 'Pythia6' @@ -601,13 +601,13 @@ def check_value_shower(self, value): return 'Pythia8' else: return False - - -# old mode to activate the shower + + +# old mode to activate the shower def ans_pythia(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if 'PY6' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return @@ -621,13 +621,13 @@ def ans_pythia(self, value=None): self.set_switch('shower', 'OFF') else: logger.warning('Invalid command: pythia=%s' % value) - - + + def consistency_shower_detector(self, vshower, vdetector): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower == 'OFF': @@ -635,35 +635,35 @@ def consistency_shower_detector(self, vshower, vdetector): return 'OFF' if vshower == 'Pythia8' and vdetector == 'PGS': return 'OFF' - + return None - + # # HANDLING DETECTOR # def get_allowed_detector(self): """return valid entry for the switch""" - + if hasattr(self, 'allowed_detector'): - return self.allowed_detector - + return self.allowed_detector + self.allowed_detector = [] if 'PGS' in self.available_module: self.allowed_detector.append('PGS') if 'Delphes' in self.available_module: self.allowed_detector.append('Delphes') - + if self.allowed_detector: self.allowed_detector.append('OFF') - return self.allowed_detector + return self.allowed_detector def set_default_detector(self): - + self.set_default_shower() #ensure that this one is called first! - + if 'PGS' in self.available_module and self.switch['shower'] == 'Pythia6'\ and os.path.exists(pjoin(self.me_dir,'Cards','pgs_card.dat')): self.switch['detector'] = 'PGS' @@ -674,16 +674,16 @@ def set_default_detector(self): self.switch['detector'] = 'OFF' else: self.switch['detector'] = 'Not Avail.' - -# old mode to activate pgs + +# old mode to activate pgs def ans_pgs(self, value=None): """None: means that the user type 'pgs' - value: means that the user type pgs=value""" - + value: means that the user type pgs=value""" + if 'PGS' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return - + if value is None: self.set_all_off() self.switch['shower'] = 'Pythia6' @@ -696,16 +696,16 @@ def ans_pgs(self, value=None): else: logger.warning('Invalid command: pgs=%s' % value) - + # old mode to activate Delphes def ans_delphes(self, value=None): """None: means that the user type 'delphes' - value: means that the user type delphes=value""" - + value: means that the user type delphes=value""" + if 'Delphes' not in self.available_module: logger.warning('Delphes not available. Ignore commmand') return - + if value is None: self.set_all_off() if 'PY6' in self.available_module: @@ -718,15 +718,15 @@ def ans_delphes(self, value=None): elif value == 'off': self.set_switch('detector', 'OFF') else: - logger.warning('Invalid command: pgs=%s' % value) + logger.warning('Invalid command: pgs=%s' % value) def consistency_detector_shower(self,vdetector, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ - + if vdetector == 'PGS' and vshower != 'Pythia6': return 'Pythia6' if vdetector == 'Delphes' and vshower not in ['Pythia6', 'Pythia8']: @@ -744,28 +744,28 @@ def consistency_detector_shower(self,vdetector, vshower): # def get_allowed_analysis(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_analysis'): return self.allowed_analysis - + self.allowed_analysis = [] if 'ExRoot' in self.available_module: self.allowed_analysis.append('ExRoot') if 'MA4' in self.available_module: self.allowed_analysis.append('MadAnalysis4') if 'MA5' in self.available_module: - self.allowed_analysis.append('MadAnalysis5') + self.allowed_analysis.append('MadAnalysis5') if 'Rivet' in self.available_module: - self.allowed_analysis.append('Rivet') - + self.allowed_analysis.append('Rivet') + if self.allowed_analysis: self.allowed_analysis.append('OFF') - + return 
self.allowed_analysis - + def check_analysis(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_analysis(): return True if value.lower() in ['ma4', 'madanalysis4', 'madanalysis_4','4']: @@ -786,30 +786,30 @@ def consistency_shower_analysis(self, vshower, vanalysis): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'OFF' #new value for analysis - + return None - + def consistency_analysis_shower(self, vanalysis, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'Pythia8' #new value for analysis - + return None def set_default_analysis(self): """initialise the switch for analysis""" - + if 'MA4' in self.available_module and \ os.path.exists(pjoin(self.me_dir,'Cards','plot_card.dat')): self.switch['analysis'] = 'MadAnalysis4' @@ -818,46 +818,46 @@ def set_default_analysis(self): or os.path.exists(pjoin(self.me_dir,'Cards', 'madanalysis5_hadron_card.dat'))): self.switch['analysis'] = 'MadAnalysis5' elif 'ExRoot' in self.available_module: - self.switch['analysis'] = 'ExRoot' - elif self.get_allowed_analysis(): + self.switch['analysis'] = 'ExRoot' + elif self.get_allowed_analysis(): self.switch['analysis'] = 'OFF' else: self.switch['analysis'] = 'Not Avail.' 
- + # # MADSPIN handling # def get_allowed_madspin(self): """ ON|OFF|onshell """ - + if hasattr(self, 'allowed_madspin'): return self.allowed_madspin - + self.allowed_madspin = [] if 'MadSpin' in self.available_module: self.allowed_madspin = ['OFF',"ON",'onshell',"full"] return self.allowed_madspin - + def check_value_madspin(self, value): """handle alias and valid option not present in get_allowed_madspin""" - + if value.upper() in self.get_allowed_madspin(): return True elif value.lower() in self.get_allowed_madspin(): return True - + if 'MadSpin' not in self.available_module: return False - + if value.lower() in ['madspin', 'full']: return 'full' elif value.lower() in ['none']: return 'none' - - + + def set_default_madspin(self): """initialise the switch for madspin""" - + if 'MadSpin' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): self.switch['madspin'] = 'ON' @@ -865,10 +865,10 @@ def set_default_madspin(self): self.switch['madspin'] = 'OFF' else: self.switch['madspin'] = 'Not Avail.' 
- + def get_cardcmd_for_madspin(self, value): """set some command to run before allowing the user to modify the cards.""" - + if value == 'onshell': return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"] elif value in ['full', 'madspin']: @@ -877,36 +877,36 @@ def get_cardcmd_for_madspin(self, value): return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"] else: return [] - + # # ReWeight handling # def get_allowed_reweight(self): """ return the list of valid option for reweight=XXX """ - + if hasattr(self, 'allowed_reweight'): return getattr(self, 'allowed_reweight') - + if 'reweight' not in self.available_module: self.allowed_reweight = [] return self.allowed_reweight = ['OFF', 'ON'] - + # check for plugin mode plugin_path = self.mother_interface.plugin_path opts = misc.from_plugin_import(plugin_path, 'new_reweight', warning=False) self.allowed_reweight += opts - + def set_default_reweight(self): """initialise the switch for reweight""" - + if 'reweight' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')): self.switch['reweight'] = 'ON' else: self.switch['reweight'] = 'OFF' else: - self.switch['reweight'] = 'Not Avail.' + self.switch['reweight'] = 'Not Avail.' 
#=============================================================================== # CheckValidForCmd @@ -916,14 +916,14 @@ class CheckValidForCmd(object): def check_banner_run(self, args): """check the validity of line""" - + if len(args) == 0: self.help_banner_run() raise self.InvalidCmd('banner_run requires at least one argument.') - + tag = [a[6:] for a in args if a.startswith('--tag=')] - - + + if os.path.exists(args[0]): type ='banner' format = self.detect_card_type(args[0]) @@ -931,7 +931,7 @@ def check_banner_run(self, args): raise self.InvalidCmd('The file is not a valid banner.') elif tag: args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) + (args[0], tag)) if not os.path.exists(args[0]): raise self.InvalidCmd('No banner associates to this name and tag.') else: @@ -939,7 +939,7 @@ def check_banner_run(self, args): type = 'run' banners = misc.glob('*_banner.txt', pjoin(self.me_dir,'Events', args[0])) if not banners: - raise self.InvalidCmd('No banner associates to this name.') + raise self.InvalidCmd('No banner associates to this name.') elif len(banners) == 1: args[0] = banners[0] else: @@ -947,8 +947,8 @@ def check_banner_run(self, args): tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners] tag = self.ask('which tag do you want to use?', tags[0], tags) args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) - + (args[0], tag)) + run_name = [arg[7:] for arg in args if arg.startswith('--name=')] if run_name: try: @@ -970,14 +970,14 @@ def check_banner_run(self, args): except Exception: pass self.set_run_name(name) - + def check_history(self, args): """check the validity of line""" - + if len(args) > 1: self.help_history() raise self.InvalidCmd('\"history\" command takes at most one argument') - + if not len(args): return elif args[0] != 'clean': @@ -985,16 +985,16 @@ def check_history(self, args): if dirpath and not os.path.exists(dirpath) or \ os.path.isdir(args[0]): raise 
self.InvalidCmd("invalid path %s " % dirpath) - + def check_save(self, args): """ check the validity of the line""" - + if len(args) == 0: args.append('options') if args[0] not in self._save_opts: raise self.InvalidCmd('wrong \"save\" format') - + if args[0] != 'options' and len(args) != 2: self.help_save() raise self.InvalidCmd('wrong \"save\" format') @@ -1003,7 +1003,7 @@ def check_save(self, args): if not os.path.exists(basename): raise self.InvalidCmd('%s is not a valid path, please retry' % \ args[1]) - + if args[0] == 'options': has_path = None for arg in args[1:]: @@ -1024,9 +1024,9 @@ def check_save(self, args): has_path = True if not has_path: if '--auto' in arg and self.options['mg5_path']: - args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) + args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) else: - args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) + args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) def check_set(self, args): """ check the validity of the line""" @@ -1039,20 +1039,20 @@ def check_set(self, args): self.help_set() raise self.InvalidCmd('Possible options for set are %s' % \ self._set_options) - + if args[0] in ['stdout_level']: if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \ and not args[1].isdigit(): raise self.InvalidCmd('output_level needs ' + \ - 'a valid level') - + 'a valid level') + if args[0] in ['timeout']: if not args[1].isdigit(): - raise self.InvalidCmd('timeout values should be a integer') - + raise self.InvalidCmd('timeout values should be a integer') + def check_open(self, args): """ check the validity of the line """ - + if len(args) != 1: self.help_open() raise self.InvalidCmd('OPEN command requires exactly one argument') @@ -1069,7 +1069,7 @@ def check_open(self, args): raise self.InvalidCmd('No MadEvent path defined. 
Unable to associate this name to a file') else: return True - + path = self.me_dir if os.path.isfile(os.path.join(path,args[0])): args[0] = os.path.join(path,args[0]) @@ -1078,7 +1078,7 @@ def check_open(self, args): elif os.path.isfile(os.path.join(path,'HTML',args[0])): args[0] = os.path.join(path,'HTML',args[0]) # special for card with _default define: copy the default and open it - elif '_card.dat' in args[0]: + elif '_card.dat' in args[0]: name = args[0].replace('_card.dat','_card_default.dat') if os.path.isfile(os.path.join(path,'Cards', name)): files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0])) @@ -1086,13 +1086,13 @@ def check_open(self, args): else: raise self.InvalidCmd('No default path for this file') elif not os.path.isfile(args[0]): - raise self.InvalidCmd('No default path for this file') - + raise self.InvalidCmd('No default path for this file') + def check_initMadLoop(self, args): """ check initMadLoop command arguments are valid.""" - + opt = {'refresh': False, 'nPS': None, 'force': False} - + for arg in args: if arg in ['-r','--refresh']: opt['refresh'] = True @@ -1105,14 +1105,14 @@ def check_initMadLoop(self, args): except ValueError: raise InvalidCmd("The number of attempts specified "+ "'%s' is not a valid integer."%n_attempts) - + return opt - + def check_treatcards(self, args): """check that treatcards arguments are valid [param|run|all] [--output_dir=] [--param_card=] [--run_card=] """ - + opt = {'output_dir':pjoin(self.me_dir,'Source'), 'param_card':pjoin(self.me_dir,'Cards','param_card.dat'), 'run_card':pjoin(self.me_dir,'Cards','run_card.dat'), @@ -1129,14 +1129,14 @@ def check_treatcards(self, args): if os.path.isfile(value): card_name = self.detect_card_type(value) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' % (card_name, key)) opt[key] = value elif 
os.path.isfile(pjoin(self.me_dir,value)): card_name = self.detect_card_type(pjoin(self.me_dir,value)) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' - % (card_name, key)) + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + % (card_name, key)) opt[key] = value else: raise self.InvalidCmd('No such file: %s ' % value) @@ -1154,14 +1154,14 @@ def check_treatcards(self, args): else: self.help_treatcards() raise self.InvalidCmd('Unvalid argument %s' % arg) - - return mode, opt - - + + return mode, opt + + def check_survey(self, args, cmd='survey'): """check that the argument for survey are valid""" - - + + self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) @@ -1183,41 +1183,41 @@ def check_survey(self, args, cmd='survey'): self.help_survey() raise self.InvalidCmd('Too many argument for %s command' % cmd) elif not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], None,'parton', True) args.pop(0) - + return True def check_generate_events(self, args): """check that the argument for generate_events are valid""" - + run = None if args and args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['auto','parton', 'pythia', 'pgs', 'delphes']: self.help_generate_events() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. 
Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + #if len(args) > 1: # self.help_generate_events() # raise self.InvalidCmd('Too many argument for generate_events command: %s' % cmd) - + return run def check_calculate_decay_widths(self, args): """check that the argument for calculate_decay_widths are valid""" - + if self.ninitial != 1: raise self.InvalidCmd('Can only calculate decay widths for decay processes A > B C ...') @@ -1232,7 +1232,7 @@ def check_calculate_decay_widths(self, args): if len(args) > 1: self.help_calculate_decay_widths() raise self.InvalidCmd('Too many argument for calculate_decay_widths command: %s' % cmd) - + return accuracy @@ -1241,25 +1241,25 @@ def check_multi_run(self, args): """check that the argument for survey are valid""" run = None - + if not len(args): self.help_multi_run() raise self.InvalidCmd("""multi_run command requires at least one argument for the number of times that it call generate_events command""") - + if args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['parton', 'pythia', 'pgs', 'delphes']: self.help_multi_run() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. 
To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + elif not args[0].isdigit(): self.help_multi_run() @@ -1267,7 +1267,7 @@ def check_multi_run(self, args): #pass nb run to an integer nb_run = args.pop(0) args.insert(0, int(nb_run)) - + return run @@ -1284,7 +1284,7 @@ def check_refine(self, args): self.help_refine() raise self.InvalidCmd('require_precision argument is require for refine cmd') - + if not self.run_name: if self.results.lastrun: self.set_run_name(self.results.lastrun) @@ -1296,17 +1296,17 @@ def check_refine(self, args): else: try: [float(arg) for arg in args] - except ValueError: - self.help_refine() + except ValueError: + self.help_refine() raise self.InvalidCmd('refine arguments are suppose to be number') - + return True - + def check_combine_events(self, arg): """ Check the argument for the combine events command """ - + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] elif not self.run_tag: @@ -1314,53 +1314,53 @@ def check_combine_events(self, arg): else: tag = self.run_tag self.run_tag = tag - + if len(arg) > 1: self.help_combine_events() raise self.InvalidCmd('Too many argument for combine_events command') - + if len(arg) == 1: self.set_run_name(arg[0], self.run_tag, 'parton', True) - + if not self.run_name: if not self.results.lastrun: raise self.InvalidCmd('No run_name currently define. 
Unable to run combine') else: self.set_run_name(self.results.lastrun) - + return True - + def check_pythia(self, args): """Check the argument for pythia command - syntax: pythia [NAME] + syntax: pythia [NAME] Note that other option are already removed at this point """ - + mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia', 'pgs', 'delphes']: self.help_pythia() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') - + if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' raise self.InvalidCmd(error_msg) - - - + + + tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1368,8 +1368,8 @@ def check_pythia(self, args): if self.results.lastrun: args.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): @@ -1388,21 +1388,21 @@ def check_pythia(self, args): files.ln(input_file, os.path.dirname(output_file)) else: misc.gunzip(input_file, keep=True, stdout=output_file) - + args.append(mode) - + def check_pythia8(self, args): """Check the argument for pythia command - syntax: pythia8 [NAME] + syntax: pythia8 [NAME] Note that other option are already removed at this point - """ + """ mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia','pythia8','delphes']: self.help_pythia8() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') @@ -1410,7 +1410,7 @@ def check_pythia8(self, args): if not self.options['pythia8_path']: logger.info('Retry reading configuration file to find pythia8 path') self.set_configuration() - + if not self.options['pythia8_path'] or not \ os.path.exists(pjoin(self.options['pythia8_path'],'bin','pythia8-config')): error_msg = 'No valid pythia8 path set.\n' @@ -1421,7 +1421,7 @@ def check_pythia8(self, args): raise self.InvalidCmd(error_msg) tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1430,11 +1430,11 @@ def check_pythia8(self, args): args.insert(0, self.results.lastrun) else: raise self.InvalidCmd('No run name currently define. '+ - 'Please add this information.') - + 'Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ - not os.path.exists(pjoin(self.me_dir,'Events',args[0], + not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): raise self.InvalidCmd('No events file corresponding to %s run. 
' % args[0]) @@ -1451,9 +1451,9 @@ def check_pythia8(self, args): else: raise self.InvalidCmd('No event file corresponding to %s run. ' % self.run_name) - + args.append(mode) - + def check_remove(self, args): """Check that the remove command is valid""" @@ -1484,33 +1484,33 @@ def check_plot(self, args): madir = self.options['madanalysis_path'] td = self.options['td_path'] - + if not madir or not td: logger.info('Retry to read configuration file to find madanalysis/td') self.set_configuration() madir = self.options['madanalysis_path'] - td = self.options['td_path'] - + td = self.options['td_path'] + if not madir: error_msg = 'No valid MadAnalysis path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) + raise self.InvalidCmd(error_msg) if not td: error_msg = 'No valid td path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') + raise self.InvalidCmd('No run name currently define. Please add this information.') args.append('all') return - + if args[0] not in self._plot_mode: self.set_run_name(args[0], level='plot') del args[0] @@ -1518,45 +1518,45 @@ def check_plot(self, args): args.append('all') elif not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + for arg in args: if arg not in self._plot_mode and arg != self.run_name: self.help_plot() - raise self.InvalidCmd('unknown options %s' % arg) - + raise self.InvalidCmd('unknown options %s' % arg) + def check_syscalc(self, args): """Check the argument for the syscalc command syscalc run_name modes""" scdir = self.options['syscalc_path'] - + if not scdir: logger.info('Retry to read configuration file to find SysCalc') self.set_configuration() scdir = self.options['syscalc_path'] - + if not scdir: error_msg = 'No valid SysCalc path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' error_msg += 'Please note that you need to compile SysCalc first.' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. Please add this information.') args.append('all') return #deal options tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] - + if args[0] not in self._syscalc_mode: self.set_run_name(args[0], tag=tag, level='syscalc') del args[0] @@ -1564,61 +1564,61 @@ def check_syscalc(self, args): args.append('all') elif not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. 
Please add this information.') elif tag and tag != self.run_tag: self.set_run_name(self.run_name, tag=tag, level='syscalc') - + for arg in args: if arg not in self._syscalc_mode and arg != self.run_name: self.help_syscalc() - raise self.InvalidCmd('unknown options %s' % arg) + raise self.InvalidCmd('unknown options %s' % arg) if self.run_card['use_syst'] not in self.true: raise self.InvalidCmd('Run %s does not include ' % self.run_name + \ 'systematics information needed for syscalc.') - - + + def check_pgs(self, arg, no_default=False): """Check the argument for pythia command - syntax is "pgs [NAME]" + syntax is "pgs [NAME]" Note that other option are already remove at this point """ - + # If not pythia-pgs path if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] - - + + if len(arg) == 0 and not self.run_name: if self.results.lastrun: arg.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(arg) == 1 and self.run_name == arg[0]: arg.pop(0) - + if not len(arg) and \ not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): if not no_default: self.help_pgs() raise self.InvalidCmd('''No file file pythia_events.hep currently available Please specify a valid run_name''') - - lock = None + + lock = None if len(arg) == 1: prev_tag = self.set_run_name(arg[0], tag, 'pgs') if not os.path.exists(pjoin(self.me_dir,'Events',self.run_name,'%s_pythia_events.hep.gz' % prev_tag)): @@ -1626,25 +1626,25 @@ def check_pgs(self, arg, no_default=False): else: input_file = pjoin(self.me_dir,'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag) output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep') - lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), + lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), argument=['-c', input_file]) else: - if tag: + if tag: self.run_card['run_tag'] = tag self.set_run_name(self.run_name, tag, 'pgs') - - return lock + + return lock def check_display(self, args): """check the validity of line syntax is "display XXXXX" """ - + if len(args) < 1 or args[0] not in self._display_opts: self.help_display() raise self.InvalidCmd - + if args[0] == 'variable' and len(args) !=2: raise self.InvalidCmd('variable need a variable name') @@ -1654,39 +1654,39 @@ def check_display(self, args): def check_import(self, args): """check the validity of line""" - + if not args: self.help_import() raise self.InvalidCmd('wrong \"import\" format') - + if args[0] != 'command': args.insert(0,'command') - - + + if not len(args) == 2 or not os.path.exists(args[1]): raise self.InvalidCmd('PATH is mandatory for import command\n') - + #=============================================================================== # CompleteForCmd #=============================================================================== class CompleteForCmd(CheckValidForCmd): """ The Series of help 
routine for the MadGraphCmd""" - - + + def complete_banner_run(self, text, line, begidx, endidx, formatting=True): "Complete the banner run command" try: - - + + args = self.split_arg(line[0:begidx], error=False) - + if args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args \ - if a.endswith(os.path.sep)])) - - + if a.endswith(os.path.sep)])) + + if len(args) > 1: # only options are possible tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events' , args[1])) @@ -1697,9 +1697,9 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): else: return self.list_completion(text, tags) return self.list_completion(text, tags +['--name=','-f'], line) - + # First argument - possibilites = {} + possibilites = {} comp = self.path_completion(text, os.path.join('.',*[a for a in args \ if a.endswith(os.path.sep)])) @@ -1711,10 +1711,10 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): run_list = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) run_list = [n.rsplit('/',2)[1] for n in run_list] possibilites['RUN Name'] = self.list_completion(text, run_list) - + return self.deal_multiple_categories(possibilites, formatting) - - + + except Exception as error: print(error) @@ -1732,12 +1732,12 @@ def complete_history(self, text, line, begidx, endidx): if len(args) == 1: return self.path_completion(text) - - def complete_open(self, text, line, begidx, endidx): + + def complete_open(self, text, line, begidx, endidx): """ complete the open command """ args = self.split_arg(line[0:begidx]) - + # Directory continuation if os.path.sep in args[-1] + text: return self.path_completion(text, @@ -1751,10 +1751,10 @@ def complete_open(self, text, line, begidx, endidx): if os.path.isfile(os.path.join(path,'README')): possibility.append('README') if os.path.isdir(os.path.join(path,'Cards')): - possibility += [f for f in os.listdir(os.path.join(path,'Cards')) + 
possibility += [f for f in os.listdir(os.path.join(path,'Cards')) if f.endswith('.dat')] if os.path.isdir(os.path.join(path,'HTML')): - possibility += [f for f in os.listdir(os.path.join(path,'HTML')) + possibility += [f for f in os.listdir(os.path.join(path,'HTML')) if f.endswith('.html') and 'default' not in f] else: possibility.extend(['./','../']) @@ -1763,7 +1763,7 @@ def complete_open(self, text, line, begidx, endidx): if os.path.exists('MG5_debug'): possibility.append('MG5_debug') return self.list_completion(text, possibility) - + def complete_set(self, text, line, begidx, endidx): "Complete the set command" @@ -1784,27 +1784,27 @@ def complete_set(self, text, line, begidx, endidx): elif len(args) >2 and args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args if a.endswith(os.path.sep)]), - only_dirs = True) - + only_dirs = True) + def complete_survey(self, text, line, begidx, endidx): """ Complete the survey command """ - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + return self.list_completion(text, self._run_options, line) - + complete_refine = complete_survey complete_combine_events = complete_survey complite_store = complete_survey complete_generate_events = complete_survey complete_create_gridpack = complete_survey - + def complete_generate_events(self, text, line, begidx, endidx): """ Complete the generate events""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() @@ -1813,17 +1813,17 @@ def complete_generate_events(self, text, line, begidx, endidx): return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) def 
complete_initMadLoop(self, text, line, begidx, endidx): "Complete the initMadLoop command" - + numbers = [str(i) for i in range(10)] opts = ['-f','-r','--nPS='] - + args = self.split_arg(line[0:begidx], error=False) if len(line) >=6 and line[begidx-6:begidx]=='--nPS=': return self.list_completion(text, numbers, line) @@ -1840,18 +1840,18 @@ def complete_launch(self, *args, **opts): def complete_calculate_decay_widths(self, text, line, begidx, endidx): """ Complete the calculate_decay_widths command""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + opts = self._run_options + self._calculate_decay_options return self.list_completion(text, opts, line) - + def complete_display(self, text, line, begidx, endidx): - """ Complete the display command""" - + """ Complete the display command""" + args = self.split_arg(line[0:begidx], error=False) if len(args) >= 2 and args[1] =='results': start = line.find('results') @@ -1860,44 +1860,44 @@ def complete_display(self, text, line, begidx, endidx): def complete_multi_run(self, text, line, begidx, endidx): """complete multi run command""" - + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: data = [str(i) for i in range(0,20)] return self.list_completion(text, data, line) - + if line.endswith('run=') and not text: return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - - - + + + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - + def complete_plot(self, text, line, begidx, endidx): """ Complete 
the plot command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1: return self.list_completion(text, self._plot_mode) else: return self.list_completion(text, self._plot_mode + list(self.results.keys())) - + def complete_syscalc(self, text, line, begidx, endidx, formatting=True): """ Complete the syscalc command """ - + output = {} args = self.split_arg(line[0:begidx], error=False) - + if len(args) <=1: output['RUN_NAME'] = self.list_completion(list(self.results.keys())) output['MODE'] = self.list_completion(text, self._syscalc_mode) @@ -1907,12 +1907,12 @@ def complete_syscalc(self, text, line, begidx, endidx, formatting=True): if run in self.results: tags = ['--tag=%s' % tag['tag'] for tag in self.results[run]] output['options'] += tags - + return self.deal_multiple_categories(output, formatting) - + def complete_remove(self, text, line, begidx, endidx): """Complete the remove command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1 and (text.startswith('--t')): run = args[1] @@ -1932,8 +1932,8 @@ def complete_remove(self, text, line, begidx, endidx): data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1] for n in data] return self.list_completion(text, ['all'] + data) - - + + def complete_shower(self,text, line, begidx, endidx): "Complete the shower command" args = self.split_arg(line[0:begidx], error=False) @@ -1941,7 +1941,7 @@ def complete_shower(self,text, line, begidx, endidx): return self.list_completion(text, self._interfaced_showers) elif len(args)>1 and args[1] in self._interfaced_showers: return getattr(self, 'complete_%s' % text)\ - (text, args[1],line.replace(args[0]+' ',''), + (text, args[1],line.replace(args[0]+' ',''), begidx-len(args[0])-1, endidx-len(args[0])-1) def complete_pythia8(self,text, line, begidx, endidx): @@ -1955,11 +1955,11 @@ def complete_pythia8(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = 
self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_madanalysis5_parton(self,text, line, begidx, endidx): @@ -1978,19 +1978,19 @@ def complete_madanalysis5_parton(self,text, line, begidx, endidx): else: tmp2 = self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) - return tmp1 + tmp2 + return tmp1 + tmp2 elif '--MA5_stdout_lvl=' in line and not any(arg.startswith( '--MA5_stdout_lvl=') for arg in args): - return self.list_completion(text, - ['--MA5_stdout_lvl=%s'%opt for opt in + return self.list_completion(text, + ['--MA5_stdout_lvl=%s'%opt for opt in ['logging.INFO','logging.DEBUG','logging.WARNING', 'logging.CRITICAL','90']], line) else: - return self.list_completion(text, ['-f', + return self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) def complete_pythia(self,text, line, begidx, endidx): - "Complete the pythia command" + "Complete the pythia command" args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: @@ -2001,16 +2001,16 @@ def complete_pythia(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_pgs(self,text, line, begidx, endidx): "Complete the pythia command" - args = self.split_arg(line[0:begidx], error=False) + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: 
#return valid run_name data = misc.glob(pjoin('*', '*_pythia_events.hep.gz'), pjoin(self.me_dir, 'Events')) @@ -2019,23 +2019,23 @@ def complete_pgs(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--tag=' ,'--no_default'], line) - return tmp1 + tmp2 + return tmp1 + tmp2 else: - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--tag=','--no_default'], line) - complete_delphes = complete_pgs - complete_rivet = complete_pgs + complete_delphes = complete_pgs + complete_rivet = complete_pgs #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCmd): - """The command line processor of Mad Graph""" - + """The command line processor of Mad Graph""" + LO = True # Truth values @@ -2063,7 +2063,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm cluster_mode = 0 queue = 'madgraph' nb_core = None - + next_possibility = { 'start': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]', 'calculate_decay_widths [OPTIONS]', @@ -2080,9 +2080,9 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm 'pgs': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'], 'delphes' : ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'] } - + asking_for_run = AskRun - + ############################################################################ def __init__(self, me_dir = None, options={}, *completekey, **stdin): """ add information to the cmd """ @@ -2095,16 +2095,16 @@ def __init__(self, me_dir = None, options={}, *completekey, **stdin): if self.web: os.system('touch %s' % pjoin(self.me_dir,'Online')) - 
self.load_results_db() + self.load_results_db() self.results.def_web_mode(self.web) self.Gdirs = None - + self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) self.configured = 0 # time for reading the card self._options = {} # for compatibility with extended_cmd - - + + def pass_in_web_mode(self): """configure web data""" self.web = True @@ -2113,22 +2113,22 @@ def pass_in_web_mode(self): if os.environ['MADGRAPH_BASE']: self.options['mg5_path'] = pjoin(os.environ['MADGRAPH_BASE'],'MG5') - ############################################################################ + ############################################################################ def check_output_type(self, path): """ Check that the output path is a valid madevent directory """ - + bin_path = os.path.join(path,'bin') if os.path.isfile(os.path.join(bin_path,'generate_events')): return True - else: + else: return False ############################################################################ def set_configuration(self, amcatnlo=False, final=True, **opt): - """assign all configuration variable from file + """assign all configuration variable from file loop over the different config file if config_file not define """ - - super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, + + super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, final=final, **opt) if not final: @@ -2171,24 +2171,24 @@ def set_configuration(self, amcatnlo=False, final=True, **opt): if not os.path.exists(pjoin(path, 'sys_calc')): logger.info("No valid SysCalc path found") continue - # No else since the next line reinitialize the option to the + # No else since the next line reinitialize the option to the #previous value anyway self.options[key] = os.path.realpath(path) continue else: self.options[key] = None - - + + return self.options ############################################################################ - def do_banner_run(self, line): + def do_banner_run(self, line): """Make a run from the banner file""" - + 
args = self.split_arg(line) #check the validity of the arguments - self.check_banner_run(args) - + self.check_banner_run(args) + # Remove previous cards for name in ['delphes_trigger.dat', 'delphes_card.dat', 'pgs_card.dat', 'pythia_card.dat', 'madspin_card.dat', @@ -2197,20 +2197,20 @@ def do_banner_run(self, line): os.remove(pjoin(self.me_dir, 'Cards', name)) except Exception: pass - + banner_mod.split_banner(args[0], self.me_dir, proc_card=False) - + # Check if we want to modify the run if not self.force: ans = self.ask('Do you want to modify the Cards?', 'n', ['y','n']) if ans == 'n': self.force = True - + # Call Generate events self.exec_cmd('generate_events %s %s' % (self.run_name, self.force and '-f' or '')) - - - + + + ############################################################################ def do_display(self, line, output=sys.stdout): """Display current internal status""" @@ -2223,7 +2223,7 @@ def do_display(self, line, output=sys.stdout): #return valid run_name data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1:] for n in data] - + if data: out = {} for name, tag in data: @@ -2235,11 +2235,11 @@ def do_display(self, line, output=sys.stdout): print('the runs available are:') for run_name, tags in out.items(): print(' run: %s' % run_name) - print(' tags: ', end=' ') + print(' tags: ', end=' ') print(', '.join(tags)) else: print('No run detected.') - + elif args[0] == 'options': outstr = " Run Options \n" outstr += " ----------- \n" @@ -2260,8 +2260,8 @@ def do_display(self, line, output=sys.stdout): if value == default: outstr += " %25s \t:\t%s\n" % (key,value) else: - outstr += " %25s \t:\t%s (user set)\n" % (key,value) - outstr += "\n" + outstr += " %25s \t:\t%s (user set)\n" % (key,value) + outstr += "\n" outstr += " Configuration Options \n" outstr += " --------------------- \n" for key, default in self.options_configuration.items(): @@ -2275,15 +2275,15 @@ def do_display(self, line, 
output=sys.stdout): self.do_print_results(' '.join(args[1:])) else: super(MadEventCmd, self).do_display(line, output) - + def do_save(self, line, check=True, to_keep={}): - """Not in help: Save information to file""" + """Not in help: Save information to file""" args = self.split_arg(line) # Check argument validity if check: self.check_save(args) - + if args[0] == 'options': # First look at options which should be put in MG5DIR/input to_define = {} @@ -2295,7 +2295,7 @@ def do_save(self, line, check=True, to_keep={}): for key, default in self.options_madevent.items(): if self.options[key] != self.options_madevent[key]: to_define[key] = self.options[key] - + if '--all' in args: for key, default in self.options_madgraph.items(): if self.options[key] != self.options_madgraph[key]: @@ -2312,12 +2312,12 @@ def do_save(self, line, check=True, to_keep={}): filepath = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basefile = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basedir = self.me_dir - + if to_keep: to_define = to_keep self.write_configuration(filepath, basefile, basedir, to_define) - - + + def do_edit_cards(self, line): @@ -2326,80 +2326,80 @@ def do_edit_cards(self, line): # Check argument's validity mode = self.check_generate_events(args) self.ask_run_configuration(mode) - + return ############################################################################ - + ############################################################################ def do_restart_gridpack(self, line): """ syntax restart_gridpack --precision=1.0 --restart_zero collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) - + # initialize / remove lhapdf mode #self.run_card = 
banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) #self.configure_directory() - + gensym = gen_ximprove.gensym(self) - + min_precision = 1.0 resubmit_zero=False if '--precision=' in line: s = line.index('--precision=') + len('--precision=') arg=line[s:].split(1)[0] min_precision = float(arg) - + if '--restart_zero' in line: resubmit_zero = True - - + + gensym.resubmit(min_precision, resubmit_zero) self.monitor(run_type='All jobs submitted for gridpack', html=True) #will be done during the refine (more precisely in gen_ximprove) cross, error = sum_html.make_all_html_results(self) self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(gensym.run_statistics)) - + #self.exec_cmd('combine_events', postcmd=False) #self.exec_cmd('store_events', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) self.exec_cmd('create_gridpack', postcmd=False) - - - ############################################################################ + + + ############################################################################ ############################################################################ def do_generate_events(self, line): """Main Commands: launch the full chain """ - + self.banner = None self.Gdirs = None - + args = self.split_arg(line) # Check argument's validity mode = self.check_generate_events(args) switch_mode = self.ask_run_configuration(mode, args) if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir), None, 'parton') else: self.set_run_name(args[0], None, 'parton', True) args.pop(0) - + 
self.run_generate_events(switch_mode, args) self.postprocessing() @@ -2420,8 +2420,8 @@ def postprocessing(self): def rivet_postprocessing(self, rivet_config, postprocess_RIVET, postprocess_CONTUR): - # Check number of Rivet jobs to run - run_dirs = [pjoin(self.me_dir, 'Events',run_name) + # Check number of Rivet jobs to run + run_dirs = [pjoin(self.me_dir, 'Events',run_name) for run_name in self.postprocessing_dirs] nb_rivet = len(run_dirs) @@ -2550,10 +2550,10 @@ def wait_monitoring(Idle, Running, Done): wrapper = open(pjoin(self.me_dir, "Analysis", "contur", "run_contur.sh"), "w") wrapper.write(set_env) - + wrapper.write('{0}\n'.format(contur_cmd)) wrapper.close() - + misc.call(["run_contur.sh"], cwd=(pjoin(self.me_dir, "Analysis", "contur"))) logger.info("Contur outputs are stored in {0}".format(pjoin(self.me_dir, "Analysis", "contur","conturPlot"))) @@ -2572,7 +2572,7 @@ def run_generate_events(self, switch_mode, args): self.do_set('run_mode 2') self.do_set('nb_core 1') - if self.run_card['gridpack'] in self.true: + if self.run_card['gridpack'] in self.true: # Running gridpack warmup gridpack_opts=[('accuracy', 0.01), ('points', 2000), @@ -2593,7 +2593,7 @@ def run_generate_events(self, switch_mode, args): # Regular run mode logger.info('Generating %s events with run name %s' % (self.run_card['nevents'], self.run_name)) - + self.exec_cmd('survey %s %s' % (self.run_name,' '.join(args)), postcmd=False) nb_event = self.run_card['nevents'] @@ -2601,7 +2601,7 @@ def run_generate_events(self, switch_mode, args): self.exec_cmd('refine %s' % nb_event, postcmd=False) if not float(self.results.current['cross']): # Zero cross-section. Try to guess why - text = '''Survey return zero cross section. + text = '''Survey return zero cross section. Typical reasons are the following: 1) A massive s-channel particle has a width set to zero. 
2) The pdf are zero for at least one of the initial state particles @@ -2613,17 +2613,17 @@ def run_generate_events(self, switch_mode, args): raise ZeroResult('See https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/FAQ-General-14') else: bypass_run = True - + #we can bypass the following if scan and first result is zero if not bypass_run: self.exec_cmd('refine %s --treshold=%s' % (nb_event,self.run_card['second_refine_treshold']) , postcmd=False) - + self.exec_cmd('combine_events', postcmd=False,printcmd=False) self.print_results_in_shell(self.results.current) if self.run_card['use_syst']: - if self.run_card['systematics_program'] == 'auto': + if self.run_card['systematics_program'] == 'auto': scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): to_use = 'systematics' @@ -2634,26 +2634,26 @@ def run_generate_events(self, switch_mode, args): else: logger.critical('Unvalid options for systematics_program: bypass computation of systematics variations.') to_use = 'none' - + if to_use == 'systematics': if self.run_card['systematics_arguments'] != ['']: self.exec_cmd('systematics %s %s ' % (self.run_name, - ' '.join(self.run_card['systematics_arguments'])), + ' '.join(self.run_card['systematics_arguments'])), postcmd=False, printcmd=False) else: self.exec_cmd('systematics %s --from_card' % self.run_name, - postcmd=False,printcmd=False) + postcmd=False,printcmd=False) elif to_use == 'syscalc': self.run_syscalc('parton') - - - self.create_plot('parton') - self.exec_cmd('store_events', postcmd=False) + + + self.create_plot('parton') + self.exec_cmd('store_events', postcmd=False) if self.run_card['boost_event'].strip() and self.run_card['boost_event'] != 'False': self.boost_events() - - - self.exec_cmd('reweight -from_cards', postcmd=False) + + + self.exec_cmd('reweight -from_cards', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) if self.run_card['time_of_flight']>=0: self.exec_cmd("add_time_of_flight --threshold=%s" % 
self.run_card['time_of_flight'] ,postcmd=False) @@ -2664,43 +2664,43 @@ def run_generate_events(self, switch_mode, args): self.create_root_file(input , output) self.exec_cmd('madanalysis5_parton --no_default', postcmd=False, printcmd=False) - # shower launches pgs/delphes if needed + # shower launches pgs/delphes if needed self.exec_cmd('shower --no_default', postcmd=False, printcmd=False) self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) self.exec_cmd('rivet --no_default', postcmd=False, printcmd=False) self.store_result() - - if self.allow_notification_center: - misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), - '%s: %s +- %s ' % (self.results.current['run_name'], + + if self.allow_notification_center: + misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), + '%s: %s +- %s ' % (self.results.current['run_name'], self.results.current['cross'], self.results.current['error'])) - + def boost_events(self): - + if not self.run_card['boost_event']: return - + if self.run_card['boost_event'].startswith('lambda'): if not isinstance(self, cmd.CmdShell): raise Exception("boost not allowed online") filter = eval(self.run_card['boost_event']) else: raise Exception - + path = [pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')] - + for p in path: if os.path.exists(p): event_path = p break else: raise Exception("fail to find event file for the boost") - - + + lhe = lhe_parser.EventFile(event_path) with misc.TMP_directory() as tmp_dir: output = lhe_parser.EventFile(pjoin(tmp_dir, os.path.basename(event_path)), 'w') @@ -2711,28 +2711,28 @@ def boost_events(self): event.boost(filter) #write this modify event output.write(str(event)) - output.write('\n') + output.write('\n') lhe.close() - 
files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) - - - - - + files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) + + + + + def do_initMadLoop(self,line): - """Compile and run MadLoop for a certain number of PS point so as to + """Compile and run MadLoop for a certain number of PS point so as to initialize MadLoop (setup the zero helicity and loop filter.)""" - + args = line.split() # Check argument's validity options = self.check_initMadLoop(args) - + if not options['force']: self.ask_edit_cards(['MadLoopParams.dat'], mode='fixed', plot=False) self.exec_cmd('treatcards loop --no_MadLoopInit') if options['refresh']: - for filter in misc.glob('*Filter*', + for filter in misc.glob('*Filter*', pjoin(self.me_dir,'SubProcesses','MadLoop5_resources')): logger.debug("Resetting filter '%s'."%os.path.basename(filter)) os.remove(filter) @@ -2753,14 +2753,14 @@ def do_initMadLoop(self,line): def do_launch(self, line, *args, **opt): """Main Commands: exec generate_events for 2>N and calculate_width for 1>N""" - + if self.ninitial == 1: logger.info("Note that since 2.3. 
The launch for 1>N pass in event generation\n"+ " To have the previous behavior use the calculate_decay_widths function") # self.do_calculate_decay_widths(line, *args, **opt) #else: self.do_generate_events(line, *args, **opt) - + def print_results_in_shell(self, data): """Have a nice results prints in the shell, data should be of type: gen_crossxhtml.OneTagResults""" @@ -2770,7 +2770,7 @@ def print_results_in_shell(self, data): if data['run_statistics']: globalstat = sum_html.RunStatistics() - + logger.info(" " ) logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2786,13 +2786,13 @@ def print_results_in_shell(self, data): logger.warning(globalstat.get_warning_text()) logger.info(" ") - + logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) - + total_time = int(sum(_['cumulative_timing'] for _ in data['run_statistics'].values())) if total_time > 0: logger.info(" Cumulative sequential time for this run: %s"%misc.format_time(total_time)) - + if self.ninitial == 1: logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) else: @@ -2810,18 +2810,18 @@ def print_results_in_shell(self, data): if len(split)!=3: continue scale, cross, error = split - cross_sections[float(scale)] = (float(cross), float(error)) + cross_sections[float(scale)] = (float(cross), float(error)) if len(cross_sections)>0: logger.info(' Pythia8 merged cross-sections are:') for scale in sorted(cross_sections.keys()): logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ (scale,cross_sections[scale][0],cross_sections[scale][1])) - + else: if self.ninitial == 1: logger.info(" Matched width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) else: - logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) + logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) logger.info(" 
Nb of events after matching/merging : %d" % int(data['nb_event_pythia'])) if self.run_card['use_syst'] in self.true and \ (int(self.run_card['ickkw'])==1 or self.run_card['ktdurham']>0.0 @@ -2838,9 +2838,9 @@ def print_results_in_file(self, data, path, mode='w', format='full'): data should be of type: gen_crossxhtml.OneTagResults""" if not data: return - + fsock = open(path, mode) - + if data['run_statistics']: logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2851,7 +2851,7 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if format == "full": fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ (data['run_name'],data['tag'], os.path.basename(self.me_dir))) - + if self.ninitial == 1: fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) else: @@ -2861,20 +2861,20 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if self.ninitial == 1: fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) else: - fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) + fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) fsock.write(" \n" ) elif format == "short": if mode == "w": fsock.write("# run_name tag cross error Nb_event cross_after_matching nb_event_after matching\n") - + if data['cross_pythia'] and data['nb_event_pythia']: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s %(cross_pythia)s %(nb_event_pythia)s\n" else: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s\n" fsock.write(text % data) - - ############################################################################ + + ############################################################################ def do_calculate_decay_widths(self, 
line): """Main Commands: launch decay width calculation and automatic inclusion of calculated widths and BRs in the param_card.""" @@ -2887,21 +2887,21 @@ def do_calculate_decay_widths(self, line): self.Gdirs = None if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], reload_card=True) args.pop(0) self.configure_directory() - + # Running gridpack warmup opts=[('accuracy', accuracy), # default 0.01 ('points', 1000), ('iterations',9)] logger.info('Calculating decay widths with run name %s' % self.run_name) - + self.exec_cmd('survey %s %s' % \ (self.run_name, " ".join(['--' + opt + '=' + str(val) for (opt,val) \ @@ -2910,26 +2910,26 @@ def do_calculate_decay_widths(self, line): self.refine_mode = "old" # specify how to combine event self.exec_cmd('combine_events', postcmd=False) self.exec_cmd('store_events', postcmd=False) - + self.collect_decay_widths() self.print_results_in_shell(self.results.current) - self.update_status('calculate_decay_widths done', - level='parton', makehtml=False) + self.update_status('calculate_decay_widths done', + level='parton', makehtml=False) + - ############################################################################ def collect_decay_widths(self): - """ Collect the decay widths and calculate BRs for all particles, and put - in param_card form. + """ Collect the decay widths and calculate BRs for all particles, and put + in param_card form. """ - + particle_dict = {} # store the results run_name = self.run_name # Looping over the Subprocesses for P_path in SubProcesses.get_subP(self.me_dir): ids = SubProcesses.get_subP_ids(P_path) - # due to grouping we need to compute the ratio factor for the + # due to grouping we need to compute the ratio factor for the # ungroup resutls (that we need here). 
Note that initial particles # grouping are not at the same stage as final particle grouping nb_output = len(ids) / (len(set([p[0] for p in ids]))) @@ -2940,30 +2940,30 @@ def collect_decay_widths(self): particle_dict[particles[0]].append([particles[1:], result/nb_output]) except KeyError: particle_dict[particles[0]] = [[particles[1:], result/nb_output]] - + self.update_width_in_param_card(particle_dict, initial = pjoin(self.me_dir, 'Cards', 'param_card.dat'), output=pjoin(self.me_dir, 'Events', run_name, "param_card.dat")) - + @staticmethod def update_width_in_param_card(decay_info, initial=None, output=None): # Open the param_card.dat and insert the calculated decays and BRs - + if not output: output = initial - + param_card_file = open(initial) param_card = param_card_file.read().split('\n') param_card_file.close() decay_lines = [] line_number = 0 - # Read and remove all decays from the param_card + # Read and remove all decays from the param_card while line_number < len(param_card): line = param_card[line_number] if line.lower().startswith('decay'): - # Read decay if particle in decay_info - # DECAY 6 1.455100e+00 + # Read decay if particle in decay_info + # DECAY 6 1.455100e+00 line = param_card.pop(line_number) line = line.split() particle = 0 @@ -2996,7 +2996,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): break line=param_card[line_number] if particle and particle not in decay_info: - # No decays given, only total width + # No decays given, only total width decay_info[particle] = [[[], width]] else: # Not decay line_number += 1 @@ -3004,7 +3004,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): while not param_card[-1] or param_card[-1].startswith('#'): param_card.pop(-1) - # Append calculated and read decays to the param_card + # Append calculated and read decays to the param_card param_card.append("#\n#*************************") param_card.append("# Decay widths *") 
param_card.append("#*************************") @@ -3018,7 +3018,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): param_card.append("# BR NDA ID1 ID2 ...") brs = [[(val[1]/width).real, val[0]] for val in decay_info[key] if val[1]] for val in sorted(brs, reverse=True): - param_card.append(" %e %i %s # %s" % + param_card.append(" %e %i %s # %s" % (val[0].real, len(val[1]), " ".join([str(v) for v in val[1]]), val[0] * width @@ -3031,7 +3031,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): ############################################################################ def do_multi_run(self, line): - + args = self.split_arg(line) # Check argument's validity mode = self.check_multi_run(args) @@ -3047,7 +3047,7 @@ def do_multi_run(self, line): self.check_param_card(path, run=False) #store it locally to avoid relaunch param_card_iterator, self.param_card_iterator = self.param_card_iterator, [] - + crossoversig = 0 inv_sq_err = 0 nb_event = 0 @@ -3055,8 +3055,8 @@ def do_multi_run(self, line): self.nb_refine = 0 self.exec_cmd('generate_events %s_%s -f' % (main_name, i), postcmd=False) # Update collected value - nb_event += int(self.results[self.run_name][-1]['nb_event']) - self.results.add_detail('nb_event', nb_event , run=main_name) + nb_event += int(self.results[self.run_name][-1]['nb_event']) + self.results.add_detail('nb_event', nb_event , run=main_name) cross = self.results[self.run_name][-1]['cross'] error = self.results[self.run_name][-1]['error'] + 1e-99 crossoversig+=cross/error**2 @@ -3070,7 +3070,7 @@ def do_multi_run(self, line): os.mkdir(pjoin(self.me_dir,'Events', self.run_name)) except Exception: pass - os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' + os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' % {'bin': 
self.dirbin, 'event': pjoin(self.me_dir,'Events'), 'name': self.run_name}) @@ -3084,19 +3084,19 @@ def do_multi_run(self, line): self.create_root_file('%s/unweighted_events.lhe' % self.run_name, '%s/unweighted_events.root' % self.run_name) - - path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") + + path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") self.create_plot('parton', path, pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') ) - - if not os.path.exists('%s.gz' % path): + + if not os.path.exists('%s.gz' % path): misc.gzip(path) self.update_status('', level='parton') - self.print_results_in_shell(self.results.current) - + self.print_results_in_shell(self.results.current) + cpath = pjoin(self.me_dir,'Cards','param_card.dat') if param_card_iterator: @@ -3112,21 +3112,21 @@ def do_multi_run(self, line): path = pjoin(self.me_dir, 'Events','scan_%s.txt' % scan_name) logger.info("write all cross-section results in %s" % path, '$MG:BOLD') param_card_iterator.write_summary(path) - - ############################################################################ + + ############################################################################ def do_treatcards(self, line, mode=None, opt=None): """Advanced commands: create .inc files from param_card.dat/run_card.dat""" if not mode and not opt: args = self.split_arg(line) mode, opt = self.check_treatcards(args) - + # To decide whether to refresh MadLoop's helicity filters, it is necessary # to check if the model parameters where modified or not, before doing - # anything else. + # anything else. need_MadLoopFilterUpdate = False - # Just to record what triggered the reinitialization of MadLoop for a + # Just to record what triggered the reinitialization of MadLoop for a # nice debug message. 
type_of_change = '' if not opt['forbid_MadLoopInit'] and self.proc_characteristics['loop_induced'] \ @@ -3137,10 +3137,10 @@ def do_treatcards(self, line, mode=None, opt=None): (os.path.getmtime(paramDat)-os.path.getmtime(paramInc)) > 0.0: need_MadLoopFilterUpdate = True type_of_change = 'model' - + ML_in = pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat') ML_out = pjoin(self.me_dir,"SubProcesses", - "MadLoop5_resources", "MadLoopParams.dat") + "MadLoop5_resources", "MadLoopParams.dat") if (not os.path.isfile(ML_in)) or (not os.path.isfile(ML_out)) or \ (os.path.getmtime(ML_in)-os.path.getmtime(ML_out)) > 0.0: need_MadLoopFilterUpdate = True @@ -3148,7 +3148,7 @@ def do_treatcards(self, line, mode=None, opt=None): #check if no 'Auto' are present in the file self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) - + if mode in ['param', 'all']: model = self.find_model_name() tmp_model = os.path.basename(model) @@ -3160,9 +3160,9 @@ def do_treatcards(self, line, mode=None, opt=None): check_param_card.check_valid_param_card(mg5_param) opt['param_card'] = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') else: - check_param_card.check_valid_param_card(opt['param_card']) - - logger.debug('write compile file for card: %s' % opt['param_card']) + check_param_card.check_valid_param_card(opt['param_card']) + + logger.debug('write compile file for card: %s' % opt['param_card']) param_card = check_param_card.ParamCard(opt['param_card']) outfile = pjoin(opt['output_dir'], 'param_card.inc') ident_card = pjoin(self.me_dir,'Cards','ident_card.dat') @@ -3185,10 +3185,10 @@ def do_treatcards(self, line, mode=None, opt=None): devnull.close() default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat') - need_mp = self.proc_characteristics['loop_induced'] + need_mp = self.proc_characteristics['loop_induced'] param_card.write_inc_file(outfile, ident_card, default, need_mp=need_mp) - - + + if mode in ['run', 'all']: if not hasattr(self, 'run_card'): 
run_card = banner_mod.RunCard(opt['run_card'], path=pjoin(self.me_dir, 'Cards', 'run_card.dat')) @@ -3202,7 +3202,7 @@ def do_treatcards(self, line, mode=None, opt=None): run_card['lpp2'] = 0 run_card['ebeam1'] = 0 run_card['ebeam2'] = 0 - + # Ensure that the bias parameters has all the required input from the # run_card if run_card['bias_module'].lower() not in ['dummy','none']: @@ -3219,7 +3219,7 @@ def do_treatcards(self, line, mode=None, opt=None): mandatory_file,run_card['bias_module'])) misc.copytree(run_card['bias_module'], pjoin(self.me_dir,'Source','BIAS', os.path.basename(run_card['bias_module']))) - + #check expected parameters for the module. default_bias_parameters = {} start, last = False,False @@ -3244,50 +3244,50 @@ def do_treatcards(self, line, mode=None, opt=None): for pair in line.split(','): if not pair.strip(): continue - x,y =pair.split(':') + x,y =pair.split(':') x=x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y elif ':' in line: x,y = line.split(':') x = x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y for key,value in run_card['bias_parameters'].items(): if key not in default_bias_parameters: logger.warning('%s not supported by the bias module. We discard this entry.', key) else: default_bias_parameters[key] = value - run_card['bias_parameters'] = default_bias_parameters - - - # Finally write the include file + run_card['bias_parameters'] = default_bias_parameters + + + # Finally write the include file run_card.write_include_file(opt['output_dir']) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: - self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, + self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat')) # The writing out of MadLoop filter is potentially dangerous # when running in multi-core with a central disk. 
So it is turned - # off here. If these filters were not initialized then they will + # off here. If these filters were not initialized then they will # have to be re-computed at the beginning of each run. if 'WriteOutFilters' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('WriteOutFilters'): logger.info( -"""You chose to have MadLoop writing out filters. +"""You chose to have MadLoop writing out filters. Beware that this can be dangerous for local multicore runs.""") self.MadLoopparam.set('WriteOutFilters',False, changeifuserset=False) - + # The conservative settings below for 'CTModeInit' and 'ZeroThres' # help adress issues for processes like g g > h z, and g g > h g - # where there are some helicity configuration heavily suppressed - # (by several orders of magnitude) so that the helicity filter + # where there are some helicity configuration heavily suppressed + # (by several orders of magnitude) so that the helicity filter # needs high numerical accuracy to correctly handle this spread in # magnitude. Also, because one cannot use the Born as a reference - # scale, it is better to force quadruple precision *for the + # scale, it is better to force quadruple precision *for the # initialization points only*. This avoids numerical accuracy issues # when setting up the helicity filters and does not significantly # slow down the run. @@ -3298,21 +3298,21 @@ def do_treatcards(self, line, mode=None, opt=None): # It is a bit superficial to use the level 2 which tries to numerically # map matching helicities (because of CP symmetry typically) together. -# It is useless in the context of MC over helicities and it can +# It is useless in the context of MC over helicities and it can # potentially make the helicity double checking fail. self.MadLoopparam.set('HelicityFilterLevel',1, changeifuserset=False) # To be on the safe side however, we ask for 4 consecutive matching # helicity filters. 
self.MadLoopparam.set('CheckCycle',4, changeifuserset=False) - + # For now it is tricky to have each channel performing the helicity # double check. What we will end up doing is probably some kind # of new initialization round at the beginning of each launch - # command, to reset the filters. + # command, to reset the filters. self.MadLoopparam.set('DoubleCheckHelicityFilter',False, changeifuserset=False) - + # Thanks to TIR recycling, TIR is typically much faster for Loop-induced # processes when not doing MC over helicities, so that we place OPP last. if not hasattr(self, 'run_card'): @@ -3349,7 +3349,7 @@ def do_treatcards(self, line, mode=None, opt=None): logger.warning( """You chose to also use a lorentz rotation for stability tests (see parameter NRotations_[DP|QP]). Beware that, for optimization purposes, MadEvent uses manual TIR cache clearing which is not compatible - with the lorentz rotation stability test. The number of these rotations to be used will be reset to + with the lorentz rotation stability test. The number of these rotations to be used will be reset to zero by MadLoop. You can avoid this by changing the parameter 'FORCE_ML_HELICITY_SUM' int he matrix.f files to be .TRUE. so that the sum over helicity configurations is performed within MadLoop (in which case the helicity of final state particles cannot be speicfied in the LHE file.""") @@ -3363,15 +3363,15 @@ def do_treatcards(self, line, mode=None, opt=None): # self.MadLoopparam.set('NRotations_DP',0,changeifuserset=False) # Revert to the above to be slightly less robust but twice faster. 
self.MadLoopparam.set('NRotations_DP',1,changeifuserset=False) - self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) - + self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) + # Finally, the stability tests are slightly less reliable for process - # with less or equal than 4 final state particles because the + # with less or equal than 4 final state particles because the # accessible kinematic is very limited (i.e. lorentz rotations don't # shuffle invariants numerics much). In these cases, we therefore # increase the required accuracy to 10^-7. # This is important for getting g g > z z [QCD] working with a - # ptheavy cut as low as 1 GeV. + # ptheavy cut as low as 1 GeV. if self.proc_characteristics['nexternal']<=4: if ('MLStabThres' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('MLStabThres')>1.0e-7): @@ -3381,12 +3381,12 @@ def do_treatcards(self, line, mode=None, opt=None): than four external legs, so this is not recommended (especially not for g g > z z).""") self.MadLoopparam.set('MLStabThres',1.0e-7,changeifuserset=False) else: - self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) + self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) #write the output file self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses","MadLoop5_resources", "MadLoopParams.dat")) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: # Now Update MadLoop filters if necessary (if modifications were made to # the model parameters). 
@@ -3403,12 +3403,12 @@ def do_treatcards(self, line, mode=None, opt=None): elif not opt['forbid_MadLoopInit'] and \ MadLoopInitializer.need_MadLoopInit(self.me_dir): self.exec_cmd('initMadLoop -f') - - ############################################################################ + + ############################################################################ def do_survey(self, line): """Advanced commands: launch survey for the current process """ - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) @@ -3416,7 +3416,7 @@ def do_survey(self, line): if os.path.exists(pjoin(self.me_dir,'error')): os.remove(pjoin(self.me_dir,'error')) - + self.configure_directory() # Save original random number self.random_orig = self.random @@ -3435,9 +3435,9 @@ def do_survey(self, line): P_zero_result = [] # check the number of times where they are no phase-space # File for the loop (for loop induced) - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources')) and cluster.need_transfer(self.options): - tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', + tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', 'MadLoop5_resources.tar.gz'), 'w:gz', dereference=True) tf.add(pjoin(self.me_dir,'SubProcesses','MadLoop5_resources'), arcname='MadLoop5_resources') @@ -3467,7 +3467,7 @@ def do_survey(self, line): except Exception as error: logger.debug(error) pass - + jobs, P_zero_result = ajobcreator.launch() # Check if all or only some fails if P_zero_result: @@ -3481,60 +3481,60 @@ def do_survey(self, line): self.get_Gdir() for P in P_zero_result: self.Gdirs[0][pjoin(self.me_dir,'SubProcesses',P)] = [] - + self.monitor(run_type='All jobs submitted for survey', html=True) if not self.history or 'survey' in self.history[-1] or self.ninitial ==1 or \ self.run_card['gridpack']: #will be done during the refine (more precisely in gen_ximprove) cross, error = self.make_make_all_html_results() 
self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(ajobcreator.run_statistics)) self.update_status('End survey', 'parton', makehtml=False) ############################################################################ def pass_in_difficult_integration_mode(self, rate=1): """be more secure for the integration to not miss it due to strong cut""" - + # improve survey options if default if self.opts['points'] == self._survey_options['points'][1]: self.opts['points'] = (rate+2) * self._survey_options['points'][1] if self.opts['iterations'] == self._survey_options['iterations'][1]: self.opts['iterations'] = 1 + rate + self._survey_options['iterations'][1] if self.opts['accuracy'] == self._survey_options['accuracy'][1]: - self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) - + self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) + # Modify run_config.inc in order to improve the refine conf_path = pjoin(self.me_dir, 'Source','run_config.inc') files.cp(conf_path, conf_path + '.bk') # text = open(conf_path).read() - min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) - + min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) + text = re.sub('''\(min_events = \d+\)''', '(min_events = %i )' % min_evt, text) text = re.sub('''\(max_events = \d+\)''', '(max_events = %i )' % max_evt, text) fsock = open(conf_path, 'w') fsock.write(text) fsock.close() - + # Compile for name in ['../bin/internal/gen_ximprove', 'all']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) - - - ############################################################################ + + + ############################################################################ def 
do_refine(self, line): """Advanced commands: launch survey for the current process """ - devnull = open(os.devnull, 'w') + devnull = open(os.devnull, 'w') self.nb_refine += 1 args = self.split_arg(line) treshold=None - - + + for a in args: if a.startswith('--treshold='): treshold = float(a.split('=',1)[1]) @@ -3548,8 +3548,8 @@ def do_refine(self, line): break # Check argument's validity self.check_refine(args) - - refine_opt = {'err_goal': args[0], 'split_channels': True} + + refine_opt = {'err_goal': args[0], 'split_channels': True} precision = args[0] if len(args) == 2: refine_opt['max_process']= args[1] @@ -3560,15 +3560,15 @@ def do_refine(self, line): # Update random number self.update_random() self.save_random() - + if self.cluster_mode: logger.info('Creating Jobs') self.update_status('Refine results to %s' % precision, level=None) - + self.total_jobs = 0 - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + # cleanning the previous job for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() @@ -3589,14 +3589,14 @@ def do_refine(self, line): level = 5 if value.has_warning(): level = 10 - logger.log(level, + logger.log(level, value.nice_output(str('/'.join([key[0],'G%s'%key[1]]))). replace(' statistics','')) logger.debug(globalstat.nice_output('combined', no_warning=True)) - + if survey_statistics: x_improve.run_statistics = survey_statistics - + x_improve.launch() # create the ajob for the refinment. 
if not self.history or 'refine' not in self.history[-1]: cross, error = x_improve.update_html() #update html results for survey @@ -3610,9 +3610,9 @@ def do_refine(self, line): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) - + if os.path.exists(pjoin(Pdir, 'ajob1')): cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py @@ -3629,7 +3629,7 @@ def do_refine(self, line): ###self.compile(['all'], cwd=Pdir) alljobs = misc.glob('ajob*', Pdir) - + #remove associated results.dat (ensure to not mix with all data) Gre = re.compile("\s*j=(G[\d\.\w]+)") for job in alljobs: @@ -3637,49 +3637,49 @@ def do_refine(self, line): for Gdir in Gdirs: if os.path.exists(pjoin(Pdir, Gdir, 'results.dat')): os.remove(pjoin(Pdir, Gdir,'results.dat')) - - nb_tot = len(alljobs) + + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), - run_type='Refine number %s on %s (%s/%s)' % + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) - self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, html=True) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except Exception: pass - + if isinstance(x_improve, gen_ximprove.gen_ximprove_v4): # the merge of the events.lhe is handle in the x_improve class - # for splitted runs. (and partly in store_events). + # for splitted runs. (and partly in store_events). 
combine_runs.CombineRuns(self.me_dir) self.refine_mode = "old" else: self.refine_mode = "new" - + cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - self.results.add_detail('run_statistics', + self.results.add_detail('run_statistics', dict(self.results.get_detail('run_statistics'))) self.update_status('finish refine', 'parton', makehtml=False) devnull.close() - - ############################################################################ + + ############################################################################ def do_comine_iteration(self, line): """Not in help: Combine a given iteration combine_iteration Pdir Gdir S|R step - S is for survey + S is for survey R is for refine - step is the iteration number (not very critical)""" + step is the iteration number (not very critical)""" self.set_run_name("tmp") self.configure_directory(html_opening=False) @@ -3695,12 +3695,12 @@ def do_comine_iteration(self, line): gensym.combine_iteration(Pdir, Gdir, int(step)) elif mode == "R": refine = gen_ximprove.gen_ximprove_share(self) - refine.combine_iteration(Pdir, Gdir, int(step)) - - + refine.combine_iteration(Pdir, Gdir, int(step)) - - ############################################################################ + + + + ############################################################################ def do_combine_events(self, line): """Advanced commands: Launch combine events""" start=time.time() @@ -3710,11 +3710,11 @@ def do_combine_events(self, line): self.check_combine_events(args) self.update_status('Combining Events', level='parton') - + if self.run_card['gridpack'] and isinstance(self, GridPackCmd): return GridPackCmd.do_combine_events(self, line) - + # Define The Banner tag = self.run_card['run_tag'] # Update the banner with the pythia card @@ -3727,14 +3727,14 @@ def do_combine_events(self, line): self.banner.change_seed(self.random_orig) if not os.path.exists(pjoin(self.me_dir, 
'Events', self.run_name)): os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) - self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, + self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -3751,12 +3751,12 @@ def do_combine_events(self, line): os.remove(pjoin(Gdir, 'events.lhe')) continue - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec'), result.get('xerru'), result.get('axsec') ) - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.run_card['nevents']) @@ -3765,13 +3765,13 @@ def do_combine_events(self, line): AllEvent.add(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() if len(AllEvent) == 0: - nb_event = 0 + nb_event = 0 else: nb_event = AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.run_card['nevents'], @@ -3791,22 +3791,22 @@ def do_combine_events(self, line): os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) - + if self.run_card['bias_module'].lower() not in ['dummy', 'none'] and nb_event: self.correct_bias() elif 
self.run_card['custom_fcts']: self.correct_bias() logger.info("combination of events done in %s s ", time.time()-start) - + self.to_store.append('event') - - ############################################################################ + + ############################################################################ def correct_bias(self): - """check the first event and correct the weight by the bias + """check the first event and correct the weight by the bias and correct the cross-section. - If the event do not have the bias tag it means that the bias is + If the event do not have the bias tag it means that the bias is one modifying the cross-section/shape so we have nothing to do """ @@ -3834,7 +3834,7 @@ def correct_bias(self): output.write('') output.close() lhe.close() - + # MODIFY THE BANNER i.e. INIT BLOCK # ensure information compatible with normalisation choice total_cross = sum(cross[key] for key in cross) @@ -3846,8 +3846,8 @@ def correct_bias(self): elif self.run_card['event_norm'] == 'unity': total_cross = self.results.current['cross'] * total_cross / nb_event for key in cross: - cross[key] *= total_cross / nb_event - + cross[key] *= total_cross / nb_event + bannerfile = lhe_parser.EventFile(pjoin(self.me_dir, 'Events', self.run_name, '.banner.tmp.gz'),'w') banner = banner_mod.Banner(lhe.banner) banner.modify_init_cross(cross) @@ -3862,12 +3862,12 @@ def correct_bias(self): os.remove(lhe.name) os.remove(bannerfile.name) os.remove(output.name) - - + + self.results.current['cross'] = total_cross self.results.current['error'] = 0 - - ############################################################################ + + ############################################################################ def do_store_events(self, line): """Advanced commands: Launch store events""" @@ -3883,16 +3883,16 @@ def do_store_events(self, line): if not os.path.exists(pjoin(self.me_dir, 'Events', run)): os.mkdir(pjoin(self.me_dir, 'Events', run)) if not 
os.path.exists(pjoin(self.me_dir, 'HTML', run)): - os.mkdir(pjoin(self.me_dir, 'HTML', run)) - + os.mkdir(pjoin(self.me_dir, 'HTML', run)) + # 1) Store overall process information #input = pjoin(self.me_dir, 'SubProcesses', 'results.dat') #output = pjoin(self.me_dir, 'SubProcesses', '%s_results.dat' % run) - #files.cp(input, output) + #files.cp(input, output) # 2) Treat the files present in the P directory - # Ensure that the number of events is different of 0 + # Ensure that the number of events is different of 0 if self.results.current['nb_event'] == 0 and not self.run_card['gridpack']: logger.warning("No event detected. No cleaning performed! This should allow to run:\n" + " cd Subprocesses; ../bin/internal/combine_events\n"+ @@ -3910,18 +3910,18 @@ def do_store_events(self, line): # if os.path.exists(pjoin(G_path, 'results.dat')): # input = pjoin(G_path, 'results.dat') # output = pjoin(G_path, '%s_results.dat' % run) - # files.cp(input, output) + # files.cp(input, output) #except Exception: - # continue + # continue # Store log try: if os.path.exists(pjoin(G_path, 'log.txt')): input = pjoin(G_path, 'log.txt') output = pjoin(G_path, '%s_log.txt' % run) - files.mv(input, output) + files.mv(input, output) except Exception: continue - #try: + #try: # # Grid # for name in ['ftn26']: # if os.path.exists(pjoin(G_path, name)): @@ -3930,7 +3930,7 @@ def do_store_events(self, line): # input = pjoin(G_path, name) # output = pjoin(G_path, '%s_%s' % (run,name)) # files.mv(input, output) - # misc.gzip(pjoin(G_path, output), error=None) + # misc.gzip(pjoin(G_path, output), error=None) #except Exception: # continue # Delete ftn25 to ensure reproducible runs @@ -3940,11 +3940,11 @@ def do_store_events(self, line): # 3) Update the index.html self.gen_card_html() - + # 4) Move the Files present in Events directory E_path = pjoin(self.me_dir, 'Events') O_path = pjoin(self.me_dir, 'Events', run) - + # The events file for name in ['events.lhe', 'unweighted_events.lhe']: finput = 
pjoin(E_path, name) @@ -3960,30 +3960,30 @@ def do_store_events(self, line): # os.remove(pjoin(O_path, '%s.gz' % name)) # input = pjoin(E_path, name) ## output = pjoin(O_path, name) - + self.update_status('End Parton', level='parton', makehtml=False) devnull.close() - - - ############################################################################ + + + ############################################################################ def do_create_gridpack(self, line): """Advanced commands: Create gridpack from present run""" self.update_status('Creating gridpack', level='parton') # compile gen_ximprove misc.compile(['../bin/internal/gen_ximprove'], cwd=pjoin(self.me_dir, "Source")) - + Gdir = self.get_Gdir() Pdir = set([os.path.dirname(G) for G in Gdir]) - for P in Pdir: + for P in Pdir: allG = misc.glob('G*', path=P) for G in allG: if pjoin(P, G) not in Gdir: logger.debug('removing %s', pjoin(P,G)) shutil.rmtree(pjoin(P,G)) - - + + args = self.split_arg(line) self.check_combine_events(args) if not self.run_tag: self.run_tag = 'tag_1' @@ -3996,13 +3996,13 @@ def do_create_gridpack(self, line): cwd=self.me_dir) misc.call(['./bin/internal/clean'], cwd=self.me_dir) misc.call(['./bin/internal/make_gridpack'], cwd=self.me_dir) - files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), + files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), pjoin(self.me_dir, '%s_gridpack.tar.gz' % self.run_name)) os.system("sed -i.bak \"s/\s*.true.*=.*GridRun/ .false. 
= GridRun/g\" %s/Cards/grid_card.dat" \ % self.me_dir) self.update_status('gridpack created', level='gridpack') - - ############################################################################ + + ############################################################################ def do_shower(self, line): """launch the shower""" @@ -4010,7 +4010,7 @@ def do_shower(self, line): if len(args)>1 and args[0] in self._interfaced_showers: chosen_showers = [args.pop(0)] elif '--no_default' in line: - # If '--no_default' was specified in the arguments, then only one + # If '--no_default' was specified in the arguments, then only one # shower will be run, depending on which card is present. # but we each of them are called. (each of them check if the file exists) chosen_showers = list(self._interfaced_showers) @@ -4021,9 +4021,9 @@ def do_shower(self, line): shower_priority = ['pythia8','pythia'] chosen_showers = [sorted(chosen_showers,key=lambda sh: shower_priority.index(sh) if sh in shower_priority else len(shower_priority)+1)[0]] - + for shower in chosen_showers: - self.exec_cmd('%s %s'%(shower,' '.join(args)), + self.exec_cmd('%s %s'%(shower,' '.join(args)), postcmd=False, printcmd=False) def do_madanalysis5_parton(self, line): @@ -4039,11 +4039,11 @@ def do_madanalysis5_parton(self, line): def mg5amc_py8_interface_consistency_warning(options): """ Check the consistency of the mg5amc_py8_interface installed with the current MG5 and Pythia8 versions. 
""" - + # All this is only relevant is Pythia8 is interfaced to MG5 if not options['pythia8_path']: return None - + if not options['mg5amc_py8_interface_path']: return \ """ @@ -4053,7 +4053,7 @@ def mg5amc_py8_interface_consistency_warning(options): Consider installing the MG5_aMC-PY8 interface with the following command: MG5_aMC>install mg5amc_py8_interface """ - + mg5amc_py8_interface_path = options['mg5amc_py8_interface_path'] py8_path = options['pythia8_path'] # If the specified interface path is relative, make it absolut w.r.t MGDIR if @@ -4062,7 +4062,7 @@ def mg5amc_py8_interface_consistency_warning(options): mg5amc_py8_interface_path = pjoin(MG5DIR,mg5amc_py8_interface_path) py8_path = pjoin(MG5DIR,py8_path) - # Retrieve all the on-install and current versions + # Retrieve all the on-install and current versions fsock = open(pjoin(mg5amc_py8_interface_path, 'MG5AMC_VERSION_ON_INSTALL')) MG5_version_on_install = fsock.read().replace('\n','') fsock.close() @@ -4074,7 +4074,7 @@ def mg5amc_py8_interface_consistency_warning(options): MG5_curr_version =misc.get_pkg_info()['version'] try: p = subprocess.Popen(['./get_pythia8_version.py',py8_path], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mg5amc_py8_interface_path) (out, err) = p.communicate() out = out.decode(errors='ignore').replace('\n','') @@ -4084,37 +4084,37 @@ def mg5amc_py8_interface_consistency_warning(options): float(out) except: PY8_curr_version = None - + if not MG5_version_on_install is None and not MG5_curr_version is None: if MG5_version_on_install != MG5_curr_version: return \ """ The current version of MG5_aMC (v%s) is different than the one active when - installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). + installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). 
Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(MG5_curr_version, MG5_version_on_install) - + if not PY8_version_on_install is None and not PY8_curr_version is None: if PY8_version_on_install != PY8_curr_version: return \ """ The current version of Pythia8 (v%s) is different than the one active when - installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). + installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(PY8_curr_version,PY8_version_on_install) - + return None def setup_Pythia8RunAndCard(self, PY8_Card, run_type): """ Setup the Pythia8 Run environment and card. In particular all the process and run specific parameters of the card are automatically set here. This function returns the path where HEPMC events will be output, if any.""" - + HepMC_event_output = None tag = self.run_tag - + PY8_Card.subruns[0].systemSet('Beams:LHEF',"unweighted_events.lhe.gz") hepmc_format = PY8_Card['HEPMCoutput:file'].lower() @@ -4185,7 +4185,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): misc.mkfifo(fifo_path) # Use defaultSet not to overwrite the current userSet status PY8_Card.defaultSet('HEPMCoutput:file',fifo_path) - HepMC_event_output=fifo_path + HepMC_event_output=fifo_path elif hepmc_format in ['','/dev/null','None']: logger.warning('User disabled the HepMC output of Pythia8.') HepMC_event_output = None @@ -4206,7 +4206,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): # only if it is not already user_set. 
if PY8_Card['JetMatching:qCut']==-1.0: PY8_Card.MadGraphSet('JetMatching:qCut',1.5*self.run_card['xqcut'], force=True) - + if PY8_Card['JetMatching:qCut']<(1.5*self.run_card['xqcut']): logger.error( 'The MLM merging qCut parameter you chose (%f) is less than '%PY8_Card['JetMatching:qCut']+ @@ -4233,7 +4233,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): if PY8_Card['JetMatching:qCut'] not in qCutList: qCutList.append(PY8_Card['JetMatching:qCut']) PY8_Card.MadGraphSet('SysCalc:qCutList', qCutList, force=True) - + if PY8_Card['SysCalc:qCutList']!='auto': for scale in PY8_Card['SysCalc:qCutList']: @@ -4244,7 +4244,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): "'sys_matchscale' in the run_card) is less than 1.5*xqcut, where xqcut is"+ ' the run_card parameter (=%f)\n'%self.run_card['xqcut']+ 'It would be better/safer to use a larger qCut or a smaller xqcut.') - + # Specific MLM settings # PY8 should not implement the MLM veto since the driver should do it # if merging scale variation is turned on @@ -4294,18 +4294,18 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): CKKW_cut = 'ktdurham' elif self.run_card['ptlund']>0.0 and self.run_card['ktdurham']<=0.0: PY8_Card.subruns[0].MadGraphSet('Merging:doPTLundMerging',True) - CKKW_cut = 'ptlund' + CKKW_cut = 'ptlund' else: raise InvalidCmd("*Either* the 'ptlund' or 'ktdurham' cut in "+\ " the run_card must be turned on to activate CKKW(L) merging"+ " with Pythia8, but *both* cuts cannot be turned on at the same time."+ "\n ptlund=%f, ktdurham=%f."%(self.run_card['ptlund'],self.run_card['ktdurham'])) - + # Automatically set qWeed to the CKKWL cut if not defined by the user. if PY8_Card['SysCalc:qWeed']==-1.0: PY8_Card.MadGraphSet('SysCalc:qWeed',self.run_card[CKKW_cut], force=True) - + # MadGraphSet sets the corresponding value (in system mode) # only if it is not already user_set. 
if PY8_Card['Merging:TMS']==-1.0: @@ -4319,7 +4319,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): 'The CKKWl merging scale you chose (%f) is less than '%PY8_Card['Merging:TMS']+ 'the %s cut specified in the run_card parameter (=%f).\n'%(CKKW_cut,self.run_card[CKKW_cut])+ 'It is incorrect to use a smaller CKKWl scale than the generation-level %s cut!'%CKKW_cut) - + PY8_Card.MadGraphSet('TimeShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:rapidityOrder',False) @@ -4381,7 +4381,7 @@ def do_pythia8(self, line): try: import madgraph - except ImportError: + except ImportError: import internal.histograms as histograms else: import madgraph.various.histograms as histograms @@ -4400,16 +4400,16 @@ def do_pythia8(self, line): self.check_pythia8(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) - self.check_pythia8(args) + self.check_pythia8(args) # Update the banner with the pythia card if not self.banner or len(self.banner) <=1: # Here the level keyword 'pythia' must not be changed to 'pythia8'. self.banner = banner_mod.recover_banner(self.results, 'pythia') - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1], pythia_version=8, banner=self.banner) @@ -4425,7 +4425,7 @@ def do_pythia8(self, line): #"Please use 'event_norm = average' in the run_card to avoid this problem.") - + if not self.options['mg5amc_py8_interface_path'] or not \ os.path.exists(pjoin(self.options['mg5amc_py8_interface_path'], 'MG5aMC_PY8_interface')): @@ -4444,16 +4444,16 @@ def do_pythia8(self, line): # Again here 'pythia' is just a keyword for the simulation level. 
self.update_status('\033[92mRunning Pythia8 [arXiv:1410.3012]\033[0m', 'pythia8') - - tag = self.run_tag + + tag = self.run_tag # Now write Pythia8 card # Start by reading, starting from the default one so that the 'user_set' # tag are correctly set. - PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', + PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', 'pythia8_card_default.dat')) PY8_Card.read(pjoin(self.me_dir, 'Cards', 'pythia8_card.dat'), setter='user') - + run_type = 'default' merged_run_types = ['MLM','CKKW'] if int(self.run_card['ickkw'])==1: @@ -4471,7 +4471,7 @@ def do_pythia8(self, line): cmd_card = StringIO.StringIO() PY8_Card.write(cmd_card,pjoin(self.me_dir,'Cards','pythia8_card_default.dat'), direct_pythia_input=True) - + # Now setup the preamble to make sure that everything will use the locally # installed tools (if present) even if the user did not add it to its # environment variables. @@ -4486,13 +4486,13 @@ def do_pythia8(self, line): preamble = misc.get_HEPTools_location_setter( pjoin(MG5DIR,'HEPTools'),'lib') preamble += "\n unset PYTHIA8DATA\n" - + open(pythia_cmd_card,'w').write("""! ! It is possible to run this card manually with: ! %s %s ! 
"""%(preamble+pythia_main,os.path.basename(pythia_cmd_card))+cmd_card.getvalue()) - + # launch pythia8 pythia_log = pjoin(self.me_dir , 'Events', self.run_name , '%s_pythia8.log' % tag) @@ -4504,13 +4504,13 @@ def do_pythia8(self, line): shell_exe = None if os.path.exists('/usr/bin/env'): shell_exe = '/usr/bin/env %s'%shell - else: + else: shell_exe = misc.which(shell) if not shell_exe: raise self.InvalidCmd('No s hell could be found in your environment.\n'+ "Make sure that either '%s' is in your path or that the"%shell+\ " command '/usr/bin/env %s' exists and returns a valid path."%shell) - + exe_cmd = "#!%s\n%s"%(shell_exe,' '.join( [preamble+pythia_main, os.path.basename(pythia_cmd_card)])) @@ -4528,7 +4528,7 @@ def do_pythia8(self, line): ( os.path.exists(HepMC_event_output) and \ stat.S_ISFIFO(os.stat(HepMC_event_output).st_mode)) startPY8timer = time.time() - + # Information that will be extracted from this PY8 run PY8_extracted_information={ 'sigma_m':None, 'Nacc':None, 'Ntry':None, 'cross_sections':{} } @@ -4556,7 +4556,7 @@ def do_pythia8(self, line): n_cores = max(int(self.options['cluster_size']),1) elif self.options['run_mode']==2: n_cores = max(int(self.cluster.nb_core),1) - + lhe_file_name = os.path.basename(PY8_Card.subruns[0]['Beams:LHEF']) lhe_file = lhe_parser.EventFile(pjoin(self.me_dir,'Events', self.run_name,PY8_Card.subruns[0]['Beams:LHEF'])) @@ -4574,7 +4574,7 @@ def do_pythia8(self, line): if self.options['run_mode']==2: min_n_events_per_job = 100 elif self.options['run_mode']==1: - min_n_events_per_job = 1000 + min_n_events_per_job = 1000 min_n_core = n_events//min_n_events_per_job n_cores = max(min(min_n_core,n_cores),1) @@ -4584,8 +4584,8 @@ def do_pythia8(self, line): logger.info('Follow Pythia8 shower by running the '+ 'following command (in a separate terminal):\n tail -f %s'%pythia_log) - if self.options['run_mode']==2 and self.options['nb_core']>1: - ret_code = self.cluster.launch_and_wait(wrapper_path, + if 
self.options['run_mode']==2 and self.options['nb_core']>1: + ret_code = self.cluster.launch_and_wait(wrapper_path, argument= [], stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events',self.run_name)) else: @@ -4630,10 +4630,10 @@ def do_pythia8(self, line): wrapper = open(wrapper_path,'w') if self.options['cluster_temp_path'] is None: exe_cmd = \ -"""#!%s +"""#!%s ./%s PY8Card.dat >& PY8_log.txt """ - else: + else: exe_cmd = \ """#!%s ln -s ./events_$1.lhe.gz ./events.lhe.gz @@ -4663,21 +4663,21 @@ def do_pythia8(self, line): # Set it as executable st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) - + # Split the .lhe event file, create event partition partition=[n_available_events//n_cores]*n_cores for i in range(n_available_events%n_cores): partition[i] += 1 - + # Splitting according to the total number of events requested by the user # Will be used to determine the number of events to indicate in the PY8 split cards. partition_for_PY8=[n_events//n_cores]*n_cores for i in range(n_events%n_cores): partition_for_PY8[i] += 1 - - logger.info('Splitting .lhe event file for PY8 parallelization...') - n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) - + + logger.info('Splitting .lhe event file for PY8 parallelization...') + n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) + if n_splits!=len(partition): raise MadGraph5Error('Error during lhe file splitting. Expected %d files but obtained %d.' 
%(len(partition),n_splits)) @@ -4690,7 +4690,7 @@ def do_pythia8(self, line): # Add the necessary run content shutil.move(pjoin(parallelization_dir,lhe_file.name+'_%d.lhe.gz'%split_id), pjoin(parallelization_dir,split_files[-1])) - + logger.info('Submitting Pythia8 jobs...') for i, split_file in enumerate(split_files): # We must write a PY8Card tailored for each split so as to correct the normalization @@ -4706,7 +4706,7 @@ def do_pythia8(self, line): split_PY8_Card.write(pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,'PY8Card.dat'), add_missing=False) in_files = [pjoin(parallelization_dir,os.path.basename(pythia_main)), - pjoin(parallelization_dir,'PY8Card_%d.dat'%i), + pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,split_file)] if self.options['cluster_temp_path'] is None: out_files = [] @@ -4718,35 +4718,35 @@ def do_pythia8(self, line): if os.path.basename(in_file)==split_file: ln(in_file,selected_cwd,name='events.lhe.gz') elif os.path.basename(in_file).startswith('PY8Card'): - ln(in_file,selected_cwd,name='PY8Card.dat') + ln(in_file,selected_cwd,name='PY8Card.dat') else: - ln(in_file,selected_cwd) + ln(in_file,selected_cwd) in_files = [] wrapper_path = os.path.basename(wrapper_path) else: out_files = ['split_%d.tar.gz'%i] selected_cwd = parallelization_dir - self.cluster.submit2(wrapper_path, - argument=[str(i)], cwd=selected_cwd, + self.cluster.submit2(wrapper_path, + argument=[str(i)], cwd=selected_cwd, input_files=in_files, output_files=out_files, required_output=out_files) - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return logger.info('Pythia8 shower jobs: %d Idle, %d Running, %d Done [%s]'\ %(Idle, Running, Done, misc.format_time(time.time() - startPY8timer))) self.cluster.wait(parallelization_dir,wait_monitoring) - + logger.info('Merging results from the split PY8 runs...') if self.options['cluster_temp_path']: # Decompressing the output for i, split_file in 
enumerate(split_files): misc.call(['tar','-xzf','split_%d.tar.gz'%i],cwd=parallelization_dir) os.remove(pjoin(parallelization_dir,'split_%d.tar.gz'%i)) - + # Now merge logs pythia_log_file = open(pythia_log,'w') n_added = 0 @@ -4778,7 +4778,7 @@ def wait_monitoring(Idle, Running, Done): if n_added>0: PY8_extracted_information['sigma_m'] /= float(n_added) pythia_log_file.close() - + # djr plots djr_HwU = None n_added = 0 @@ -4845,7 +4845,7 @@ def wait_monitoring(Idle, Running, Done): if not os.path.isfile(hepmc_file): continue all_hepmc_files.append(hepmc_file) - + if len(all_hepmc_files)>0: hepmc_output = pjoin(self.me_dir,'Events',self.run_name,HepMC_event_output) with misc.TMP_directory() as tmp_dir: @@ -4860,8 +4860,8 @@ def wait_monitoring(Idle, Running, Done): break header.close() tail = open(pjoin(tmp_dir,'tail.hepmc'),'w') - n_tail = 0 - + n_tail = 0 + for line in misc.reverse_readline(all_hepmc_files[-1]): if line.startswith('HepMC::'): n_tail += 1 @@ -4871,7 +4871,7 @@ def wait_monitoring(Idle, Running, Done): tail.close() if n_tail>1: raise MadGraph5Error('HEPMC files should only have one trailing command.') - ###################################################################### + ###################################################################### # This is the most efficient way of putting together HEPMC's, *BUT* # # WARNING: NEED TO RENDER THE CODE BELOW SAFE TOWARDS INJECTION # ###################################################################### @@ -4888,12 +4888,12 @@ def wait_monitoring(Idle, Running, Done): elif sys.platform == 'darwin': # sed on MAC has slightly different synthax than on os.system(' '.join(['sed','-i',"''","'%s;$d'"% - (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) - else: - # other UNIX systems + (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) + else: + # other UNIX systems os.system(' '.join(['sed','-i']+["-e '%id'"%(i+1) for i in range(n_head)]+ ["-e '$d'",hepmc_file])) - + os.system(' 
'.join(['cat',pjoin(tmp_dir,'header.hepmc')]+all_hepmc_files+ [pjoin(tmp_dir,'tail.hepmc'),'>',hepmc_output])) @@ -4915,12 +4915,12 @@ def wait_monitoring(Idle, Running, Done): 'Inclusive cross section:' not in '\n'.join(open(pythia_log,'r').readlines()[-20:]): logger.warning('Fail to produce a pythia8 output. More info in \n %s'%pythia_log) return - + # Plot for Pythia8 successful = self.create_plot('Pythia8') if not successful: logger.warning('Failed to produce Pythia8 merging plots.') - + self.to_store.append('pythia8') # Study matched cross-sections @@ -4931,7 +4931,7 @@ def wait_monitoring(Idle, Running, Done): if self.options['run_mode']==0 or (self.options['run_mode']==2 and self.options['nb_core']==1): PY8_extracted_information['sigma_m'],PY8_extracted_information['Nacc'],\ PY8_extracted_information['Ntry'] = self.parse_PY8_log_file( - pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) + pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) else: logger.warning('Pythia8 cross-section could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1. 
YYYYY') @@ -4944,8 +4944,8 @@ def wait_monitoring(Idle, Running, Done): Ntry = PY8_extracted_information['Ntry'] sigma_m = PY8_extracted_information['sigma_m'] # Compute pythia error - error = self.results[self.run_name].return_tag(self.run_tag)['error'] - try: + error = self.results[self.run_name].return_tag(self.run_tag)['error'] + try: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) except ZeroDivisionError: # Cannot compute error @@ -4966,31 +4966,31 @@ def wait_monitoring(Idle, Running, Done): else: logger.warning('Pythia8 merged cross-sections could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1.XXXXX') - PY8_extracted_information['cross_sections'] = {} - + PY8_extracted_information['cross_sections'] = {} + cross_sections = PY8_extracted_information['cross_sections'] if cross_sections: - # Filter the cross_sections specified an keep only the ones + # Filter the cross_sections specified an keep only the ones # with central parameters and a different merging scale a_float_re = '[\+|-]?\d+(\.\d*)?([EeDd][\+|-]?\d+)?' 
central_merging_re = re.compile( '^\s*Weight_MERGING\s*=\s*(?P%s)\s*$'%a_float_re, - re.IGNORECASE) + re.IGNORECASE) cross_sections = dict( (float(central_merging_re.match(xsec).group('merging')),value) - for xsec, value in cross_sections.items() if not + for xsec, value in cross_sections.items() if not central_merging_re.match(xsec) is None) central_scale = PY8_Card['JetMatching:qCut'] if \ int(self.run_card['ickkw'])==1 else PY8_Card['Merging:TMS'] if central_scale in cross_sections: self.results.add_detail('cross_pythia8', cross_sections[central_scale][0]) self.results.add_detail('error_pythia8', cross_sections[central_scale][1]) - + #logger.info('Pythia8 merged cross-sections are:') #for scale in sorted(cross_sections.keys()): # logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ - # (scale,cross_sections[scale][0],cross_sections[scale][1])) - + # (scale,cross_sections[scale][0],cross_sections[scale][1])) + xsecs_file = open(pjoin(self.me_dir,'Events',self.run_name, '%s_merged_xsecs.txt'%tag),'w') if cross_sections: @@ -5003,9 +5003,9 @@ def wait_monitoring(Idle, Running, Done): xsecs_file.write('Cross-sections could not be read from the'+\ "XML node 'xsection' of the .dat file produced by Pythia8.") xsecs_file.close() - + #Update the banner - # We add directly the pythia command card because it has the full + # We add directly the pythia command card because it has the full # information self.banner.add(pythia_cmd_card) @@ -5022,13 +5022,13 @@ def wait_monitoring(Idle, Running, Done): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + def parse_PY8_log_file(self, log_file_path): """ Parse a log file to extract number of event and cross-section. 
""" pythiare = re.compile("Les Houches User Process\(es\)\s*\d+\s*\|\s*(?P\d+)\s*(?P\d+)\s*(?P\d+)\s*\|\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") pythia_xsec_re = re.compile("Inclusive cross section\s*:\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") sigma_m, Nacc, Ntry = None, None, None - for line in misc.BackRead(log_file_path): + for line in misc.BackRead(log_file_path): info = pythiare.search(line) if not info: # Also try to obtain the cross-section and error from the final xsec line of pythia8 log @@ -5058,7 +5058,7 @@ def parse_PY8_log_file(self, log_file_path): raise self.InvalidCmd("Could not find cross-section and event number information "+\ "in Pythia8 log\n '%s'."%log_file_path) - + def extract_cross_sections_from_DJR(self,djr_output): """Extract cross-sections from a djr XML output.""" import xml.dom.minidom as minidom @@ -5075,11 +5075,11 @@ def extract_cross_sections_from_DJR(self,djr_output): [float(xsec.childNodes[0].data.split()[0]), float(xsec.childNodes[0].data.split()[1])]) for xsec in xsections) - + def do_pythia(self, line): """launch pythia""" - - + + # Check argument's validity args = self.split_arg(line) if '--no_default' in args: @@ -5089,12 +5089,12 @@ def do_pythia(self, line): args.remove('--no_default') else: no_default = False - + if not self.run_name: self.check_pythia(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) self.check_pythia(args) @@ -5102,7 +5102,7 @@ def do_pythia(self, line): logger.error('pythia-pgs require event_norm to be on sum. 
Do not run pythia6') return - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1]) if self.options['automatic_html_opening']: @@ -5114,35 +5114,35 @@ def do_pythia(self, line): self.banner = banner_mod.recover_banner(self.results, 'pythia') pythia_src = pjoin(self.options['pythia-pgs_path'],'src') - + self.results.add_detail('run_mode', 'madevent') self.update_status('Running Pythia', 'pythia') try: os.remove(pjoin(self.me_dir,'Events','pythia.done')) except Exception: - pass - + pass + ## LAUNCHING PYTHIA # check that LHAPATH is define. if not re.search(r'^\s*LHAPATH=%s/PDFsets' % pythia_src, - open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), + open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), re.M): f = open(pjoin(self.me_dir,'Cards','pythia_card.dat'),'a') f.write('\n LHAPATH=%s/PDFsets' % pythia_src) f.close() tag = self.run_tag pythia_log = pjoin(self.me_dir, 'Events', self.run_name , '%s_pythia.log' % tag) - #self.cluster.launch_and_wait('../bin/internal/run_pythia', + #self.cluster.launch_and_wait('../bin/internal/run_pythia', # argument= [pythia_src], stdout= pythia_log, # stderr=subprocess.STDOUT, # cwd=pjoin(self.me_dir,'Events')) output_files = ['pythia_events.hep'] if self.run_card['use_syst']: output_files.append('syst.dat') - if self.run_card['ickkw'] == 1: + if self.run_card['ickkw'] == 1: output_files += ['beforeveto.tree', 'xsecs.tree', 'events.tree'] - + os.environ['PDG_MASS_TBL'] = pjoin(pythia_src,'mass_width_2004.mc') self.cluster.launch_and_wait(pjoin(pythia_src, 'pythia'), input_files=[pjoin(self.me_dir, "Events", "unweighted_events.lhe"), @@ -5152,23 +5152,23 @@ def do_pythia(self, line): stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events')) - + os.remove(pjoin(self.me_dir, "Events", "unweighted_events.lhe")) if not 
os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): logger.warning('Fail to produce pythia output. More info in \n %s' % pythia_log) return - + self.to_store.append('pythia') - + # Find the matched cross-section if int(self.run_card['ickkw']): # read the line from the bottom of the file - #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, + #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, # '%s_pythia.log' % tag)) - pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") - for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, + pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") + for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, '%s_pythia.log' % tag)): info = pythiare.search(line) if not info: @@ -5188,16 +5188,16 @@ def do_pythia(self, line): self.results.add_detail('nb_event_pythia', Nacc) #compute pythia error error = self.results[self.run_name].return_tag(self.run_tag)['error'] - if Nacc: + if Nacc: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) else: error_m = 10000 * sigma_m # works both for fixed number of generated events and fixed accepted events self.results.add_detail('error_pythia', error_m) - break + break #pythia_log.close() - + pydir = pjoin(self.options['pythia-pgs_path'], 'src') eradir = self.options['exrootanalysis_path'] madir = self.options['madanalysis_path'] @@ -5216,12 +5216,12 @@ def do_pythia(self, line): # Creating LHE file self.run_hep2lhe(banner_path) - + if int(self.run_card['ickkw']): misc.gzip(pjoin(self.me_dir,'Events','beforeveto.tree'), - stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + - if self.run_card['use_syst'] in self.true: # Calculate syscalc info based on 
syst.dat try: @@ -5233,7 +5233,7 @@ def do_pythia(self, line): # Store syst.dat misc.gzip(pjoin(self.me_dir,'Events', 'syst.dat'), stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_syst.dat.gz')) - + # Store syscalc.dat if os.path.exists(pjoin(self.me_dir, 'Events', 'syscalc.dat')): filename = pjoin(self.me_dir, 'Events' ,self.run_name, @@ -5253,7 +5253,7 @@ def do_pythia(self, line): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + ################################################################################ def do_remove(self, line): @@ -5263,8 +5263,8 @@ def do_remove(self, line): run, tag, mode = self.check_remove(args) if 'banner' in mode: mode.append('all') - - + + if run == 'all': # Check first if they are not a run with a name run. if os.path.exists(pjoin(self.me_dir, 'Events', 'all')): @@ -5280,7 +5280,7 @@ def do_remove(self, line): logger.info(error) pass # run already clear return - + # Check that run exists if not os.path.exists(pjoin(self.me_dir, 'Events', run)): raise self.InvalidCmd('No run \'%s\' detected' % run) @@ -5294,7 +5294,7 @@ def do_remove(self, line): # Found the file to delete - + to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) to_delete += misc.glob('*', pjoin(self.me_dir, 'HTML', run)) # forbid the banner to be removed @@ -5314,7 +5314,7 @@ def do_remove(self, line): if os.path.exists(pjoin(self.me_dir, 'Events', run, 'unweighted_events.lhe.gz')): to_delete.append('unweighted_events.lhe.gz') if os.path.exists(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')): - to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) + to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) if nb_rm != len(to_delete): logger.warning('Be carefull that partonic information are on the point to be removed.') if 'all' in mode: @@ -5327,8 +5327,8 @@ def do_remove(self, line): if 'delphes' not 
in mode: to_delete = [f for f in to_delete if 'delphes' not in f] if 'parton' not in mode: - to_delete = [f for f in to_delete if 'delphes' in f - or 'pgs' in f + to_delete = [f for f in to_delete if 'delphes' in f + or 'pgs' in f or 'pythia' in f] if not self.force and len(to_delete): question = 'Do you want to delete the following files?\n %s' % \ @@ -5336,7 +5336,7 @@ def do_remove(self, line): ans = self.ask(question, 'y', choices=['y','n']) else: ans = 'y' - + if ans == 'y': for file2rm in to_delete: if os.path.exists(pjoin(self.me_dir, 'Events', run, file2rm)): @@ -5374,7 +5374,7 @@ def do_remove(self, line): if ans == 'y': for file2rm in to_delete: os.remove(file2rm) - + if 'banner' in mode: to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) if tag: @@ -5389,8 +5389,8 @@ def do_remove(self, line): return elif any(['banner' not in os.path.basename(p) for p in to_delete]): if to_delete: - raise MadGraph5Error('''Some output still exists for this run. - Please remove those output first. Do for example: + raise MadGraph5Error('''Some output still exists for this run. + Please remove those output first. Do for example: remove %s all banner ''' % run) else: @@ -5400,7 +5400,7 @@ def do_remove(self, line): return else: logger.info('''The banner is not removed. In order to remove it run: - remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) + remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) # update database. 
self.results.clean(mode, run, tag) @@ -5420,7 +5420,7 @@ def do_plot(self, line): logger.info('plot for run %s' % self.run_name) if not self.force: self.ask_edit_cards(['plot_card.dat'], args, plot=True) - + if any([arg in ['all','parton'] for arg in args]): filename = pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe') if os.path.exists(filename+'.gz'): @@ -5438,8 +5438,8 @@ def do_plot(self, line): except Exception: pass else: - logger.info('No valid files for partonic plot') - + logger.info('No valid files for partonic plot') + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_events.lhe' % self.run_tag) @@ -5452,10 +5452,10 @@ def do_plot(self, line): stdout= "%s.gz" % filename) else: logger.info('No valid files for pythia plot') - - + + if any([arg in ['all','pgs'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) @@ -5464,15 +5464,15 @@ def do_plot(self, line): misc.gzip(filename) else: logger.info('No valid files for pgs plot') - + if any([arg in ['all','delphes'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) if os.path.exists(filename): self.create_plot('Delphes') - misc.gzip(filename) + misc.gzip(filename) else: logger.info('No valid files for delphes plot') @@ -5488,9 +5488,9 @@ def do_syscalc(self, line): if self.ninitial == 1: logger.error('SysCalc can\'t be run for decay processes') return - + logger.info('Calculating systematics for run %s' % self.run_name) - + self.ask_edit_cards(['run_card.dat'], args, plot=False) self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) 
if any([arg in ['all','parton'] for arg in args]): @@ -5504,7 +5504,7 @@ def do_syscalc(self, line): stdout="%s.gz" % filename) else: logger.info('No valid files for parton level systematics run.') - + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_syst.dat' % self.run_tag) @@ -5525,17 +5525,17 @@ def do_syscalc(self, line): else: logger.info('No valid files for pythia level') - + def store_result(self): - """ tar the pythia results. This is done when we are quite sure that + """ tar the pythia results. This is done when we are quite sure that the pythia output will not be use anymore """ if not self.run_name: return - + if not self.to_store: - return - + return + tag = self.run_card['run_tag'] self.update_status('storing files of previous run', level=None,\ error=True) @@ -5546,14 +5546,14 @@ def store_result(self): misc.gzip(pjoin(self.me_dir,'Events',self.run_name,"unweighted_events.lhe")) if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) - + if 'pythia' in self.to_store: self.update_status('Storing Pythia files of previous run', level='pythia', error=True) p = pjoin(self.me_dir,'Events') n = self.run_name t = tag self.to_store.remove('pythia') - misc.gzip(pjoin(p,'pythia_events.hep'), + misc.gzip(pjoin(p,'pythia_events.hep'), stdout=pjoin(p, str(n),'%s_pythia_events.hep' % t),forceexternal=True) if 'pythia8' in self.to_store: @@ -5581,26 +5581,26 @@ def store_result(self): os.system("mv " + file_path + hepmc_fileformat + " " + move_hepmc_path) self.update_status('Done', level='pythia',makehtml=False,error=True) - self.results.save() - + self.results.save() + self.to_store = [] - def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, + def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, run_type='', mode=None, **opt): """ """ argument = [str(arg) for arg in argument] if mode is 
None: mode = self.cluster_mode - + # ensure that exe is executable if os.path.exists(exe) and not os.access(exe, os.X_OK): os.system('chmod +x %s ' % exe) elif (cwd and os.path.exists(pjoin(cwd, exe))) and not \ os.access(pjoin(cwd, exe), os.X_OK): os.system('chmod +x %s ' % pjoin(cwd, exe)) - + if mode == 0: - self.update_status((remaining, 1, + self.update_status((remaining, 1, self.total_jobs - remaining -1, run_type), level=None, force=False) start = time.time() #os.system('cd %s; ./%s' % (cwd,exe)) @@ -5613,24 +5613,24 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, elif mode in [1,2]: exename = os.path.basename(exe) # For condor cluster, create the input/output files - if 'ajob' in exename: + if 'ajob' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat','dname.mg', pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) - + output_files = [] required_output = [] - + #Find the correct PDF input file input_files.append(self.get_pdf_input_filename()) - + #Find the correct ajob Gre = re.compile("\s*j=(G[\d\.\w]+)") origre = re.compile("grid_directory=(G[\d\.\w]+)") - try : + try : fsock = open(exe) except Exception: fsock = open(pjoin(cwd,exe)) @@ -5648,21 +5648,21 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if os.path.isdir(pjoin(cwd,G)): input_files.append(G) required_output.append('%s/results.dat' % G) - + if origre.search(text): G_grid = origre.search(text).groups()[0] input_files.append(pjoin(G_grid, 'ftn26')) - + #submitting - self.cluster.submit2(exe, stdout=stdout, cwd=cwd, + self.cluster.submit2(exe, stdout=stdout, cwd=cwd, input_files=input_files, output_files=output_files, required_output=required_output) elif 
'survey' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5671,7 +5671,7 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [] required_output = [] - + #Find the correct ajob suffix = "_%s" % int(float(argument[0])) if suffix == '_0': @@ -5685,12 +5685,12 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if '.' in argument[0]: offset = int(str(argument[0]).split('.')[1]) else: - offset = 0 - + offset = 0 + if offset ==0 or offset == int(float(argument[0])): if os.path.exists(pjoin(cwd, G, 'input_app.txt')): os.remove(pjoin(cwd, G, 'input_app.txt')) - + if os.path.exists(os.path.realpath(pjoin(cwd, G, 'ftn25'))): if offset == 0 or offset == int(float(argument[0])): os.remove(pjoin(cwd, G, 'ftn25')) @@ -5706,16 +5706,16 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, pass #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, required_output=required_output, **opt) elif "refine_splitted.sh" in exename: input_files = ['madevent','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - 
input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5725,25 +5725,25 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [argument[0]] required_output = [] for G in output_files: - required_output.append('%s/results.dat' % G) + required_output.append('%s/results.dat' % G) input_files.append(pjoin(argument[1], "input_app.txt")) input_files.append(pjoin(argument[1], "ftn26")) - + #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, - required_output=required_output, **opt) + required_output=required_output, **opt) + + - - else: self.cluster.submit(exe, argument=argument, stdout=stdout, cwd=cwd, **opt) - + ############################################################################ def find_madevent_mode(self): """Find if Madevent is in Group mode or not""" - + # The strategy is too look in the files Source/run_configs.inc # if we found: ChanPerJob=3 then it's a group mode. 
file_path = pjoin(self.me_dir, 'Source', 'run_config.inc') @@ -5752,11 +5752,11 @@ def find_madevent_mode(self): return 'group' else: return 'v4' - + ############################################################################ def monitor(self, run_type='monitor', mode=None, html=False): """ monitor the progress of running job """ - + starttime = time.time() if mode is None: @@ -5772,8 +5772,8 @@ def monitor(self, run_type='monitor', mode=None, html=False): else: update_status = lambda idle, run, finish: None update_first = None - try: - self.cluster.wait(self.me_dir, update_status, update_first=update_first) + try: + self.cluster.wait(self.me_dir, update_status, update_first=update_first) except Exception as error: logger.info(error) if not self.force: @@ -5788,24 +5788,24 @@ def monitor(self, run_type='monitor', mode=None, html=False): raise except KeyboardInterrupt as error: self.cluster.remove() - raise - - + raise + - ############################################################################ + + ############################################################################ def configure_directory(self, html_opening=True): - """ All action require before any type of run """ + """ All action require before any type of run """ # Basic check assert os.path.exists(pjoin(self.me_dir,'SubProcesses')) # environmental variables to be included in make_opts self.make_opts_var = {} - + #see when the last file was modified time_mod = max([os.path.getmtime(pjoin(self.me_dir,'Cards','run_card.dat')), os.path.getmtime(pjoin(self.me_dir,'Cards','param_card.dat'))]) - + if self.configured >= time_mod and hasattr(self, 'random') and hasattr(self, 'run_card'): #just ensure that cluster specific are correctly handled if self.cluster: @@ -5820,7 +5820,7 @@ def configure_directory(self, html_opening=True): #open only once the web page # Change current working directory self.launching_dir = os.getcwd() - + # Check if we need the MSSM special treatment model = self.find_model_name() 
if model == 'mssm' or model.startswith('mssm-'): @@ -5828,14 +5828,14 @@ def configure_directory(self, html_opening=True): mg5_param = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') check_param_card.convert_to_mg5card(param_card, mg5_param) check_param_card.check_valid_param_card(mg5_param) - + # limit the number of event to 100k self.check_nb_events() # this is in order to avoid conflicts between runs with and without # lhapdf. not needed anymore the makefile handles it automaticallu #misc.compile(['clean4pdf'], cwd = pjoin(self.me_dir, 'Source')) - + self.make_opts_var['pdlabel1'] = '' self.make_opts_var['pdlabel2'] = '' if self.run_card['pdlabel1'] in ['eva', 'iww']: @@ -5866,7 +5866,7 @@ def configure_directory(self, html_opening=True): self.copy_lep_densities(self.run_card['pdlabel'], pjoin(self.me_dir, 'Source')) self.make_opts_var['pdlabel1'] = 'ee' self.make_opts_var['pdlabel2'] = 'ee' - + # set random number if self.run_card['iseed'] != 0: self.random = int(self.run_card['iseed']) @@ -5885,18 +5885,18 @@ def configure_directory(self, html_opening=True): break else: self.random = random.randint(1, 30107) - + #set random seed for python part of the code if self.run_card['python_seed'] == -2: #-2 means same as run_card import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] elif self.run_card['python_seed'] >= 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] if self.run_card['ickkw'] == 2: logger.info('Running with CKKW matching') self.treat_ckkw_matching() @@ -5905,12 +5905,12 @@ def configure_directory(self, html_opening=True): self.update_make_opts(self.run_card) # reset 
list of Gdirectory self.Gdirs = None - + # create param_card.inc and run_card.inc self.do_treatcards('') - + logger.info("compile Source Directory") - + # Compile for name in [ 'all']:#, '../bin/internal/combine_events']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) @@ -5933,7 +5933,7 @@ def configure_directory(self, html_opening=True): os.remove(pjoin(self.me_dir, 'lib','libbias.a')) force_subproc_clean = True - + # Finally compile the bias module as well if self.run_card['bias_module'] not in ['dummy',None]: logger.debug("Compiling the bias module '%s'"%bias_name) @@ -5945,7 +5945,7 @@ def configure_directory(self, html_opening=True): 'INVALID' in str(bias_module_valid).upper(): raise InvalidCmd("The bias module '%s' cannot be used because of:\n%s"% (bias_name,bias_module_valid)) - + self.compile(arg=[], cwd=os.path.join(self.me_dir, 'Source','BIAS',bias_name)) self.proc_characteristics['bias_module']=bias_name # Update the proc_characterstics file @@ -5954,7 +5954,7 @@ def configure_directory(self, html_opening=True): if force_subproc_clean: # Make sure that madevent will be recompiled - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] for nb_proc,subdir in enumerate(subproc): Pdir = pjoin(self.me_dir, 'SubProcesses',subdir.strip()) @@ -5971,20 +5971,20 @@ def configure_directory(self, html_opening=True): ############################################################################ @staticmethod def check_dir(path, default=''): - """check if the directory exists. if so return the path otherwise the + """check if the directory exists. 
if so return the path otherwise the default""" - + if os.path.isdir(path): return path else: return default - + ############################################################################ def get_Gdir(self, Pdir=None, symfact=None): """get the list of Gdirectory if not yet saved.""" - + if hasattr(self, "Gdirs") and self.Gdirs: if self.me_dir in self.Gdirs[0]: if Pdir is None: @@ -6000,8 +6000,8 @@ def get_Gdir(self, Pdir=None, symfact=None): Pdirs = self.get_Pdir() - Gdirs = {self.me_dir:[]} - mfactors = {} + Gdirs = {self.me_dir:[]} + mfactors = {} for P in Pdirs: Gdirs[P] = [] #for the next line do not use P, since in readonly mode it might not have symfact @@ -6012,7 +6012,7 @@ def get_Gdir(self, Pdir=None, symfact=None): mfactors[pjoin(P, "G%s" % tag)] = mfactor self.Gdirs = (Gdirs, mfactors) return self.get_Gdir(Pdir, symfact=symfact) - + ############################################################################ def set_run_name(self, name, tag=None, level='parton', reload_card=False, allow_new_tag=True): @@ -6030,8 +6030,8 @@ def get_last_tag(self, level): tagRun = self.results[self.run_name][i] if tagRun.pythia or tagRun.shower or tagRun.pythia8 : return tagRun['tag'] - - + + # when are we force to change the tag new_run:previous run requiring changes upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','madanalysis5_hadron','madanalysis5_parton', 'rivet'], 'pythia': ['pythia','pgs','delphes','madanalysis5_hadron'], @@ -6044,7 +6044,7 @@ def get_last_tag(self, level): 'syscalc':[], 'rivet':['rivet']} - if name == self.run_name: + if name == self.run_name: if reload_card: run_card = pjoin(self.me_dir, 'Cards','run_card.dat') self.run_card = banner_mod.RunCard(run_card) @@ -6064,13 +6064,13 @@ def get_last_tag(self, level): break return get_last_tag(self, level) - + # save/clean previous run if self.run_name: self.store_result() # store new name self.run_name = name - + new_tag = False # First call for this run -> set the banner self.banner = 
banner_mod.recover_banner(self.results, level, name) @@ -6079,8 +6079,8 @@ def get_last_tag(self, level): else: # Read run_card run_card = pjoin(self.me_dir, 'Cards','run_card.dat') - self.run_card = banner_mod.RunCard(run_card) - + self.run_card = banner_mod.RunCard(run_card) + if tag: self.run_card['run_tag'] = tag new_tag = True @@ -6093,7 +6093,7 @@ def get_last_tag(self, level): self.results.update('add run %s' % name, 'all', makehtml=False) else: for tag in upgrade_tag[level]: - + if getattr(self.results[self.run_name][-1], tag): # LEVEL is already define in the last tag -> need to switch tag tag = self.get_available_tag() @@ -6103,8 +6103,8 @@ def get_last_tag(self, level): if not new_tag: # We can add the results to the current run tag = self.results[self.run_name][-1]['tag'] - self.run_card['run_tag'] = tag # ensure that run_tag is correct - + self.run_card['run_tag'] = tag # ensure that run_tag is correct + if allow_new_tag and (name in self.results and not new_tag): self.results.def_current(self.run_name) else: @@ -6113,15 +6113,15 @@ def get_last_tag(self, level): self.run_tag = self.run_card['run_tag'] return get_last_tag(self, level) - - - + + + ############################################################################ def check_nb_events(self): - """Find the number of event in the run_card, and check that this is not + """Find the number of event in the run_card, and check that this is not too large""" - + nb_event = int(self.run_card['nevents']) if nb_event > 1000000: logger.warning("Attempting to generate more than 1M events") @@ -6133,20 +6133,20 @@ def check_nb_events(self): return - - ############################################################################ + + ############################################################################ def update_random(self): """ change random number""" - + self.random += 3 if self.random > 30081*30081: # can't use too big random number raise MadGraph5Error('Random seed too large ' + str(self.random) + ' 
> 30081*30081') - if self.run_card['python_seed'] == -2: + if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.random) + random.seed(self.random) random.mg_seedset = self.random - + ############################################################################ def save_random(self): """save random number in appropirate file""" @@ -6155,14 +6155,14 @@ def save_random(self): fsock.writelines('r=%s\n' % self.random) def do_quit(self, *args, **opts): - + return common_run.CommonRunCmd.do_quit(self, *args, **opts) #return CmdExtended.do_quit(self, *args, **opts) - + ############################################################################ def treat_CKKW_matching(self): """check for ckkw""" - + lpp1 = self.run_card['lpp1'] lpp2 = self.run_card['lpp2'] e1 = self.run_card['ebeam1'] @@ -6170,19 +6170,19 @@ def treat_CKKW_matching(self): pd = self.run_card['pdlabel'] lha = self.run_card['lhaid'] xq = self.run_card['xqcut'] - translation = {'e1': e1, 'e2':e2, 'pd':pd, + translation = {'e1': e1, 'e2':e2, 'pd':pd, 'lha':lha, 'xq':xq} if lpp1 or lpp2: - # Remove ':s from pd + # Remove ':s from pd if pd.startswith("'"): pd = pd[1:] if pd.endswith("'"): - pd = pd[:-1] + pd = pd[:-1] if xq >2 or xq ==2: xq = 2 - + # find data file if pd == "lhapdf": issudfile = 'lib/issudgrid-%(e1)s-%(e2)s-%(pd)s-%(lha)s-%(xq)s.dat.gz' @@ -6192,9 +6192,9 @@ def treat_CKKW_matching(self): issudfile = pjoin(self.webbin, issudfile % translation) else: issudfile = pjoin(self.me_dir, issudfile % translation) - + logger.info('Sudakov grid file: %s' % issudfile) - + # check that filepath exists if os.path.exists(issudfile): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') @@ -6203,20 +6203,20 @@ def treat_CKKW_matching(self): msg = 'No sudakov grid file for parameter choice. Start to generate it. 
This might take a while' logger.info(msg) self.update_status('GENERATE SUDAKOV GRID', level='parton') - + for i in range(-2,6): - self.cluster.submit('%s/gensudgrid ' % self.dirbin, + self.cluster.submit('%s/gensudgrid ' % self.dirbin, argument = ['%d'%i], - cwd=self.me_dir, + cwd=self.me_dir, stdout=open(pjoin(self.me_dir, 'gensudgrid%s.log' % i),'w')) self.monitor() for i in range(-2,6): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') os.system('cat %s/gensudgrid%s.log >> %s' % (self.me_dir, path)) misc.gzip(path, stdout=issudfile) - + ############################################################################ - def create_root_file(self, input='unweighted_events.lhe', + def create_root_file(self, input='unweighted_events.lhe', output='unweighted_events.root' ): """create the LHE root file """ self.update_status('Creating root files', level='parton') @@ -6233,14 +6233,14 @@ def create_root_file(self, input='unweighted_events.lhe', totar = False torm = True input = input[:-3] - + try: - misc.call(['%s/ExRootLHEFConverter' % eradir, + misc.call(['%s/ExRootLHEFConverter' % eradir, input, output], cwd=pjoin(self.me_dir, 'Events')) except Exception: logger.warning('fail to produce Root output [problem with ExRootAnalysis]') - + if totar: if os.path.exists('%s.gz' % input): try: @@ -6251,13 +6251,13 @@ def create_root_file(self, input='unweighted_events.lhe', misc.gzip(input) if torm: os.remove(input) - + def run_syscalc(self, mode='parton', event_path=None, output=None): - """create the syscalc output""" + """create the syscalc output""" if self.run_card['use_syst'] not in self.true: return - + scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): return @@ -6265,12 +6265,12 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): if self.run_card['event_norm'] != 'sum': logger.critical('SysCalc works only when event_norm is on \'sum\'.') return - logger.info('running SysCalc on mode %s' % mode) - + logger.info('running 
SysCalc on mode %s' % mode) + # Restore the old default for SysCalc+PY6 if self.run_card['sys_matchscale']=='auto': self.run_card['sys_matchscale'] = "30 50" - + # Check that all pdfset are correctly installed lhaid = [self.run_card.get_lhapdf_id()] if '&&' in self.run_card['sys_pdf']: @@ -6285,20 +6285,20 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.debug(str(error)) logger.warning('Systematic computation requires lhapdf to run. Bypass SysCalc') return - + # Copy all the relevant PDF sets [self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid] - + to_syscalc={'sys_scalefact': self.run_card['sys_scalefact'], 'sys_alpsfact': self.run_card['sys_alpsfact'], 'sys_matchscale': self.run_card['sys_matchscale'], 'sys_scalecorrelation': self.run_card['sys_scalecorrelation'], 'sys_pdf': self.run_card['sys_pdf']} - - tag = self.run_card['run_tag'] + + tag = self.run_card['run_tag'] card = pjoin(self.me_dir, 'bin','internal', 'syscalc_card.dat') template = open(pjoin(self.me_dir, 'bin','internal', 'syscalc_template.dat')).read() - + if '&&' in to_syscalc['sys_pdf']: to_syscalc['sys_pdf'] = to_syscalc['sys_pdf'].split('#',1)[0].replace('&&',' \n ') else: @@ -6311,8 +6311,8 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): new.append(d) else: new[-1] += ' %s' % d - to_syscalc['sys_pdf'] = '\n'.join(new) - + to_syscalc['sys_pdf'] = '\n'.join(new) + if to_syscalc['sys_pdf'].lower() in ['', 'f', 'false', 'none', '.false.']: to_syscalc['sys_pdf'] = '' if to_syscalc['sys_alpsfact'].lower() in ['', 'f', 'false', 'none','.false.']: @@ -6320,17 +6320,17 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): - + # check if the scalecorrelation parameter is define: if not 'sys_scalecorrelation' in self.run_card: self.run_card['sys_scalecorrelation'] = -1 open(card,'w').write(template % self.run_card) - + if not os.path.exists(card): return False - - + + event_dir = pjoin(self.me_dir, 'Events') if not 
event_path: @@ -6353,19 +6353,19 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): raise SysCalcError('qcut value for sys_matchscale lower than qcut in pythia_card. Bypass syscalc') if float(value) < xqcut: raise SysCalcError('qcut value for sys_matchscale lower than xqcut in run_card. Bypass syscalc') - - + + event_path = pjoin(event_dir,'syst.dat') output = pjoin(event_dir, 'syscalc.dat') else: raise self.InvalidCmd('Invalid mode %s' % mode) - + if not os.path.exists(event_path): if os.path.exists(event_path+'.gz'): misc.gunzip(event_path+'.gz') else: raise SysCalcError('Events file %s does not exits' % event_path) - + self.update_status('Calculating systematics for %s level' % mode, level = mode.lower()) try: proc = misc.call([os.path.join(scdir, 'sys_calc'), @@ -6374,7 +6374,7 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): stderr = subprocess.STDOUT, cwd=event_dir) # Wait 5 s to make sure file is finished writing - time.sleep(5) + time.sleep(5) except OSError as error: logger.error('fail to run syscalc: %s. Please check that SysCalc is correctly installed.' % error) else: @@ -6382,11 +6382,11 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.warning('SysCalc Failed. Please read the associate log to see the reason. 
Did you install the associate PDF set?') elif mode == 'parton': files.mv(output, event_path) - + self.update_status('End syscalc for %s level' % mode, level = mode.lower(), makehtml=False) - - return True + + return True action_switcher = AskRun @@ -6399,23 +6399,23 @@ def ask_run_configuration(self, mode=None, args=[]): passing_cmd.append('reweight=ON') if '-M' in args or '--madspin' in args: passing_cmd.append('madspin=ON') - + switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, mode=mode, line_args=args, force=self.force, first_cmd=passing_cmd, return_instance=True) # - self.switch = switch # store the value of the switch for plugin purpose + self.switch = switch # store the value of the switch for plugin purpose if 'dynamical' in switch: mode = 'auto' - + # Now that we know in which mode we are check that all the card #exists (copy default if needed) - + cards = ['param_card.dat', 'run_card.dat'] if switch['shower'] == 'Pythia6': cards.append('pythia_card.dat') if switch['shower'] == 'Pythia8': - cards.append('pythia8_card.dat') + cards.append('pythia8_card.dat') if switch['detector'] in ['PGS','DELPHES+PGS']: cards.append('pgs_card.dat') if switch['detector'] in ['Delphes', 'DELPHES+PGS']: @@ -6438,29 +6438,29 @@ def ask_run_configuration(self, mode=None, args=[]): cards.append('rivet_card.dat') self.keep_cards(cards) - + first_cmd = cmd_switch.get_cardcmd() - + if os.path.isfile(pjoin(self.me_dir,'Cards','MadLoopParams.dat')): cards.append('MadLoopParams.dat') - + if self.force: self.check_param_card(pjoin(self.me_dir,'Cards','param_card.dat' )) return switch - + if 'dynamical' in switch and switch['dynamical']: self.ask_edit_cards(cards, plot=False, mode='auto', first_cmd=first_cmd) else: self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) return switch - + ############################################################################ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None): """Ask the 
question when launching pythia""" - + pythia_suffix = '' if pythia_version==6 else '%d'%pythia_version - + available_mode = ['0', '1'] if pythia_version==6: available_mode.append('2') @@ -6485,10 +6485,10 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = self.ask(question, '0', options) elif not mode: mode = 'auto' - + if mode.isdigit(): mode = name[mode] - + auto = False if mode == 'auto': auto = True @@ -6497,7 +6497,7 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = 'pgs' elif os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')): mode = 'delphes' - else: + else: mode = 'pythia%s'%pythia_suffix logger.info('Will run in mode %s' % mode) # Now that we know in which mode we are check that all the card @@ -6513,15 +6513,15 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) cards.append('delphes_trigger.dat') self.keep_cards(cards, ignore=['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat', 'plot_card.dat']) - + if self.force: return mode - + if not banner: banner = self.banner - + if auto: - self.ask_edit_cards(cards, from_banner=['param', 'run'], + self.ask_edit_cards(cards, from_banner=['param', 'run'], mode='auto', plot=(pythia_version==6), banner=banner ) else: @@ -6529,12 +6529,12 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) plot=(pythia_version==6), banner=banner) return mode - + #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmdShell(MadEventCmd, cmd.CmdShell): - """The command line processor of MadGraph""" + """The command line processor of MadGraph""" @@ -6548,11 +6548,11 @@ class SubProcesses(object): @classmethod def clean(cls): cls.name_to_pdg = {} - + @staticmethod def get_subP(me_dir): """return the list of Subprocesses""" - + out = [] for 
line in open(pjoin(me_dir,'SubProcesses', 'subproc.mg')): if not line: @@ -6560,9 +6560,9 @@ def get_subP(me_dir): name = line.strip() if os.path.exists(pjoin(me_dir, 'SubProcesses', name)): out.append(pjoin(me_dir, 'SubProcesses', name)) - + return out - + @staticmethod @@ -6623,9 +6623,9 @@ def get_subP_ids(path): particles = re.search("/([\d,-]+)/", line) all_ids.append([int(p) for p in particles.group(1).split(',')]) return all_ids - - -#=============================================================================== + + +#=============================================================================== class GridPackCmd(MadEventCmd): """The command for the gridpack --Those are not suppose to be use interactively--""" @@ -6639,7 +6639,7 @@ def __init__(self, me_dir = None, nb_event=0, seed=0, gran=-1, *completekey, **s self.random = seed self.random_orig = self.random self.granularity = gran - + self.options['automatic_html_opening'] = False #write the grid_card.dat on disk self.nb_event = int(nb_event) @@ -6680,7 +6680,7 @@ def write_RunWeb(self, me_dir): def write_gridcard(self, nb_event, seed, gran): """write the grid_card.dat file at appropriate location""" - + # first try to write grid_card within the gridpack. 
print("WRITE GRIDCARD", self.me_dir) if self.readonly: @@ -6689,35 +6689,35 @@ def write_gridcard(self, nb_event, seed, gran): fsock = open('grid_card.dat','w') else: fsock = open(pjoin(self.me_dir, 'Cards', 'grid_card.dat'),'w') - + gridpackcard = banner_mod.GridpackCard() gridpackcard['GridRun'] = True gridpackcard['gevents'] = nb_event gridpackcard['gseed'] = seed gridpackcard['ngran'] = gran - + gridpackcard.write(fsock) ############################################################################ def get_Pdir(self): """get the list of Pdirectory if not yet saved.""" - + if hasattr(self, "Pdirs"): if self.me_dir in self.Pdirs[0]: return self.Pdirs - + if not self.readonly: - self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) + self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] else: - self.Pdirs = [l.strip() - for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + self.Pdirs = [l.strip() + for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] + return self.Pdirs - + def prepare_local_dir(self): """create the P directory structure in the local directory""" - + if not self.readonly: os.chdir(self.me_dir) else: @@ -6726,7 +6726,7 @@ def prepare_local_dir(self): os.mkdir(p) files.cp(pjoin(self.me_dir,'SubProcesses',p,'symfact.dat'), pjoin(p, 'symfact.dat')) - + def launch(self, nb_event, seed): """ launch the generation for the grid """ @@ -6742,13 +6742,13 @@ def launch(self, nb_event, seed): if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(seed) + random.seed(seed) random.mg_seedset = seed elif self.run_card['python_seed'] > 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] # 2) Run the refine for the grid 
self.update_status('Generating Events', level=None) #misc.call([pjoin(self.me_dir,'bin','refine4grid'), @@ -6767,70 +6767,70 @@ def launch(self, nb_event, seed): self.exec_cmd('decay_events -from_cards', postcmd=False) elif self.run_card['use_syst'] and self.run_card['systematics_program'] == 'systematics': self.options['nb_core'] = 1 - self.exec_cmd('systematics %s --from_card' % + self.exec_cmd('systematics %s --from_card' % pjoin('Events', self.run_name, 'unweighted_events.lhe.gz'), postcmd=False,printcmd=False) - + def refine4grid(self, nb_event): """Special refine for gridpack run.""" self.nb_refine += 1 - + precision = nb_event self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) - + # initialize / remove lhapdf mode # self.configure_directory() # All this has been done before self.cluster_mode = 0 # force single machine # Store seed in randinit file, to be read by ranmar.f self.save_random() - + self.update_status('Refine results to %s' % precision, level=None) logger.info("Using random number seed offset = %s" % self.random) refine_opt = {'err_goal': nb_event, 'split_channels': False, - 'ngran':self.granularity, 'readonly': self.readonly} + 'ngran':self.granularity, 'readonly': self.readonly} x_improve = gen_ximprove.gen_ximprove_gridpack(self, refine_opt) x_improve.launch() # create the ajob for the refinment and run those! - self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack - - + self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack + + #bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) #print 'run combine!!!' 
#combine_runs.CombineRuns(self.me_dir) - + return #update html output Presults = sum_html.collect_result(self) cross, error = Presults.xsec, Presults.xerru self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + #self.update_status('finish refine', 'parton', makehtml=False) #devnull.close() - - - + + + return self.total_jobs = 0 - subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if + subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if P.startswith('P') and os.path.isdir(pjoin(self.me_dir,'SubProcesses', P))] devnull = open(os.devnull, 'w') for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) # clean previous run for match in misc.glob('*ajob*', Pdir): if os.path.basename(match)[:4] in ['ajob', 'wait', 'run.', 'done']: os.remove(pjoin(Pdir, match)) - + logfile = pjoin(Pdir, 'gen_ximprove.log') misc.call([pjoin(bindir, 'gen_ximprove')], @@ -6840,40 +6840,40 @@ def refine4grid(self, nb_event): if os.path.exists(pjoin(Pdir, 'ajob1')): alljobs = misc.glob('ajob*', Pdir) - nb_tot = len(alljobs) + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) if os.path.exists(pjoin(self.me_dir,'error')): self.monitor(html=True) raise MadEventError('Error detected in dir %s: %s' % \ (Pdir, open(pjoin(self.me_dir,'error')).read())) - self.monitor(run_type='All job submitted for refine number %s' % + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except 
Exception: pass - + bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) combine_runs.CombineRuns(self.me_dir) - + #update html output cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + self.update_status('finish refine', 'parton', makehtml=False) devnull.close() def do_combine_events(self, line): - """Advanced commands: Launch combine events""" + """Advanced commands: Launch combine events""" if self.readonly: outdir = 'Events' @@ -6895,17 +6895,17 @@ def do_combine_events(self, line): self.banner.add_generation_info(self.results.current['cross'], self.run_card['nevents']) if not hasattr(self, 'random_orig'): self.random_orig = 0 self.banner.change_seed(self.random_orig) - - + + if not os.path.exists(pjoin(outdir, self.run_name)): os.mkdir(pjoin(outdir, self.run_name)) - self.banner.write(pjoin(outdir, self.run_name, + self.banner.write(pjoin(outdir, self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -6915,7 +6915,7 @@ def do_combine_events(self, line): if os.path.exists(pjoin(Gdir, 'events.lhe')): result = sum_html.OneResult('') result.read_results(pjoin(Gdir, 'results.dat')) - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec')*gscalefact[Gdir], result.get('xerru')*gscalefact[Gdir], result.get('axsec')*gscalefact[Gdir] @@ -6924,7 +6924,7 @@ def do_combine_events(self, line): sum_xsec += result.get('xsec')*gscalefact[Gdir] sum_xerru.append(result.get('xerru')*gscalefact[Gdir]) sum_axsec += result.get('axsec')*gscalefact[Gdir] - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(outdir, 
self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.nb_event) @@ -6933,26 +6933,26 @@ def do_combine_events(self, line): AllEvent.add(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() - + self.banner.add_generation_info(sum_xsec, self.nb_event) nb_event = AllEvent.unweight(pjoin(outdir, self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.nb_event, log_level=logging.DEBUG, normalization=self.run_card['event_norm'], proc_charac=self.proc_characteristic) - - + + if partials: for i in range(partials): try: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) self.banner.add_generation_info(sum_xsec, nb_event) if self.run_card['bias_module'].lower() not in ['dummy', 'none']: @@ -6961,7 +6961,7 @@ def do_combine_events(self, line): class MadLoopInitializer(object): """ A container class for the various methods for initializing MadLoop. It is - placed in MadEventInterface because it is used by Madevent for loop-induced + placed in MadEventInterface because it is used by Madevent for loop-induced simulations. 
""" @staticmethod @@ -6974,7 +6974,7 @@ def make_and_run(dir_name,checkRam=False): if os.path.isfile(pjoin(dir_name,'check')): os.remove(pjoin(dir_name,'check')) os.remove(pjoin(dir_name,'check_sa.o')) - os.remove(pjoin(dir_name,'loop_matrix.o')) + os.remove(pjoin(dir_name,'loop_matrix.o')) # Now run make devnull = open(os.devnull, 'w') start=time.time() @@ -6996,7 +6996,7 @@ def make_and_run(dir_name,checkRam=False): stdout=devnull, stderr=devnull, close_fds=True) try: ptimer.execute() - #poll as often as possible; otherwise the subprocess might + #poll as often as possible; otherwise the subprocess might # "sneak" in some extra memory usage while you aren't looking # Accuracy of .2 seconds is enough for the timing. while ptimer.poll(): @@ -7028,7 +7028,7 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, If mu_r > 0.0, then the renormalization constant value will be hardcoded directly in check_sa.f, if is is 0 it will be set to Sqrt(s) and if it is < 0.0 the value in the param_card.dat is used. - If the split_orders target (i.e. the target squared coupling orders for + If the split_orders target (i.e. the target squared coupling orders for the computation) is != -1, it will be changed in check_sa.f via the subroutine CALL SET_COUPLINGORDERS_TARGET(split_orders).""" @@ -7043,12 +7043,12 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, file_path = pjoin(directories[0],'check_sa.f') if not os.path.isfile(file_path): raise MadGraph5Error('Could not find the location of check_sa.f'+\ - ' from the specified path %s.'%str(file_path)) + ' from the specified path %s.'%str(file_path)) file = open(file_path, 'r') check_sa = file.read() file.close() - + file = open(file_path, 'w') check_sa = re.sub(r"READPS = \S+\)","READPS = %s)"%('.TRUE.' 
if read_ps \ else '.FALSE.'), check_sa) @@ -7064,42 +7064,42 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, (("%.17e"%mu_r).replace('e','d')),check_sa) elif mu_r < 0.0: check_sa = re.sub(r"MU_R=SQRTS","",check_sa) - + if split_orders > 0: check_sa = re.sub(r"SET_COUPLINGORDERS_TARGET\(-?\d+\)", - "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) - + "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) + file.write(check_sa) file.close() - @staticmethod + @staticmethod def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ req_files = ['HelFilter.dat','LoopFilter.dat'], attempts = [4,15]): - """ Run the initialization of the process in 'run_dir' with success + """ Run the initialization of the process in 'run_dir' with success characterized by the creation of the files req_files in this directory. The directory containing the driving source code 'check_sa.f'. - The list attempt gives the successive number of PS points the + The list attempt gives the successive number of PS points the initialization should be tried with before calling it failed. Returns the number of PS points which were necessary for the init. Notice at least run_dir or SubProc_dir must be provided. A negative attempt number given in input means that quadprec will be forced for initialization.""" - + # If the user does not want detailed info, then set the dictionary # to a dummy one. 
if infos is None: infos={} - + if SubProc_dir is None and run_dir is None: raise MadGraph5Error('At least one of [SubProc_dir,run_dir] must'+\ ' be provided in run_initialization.') - + # If the user does not specify where is check_sa.f, then it is assumed # to be one levels above run_dir if SubProc_dir is None: SubProc_dir = os.path.abspath(pjoin(run_dir,os.pardir)) - + if run_dir is None: directories =[ dir for dir in misc.glob('P[0-9]*', SubProc_dir) if os.path.isdir(dir) ] @@ -7109,7 +7109,7 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find a valid running directory'+\ ' in %s.'%str(SubProc_dir)) - # Use the presence of the file born_matrix.f to decide if it is a + # Use the presence of the file born_matrix.f to decide if it is a # loop-induced process or not. It's not crucial, but just that because # of the dynamic adjustment of the ref scale used for deciding what are # the zero contributions, more points are neeeded for loop-induced. @@ -7128,9 +7128,9 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ %MLCardPath) else: - MLCard = banner_mod.MadLoopParam(MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) MLCard_orig = banner_mod.MadLoopParam(MLCard) - + # Make sure that LoopFilter really is needed. 
if not MLCard['UseLoopFilter']: try: @@ -7153,11 +7153,11 @@ def need_init(): proc_prefix+fname)) for fname in my_req_files]) or \ not os.path.isfile(pjoin(run_dir,'check')) or \ not os.access(pjoin(run_dir,'check'), os.X_OK) - + # Check if this is a process without born by checking the presence of the # file born_matrix.f is_loop_induced = os.path.exists(pjoin(run_dir,'born_matrix.f')) - + # For loop induced processes, always attempt quadruple precision if # double precision attempts fail and the user didn't specify himself # quadruple precision initializations attempts @@ -7166,11 +7166,11 @@ def need_init(): use_quad_prec = 1 curr_attempt = 1 - MLCard.set('WriteOutFilters',True) - + MLCard.set('WriteOutFilters',True) + while to_attempt!=[] and need_init(): curr_attempt = to_attempt.pop() - # if the attempt is a negative number it means we must force + # if the attempt is a negative number it means we must force # quadruple precision at initialization time if curr_attempt < 0: use_quad_prec = -1 @@ -7183,11 +7183,11 @@ def need_init(): MLCard.set('ZeroThres',1e-9) # Plus one because the filter are written on the next PS point after curr_attempt = abs(curr_attempt+1) - MLCard.set('MaxAttempts',curr_attempt) + MLCard.set('MaxAttempts',curr_attempt) MLCard.write(pjoin(SubProc_dir,'MadLoopParams.dat')) # initialization is performed. 
- MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, + MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, npoints = curr_attempt) compile_time, run_time, ram_usage = \ MadLoopInitializer.make_and_run(run_dir) @@ -7200,7 +7200,7 @@ def need_init(): infos['Process_compilation']==None: infos['Process_compilation'] = compile_time infos['Initialization'] = run_time - + MLCard_orig.write(pjoin(SubProc_dir,'MadLoopParams.dat')) if need_init(): return None @@ -7219,8 +7219,8 @@ def need_init(ML_resources_path, proc_prefix, r_files): MLCardPath = pjoin(proc_dir,'SubProcesses','MadLoopParams.dat') if not os.path.isfile(MLCardPath): raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ - %MLCardPath) - MLCard = banner_mod.MadLoopParam(MLCardPath) + %MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) req_files = ['HelFilter.dat','LoopFilter.dat'] # Make sure that LoopFilter really is needed. @@ -7234,9 +7234,9 @@ def need_init(ML_resources_path, proc_prefix, r_files): req_files.remove('HelFilter.dat') except ValueError: pass - + for v_folder in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)): + '%s*'%subproc_prefix)): # Make sure it is a valid MadLoop directory if not os.path.isdir(v_folder) or not os.path.isfile(\ pjoin(v_folder,'loop_matrix.f')): @@ -7247,7 +7247,7 @@ def need_init(ML_resources_path, proc_prefix, r_files): if need_init(pjoin(proc_dir,'SubProcesses','MadLoop5_resources'), proc_prefix, req_files): return True - + return False @staticmethod @@ -7265,7 +7265,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['treatCardsLoopNoInit'], cwd=pjoin(proc_dir,'Source')) else: interface.do_treatcards('all --no_MadLoopInit') - + # First make sure that IREGI and CUTTOOLS are compiled if needed if os.path.exists(pjoin(proc_dir,'Source','CutTools')): misc.compile(arg=['libcuttools'],cwd=pjoin(proc_dir,'Source')) @@ -7273,8 +7273,8 @@ def init_MadLoop(proc_dir, 
n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['libiregi'],cwd=pjoin(proc_dir,'Source')) # Then make sure DHELAS and MODEL are compiled misc.compile(arg=['libmodel'],cwd=pjoin(proc_dir,'Source')) - misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) - + misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) + # Now initialize the MadLoop outputs logger.info('Initializing MadLoop loop-induced matrix elements '+\ '(this can take some time)...') @@ -7283,7 +7283,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, if MG_options: if interface and hasattr(interface, 'cluster') and isinstance(interface.cluster, cluster.MultiCore): mcore = interface.cluster - else: + else: mcore = cluster.MultiCore(**MG_options) else: mcore = cluster.onecore @@ -7294,10 +7294,10 @@ def run_initialization_wrapper(run_dir, infos, attempts): run_dir=run_dir, infos=infos) else: n_PS = MadLoopInitializer.run_initialization( - run_dir=run_dir, infos=infos, attempts=attempts) + run_dir=run_dir, infos=infos, attempts=attempts) infos['nPS'] = n_PS return 0 - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return @@ -7307,21 +7307,21 @@ def wait_monitoring(Idle, Running, Done): init_info = {} # List all virtual folders while making sure they are valid MadLoop folders VirtualFolders = [f for f in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)) if (os.path.isdir(f) or + '%s*'%subproc_prefix)) if (os.path.isdir(f) or os.path.isfile(pjoin(f,'loop_matrix.f')))] logger.debug("Now Initializing MadLoop matrix element in %d folder%s:"%\ (len(VirtualFolders),'s' if len(VirtualFolders)>1 else '')) - logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in + logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in VirtualFolders)) for v_folder in VirtualFolders: init_info[v_folder] = {} - + # We try all multiples of n_PS from 1 to max_mult, first in DP and then # in QP before 
giving up, or use default values if n_PS is None. max_mult = 3 if n_PS is None: # Then use the default list of number of PS points to try - mcore.submit(run_initialization_wrapper, + mcore.submit(run_initialization_wrapper, [pjoin(v_folder), init_info[v_folder], None]) else: # Use specific set of PS points @@ -7348,8 +7348,8 @@ def wait_monitoring(Idle, Running, Done): '%d PS points (%s), in %.3g(compil.) + %.3g(init.) secs.'%( abs(init['nPS']),'DP' if init['nPS']>0 else 'QP', init['Process_compilation'],init['Initialization'])) - - logger.info('MadLoop initialization finished.') + + logger.info('MadLoop initialization finished.') AskforEditCard = common_run.AskforEditCard @@ -7364,16 +7364,16 @@ def wait_monitoring(Idle, Running, Done): import os import optparse - # Get the directory of the script real path (bin) - # and add it to the current PYTHONPATH + # Get the directory of the script real path (bin) + # and add it to the current PYTHONPATH #root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))) sys.path.insert(0, root_path) - class MyOptParser(optparse.OptionParser): + class MyOptParser(optparse.OptionParser): class InvalidOption(Exception): pass def error(self, msg=''): raise MyOptParser.InvalidOption(msg) - # Write out nice usage message if called with -h or --help + # Write out nice usage message if called with -h or --help usage = "usage: %prog [options] [FILE] " parser = MyOptParser(usage=usage) parser.add_option("-l", "--logging", default='INFO', @@ -7384,7 +7384,7 @@ def error(self, msg=''): help='force to launch debug mode') parser_error = '' done = False - + for i in range(len(sys.argv)-1): try: (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i]) @@ -7394,7 +7394,7 @@ def error(self, msg=''): else: args += sys.argv[len(sys.argv)-i:] if not done: - # raise correct error: + # raise correct error: try: (options, args) = parser.parse_args() except MyOptParser.InvalidOption as error: @@ -7407,8 +7407,8 @@ 
def error(self, msg=''): import subprocess import logging import logging.config - # Set logging level according to the logging level given by options - #logging.basicConfig(level=vars(logging)[options.logging]) + # Set logging level according to the logging level given by options + #logging.basicConfig(level=vars(logging)[options.logging]) import internal import internal.coloring_logging # internal.file = XXX/bin/internal/__init__.py @@ -7431,13 +7431,13 @@ def error(self, msg=''): raise pass - # Call the cmd interface main loop + # Call the cmd interface main loop try: if args: # a single command is provided if '--web' in args: - i = args.index('--web') - args.pop(i) + i = args.index('--web') + args.pop(i) cmd_line = MadEventCmd(me_dir, force_run=True) else: cmd_line = MadEventCmdShell(me_dir, force_run=True) @@ -7457,13 +7457,13 @@ def error(self, msg=''): pass - - - - - - - - + + + + + + + + diff --git a/epochX/cudacpp/gg_ttgg.mad/src/cudacpp_src.mk b/epochX/cudacpp/gg_ttgg.mad/src/cudacpp_src.mk index d4cc628aec..b4e446bc45 100644 --- a/epochX/cudacpp/gg_ttgg.mad/src/cudacpp_src.mk +++ b/epochX/cudacpp/gg_ttgg.mad/src/cudacpp_src.mk @@ -1,12 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) is used in the Subprocess and src directories - -THISMK = $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
#------------------------------------------------------------------------------- @@ -16,165 +11,24 @@ SHELL := /bin/bash #------------------------------------------------------------------------------- -#=== Configure common compiler flags for CUDA and C++ - -INCFLAGS = -I. -OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here - -#------------------------------------------------------------------------------- - #=== Configure the C++ compiler -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) $(USE_NVTX) -fPIC -Wall -Wshadow -Wextra +include ../Source/make_opts + +MG_CXXFLAGS += -fPIC -I. $(USE_NVTX) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS+= -ffast-math # see issue #117 +MG_CXXFLAGS += -ffast-math # see issue #117 endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY # Note: AR, CXX and FC are implicitly defined if not set externally # See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html ###RANLIB = ranlib -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -LDFLAGS = -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -LDFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler (note: NVCC is already exported including ccache) - -###$(info NVCC=$(NVCC)) - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ builds (note: NVCC is already exported including ccache) - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif - 
-#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for CUDA and C++ - -# Assuming uname is available, detect if architecture is PowerPC -UNAME_P := $(shell uname -p) - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # BUILD ERROR IF THIS ADDED IN SRC?! -else - ###AR=gcc-ar # needed by -flto - ###RANLIB=gcc-ranlib # needed by -flto - ###CXXFLAGS+= -flto # NB: build error from src/Makefile unless gcc-ar and gcc-ranlib are used - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -#------------------------------------------------------------------------------- - #=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the build flags appropriate to OMPFLAGS ###$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword 
registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -###$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -###$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -###$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -###$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - CXXFLAGS += -DMGONGPU_HAS_NO_CURAND -else ifneq ($(RNDGEN),hasCurand) - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- @@ -182,28 +36,18 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -###$(info Current directory is $(shell pwd)) -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIRREL = ../lib/$(BUILDDIR) - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR=1 is set)) -else - override BUILDDIR = . - override LIBDIRREL = ../lib - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) -endif -######$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) +# Build directory: +BUILDDIR := build.$(DIRTAG) +LIBDIRREL := ../lib/$(BUILDDIR) # Workaround for Mac #375 (I did not manage to fix rpath with @executable_path): use absolute paths for LIBDIR # (NB: this is quite ugly because it creates the directory if it does not exist - to avoid removing src by mistake) -UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Darwin) override LIBDIR = $(shell mkdir -p $(LIBDIRREL); cd $(LIBDIRREL); pwd) ifeq ($(wildcard $(LIBDIR)),) @@ -223,55 +67,35 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -# Target (and build options): debug -debug: OPTFLAGS = -g -O0 -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! 
-name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ #------------------------------------------------------------------------------- cxx_objects=$(addprefix $(BUILDDIR)/, Parameters_sm.o read_slha.o) -ifneq ($(NVCC),) +ifeq ($(AVX),cuda) +COMPILER=$(NVCC) cu_objects=$(addprefix $(BUILDDIR)/, Parameters_sm_cu.o) +else +COMPILER=$(CXX) +cu_objects= endif # Target (and build rules): common (src) library -ifneq ($(NVCC),) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) $(cu_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(NVCC) -shared -o $@ $(cxx_objects) $(cu_objects) $(LDFLAGS) -else -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(CXX) -shared -o $@ $(cxx_objects) $(LDFLAGS) -endif + mkdir -p $(LIBDIR) + $(COMPILER) -shared -o $@ $(cxx_objects) $(cu_objects) $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- @@ -279,19 +103,7 @@ endif .PHONY: clean clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) -else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe -endif - -cleanall: - @echo - $(MAKE) clean -f $(THISMK) - @echo - rm -rf $(LIBDIR)/build.* - rm -rf build.* + $(RM) -f ../lib/build.*/*.so + $(RM) -rf build.* #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttgg.mad/src/mgOnGpuCxtypes.h b/epochX/cudacpp/gg_ttgg.mad/src/mgOnGpuCxtypes.h index ca9a9f00c0..3290d314d6 100644 --- a/epochX/cudacpp/gg_ttgg.mad/src/mgOnGpuCxtypes.h +++ b/epochX/cudacpp/gg_ttgg.mad/src/mgOnGpuCxtypes.h @@ -21,10 +21,14 @@ // Complex type in cuda: thrust or cucomplex or cxsmpl #ifdef __CUDACC__ 
#if defined MGONGPU_CUCXTYPE_THRUST +#ifdef __CLANG__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" // for icpx2021/clang13 (https://stackoverflow.com/a/15864661) +#endif #include +#ifdef __CLANG__ #pragma clang diagnostic pop +#endif #elif defined MGONGPU_CUCXTYPE_CUCOMPLEX #include #elif not defined MGONGPU_CUCXTYPE_CXSMPL diff --git a/epochX/cudacpp/gg_ttgg.sa/src/cudacpp_src.mk b/epochX/cudacpp/gg_ttgg.sa/src/cudacpp_src.mk index d4cc628aec..c757875347 100644 --- a/epochX/cudacpp/gg_ttgg.sa/src/cudacpp_src.mk +++ b/epochX/cudacpp/gg_ttgg.sa/src/cudacpp_src.mk @@ -1,7 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. +# Further modified by: J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. #=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) #=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) 
is used in the Subprocess and src directories @@ -95,50 +95,52 @@ CXXFLAGS += $(OMPFLAGS) # Set the build flags appropriate to each AVX choice (example: "make AVX=none") # [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] # [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) +ifeq ($(NVCC),) + $(info AVX=$(AVX)) + ifeq ($(UNAME_P),ppc64le) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) + endif + else ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' 
are supported on ARM for the moment) + endif + else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + ifeq ($(AVX),none) + override AVXFLAGS = -march=x86-64 # no SIMD (see #588) + else ifeq ($(AVX),sse4) + override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # 
AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif endif + # For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? + CXXFLAGS+= $(AVXFLAGS) endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? -CXXFLAGS+= $(AVXFLAGS) # Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") ###$(info FPTYPE=$(FPTYPE)) @@ -182,11 +184,19 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +ifneq ($(NVCC),) + override DIRTAG = cuda_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +else + override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +endif # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +ifneq ($(NVCC),) + override TAG = cuda_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +else + override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +endif # Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 ###$(info Current directory is $(shell pwd)) @@ -223,35 +233,21 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so # Target (and build options): debug debug: OPTFLAGS = -g -O0 
debug: all.$(TAG) -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) - #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ @@ -278,20 +274,61 @@ endif # Target: clean the builds .PHONY: clean +BUILD_DIRS := $(wildcard build.*) +NUM_BUILD_DIRS := $(words $(BUILD_DIRS)) + clean: ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) +ifeq ($(NUM_BUILD_DIRS),1) + $(info USEBUILDDIR=1, only one src build directory found.) + rm -rf ../lib/$(BUILD_DIRS) + rm -rf $(BUILD_DIRS) +else ifeq ($(NUM_BUILD_DIRS),0) + $(error USEBUILDDIR=1, but no src build directories are found.) else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe + $(error Multiple src BUILDDIR's found! Use 'cleannone', 'cleansse4', 'cleanavx2', 'clean512y','clean512z', 'cleancuda' or 'cleanall'.) +endif +else + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + rm -f $(BUILDDIR)/*.o $(BUILDDIR)/*.exe endif cleanall: @echo - $(MAKE) clean -f $(THISMK) + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + rm -f $(BUILDDIR)/*.o $(BUILDDIR)/*.exe @echo - rm -rf $(LIBDIR)/build.* + rm -rf ../lib/build.* rm -rf build.* +# Target: clean different builds + +cleannone: + rm -rf ../lib/build.none_* + rm -rf build.none_* + +cleansse4: + rm -rf ../lib/build.sse4_* + rm -rf build.sse4_* + +cleanavx2: + rm -rf ../lib/build.avx2_* + rm -rf build.avx2_* + +clean512y: + rm -rf ../lib/build.512y_* + rm -rf build.512y_* + +clean512z: + rm -rf ../lib/build.512z_* + rm -rf build.512z_* + +cleancuda: + rm -rf ../lib/build.cuda_* + rm -rf build.cuda_* + +cleandir: + rm -f ./*.o ./*.exe + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttggg.mad/CODEGEN_mad_gg_ttggg_log.txt b/epochX/cudacpp/gg_ttggg.mad/CODEGEN_mad_gg_ttggg_log.txt index d94e7252af..663423dec4 100644 --- a/epochX/cudacpp/gg_ttggg.mad/CODEGEN_mad_gg_ttggg_log.txt +++ 
b/epochX/cudacpp/gg_ttggg.mad/CODEGEN_mad_gg_ttggg_log.txt @@ -52,7 +52,7 @@ Note that you can still compile and run aMC@NLO with the built-in PDFs Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt import /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttggg.mg The import format was not given, so we guess it as command set stdout_level DEBUG @@ -62,7 +62,7 @@ generate g g > t t~ g g g No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005348920822143555  +DEBUG: model prefixing takes 0.005719900131225586  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. INFO: Trying coupling order WEIGHTED<=5: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ g g g WEIGHTED<=5 @1 INFO: Process has 1240 diagrams -1 processes with 1240 diagrams generated in 1.857 s +1 processes with 1240 diagrams generated in 1.854 s Total: 1 processes with 1240 diagrams output madevent ../TMPOUT/CODEGEN_mad_gg_ttggg --hel_recycling=False --vector_size=32 --me_exporter=standalone_cudacpp Load PLUGIN.CUDACPP_OUTPUT @@ -175,9 +175,9 @@ INFO: Generating Helas calls for process: g g > t t~ g g g WEIGHTED<=5 @1 INFO: Processing color information for process: g g > t t~ g g g @1 INFO: Creating files in directory P1_gg_ttxggg INFO: Computing Color-Flow optimization [15120 term] -INFO: Color-Flow passed to 1630 term in 7s. Introduce 3030 contraction +INFO: Color-Flow passed to 1630 term in 8s. 
Introduce 3030 contraction DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -192,15 +192,15 @@ INFO: Created files CPPProcess.h and CPPProcess.cc in directory ./. DEBUG: vector, subproc_group,self.opt['vector_size'] =  32 True 32 [export_v4.py at line 1872]  INFO: Generating Feynman diagrams for Process: g g > t t~ g g g WEIGHTED<=5 @1 INFO: Finding symmetric diagrams for subprocess group gg_ttxggg -Generated helas calls for 1 subprocesses (1240 diagrams) in 6.607 s -Wrote files for 2281 helas calls in 18.169 s +Generated helas calls for 1 subprocesses (1240 diagrams) in 6.496 s +Wrote files for 2281 helas calls in 18.410 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 5 routines in 0.317 s +ALOHA: aloha creates 5 routines in 0.315 s DEBUG: Entering PLUGIN_ProcessExporter.convert_model (create the model) [output.py at line 202]  ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines @@ -235,12 +235,14 @@ save configuration file to /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CO INFO: Use Fortran compiler gfortran INFO: Use c++ compiler g++ INFO: Generate web pages +DEBUG: standardise /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttggg/Source/make_opts (fix f2py3 and sort make_opts_variables) before applying patch.common DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttggg; patch -p4 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common patching file Source/genps.inc +patching file 
Source/make_opts patching file Source/makefile patching file SubProcesses/makefile +patching file bin/internal/banner.py patching file bin/internal/gen_ximprove.py -Hunk #1 succeeded at 391 (offset 6 lines). patching file bin/internal/madevent_interface.py DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttggg/SubProcesses/P1_gg_ttxggg; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f @@ -257,9 +259,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m28.894s -user 0m28.357s -sys 0m0.382s +real 0m29.000s +user 0m28.472s +sys 0m0.425s ************************************************************ * * * W E L C O M E to * @@ -285,7 +287,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttggg/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt treatcards run quit INFO: @@ -315,7 +317,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gg_ttggg/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". 
Set another one in ./input/mg5_configuration.txt treatcards param quit INFO: diff --git a/epochX/cudacpp/gg_ttggg.mad/Source/make_opts b/epochX/cudacpp/gg_ttggg.mad/Source/make_opts index e4b87ee6ad..435bed0dc7 100644 --- a/epochX/cudacpp/gg_ttggg.mad/Source/make_opts +++ b/epochX/cudacpp/gg_ttggg.mad/Source/make_opts @@ -1,7 +1,7 @@ DEFAULT_CPP_COMPILER=g++ DEFAULT_F2PY_COMPILER=f2py3 DEFAULT_F_COMPILER=gfortran -GLOBAL_FLAG=-O3 -ffast-math -fbounds-check +GLOBAL_FLAG=-O3 -ffast-math MACFLAG= MG5AMC_VERSION=SpecifiedByMG5aMCAtRunTime PYTHIA8_PATH=NotInstalled @@ -13,31 +13,53 @@ BIASLIBDIR=../../../lib/ BIASLIBRARY=libbias.$(libext) # Rest of the makefile -ifeq ($(origin FFLAGS),undefined) -FFLAGS= -w -fPIC -#FFLAGS+= -g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none -endif -FFLAGS += $(GLOBAL_FLAG) +#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) + +# Detect O/S kernel (Linux, Darwin...) +UNAME_S := $(shell uname -s) + +# Detect architecture (x86_64, ppc64le...) +UNAME_P := $(shell uname -p) + +#------------------------------------------------------------------------------- # REMOVE MACFLAG IF NOT ON MAC OR FOR F2PY -UNAME := $(shell uname -s) ifdef f2pymode MACFLAG= else -ifneq ($(UNAME), Darwin) +ifneq ($(UNAME_S), Darwin) MACFLAG= endif endif +############################################################ +# Default compiler flags +# To change optimisation level, override these as follows: +# make CXXFLAGS="-O0 -g" +# or export them as environment variables +# For debugging Fortran, one could e.g. 
use: +# FCFLAGS="-g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none" +############################################################ +FCFLAGS ?= $(GLOBAL_FLAG) -fbounds-check +CXXFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG +NVCCFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG -use_fast_math -lineinfo +LDFLAGS ?= $(STDLIB) -ifeq ($(origin CXXFLAGS),undefined) -CXXFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +ifneq ($(FFLAGS),) +# Madgraph used to use FFLAGS, so the user probably tries to change the flags specifically for madgraph: +FCFLAGS = $(FFLAGS) endif -ifeq ($(origin CFLAGS),undefined) -CFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +# Madgraph-specific flags: +WARNFLAGS = -Wall -Wshadow -Wextra +ifeq (,$(findstring -std=,$(CXXFLAGS))) +CXXSTANDARD= -std=c++17 endif +MG_FCFLAGS += -fPIC -w +MG_CXXFLAGS += -fPIC $(CXXSTANDARD) $(WARNFLAGS) $(MACFLAG) +MG_NVCCFLAGS += -fPIC $(CXXSTANDARD) --forward-unknown-to-host-compiler $(WARNFLAGS) +MG_LDFLAGS += $(MACFLAG) # Set FC unless it's defined by an environment variable ifeq ($(origin FC),default) @@ -49,45 +71,40 @@ endif # Increase the number of allowed charcters in a Fortran line ifeq ($(FC), ftn) -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler else VERS="$(shell $(FC) --version | grep ifort -i)" ifeq ($(VERS), "") -FFLAGS+= -ffixed-line-length-132 +MG_FCFLAGS += -ffixed-line-length-132 else -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler endif endif -UNAME := $(shell uname -s) -ifeq ($(origin LDFLAGS), undefined) -LDFLAGS=$(STDLIB) $(MACFLAG) -endif - # Options: dynamic, lhapdf # Option dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) dylibext=dylib else dylibext=so endif ifdef dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) libext=dylib -FFLAGS+= -fno-common -LDFLAGS += -bundle +MG_FCFLAGS += -fno-common +MG_LDFLAGS += -bundle define CREATELIB $(FC) -dynamiclib 
-undefined dynamic_lookup -o $(1) $(2) endef else libext=so -FFLAGS+= -fPIC -LDFLAGS += -shared +MG_FCFLAGS += -fPIC +MG_LDFLAGS += -shared define CREATELIB -$(FC) $(FFLAGS) $(LDFLAGS) -o $(1) $(2) +$(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MG_LDFLAGS) $(LDFLAGS) -o $(1) $(2) endef endif else @@ -101,17 +118,9 @@ endif # Option lhapdf ifneq ($(lhapdf),) -CXXFLAGS += $(shell $(lhapdf) --cppflags) +MG_CXXFLAGS += $(shell $(lhapdf) --cppflags) alfas_functions=alfas_functions_lhapdf llhapdf+= $(shell $(lhapdf) --cflags --libs) -lLHAPDF -# check if we need to activate c++11 (for lhapdf6.2) -ifeq ($(origin CXX),default) -ifeq ($lhapdfversion$lhapdfsubversion,62) -CXX=$(DEFAULT_CPP_COMPILER) -std=c++11 -else -CXX=$(DEFAULT_CPP_COMPILER) -endif -endif else alfas_functions=alfas_functions llhapdf= @@ -120,4 +129,207 @@ endif # Helper function to check MG5 version define CHECK_MG5AMC_VERSION python -c 'import re; from distutils.version import StrictVersion; print StrictVersion("$(MG5AMC_VERSION)") >= StrictVersion("$(1)") if re.match("^[\d\.]+$$","$(MG5AMC_VERSION)") else True;' -endef \ No newline at end of file +endef + +#------------------------------------------------------------------------------- + +# Set special cases for non-gcc/clang builds +# AVX below gets overridden from outside in architecture-specific builds +AVX ?= none +# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] +# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] +$(info AVX=$(AVX)) +ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + endif +else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for 
clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif +endif + +# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? +MG_CXXFLAGS+= $(AVXFLAGS) + +#------------------------------------------------------------------------------- + +#=== Configure the CUDA compiler if available + +# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) +# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below +ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside + $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") + override CUDA_HOME=disabled +endif + +# If CUDA_HOME is not set, try to set it from the location of nvcc +ifndef CUDA_HOME + CUDA_HOME = $(patsubst %bin/nvcc,%,$(shell which nvcc 2>/dev/null)) + $(info CUDA_HOME="$(CUDA_HOME)") +endif + +# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists +ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) + NVCC = $(CUDA_HOME)/bin/nvcc + USE_NVTX ?=-DUSE_NVTX + # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html + # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ + # Default: use compute capability 70 (Volta architecture), and embed PTX to support later architectures, too. + # Set MADGRAPH_CUDA_ARCHITECTURE to the desired value to change the default. + # Build for multiple architectures using a space-separated list, e.g. 
MADGRAPH_CUDA_ARCHITECTURE="70 80" + MADGRAPH_CUDA_ARCHITECTURE ?= 70 + # Generate PTX for the first architecture: + CUARCHFLAGS := --generate-code arch=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)),code=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)) + # Generate device code for all architectures: + CUARCHFLAGS += $(foreach arch,$(MADGRAPH_CUDA_ARCHITECTURE), --generate-code arch=compute_$(arch),code=sm_$(arch)) + + CUINC = -I$(CUDA_HOME)/include/ + CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! + MG_LDFLAGS += $(CURANDLIBFLAGS) + MG_NVCCFLAGS += $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) + +else ifeq ($(AVX),cuda) + $(error nvcc is not visible in PATH. Either add it to PATH or export CUDA_HOME to compile with cuda) + ifeq ($(AVX),cuda) + $(error Cannot compile for cuda without NVCC) + endif +endif + +# Set the host C++ compiler for nvcc via "-ccbin " +# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." is not supported) +MG_NVCCFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) + +# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) +ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) +MG_NVCCFLAGS += -allow-unsupported-compiler +endif + +#------------------------------------------------------------------------------- + +#=== Configure ccache for C++ and CUDA builds + +# Enable ccache if USECCACHE=1 +ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) + override CXX:=ccache $(CXX) +endif + +ifneq ($(NVCC),) + ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) + override NVCC:=ccache $(NVCC) + endif +endif + +#------------------------------------------------------------------------------- + +#=== Configure PowerPC-specific compiler flags for C++ and CUDA + +# PowerPC-specific CXX / CUDA compiler flags (being reviewed) +ifeq ($(UNAME_P),ppc64le) + MG_CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none 
and sse4 + MG_NVCCFLAGS+= -Xcompiler -mno-float128 + + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + endif +endif + +#------------------------------------------------------------------------------- +#=== Apple-specific compiler/linker options + +# Add -std=c++17 explicitly to avoid build errors on macOS +# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" +ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) +MG_CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 +endif + +ifeq ($(UNAME_S),Darwin) +STDLIB = -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) +MG_LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" +else +MG_LDFLAGS += -Xlinker --no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +endif + +#------------------------------------------------------------------------------- + +#=== C++/CUDA-specific flags for floating-point types and random generators to use + +# Set the default FPTYPE (floating point type) choice +FPTYPE ?= m + +# Set the default HELINL (inline helicities?) choice +HELINL ?= 0 + +# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice +HRDCOD ?= 0 + +# Set the default RNDGEN (random number generator) choice +ifeq ($(NVCC),) + RNDGEN ?= hasNoCurand +else + RNDGEN ?= hasCurand +endif + +# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so sub-makes don't go back to the defaults +export AVX +export AVXFLAGS +export FPTYPE +export HELINL +export HRDCOD +export RNDGEN + +#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN + +# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") +# $(info FPTYPE=$(FPTYPE)) +ifeq ($(FPTYPE),d) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE +else ifeq ($(FPTYPE),f) + COMMONFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT +else ifeq ($(FPTYPE),m) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT +else + $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) +endif + +# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") +# $(info HELINL=$(HELINL)) +ifeq ($(HELINL),1) + COMMONFLAGS += -DMGONGPU_INLINE_HELAMPS +else ifneq ($(HELINL),0) + $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") +# $(info HRDCOD=$(HRDCOD)) +ifeq ($(HRDCOD),1) + COMMONFLAGS += -DMGONGPU_HARDCODE_PARAM +else ifneq ($(HRDCOD),0) + $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") +$(info RNDGEN=$(RNDGEN)) +ifeq ($(RNDGEN),hasNoCurand) + override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND + override CURANDLIBFLAGS = +else ifeq ($(RNDGEN),hasCurand) + CXXFLAGSCURAND = $(CUINC) +else + $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) +endif + +MG_CXXFLAGS += $(COMMONFLAGS) +MG_NVCCFLAGS += $(COMMONFLAGS) + 
+#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttggg.mad/Source/makefile b/epochX/cudacpp/gg_ttggg.mad/Source/makefile index 00c73099a0..407b1b753e 100644 --- a/epochX/cudacpp/gg_ttggg.mad/Source/makefile +++ b/epochX/cudacpp/gg_ttggg.mad/Source/makefile @@ -10,8 +10,8 @@ include make_opts # Source files -PROCESS= hfill.o matrix.o myamp.o -DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o +PROCESS = hfill.o matrix.o myamp.o +DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o HBOOK = hfill.o hcurve.o hbook1.o hbook2.o GENERIC = $(alfas_functions).o transpole.o invarients.o hfill.o pawgraphs.o ran1.o \ rw_events.o rw_routines.o kin_functions.o open_file.o basecode.o setrun.o \ @@ -22,7 +22,7 @@ GENSUDGRID = gensudgrid.o is-sud.o setrun_gen.o rw_routines.o open_file.o # Locally compiled libraries -LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) +LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) # Binaries @@ -32,6 +32,9 @@ BINARIES = $(BINDIR)gen_ximprove $(BINDIR)gensudgrid $(BINDIR)combine_runs all: $(LIBRARIES) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(LIBDIR)libbias.$(libext) +%.o: %.f *.inc + $(FC) -I. $(MG_FCFLAGS) $(FCFLAGS) -c $< -o $@ + # Libraries $(LIBDIR)libdsample.$(libext): $(DSAMPLE) @@ -39,36 +42,35 @@ $(LIBDIR)libdsample.$(libext): $(DSAMPLE) $(LIBDIR)libgeneric.$(libext): $(GENERIC) $(call CREATELIB, $@, $^) $(LIBDIR)libdhelas.$(libext): DHELAS - cd DHELAS; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libpdf.$(libext): PDF make_opts - cd PDF; make; cd .. 
+ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" ifneq (,$(filter edff chff, $(pdlabel1) $(pdlabel2))) $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make ; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" else $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make -f makefile_dummy; cd ../../ -endif + $(MAKE) -C $< -f makefile_dummy FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" +endif $(LIBDIR)libcernlib.$(libext): CERNLIB - cd CERNLIB; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" # The bias library is here the dummy by default; compilation of other ones specified in the run_card will be done by MG5aMC directly. $(LIBDIR)libbias.$(libext): BIAS/dummy - cd BIAS/dummy; make; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libmodel.$(libext): MODEL param_card.inc - cd MODEL; make + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" param_card.inc: ../Cards/param_card.dat ../bin/madevent treatcards param + touch $@ # madevent doesn't update the time stamp if there's nothing to do -$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o - $(FC) $(LDFLAGS) -o $@ $^ -#$(BINDIR)combine_events: $(COMBINE) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) run_card.inc $(LIBDIR)libbias.$(libext) -# $(FC) -o $@ $(COMBINE) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC $(llhapdf) $(LDFLAGS) -lbias +$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o + $(FC) $(MG_LDFLAGS) $(LDFLAGS) -o $@ $^ $(BINDIR)gensudgrid: $(GENSUDGRID) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libcernlib.$(libext) - $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(LDFLAGS) + $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) 
-lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(MG_LDFLAGS) $(LDFLAGS) # Dependencies @@ -85,6 +87,7 @@ rw_events.o: rw_events.f run_config.inc run_card.inc: ../Cards/run_card.dat ../bin/madevent treatcards run + touch $@ # madevent doesn't update the time stamp if there's nothing to do clean4pdf: rm -f ../lib/libpdf.$(libext) @@ -120,7 +123,7 @@ $(LIBDIR)libiregi.a: $(IREGIDIR) cd $(IREGIDIR); make ln -sf ../Source/$(IREGIDIR)libiregi.a $(LIBDIR)libiregi.a -cleanSource: +clean: $(RM) *.o $(LIBRARIES) $(BINARIES) cd PDF; make clean; cd .. cd PDF/gammaUPC; make clean; cd ../../ @@ -132,11 +135,3 @@ cleanSource: cd BIAS/ptj_bias; make clean; cd ../.. if [ -d $(CUTTOOLSDIR) ]; then cd $(CUTTOOLSDIR); make clean; cd ..; fi if [ -d $(IREGIDIR) ]; then cd $(IREGIDIR); make clean; cd ..; fi - -clean: cleanSource - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make clean; cd -; done; - -cleanavx: - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; -cleanall: cleanSource # THIS IS THE ONE - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; diff --git a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/Bridge.h b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/Bridge.h index bf8b5e024d..c263f39a62 100644 --- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/Bridge.h +++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/Bridge.h @@ -236,7 +236,7 @@ namespace mg5amcCpu #ifdef __CUDACC__ if( ( m_nevt < s_gputhreadsmin ) || ( m_nevt % s_gputhreadsmin != 0 ) ) throw std::runtime_error( "Bridge constructor: nevt should be a multiple of " + std::to_string( s_gputhreadsmin ) ); - while( m_nevt != m_gpublocks * m_gputhreads ) + while( m_nevt != static_cast( m_gpublocks * m_gputhreads ) ) { m_gputhreads /= 2; if( m_gputhreads < s_gputhreadsmin ) @@ -266,7 +266,7 @@ namespace mg5amcCpu template void Bridge::set_gpugrid( const int gpublocks, const int gputhreads ) { - if( m_nevt != gpublocks * gputhreads ) + if( m_nevt != static_cast( gpublocks * gputhreads ) ) 
throw std::runtime_error( "Bridge: gpublocks*gputhreads must equal m_nevt in set_gpugrid" ); m_gpublocks = gpublocks; m_gputhreads = gputhreads; diff --git a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/MadgraphTest.h b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/MadgraphTest.h index ef40624c88..b0f2250c25 100644 --- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/MadgraphTest.h +++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/MadgraphTest.h @@ -199,10 +199,6 @@ class MadgraphTest : public testing::TestWithParam } }; -// Since we link both the CPU-only and GPU tests into the same executable, we prevent -// a multiply defined symbol by only compiling this in the non-CUDA phase: -#ifndef __CUDACC__ - /// Compare momenta and matrix elements. /// This uses an implementation of TestDriverBase to run a madgraph workflow, /// and compares momenta and matrix elements with a reference file. @@ -307,6 +303,4 @@ TEST_P( MadgraphTest, CompareMomentaAndME ) } } -#endif // __CUDACC__ - #endif /* MADGRAPHTEST_H_ */ diff --git a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/MatrixElementKernels.cc b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/MatrixElementKernels.cc index 74b5239ebf..2d6f27cd5d 100644 --- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/MatrixElementKernels.cc +++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/MatrixElementKernels.cc @@ -196,6 +196,9 @@ namespace mg5amcGpu void MatrixElementKernelDevice::setGrid( const int gpublocks, const int gputhreads ) { + m_gpublocks = gpublocks; + m_gputhreads = gputhreads; + if( m_gpublocks == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gpublocks must be > 0 in setGrid" ); if( m_gputhreads == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gputhreads must be > 0 in setGrid" ); if( this->nevt() != m_gpublocks * m_gputhreads ) throw std::runtime_error( "MatrixElementKernelDevice: nevt mismatch in setGrid" ); diff --git a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/P1_gg_ttxggg/check_sa.cc 
b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/P1_gg_ttxggg/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/P1_gg_ttxggg/check_sa.cc +++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/P1_gg_ttxggg/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/P1_gg_ttxggg/counters.cc b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/P1_gg_ttxggg/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/P1_gg_ttxggg/counters.cc +++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/P1_gg_ttxggg/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk index 509307506b..a522ddb335 100644 --- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk @@ -1,56 +1,41 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
- -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: use ':=' to ensure that the value of CUDACPP_MAKEFILE is not modified further down after including make_opts -#=== NB: use 'override' to ensure that the value can not be modified from the outside -override CUDACPP_MAKEFILE := $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) -###$(info CUDACPP_MAKEFILE='$(CUDACPP_MAKEFILE)') - -#=== NB: different names (e.g. cudacpp.mk and cudacpp_src.mk) are used in the Subprocess and src directories -override CUDACPP_SRC_MAKEFILE = cudacpp_src.mk - -#------------------------------------------------------------------------------- - -#=== Use bash in the Makefile (https://www.gnu.org/software/make/manual/html_node/Choosing-the-Shell.html) - -SHELL := /bin/bash - -#------------------------------------------------------------------------------- - -#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) - -# Detect O/S kernel (Linux, Darwin...) -UNAME_S := $(shell uname -s) -###$(info UNAME_S='$(UNAME_S)') - -# Detect architecture (x86_64, ppc64le...) -UNAME_P := $(shell uname -p) -###$(info UNAME_P='$(UNAME_P)') - -#------------------------------------------------------------------------------- - -#=== Include the common MG5aMC Makefile options - -# OM: this is crucial for MG5aMC flag consistency/documentation -# AV: temporarely comment this out because it breaks cudacpp builds -ifneq ($(wildcard ../../Source/make_opts),) -include ../../Source/make_opts -endif +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
+ +# This makefile extends the Fortran makefile called "makefile" + +CUDACPP_SRC_MAKEFILE = cudacpp_src.mk + +# Self-invocation with adapted flags: +cppnative: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=native AVXFLAGS="-march=native" cppbuild +cppnone: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=none AVXFLAGS= cppbuild +cppsse4: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=sse4 AVXFLAGS=-march=nehalem cppbuild +cppavx2: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=avx2 AVXFLAGS=-march=haswell cppbuild +cppavx512y: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512y AVXFLAGS="-march=skylake-avx512 -mprefer-vector-width=256" cppbuild +cppavx512z: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512z AVXFLAGS="-march=skylake-avx512 -DMGONGPU_PVW512" cppbuild +cuda: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=cuda cudabuild #------------------------------------------------------------------------------- #=== Configure common compiler flags for C++ and CUDA +# NB: The base flags are defined in the fortran "makefile" + +# Include directories +INCFLAGS = -I. -I../../src -INCFLAGS = -I. 
-OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here +MG_CXXFLAGS += $(INCFLAGS) +MG_NVCCFLAGS += $(INCFLAGS) # Dependency on src directory -MG5AMC_COMMONLIB = mg5amc_common -LIBFLAGS = -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -INCFLAGS += -I../../src +MG5AMC_COMMONLIB = mg5amc_common # Compiler-specific googletest build directory (#125 and #738) ifneq ($(shell $(CXX) --version | grep '^Intel(R) oneAPI DPC++/C++ Compiler'),) @@ -99,356 +84,42 @@ endif #------------------------------------------------------------------------------- -#=== Configure the C++ compiler - -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) -Wall -Wshadow -Wextra -ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS += -ffast-math # see issue #117 -endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY - -# Optionally add debug flags to display the full list of flags (eg on Darwin) -###CXXFLAGS+= -v - -# Note: AR, CXX and FC are implicitly defined if not set externally -# See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html - -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler - -# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) -# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below -ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside - $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") - override CUDA_HOME=disabled -endif - -# If CUDA_HOME is not set, try to set it from the location of nvcc -ifndef CUDA_HOME - CUDA_HOME = $(patsubst 
%bin/nvcc,%,$(shell which nvcc 2>/dev/null)) - $(warning CUDA_HOME was not set: using "$(CUDA_HOME)") -endif - -# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists -ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) - NVCC = $(CUDA_HOME)/bin/nvcc - USE_NVTX ?=-DUSE_NVTX - # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html - # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ - # Default: use compute capability 70 for V100 (CERN lxbatch, CERN itscrd, Juwels Cluster). - # Embed device code for 70, and PTX for 70+. - # Export MADGRAPH_CUDA_ARCHITECTURE (comma-separated list) to use another value or list of values (see #533). - # Examples: use 60 for P100 (Piz Daint), 80 for A100 (Juwels Booster, NVidia raplab/Curiosity). - MADGRAPH_CUDA_ARCHITECTURE ?= 70 - ###CUARCHFLAGS = -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=compute_$(MADGRAPH_CUDA_ARCHITECTURE) -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=sm_$(MADGRAPH_CUDA_ARCHITECTURE) # Older implementation (AV): go back to this one for multi-GPU support #533 - ###CUARCHFLAGS = --gpu-architecture=compute_$(MADGRAPH_CUDA_ARCHITECTURE) --gpu-code=sm_$(MADGRAPH_CUDA_ARCHITECTURE),compute_$(MADGRAPH_CUDA_ARCHITECTURE) # Newer implementation (SH): cannot use this as-is for multi-GPU support #533 - comma:=, - CUARCHFLAGS = $(foreach arch,$(subst $(comma), ,$(MADGRAPH_CUDA_ARCHITECTURE)),-gencode arch=compute_$(arch),code=compute_$(arch) -gencode arch=compute_$(arch),code=sm_$(arch)) - CUINC = -I$(CUDA_HOME)/include/ - ifeq ($(RNDGEN),hasNoCurand) - CURANDLIBFLAGS= - else - CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! 
- endif - CUOPTFLAGS = -lineinfo - CUFLAGS = $(foreach opt, $(OPTFLAGS), -Xcompiler $(opt)) $(CUOPTFLAGS) $(INCFLAGS) $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) -use_fast_math - ###CUFLAGS += -Xcompiler -Wall -Xcompiler -Wextra -Xcompiler -Wshadow - ###NVCC_VERSION = $(shell $(NVCC) --version | grep 'Cuda compilation tools' | cut -d' ' -f5 | cut -d, -f1) - CUFLAGS += -std=c++17 # need CUDA >= 11.2 (see #333): this is enforced in mgOnGpuConfig.h - # Without -maxrregcount: baseline throughput: 6.5E8 (16384 32 12) up to 7.3E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 160 # improves throughput: 6.9E8 (16384 32 12) up to 7.7E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 128 # improves throughput: 7.3E8 (16384 32 12) up to 7.6E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 96 # degrades throughput: 4.1E8 (16384 32 12) up to 4.5E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 64 # degrades throughput: 1.7E8 (16384 32 12) flat at 1.7E8 (65536 128 12) -else ifneq ($(origin REQUIRE_CUDA),undefined) - # If REQUIRE_CUDA is set but no cuda is found, stop here (e.g. for CI tests on GPU #443) - $(error No cuda installation found (set CUDA_HOME or make nvcc visible in PATH)) -else - # No cuda. Switch cuda compilation off and go to common random numbers in C++ - $(warning CUDA_HOME is not set or is invalid: export CUDA_HOME to compile with cuda) - override NVCC= - override USE_NVTX= - override CUINC= - override CURANDLIBFLAGS= -endif -export NVCC -export CUFLAGS - -# Set the host C++ compiler for nvcc via "-ccbin " -# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." 
is not supported) -CUFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) - -# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) -ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) -CUFLAGS += -allow-unsupported-compiler -endif - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ and CUDA builds - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif -ifneq ($(NVCC),) - ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) - override NVCC:=ccache $(NVCC) - endif -endif - -#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for C++ and CUDA - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # would increase to none=4.08-4.12E6, sse4=4.99-5.03E6! -else - ###CXXFLAGS+= -flto # also on Intel this would increase throughputs by a factor 2 to 4... - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -# PowerPC-specific CUDA compiler flags (to be reviewed!) 
-ifeq ($(UNAME_P),ppc64le) - CUFLAGS+= -Xcompiler -mno-float128 -endif - -#------------------------------------------------------------------------------- - #=== Configure defaults and check if user-defined choices exist for OMPFLAGS, AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the default OMPFLAGS choice -ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on Intel (was ok without nvcc but not ok with nvcc before #578) -else ifneq ($(shell $(CXX) --version | egrep '^(clang)'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on clang (was not ok without or with nvcc before #578) -###else ifneq ($(shell $(CXX) --version | egrep '^(Apple clang)'),) # AV for Mac (Apple clang compiler) -else ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) +OMPFLAGS ?= -fopenmp +ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) override OMPFLAGS = # AV disable OpenMP MT on Apple clang (builds fail in the CI #578) -###override OMPFLAGS = -fopenmp # OM reenable OpenMP MT on Apple clang? 
(AV Oct 2023: this still fails in the CI) -else -override OMPFLAGS = -fopenmp # enable OpenMP MT by default on all other platforms -###override OMPFLAGS = # disable OpenMP MT on all other platforms (default before #575) -endif - -# Set the default AVX (vectorization) choice -ifeq ($(AVX),) - ifeq ($(UNAME_P),ppc64le) - ###override AVX = none - override AVX = sse4 - else ifeq ($(UNAME_P),arm) - ###override AVX = none - override AVX = sse4 - else ifeq ($(wildcard /proc/cpuinfo),) - override AVX = none - $(warning Using AVX='$(AVX)' because host SIMD features cannot be read from /proc/cpuinfo) - else ifeq ($(shell grep -m1 -c avx512vl /proc/cpuinfo)$(shell $(CXX) --version | grep ^clang),1) - override AVX = 512y - ###$(info Using AVX='$(AVX)' as no user input exists) - else - override AVX = avx2 - ifneq ($(shell grep -m1 -c avx512vl /proc/cpuinfo),1) - $(warning Using AVX='$(AVX)' because host does not support avx512vl) - else - $(warning Using AVX='$(AVX)' because this is faster than avx512vl for clang) - endif - endif -else - ###$(info Using AVX='$(AVX)' according to user input) -endif - -# Set the default FPTYPE (floating point type) choice -ifeq ($(FPTYPE),) - override FPTYPE = d -endif - -# Set the default HELINL (inline helicities?) choice -ifeq ($(HELINL),) - override HELINL = 0 -endif - -# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice -ifeq ($(HRDCOD),) - override HRDCOD = 0 -endif - -# Set the default RNDGEN (random number generator) choice -ifeq ($(RNDGEN),) - ifeq ($(NVCC),) - override RNDGEN = hasNoCurand - else ifeq ($(RNDGEN),) - override RNDGEN = hasCurand - endif endif -# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so that it is not necessary to pass them to the src Makefile too -export AVX -export FPTYPE -export HELINL -export HRDCOD -export RNDGEN +# Export here, so sub makes don't fall back to the defaults: export OMPFLAGS -#------------------------------------------------------------------------------- - -#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN - -# Set the build flags appropriate to OMPFLAGS -$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override 
AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS - CUFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM - CUFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND -else ifeq ($(RNDGEN),hasCurand) - override CXXFLAGSCURAND = -else - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- #=== Configure build directories and build lockfiles === -# Build directory "short" tag (defines target and path to the optional build directory) -# (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) - -# Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) -# (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) - -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIR = ../../lib/$(BUILDDIR) - override LIBDIRRPATH = '$$ORIGIN/../$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is set = 1)) -else - override BUILDDIR = . - override LIBDIR = ../../lib - override LIBDIRRPATH = '$$ORIGIN/$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) +# Build directory "short" tag (defines target and path to the build directory) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +CUDACPP_BUILDDIR = build.$(DIRTAG) +CUDACPP_LIBDIR := ../../lib/$(CUDACPP_BUILDDIR) +LIBDIRRPATH := '$$ORIGIN:$$ORIGIN/../$(CUDACPP_LIBDIR)' +ifneq ($(AVX),) + $(info Building CUDACPP in CUDACPP_BUILDDIR=$(CUDACPP_BUILDDIR). 
Libs in $(CUDACPP_LIBDIR)) endif -###override INCDIR = ../../include -###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) -# On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH +# On Linux, set rpath to CUDACPP_LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables or shared libraries ($ORIGIN on Linux) -# On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary +# On Darwin, building libraries with absolute paths in CUDACPP_LIBDIR makes this unnecessary ifeq ($(UNAME_S),Darwin) override CXXLIBFLAGSRPATH = override CULIBFLAGSRPATH = - override CXXLIBFLAGSRPATH2 = - override CULIBFLAGSRPATH2 = else # RPATH to cuda/cpp libs when linking executables override CXXLIBFLAGSRPATH = -Wl,-rpath,$(LIBDIRRPATH) override CULIBFLAGSRPATH = -Xlinker -rpath,$(LIBDIRRPATH) - # RPATH to common lib when linking cuda/cpp libs - override CXXLIBFLAGSRPATH2 = -Wl,-rpath,'$$ORIGIN' - override CULIBFLAGSRPATH2 = -Xlinker -rpath,'$$ORIGIN' endif # Setting LD_LIBRARY_PATH or DYLD_LIBRARY_PATH in the RUNTIME is no longer necessary (neither on Linux nor on Mac) @@ -458,107 +129,68 @@ override RUNTIME = #=== Makefile TARGETS and build rules below #=============================================================================== -cxx_main=$(BUILDDIR)/check.exe -fcxx_main=$(BUILDDIR)/fcheck.exe +cxx_main=$(CUDACPP_BUILDDIR)/check.exe +fcxx_main=$(CUDACPP_BUILDDIR)/fcheck.exe -ifneq ($(NVCC),) -cu_main=$(BUILDDIR)/gcheck.exe -fcu_main=$(BUILDDIR)/fgcheck.exe -else -cu_main= -fcu_main= -endif - -testmain=$(BUILDDIR)/runTest.exe +cu_main=$(CUDACPP_BUILDDIR)/gcheck.exe +fcu_main=$(CUDACPP_BUILDDIR)/fgcheck.exe ifneq ($(GTESTLIBS),) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) $(testmain) -else -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) 
+testmain=$(CUDACPP_BUILDDIR)/runTest.exe +cutestmain=$(CUDACPP_BUILDDIR)/runTest_cuda.exe endif -# Target (and build options): debug -MAKEDEBUG= -debug: OPTFLAGS = -g -O0 -debug: CUOPTFLAGS = -G -debug: MAKEDEBUG := debug -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -$(BUILDDIR)/.build.$(TAG): - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @if [ "$(oldtagsb)" != "" ]; then echo "Cannot build for tag=$(TAG) as old builds exist for other tags:"; echo " $(oldtagsb)"; echo "Please run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @touch $(BUILDDIR)/.build.$(TAG) +cppbuild: $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(cxx_main) $(fcxx_main) $(testmain) +cudabuild: $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(cu_main) $(fcu_main) $(cutestmain) # Generic target and build rules: objects from CUDA compilation -ifneq ($(NVCC),) -$(BUILDDIR)/%.o : %.cu *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cu *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c $< -o $@ -$(BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ -endif +$(CUDACPP_BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ # Generic target and build rules: objects from C++ compilation # (NB do not include CUINC here! 
add it only for NVTX or curand #679) -$(BUILDDIR)/%.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Apply special build flags only to CrossSectionKernel.cc and gCrossSectionKernel.cu (no fast math, see #117 and #516) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS := $(filter-out -ffast-math,$(CXXFLAGS)) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math +$(CUDACPP_BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math ifneq ($(NVCC),) -$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -fno-fast-math +$(CUDACPP_BUILDDIR)/gCrossSectionKernels.o: NVCCFLAGS += -Xcompiler -fno-fast-math endif endif # Apply special build flags only to check_sa.o and gcheck_sa.o (NVTX in timermap.h, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) -$(BUILDDIR)/gcheck_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) # Apply special build flags only to check_sa and CurandRandomNumberKernel (curand headers, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gcheck_sa.o: CUFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gCurandRandomNumberKernel.o: CUFLAGS += $(CXXFLAGSCURAND) -ifeq ($(RNDGEN),hasCurand) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CUINC) -endif +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) 
+$(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) + # Avoid "warning: builtin __has_trivial_... is deprecated; use __is_trivially_... instead" in nvcc with icx2023 (#592) ifneq ($(shell $(CXX) --version | egrep '^(Intel)'),) ifneq ($(NVCC),) -CUFLAGS += -Xcompiler -Wno-deprecated-builtins +MG_NVCCFLAGS += -Xcompiler -Wno-deprecated-builtins endif endif -# Avoid clang warning "overriding '-ffp-contract=fast' option with '-ffp-contract=on'" (#516) -# This patch does remove the warning, but I prefer to keep it disabled for the moment... -###ifneq ($(shell $(CXX) --version | egrep '^(clang|Apple clang|Intel)'),) -###$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -Wno-overriding-t-option -###ifneq ($(NVCC),) -###$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -Wno-overriding-t-option -###endif -###endif - #### Apply special build flags only to CPPProcess.cc (-flto) ###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += -flto -#### Apply special build flags only to CPPProcess.cc (AVXFLAGS) -###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += $(AVXFLAGS) - #------------------------------------------------------------------------------- -# Target (and build rules): common (src) library -commonlib : $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc $(BUILDDIR)/.build.$(TAG) - $(MAKE) -C ../../src $(MAKEDEBUG) -f $(CUDACPP_SRC_MAKEFILE) +$(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc + $(MAKE) AVX=$(AVX) AVXFLAGS="$(AVXFLAGS)" -C ../../src -f $(CUDACPP_SRC_MAKEFILE) #------------------------------------------------------------------------------- @@ -566,162 +198,123 @@ processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') ###$(info processid_short=$(processid_short)) MG5AMC_CXXLIB = mg5amc_$(processid_short)_cpp -cxx_objects_lib=$(BUILDDIR)/CPPProcess.o $(BUILDDIR)/MatrixElementKernels.o $(BUILDDIR)/BridgeKernels.o $(BUILDDIR)/CrossSectionKernels.o 
-cxx_objects_exe=$(BUILDDIR)/CommonRandomNumberKernel.o $(BUILDDIR)/RamboSamplingKernels.o +cxx_objects_lib=$(CUDACPP_BUILDDIR)/CPPProcess.o $(CUDACPP_BUILDDIR)/MatrixElementKernels.o $(CUDACPP_BUILDDIR)/BridgeKernels.o $(CUDACPP_BUILDDIR)/CrossSectionKernels.o +cxx_objects_exe=$(CUDACPP_BUILDDIR)/CommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/RamboSamplingKernels.o -ifneq ($(NVCC),) MG5AMC_CULIB = mg5amc_$(processid_short)_cuda -cu_objects_lib=$(BUILDDIR)/gCPPProcess.o $(BUILDDIR)/gMatrixElementKernels.o $(BUILDDIR)/gBridgeKernels.o $(BUILDDIR)/gCrossSectionKernels.o -cu_objects_exe=$(BUILDDIR)/gCommonRandomNumberKernel.o $(BUILDDIR)/gRamboSamplingKernels.o -endif +cu_objects_lib=$(CUDACPP_BUILDDIR)/gCPPProcess.o $(CUDACPP_BUILDDIR)/gMatrixElementKernels.o $(CUDACPP_BUILDDIR)/gBridgeKernels.o $(CUDACPP_BUILDDIR)/gCrossSectionKernels.o +cu_objects_exe=$(CUDACPP_BUILDDIR)/gCommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/gRamboSamplingKernels.o # Target (and build rules): C++ and CUDA shared libraries -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) - $(CXX) -shared -o $@ $(cxx_objects_lib) $(CXXLIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) - -ifneq ($(NVCC),) -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) - $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -endif +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) + $(CXX) -shared -o $@ 
$(cxx_objects_lib) $(CXXLIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) $(MG_LDFLAGS) $(LDFLAGS) -#------------------------------------------------------------------------------- - -# Target (and build rules): Fortran include files -###$(INCDIR)/%.inc : ../%.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### \cp $< $@ +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) + $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) #------------------------------------------------------------------------------- # Target (and build rules): C++ and CUDA standalone executables -$(cxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cxx_main): $(BUILDDIR)/check_sa.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o - $(CXX) -o $@ $(BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o $(CURANDLIBFLAGS) -ifneq ($(NVCC),) +$(cxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(cxx_main): $(CUDACPP_BUILDDIR)/check_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) + ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(cu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(cu_main): LIBFLAGS 
+= -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(cu_main): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(cu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cu_main): $(BUILDDIR)/gcheck_sa.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o - $(NVCC) -o $@ $(BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o $(CURANDLIBFLAGS) +$(cu_main): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif +$(cu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(cu_main): $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- - -# Generic target and build rules: objects from Fortran compilation -$(BUILDDIR)/%.o : %.f *.inc - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(FC) -I. -c $< -o $@ - -# Generic target and build rules: objects from Fortran compilation -###$(BUILDDIR)/%.o : %.f *.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi -### $(FC) -I. -I$(INCDIR) -c $< -o $@ - -# Target (and build rules): Fortran standalone executables -###$(BUILDDIR)/fcheck_sa.o : $(INCDIR)/fbridge.inc +# Check executables: ifeq ($(UNAME_S),Darwin) -$(fcxx_main): LIBFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 +$(fcxx_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif -$(fcxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcxx_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) - $(CXX) -o $@ $(BUILDDIR)/fcheck_sa.o $(OMPFLAGS) $(BUILDDIR)/fsampler.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) +$(fcxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(fcxx_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(cxx_objects_exe) $(OMPFLAGS) $(CUDACPP_BUILDDIR)/fsampler.o -lgfortran -L$(CUDACPP_LIBDIR) $(MG_LDFLAGS) $(LDFLAGS) -ifneq ($(NVCC),) ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(fcu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(fcu_main): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(fcu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(fcu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') endif ifeq ($(UNAME_S),Darwin) -$(fcu_main): LIBFLAGS += -L$(shell dirname 
$(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 -endif -$(fcu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcu_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) - $(NVCC) -o $@ $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) +$(fcu_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif +$(fcu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(fcu_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(cu_objects_exe) -lgfortran $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- # Target (and build rules): test objects and test executable -$(BUILDDIR)/testxxx.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx_cu.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx_cu.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -endif +$(testmain) $(cutestmain): $(GTESTLIBS) +$(testmain) $(cutestmain): INCFLAGS += $(GTESTINC) +$(testmain) $(cutestmain): MG_LDFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main -$(BUILDDIR)/testmisc.o: $(GTESTLIBS) 
-$(BUILDDIR)/testmisc.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(CUDACPP_BUILDDIR)/testxxx.o $(CUDACPP_BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) testxxx_cc_ref.txt +$(testmain): $(CUDACPP_BUILDDIR)/testxxx.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions +$(cutestmain): $(CUDACPP_BUILDDIR)/testxxx_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testmisc_cu.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests -endif -$(BUILDDIR)/runTest.o: $(GTESTLIBS) -$(BUILDDIR)/runTest.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/runTest.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/runTest.o +$(CUDACPP_BUILDDIR)/testmisc.o $(CUDACPP_BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/testmisc.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(cutestmain): $(CUDACPP_BUILDDIR)/testmisc_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests + + +$(CUDACPP_BUILDDIR)/runTest.o $(CUDACPP_BUILDDIR)/runTest_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/runTest.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/runTest.o +$(cutestmain): $(CUDACPP_BUILDDIR)/runTest_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/runTest_cu.o + -ifneq ($(NVCC),) -$(BUILDDIR)/runTest_cu.o: $(GTESTLIBS) -$(BUILDDIR)/runTest_cu.o: INCFLAGS += $(GTESTINC) ifneq ($(shell $(CXX) --version | grep ^Intel),) 
-$(testmain): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(testmain): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cutestmain): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cutestmain): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(testmain): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(testmain): $(BUILDDIR)/runTest_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/runTest_cu.o +$(cutestmain): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif -$(testmain): $(GTESTLIBS) -$(testmain): INCFLAGS += $(GTESTINC) -$(testmain): LIBFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main ifneq ($(OMPFLAGS),) ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -$(testmain): LIBFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) +$(testmain): MG_LDFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -$(testmain): LIBFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 +$(testmain): MG_LDFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 ###else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) ###$(testmain): LIBFLAGS += ???? 
# OMP is not supported yet by cudacpp for Apple clang (see #578 and #604) else -$(testmain): LIBFLAGS += -lgomp +$(testmain): MG_LDFLAGS += -lgomp endif endif -ifeq ($(NVCC),) # link only runTest.o -$(testmain): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) - $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -ldl -pthread $(LIBFLAGS) -else # link both runTest.o and runTest_cu.o -$(testmain): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) - $(NVCC) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) -ldl $(LIBFLAGS) -lcuda -endif +$(testmain): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(testmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) + $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -pthread $(MG_LDFLAGS) $(LDFLAGS) + +$(cutestmain): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cutestmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) + $(NVCC) -o $@ $(cu_objects_lib) $(cu_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -lcuda $(MG_LDFLAGS) $(LDFLAGS) # Use target gtestlibs to build only googletest ifneq ($(GTESTLIBS),) @@ -731,72 +324,15 @@ endif # Use flock (Linux only, no Mac) to allow 'make -j' if googletest has not yet been downloaded https://stackoverflow.com/a/32666215 $(GTESTLIBS): ifneq ($(shell which flock 2>/dev/null),) - @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - flock $(BUILDDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) + flock $(TESTDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) else if [ -d $(TESTDIR) ]; then $(MAKE) -C $(TESTDIR); fi endif #------------------------------------------------------------------------------- -# Target: build all targets in all AVX modes (each AVX mode in a separate build directory) -# Split the avxall target into five separate targets to allow parallel 'make -j avxall' builds -# (Hack: add a fbridge.inc dependency to avxall, to ensure it is only copied once for all AVX modes) -avxnone: - @echo - $(MAKE) USEBUILDDIR=1 AVX=none -f $(CUDACPP_MAKEFILE) - -avxsse4: - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 -f $(CUDACPP_MAKEFILE) - -avxavx2: - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 -f $(CUDACPP_MAKEFILE) - -avx512y: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y -f $(CUDACPP_MAKEFILE) - -avx512z: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z -f $(CUDACPP_MAKEFILE) - -ifeq ($(UNAME_P),ppc64le) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else ifeq ($(UNAME_P),arm) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 avxavx2 avx512y avx512z -avxall: avxnone avxsse4 avxavx2 avx512y avx512z -endif - -#------------------------------------------------------------------------------- - -# Target: clean the builds -.PHONY: clean - -clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(BUILDDIR) -else - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe - rm -f $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(LIBDIR)/lib$(MG5AMC_CULIB).so -endif - $(MAKE) -C ../../src clean -f $(CUDACPP_SRC_MAKEFILE) -### rm -rf $(INCDIR) - -cleanall: - @echo - $(MAKE) USEBUILDDIR=0 clean -f $(CUDACPP_MAKEFILE) - @echo - $(MAKE) USEBUILDDIR=0 -C ../../src cleanall -f $(CUDACPP_SRC_MAKEFILE) - rm -rf build.* - # Target: clean the builds as well as the gtest installation(s) 
-distclean: cleanall +distclean: clean cleansrc ifneq ($(wildcard $(TESTDIRCOMMON)),) $(MAKE) -C $(TESTDIRCOMMON) clean endif @@ -848,50 +384,55 @@ endif #------------------------------------------------------------------------------- -# Target: check (run the C++ test executable) +# Target: check/gcheck (run the C++ test executable) # [NB THIS IS WHAT IS USED IN THE GITHUB CI!] -ifneq ($(NVCC),) -check: runTest cmpFcheck cmpFGcheck -else check: runTest cmpFcheck -endif +gcheck: + $(MAKE) AVX=cuda runTest cmpFGcheck # Target: runTest (run the C++ test executable runTest.exe) -runTest: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/runTest.exe +ifneq ($(AVX),cuda) +runTest: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest.exe +else +runTest: cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest_cuda.exe +endif + # Target: runCheck (run the C++ standalone executable check.exe, with a small number of events) -runCheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/check.exe -p 2 32 2 +runCheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe -p 2 32 2 # Target: runGcheck (run the CUDA standalone executable gcheck.exe, with a small number of events) -runGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/gcheck.exe -p 2 32 2 +runGcheck: AVX=cuda +runGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe -p 2 32 2 # Target: runFcheck (run the Fortran standalone executable - with C++ MEs - fcheck.exe, with a small number of events) -runFcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 +runFcheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 # Target: runFGcheck (run the Fortran standalone executable - with CUDA MEs - fgcheck.exe, with a small number of events) -runFGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 +runFGcheck: AVX=cuda +runFGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 # Target: cmpFcheck (compare ME results from the C++ and Fortran with C++ MEs standalone executables, with 
a small number of events) -cmpFcheck: all.$(TAG) +cmpFcheck: cppbuild @echo - @echo "$(BUILDDIR)/check.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi # Target: cmpFGcheck (compare ME results from the CUDA and Fortran with CUDA MEs standalone executables, with a small number of events) -cmpFGcheck: all.$(TAG) +cmpFGcheck: AVX=cuda +cmpFGcheck: + $(MAKE) AVX=cuda cudabuild @echo - @echo "$(BUILDDIR)/gcheck.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fgcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi -# Target: memcheck (run the CUDA standalone executable gcheck.exe with a small number of events through cuda-memcheck) -memcheck: all.$(TAG) - $(RUNTIME) $(CUDA_HOME)/bin/cuda-memcheck --check-api-memory-access yes --check-deprecated-instr yes --check-device-heap yes --demangle full --language c --leak-check full --racecheck-report all --report-api-errors all --show-backtrace yes --tool memcheck --track-unused-memory yes $(BUILDDIR)/gcheck.exe -p 2 32 2 - -#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/makefile b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/makefile index d572486c2e..b69917ee1f 100644 --- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/makefile +++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/makefile @@ -1,27 +1,30 @@ SHELL := /bin/bash -include ../../Source/make_opts -FFLAGS+= -w +# Include general setup +OPTIONS_MAKEFILE := ../../Source/make_opts +include $(OPTIONS_MAKEFILE) # Enable the C preprocessor https://gcc.gnu.org/onlinedocs/gfortran/Preprocessing-Options.html -FFLAGS+= -cpp +MG_FCFLAGS += -cpp +MG_CXXFLAGS += -I. -# Compile counters with -O3 as in the cudacpp makefile (avoid being "unfair" to Fortran #740) -CXXFLAGS = -O3 -Wall -Wshadow -Wextra +all: help cppnative + +# Target if user does not specify target +help: + $(info No target specified.) 
+ $(info Viable targets are 'cppnative' (default), 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z' and 'cuda') + $(info Or 'cppall' for all C++ targets) + $(info Or 'ALL' for all C++ and cuda targets) -# Add -std=c++17 explicitly to avoid build errors on macOS -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 -endif -# Enable ccache if USECCACHE=1 +# Enable ccache for C++ if USECCACHE=1 (do not enable it for Fortran since it is not supported for Fortran) ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) override CXX:=ccache $(CXX) endif -ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) - override FC:=ccache $(FC) -endif +###ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) +### override FC:=ccache $(FC) +###endif # Load additional dependencies of the bias module, if present ifeq (,$(wildcard ../bias_dependencies)) @@ -46,34 +49,25 @@ else MADLOOP_LIB = endif -LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias - -processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') -CUDACPP_MAKEFILE=cudacpp.mk -# NB1 Using ":=" below instead of "=" is much faster (it only runs the subprocess once instead of many times) -# NB2 Use '|&' in CUDACPP_BUILDDIR to avoid confusing errors about googletest #507 -# NB3 Do not add a comment inlined "CUDACPP_BUILDDIR=$(shell ...) # comment" as otherwise a trailing space is included... -# NB4 The variables relevant to the cudacpp Makefile must be explicitly passed to $(shell...) 
-CUDACPP_MAKEENV:=$(shell echo '$(.VARIABLES)' | tr " " "\n" | egrep "(USEBUILDDIR|AVX|FPTYPE|HELINL|HRDCOD)") -###$(info CUDACPP_MAKEENV=$(CUDACPP_MAKEENV)) -###$(info $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))")) -CUDACPP_BUILDDIR:=$(shell $(MAKE) $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))") -f $(CUDACPP_MAKEFILE) -pn 2>&1 | awk '/Building/{print $$3}' | sed s/BUILDDIR=//) -ifeq ($(CUDACPP_BUILDDIR),) -$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) -else -$(info CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)') -endif -CUDACPP_COMMONLIB=mg5amc_common -CUDACPP_CXXLIB=mg5amc_$(processid_short)_cpp -CUDACPP_CULIB=mg5amc_$(processid_short)_cuda - +LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias LIBS = $(LIBDIR)libbias.$(libext) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(MADLOOP_LIB) $(LOOP_LIBS) ifneq ("$(wildcard ../../Source/RUNNING)","") LINKLIBS += -lrunning - LIBS += $(LIBDIR)librunning.$(libext) + LIBS += $(LIBDIR)librunning.$(libext) endif +SOURCEDIR_GUARD:=../../Source/.timestamp_guard +# We use $(SOURCEDIR_GUARD) to figure out if Source is out of date. The Source makefile doesn't correctly +# update all files, so we need a proxy that is updated every time we run "$(MAKE) -C ../../Source". 
+$(SOURCEDIR_GUARD) ../../Source/discretesampler.mod &: ../../Source/*.f ../../Cards/param_card.dat ../../Cards/run_card.dat +ifneq ($(shell which flock 2>/dev/null),) + flock ../../Source/.lock -c "$(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD)" +else + $(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD) +endif + +$(LIBS): $(SOURCEDIR_GUARD) # Source files @@ -91,82 +85,83 @@ PROCESS= myamp.o genps.o unwgt.o setcuts.o get_color.o \ DSIG=driver.o $(patsubst %.f, %.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) DSIG_cudacpp=driver_cudacpp.o $(patsubst %.f, %_cudacpp.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) -SYMMETRY = symmetry.o idenparts.o +SYMMETRY = symmetry.o idenparts.o -# Binaries +# cudacpp targets: +CUDACPP_MAKEFILE := cudacpp.mk +ifneq (,$(wildcard $(CUDACPP_MAKEFILE))) +include $(CUDACPP_MAKEFILE) +endif -ifeq ($(UNAME),Darwin) -LDFLAGS += -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) -LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" -else -LDFLAGS += -Wl,--no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +ifeq ($(CUDACPP_BUILDDIR),) +$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) 
endif +CUDACPP_COMMONLIB=mg5amc_common +CUDACPP_CXXLIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so +CUDACPP_CULIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so -all: $(PROG)_fortran $(CUDACPP_BUILDDIR)/$(PROG)_cpp # also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (#503) +# Set up OpenMP if supported +OMPFLAGS ?= -fopenmp ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp LINKLIBS += -liomp5 # see #578 LINKLIBS += -lintlc # undefined reference to `_intel_fast_memcpy' else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -override OMPFLAGS = -fopenmp $(CUDACPP_BUILDDIR)/$(PROG)_cpp: LINKLIBS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -override OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang -else -override OMPFLAGS = -fopenmp +OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang endif -$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o - $(FC) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) - -$(LIBS): .libs -.libs: ../../Cards/param_card.dat ../../Cards/run_card.dat - cd ../../Source; make - touch $@ +# Binaries -$(CUDACPP_BUILDDIR)/.cudacpplibs: - $(MAKE) -f $(CUDACPP_MAKEFILE) - touch $@ +$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) # On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables ($ORIGIN on Linux) # On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary -ifeq ($(UNAME_S),Darwin) - override LIBFLAGSRPATH = -else ifeq ($(USEBUILDDIR),1) 
- override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' -else - override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/$(LIBDIR)' +ifneq ($(UNAME_S),Darwin) + LIBFLAGSRPATH := -Wl,-rpath,'$$ORIGIN:$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' endif -.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link +.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link madevent_cppnone_link madevent_cppsse4_link madevent_cppavx2_link madevent_cpp512y_link madevent_cpp512z_link clean cleanall cleansrc madevent_fortran_link: $(PROG)_fortran rm -f $(PROG) ln -s $(PROG)_fortran $(PROG) -madevent_cpp_link: $(CUDACPP_BUILDDIR)/$(PROG)_cpp - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) +madevent_cppnone_link: AVX=none +madevent_cppnone_link: cppnone + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -madevent_cuda_link: $(CUDACPP_BUILDDIR)/$(PROG)_cuda - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) +madevent_cppavx2_link: AVX=avx2 +madevent_cppavx2_link: cppavx2 + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512y_link: AVX=512y +madevent_cpp512y_link: cppavx512y + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512z_link: AVX=512z +madevent_cpp512z_link: cppavx512z + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -# Building $(PROG)_cpp also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (improved patch for cpp-only builds #503) -$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o $(CUDACPP_BUILDDIR)/.cudacpplibs - $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CXXLIB) $(LIBFLAGSRPATH) $(LDFLAGS) - if [ -f $(LIBDIR)/$(CUDACPP_BUILDDIR)/lib$(CUDACPP_CULIB).* ]; then $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROCESS) $(DSIG_cudacpp) 
auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CULIB) $(LIBFLAGSRPATH) $(LDFLAGS); fi +madevent_cuda_link: AVX=cuda +madevent_cuda_link: cuda + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) -$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(CUDACPP_BUILDDIR)/$(PROG)_cpp +$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(LIBS) $(CUDACPP_CXXLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) + +$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(LIBS) $(CUDACPP_CULIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) counters.o: counters.cc timer.h - $(CXX) $(CXXFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ ompnumthreads.o: ompnumthreads.cc ompnumthreads.h - $(CXX) -I. $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) $(OMPFLAGS) @@ -174,27 +169,14 @@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) gensym: $(SYMMETRY) configs.inc $(LIBS) $(FC) -o gensym $(SYMMETRY) -L$(LIBDIR) $(LINKLIBS) $(LDFLAGS) -###ifeq (,$(wildcard fbridge.inc)) # Pointless: fbridge.inc always exists as this is the cudacpp-modified makefile! 
-###$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat -### cd ../../Source/MODEL; make -### -###$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat -### cd ../../Source; make -### -###$(LIBDIR)libpdf.$(libext): -### cd ../../Source/PDF; make -### -###$(LIBDIR)libgammaUPC.$(libext): -### cd ../../Source/PDF/gammaUPC; make -###endif # Add source so that the compiler finds the DiscreteSampler module. $(MATRIX): %.o: %.f - $(FC) $(FFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC -%.o: %.f - $(FC) $(FFLAGS) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC + $(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC +%.o $(CUDACPP_BUILDDIR)/%.o: %.f + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -I../../Source/ -I../../Source/PDF/gammaUPC -c $< -o $@ %_cudacpp.o: %.f - $(FC) $(FFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ # Dependencies @@ -215,60 +197,42 @@ unwgt.o: genps.inc nexternal.inc symswap.inc cluster.inc run.inc message.inc \ initcluster.o: message.inc # Extra dependencies on discretesampler.mod +../../Source/discretesampler.mod: ../../Source/DiscreteSampler.f -auto_dsig.o: .libs -driver.o: .libs -driver_cudacpp.o: .libs -$(MATRIX): .libs -genps.o: .libs +auto_dsig.o: ../../Source/discretesampler.mod +driver.o: ../../Source/discretesampler.mod +driver_cudacpp.o: ../../Source/discretesampler.mod +$(MATRIX): ../../Source/discretesampler.mod +genps.o: ../../Source/discretesampler.mod # Cudacpp avxall targets -UNAME_P := $(shell uname -p) ifeq ($(UNAME_P),ppc64le) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else ifeq ($(UNAME_P),arm) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else -avxall: avxnone avxsse4 avxavx2 avx512y avx512z +cppall: cppnative cppnone cppsse4 cppavx2 cppavx512y cppavx512z endif -avxnone: $(PROG)_fortran $(DSIG_cudacpp) - @echo 
- $(MAKE) USEBUILDDIR=1 AVX=none - -avxsse4: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 - -avxavx2: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 - -avx512y: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y - -avx512z: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z - -###endif - -# Clean (NB: 'make clean' in Source calls 'make clean' in all P*) +ALL: cppall cuda -clean: # Clean builds: fortran in this Pn; cudacpp executables for one AVX in this Pn - $(RM) *.o gensym $(PROG) $(PROG)_fortran $(PROG)_forhel $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(CUDACPP_BUILDDIR)/$(PROG)_cuda +# Clean all architecture-specific builds: +clean: + $(RM) *.o gensym $(PROG) $(PROG)_* + $(RM) -rf build.*/*{.o,.so,.exe,.dylib,madevent_*} + @for dir in build.*; do if [ -z "$$(ls -A $${dir})" ]; then rm -r $${dir}; else echo "Not cleaning $${dir}; not empty"; fi; done -cleanavxs: clean # Clean builds: fortran in this Pn; cudacpp for all AVX in this Pn and in src - $(MAKE) -f $(CUDACPP_MAKEFILE) cleanall - rm -f $(CUDACPP_BUILDDIR)/.cudacpplibs - rm -f .libs +cleanall: cleansrc + for PROCESS in ../P[0-9]*; do $(MAKE) -C $${PROCESS} clean; done -cleanall: # Clean builds: fortran in all P* and in Source; cudacpp for all AVX in all P* and in src - make -C ../../Source cleanall - rm -rf $(LIBDIR)libbias.$(libext) - rm -f ../../Source/*.mod ../../Source/*/*.mod +# Clean one architecture-specific build +clean%: + $(RM) -r build.$*_* -distclean: cleanall # Clean all fortran and cudacpp builds as well as the googletest installation - $(MAKE) -f $(CUDACPP_MAKEFILE) distclean +# Clean common source directories (interferes with other P*) +cleansrc: + make -C ../../Source clean + $(RM) -f $(SOURCEDIR_GUARD) ../../Source/{*.mod,.lock} ../../Source/*/*.mod + $(RM) -r $(LIBDIR)libbias.$(libext) + if [ -d ../../src ]; then $(MAKE) -C ../../src -f cudacpp_src.mk clean; fi diff --git 
a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/runTest.cc b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/runTest.cc index d4a760a71b..6c77775fb2 100644 --- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/runTest.cc +++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/runTest.cc @@ -243,18 +243,20 @@ struct CUDATest : public CUDA_CPU_TestBase // Use two levels of macros to force stringification at the right level // (see https://gcc.gnu.org/onlinedocs/gcc-3.0.1/cpp_3.html#SEC17 and https://stackoverflow.com/a/3419392) // Google macro is in https://github.com/google/googletest/blob/master/googletest/include/gtest/gtest-param-test.h +/* clang-format off */ #define TESTID_CPU( s ) s##_CPU #define XTESTID_CPU( s ) TESTID_CPU( s ) #define MG_INSTANTIATE_TEST_SUITE_CPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); #define TESTID_GPU( s ) s##_GPU #define XTESTID_GPU( s ) TESTID_GPU( s ) #define MG_INSTANTIATE_TEST_SUITE_GPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); +/* clang-format on */ #ifdef __CUDACC__ MG_INSTANTIATE_TEST_SUITE_GPU( XTESTID_GPU( MG_EPOCH_PROCESS_ID ), MadgraphTest ); diff --git a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/testxxx.cc b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/testxxx.cc index 3361fe5aa9..1d315f6d75 100644 --- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/testxxx.cc +++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/testxxx.cc @@ -40,7 +40,7 @@ namespace mg5amcCpu { std::string FPEhandlerMessage = "unknown"; int FPEhandlerIevt = -1; - inline void FPEhandler( int sig ) + inline void FPEhandler( int 
) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU): '" << FPEhandlerMessage << "' ievt=" << FPEhandlerIevt << std::endl; @@ -71,11 +71,10 @@ TEST( XTESTID( MG_EPOCH_PROCESS_ID ), testxxx ) constexpr bool testEvents = !dumpEvents; // run the test? constexpr fptype toleranceXXXs = std::is_same::value ? 1.E-15 : 1.E-5; // Constant parameters - constexpr int neppM = MemoryAccessMomenta::neppM; // AOSOA layout constexpr int np4 = CPPProcess::np4; - const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') - assert( nevt % neppM == 0 ); // nevt must be a multiple of neppM - assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV + const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') + assert( nevt % MemoryAccessMomenta::neppM == 0 ); // nevt must be a multiple of neppM + assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV // Fill in the input momenta #ifdef __CUDACC__ mg5amcGpu::PinnedHostBufferMomenta hstMomenta( nevt ); // AOSOA[npagM][npar=4][np4=4][neppM] diff --git a/epochX/cudacpp/gg_ttggg.mad/bin/internal/banner.py b/epochX/cudacpp/gg_ttggg.mad/bin/internal/banner.py index bd1517985f..b408679c2f 100755 --- a/epochX/cudacpp/gg_ttggg.mad/bin/internal/banner.py +++ b/epochX/cudacpp/gg_ttggg.mad/bin/internal/banner.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. 
# # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,7 +53,7 @@ MADEVENT = False import madgraph.various.misc as misc import madgraph.iolibs.file_writers as file_writers - import madgraph.iolibs.files as files + import madgraph.iolibs.files as files import models.check_param_card as param_card_reader from madgraph import MG5DIR, MadGraph5Error, InvalidCmd @@ -80,36 +80,36 @@ class Banner(dict): 'mgproccard': 'MGProcCard', 'mgruncard': 'MGRunCard', 'ma5card_parton' : 'MA5Card_parton', - 'ma5card_hadron' : 'MA5Card_hadron', + 'ma5card_hadron' : 'MA5Card_hadron', 'mggenerationinfo': 'MGGenerationInfo', 'mgpythiacard': 'MGPythiaCard', 'mgpgscard': 'MGPGSCard', 'mgdelphescard': 'MGDelphesCard', 'mgdelphestrigger': 'MGDelphesTrigger', 'mgshowercard': 'MGShowerCard' } - + forbid_cdata = ['initrwgt'] - + def __init__(self, banner_path=None): """ """ if isinstance(banner_path, Banner): dict.__init__(self, banner_path) self.lhe_version = banner_path.lhe_version - return + return else: dict.__init__(self) - + #Look at the version if MADEVENT: self['mgversion'] = '#%s\n' % open(pjoin(MEDIR, 'MGMEVersion.txt')).read() else: info = misc.get_pkg_info() self['mgversion'] = info['version']+'\n' - + self.lhe_version = None - + if banner_path: self.read_banner(banner_path) @@ -123,7 +123,7 @@ def __init__(self, banner_path=None): 'mgruncard':'run_card.dat', 'mgpythiacard':'pythia_card.dat', 'mgpgscard' : 'pgs_card.dat', - 'mgdelphescard':'delphes_card.dat', + 'mgdelphescard':'delphes_card.dat', 'mgdelphestrigger':'delphes_trigger.dat', 'mg5proccard':'proc_card_mg5.dat', 'mgproccard': 'proc_card.dat', @@ -137,10 +137,10 @@ def __init__(self, banner_path=None): 'mgshowercard':'shower_card.dat', 'pythia8':'pythia8_card.dat', 'ma5card_parton':'madanalysis5_parton_card.dat', - 'ma5card_hadron':'madanalysis5_hadron_card.dat', + 'ma5card_hadron':'madanalysis5_hadron_card.dat', 'run_settings':'' } - + def read_banner(self, input_path): """read a banner""" 
@@ -151,7 +151,7 @@ def read_banner(self, input_path): def split_iter(string): return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL)) input_path = split_iter(input_path) - + text = '' store = False for line in input_path: @@ -170,13 +170,13 @@ def split_iter(string): text += line else: text += '%s%s' % (line, '\n') - - #reaching end of the banner in a event file avoid to read full file + + #reaching end of the banner in a event file avoid to read full file if "
" in line: break elif "" in line: break - + def __getattribute__(self, attr): """allow auto-build for the run_card/param_card/... """ try: @@ -187,23 +187,23 @@ def __getattribute__(self, attr): return self.charge_card(attr) - + def change_lhe_version(self, version): """change the lhe version associate to the banner""" - + version = float(version) if version < 3: version = 1 elif version > 3: raise Exception("Not Supported version") self.lhe_version = version - + def get_cross(self, witherror=False): """return the cross-section of the file""" if "init" not in self: raise Exception - + text = self["init"].split('\n') cross = 0 error = 0 @@ -217,13 +217,13 @@ def get_cross(self, witherror=False): return cross else: return cross, math.sqrt(error) - + def scale_init_cross(self, ratio): """modify the init information with the associate scale""" assert "init" in self - + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -231,29 +231,29 @@ def scale_init_cross(self, ratio): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break pid = int(pid) - + line = " %+13.7e %+13.7e %+13.7e %i" % \ (ratio*float(xsec), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + def get_pdg_beam(self): """return the pdg of each beam""" - + assert "init" in self - + all_lines = self["init"].split('\n') pdg1,pdg2,_ = all_lines[0].split(None, 2) return int(pdg1), int(pdg2) - + def load_basic(self, medir): """ Load the proc_card /param_card and run_card """ - + self.add(pjoin(medir,'Cards', 'param_card.dat')) self.add(pjoin(medir,'Cards', 'run_card.dat')) if os.path.exists(pjoin(medir, 'SubProcesses', 'procdef_mg5.dat')): @@ -261,29 +261,29 @@ def load_basic(self, medir): self.add(pjoin(medir,'Cards', 'proc_card_mg5.dat')) else: self.add(pjoin(medir,'Cards', 'proc_card.dat')) - + def change_seed(self, 
seed): """Change the seed value in the banner""" # 0 = iseed p = re.compile(r'''^\s*\d+\s*=\s*iseed''', re.M) new_seed_str = " %s = iseed" % seed self['mgruncard'] = p.sub(new_seed_str, self['mgruncard']) - + def add_generation_info(self, cross, nb_event): """add info on MGGeneration""" - + text = """ # Number of Events : %s # Integrated weight (pb) : %s """ % (nb_event, cross) self['MGGenerationInfo'] = text - + ############################################################################ # SPLIT BANNER ############################################################################ def split(self, me_dir, proc_card=True): """write the banner in the Cards directory. - proc_card argument is present to avoid the overwrite of proc_card + proc_card argument is present to avoid the overwrite of proc_card information""" for tag, text in self.items(): @@ -305,37 +305,37 @@ def check_pid(self, pid2label): """special routine removing width/mass of particles not present in the model This is usefull in case of loop model card, when we want to use the non loop model.""" - + if not hasattr(self, 'param_card'): self.charge_card('slha') - + for tag in ['mass', 'decay']: block = self.param_card.get(tag) for data in block: pid = data.lhacode[0] - if pid not in list(pid2label.keys()): + if pid not in list(pid2label.keys()): block.remove((pid,)) def get_lha_strategy(self): """get the lha_strategy: how the weight have to be handle by the shower""" - + if not self["init"]: raise Exception("No init block define") - + data = self["init"].split('\n')[0].split() if len(data) != 10: misc.sprint(len(data), self['init']) raise Exception("init block has a wrong format") return int(float(data[-2])) - + def set_lha_strategy(self, value): """set the lha_strategy: how the weight have to be handle by the shower""" - + if not (-4 <= int(value) <= 4): six.reraise(Exception, "wrong value for lha_strategy", value) if not self["init"]: raise Exception("No init block define") - + all_lines = 
self["init"].split('\n') data = all_lines[0].split() if len(data) != 10: @@ -351,13 +351,13 @@ def modify_init_cross(self, cross, allow_zero=False): assert isinstance(cross, dict) # assert "all" in cross assert "init" in self - + cross = dict(cross) for key in cross.keys(): if isinstance(key, str) and key.isdigit() and int(key) not in cross: cross[int(key)] = cross[key] - - + + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -365,7 +365,7 @@ def modify_init_cross(self, cross, allow_zero=False): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break @@ -383,23 +383,23 @@ def modify_init_cross(self, cross, allow_zero=False): (float(cross[pid]), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + ############################################################################ # WRITE BANNER ############################################################################ def write(self, output_path, close_tag=True, exclude=[]): """write the banner""" - + if isinstance(output_path, str): ff = open(output_path, 'w') else: ff = output_path - + if MADEVENT: header = open(pjoin(MEDIR, 'Source', 'banner_header.txt')).read() else: header = open(pjoin(MG5DIR,'Template', 'LO', 'Source', 'banner_header.txt')).read() - + if not self.lhe_version: self.lhe_version = self.get('run_card', 'lhe_version', default=1.0) if float(self.lhe_version) < 3: @@ -412,7 +412,7 @@ def write(self, output_path, close_tag=True, exclude=[]): for tag in [t for t in self.ordered_items if t in list(self.keys())]+ \ [t for t in self.keys() if t not in self.ordered_items]: - if tag in ['init'] or tag in exclude: + if tag in ['init'] or tag in exclude: continue capitalized_tag = self.capitalized_items[tag] if tag in self.capitalized_items else tag start_data, stop_data = '', '' @@ -422,19 +422,19 @@ def write(self, 
output_path, close_tag=True, exclude=[]): stop_data = ']]>\n' out = '<%(tag)s>%(start_data)s\n%(text)s\n%(stop_data)s\n' % \ {'tag':capitalized_tag, 'text':self[tag].strip(), - 'start_data': start_data, 'stop_data':stop_data} + 'start_data': start_data, 'stop_data':stop_data} try: ff.write(out) except: ff.write(out.encode('utf-8')) - - + + if not '/header' in exclude: out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) if 'init' in self and not 'init' in exclude: text = self['init'] @@ -444,22 +444,22 @@ def write(self, output_path, close_tag=True, exclude=[]): ff.write(out) except: ff.write(out.encode('utf-8')) - + if close_tag: - out = '\n' + out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) return ff - - + + ############################################################################ # BANNER ############################################################################ def add(self, path, tag=None): """Add the content of the file to the banner""" - + if not tag: card_name = os.path.basename(path) if 'param_card' in card_name: @@ -505,33 +505,33 @@ def add_text(self, tag, text): if tag == 'param_card': tag = 'slha' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' - + self[tag.lower()] = text - - + + def charge_card(self, tag): """Build the python object associated to the card""" - + if tag in ['param_card', 'param']: tag = 'slha' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag - + if tag == 
'slha': param_card = self[tag].split('\n') self.param_card = param_card_reader.ParamCard(param_card) @@ -544,56 +544,56 @@ def charge_card(self, tag): self.proc_card = ProcCard(proc_card) return self.proc_card elif tag =='mgshowercard': - shower_content = self[tag] + shower_content = self[tag] if MADEVENT: import internal.shower_card as shower_card else: import madgraph.various.shower_card as shower_card self.shower_card = shower_card.ShowerCard(shower_content, True) - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.shower_card.testing = False return self.shower_card elif tag =='foanalyse': - analyse_content = self[tag] + analyse_content = self[tag] if MADEVENT: import internal.FO_analyse_card as FO_analyse_card else: import madgraph.various.FO_analyse_card as FO_analyse_card - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True) self.FOanalyse_card.testing = False return self.FOanalyse_card - + def get_detail(self, tag, *arg, **opt): """return a specific """ - + if tag in ['param_card', 'param']: tag = 'slha' attr_tag = 'param_card' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], '%s not recognized' % tag - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) + 
self.charge_card(attr_tag) card = getattr(self, attr_tag) if len(arg) == 0: @@ -613,7 +613,7 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 2 and tag == 'slha': try: return card[arg[0]].get(arg[1:]) @@ -621,15 +621,15 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 0: return card else: raise Exception("Unknow command") - + #convenient alias get = get_detail - + def set(self, tag, *args): """modify one of the cards""" @@ -637,27 +637,27 @@ def set(self, tag, *args): tag = 'slha' attr_tag = 'param_card' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized' - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) - + self.charge_card(attr_tag) + card = getattr(self, attr_tag) if len(args) ==2: if tag == 'mg5proccard': @@ -666,20 +666,20 @@ def set(self, tag, *args): card[args[0]] = args[1] else: card[args[:-1]] = args[-1] - - + + @misc.multiple_try() def add_to_file(self, path, seed=None, out=None): """Add the banner to a file and change the associate seed in the banner""" if seed is not None: self.set("run_card", "iseed", seed) - + if not out: path_out = "%s.tmp" % path else: path_out = out - + ff = self.write(path_out, close_tag=False, exclude=['MGGenerationInfo', '/header', 'init']) ff.write("## END BANNER##\n") @@ -698,44 +698,44 @@ def add_to_file(self, path, seed=None, out=None): files.mv(path_out, path) - + def 
split_banner(banner_path, me_dir, proc_card=True): """a simple way to split a banner""" - + banner = Banner(banner_path) banner.split(me_dir, proc_card) - + def recover_banner(results_object, level, run=None, tag=None): """as input we receive a gen_crossxhtml.AllResults object. This define the current banner and load it """ - + if not run: - try: - _run = results_object.current['run_name'] - _tag = results_object.current['tag'] + try: + _run = results_object.current['run_name'] + _tag = results_object.current['tag'] except Exception: return Banner() else: _run = run if not tag: - try: - _tag = results_object[run].tags[-1] + try: + _tag = results_object[run].tags[-1] except Exception as error: if os.path.exists( pjoin(results_object.path,'Events','%s_banner.txt' % (run))): tag = None else: - return Banner() + return Banner() else: _tag = tag - - path = results_object.path - if tag: + + path = results_object.path + if tag: banner_path = pjoin(path,'Events',run,'%s_%s_banner.txt' % (run, tag)) else: banner_path = pjoin(results_object.path,'Events','%s_banner.txt' % (run)) - + if not os.path.exists(banner_path): if level != "parton" and tag != _tag: return recover_banner(results_object, level, _run, results_object[_run].tags[0]) @@ -754,12 +754,12 @@ def recover_banner(results_object, level, run=None, tag=None): return Banner(lhe.banner) # security if the banner was remove (or program canceled before created it) - return Banner() - + return Banner() + banner = Banner(banner_path) - - - + + + if level == 'pythia': if 'mgpythiacard' in banner: del banner['mgpythiacard'] @@ -768,13 +768,13 @@ def recover_banner(results_object, level, run=None, tag=None): if tag in banner: del banner[tag] return banner - + class InvalidRunCard(InvalidCmd): pass class ProcCard(list): """Basic Proccard object""" - + history_header = \ '#************************************************************\n' + \ '#* MadGraph5_aMC@NLO *\n' + \ @@ -798,10 +798,10 @@ class ProcCard(list): '#* run as 
./bin/mg5_aMC filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - - - - + + + + def __init__(self, init=None): """ initialize a basic proc_card""" self.info = {'model': 'sm', 'generate':None, @@ -810,13 +810,13 @@ def __init__(self, init=None): if init: self.read(init) - + def read(self, init): """read the proc_card and save the information""" - + if isinstance(init, str): #path to file init = open(init, 'r') - + store_line = '' for line in init: line = line.rstrip() @@ -828,28 +828,28 @@ def read(self, init): store_line = "" if store_line: raise Exception("WRONG CARD FORMAT") - - + + def move_to_last(self, cmd): """move an element to the last history.""" for line in self[:]: if line.startswith(cmd): self.remove(line) list.append(self, line) - + def append(self, line): """"add a line in the proc_card perform automatically cleaning""" - + line = line.strip() cmds = line.split() if len(cmds) == 0: return - + list.append(self, line) - + # command type: cmd = cmds[0] - + if cmd == 'output': # Remove previous outputs from history self.clean(allow_for_removal = ['output'], keep_switch=True, @@ -875,7 +875,7 @@ def append(self, line): elif cmds[1] == 'proc_v4': #full cleaning self[:] = [] - + def clean(self, to_keep=['set','add','load'], remove_bef_last=None, @@ -884,13 +884,13 @@ def clean(self, to_keep=['set','add','load'], keep_switch=False): """Remove command in arguments from history. All command before the last occurrence of 'remove_bef_last' - (including it) will be removed (but if another options tells the opposite). + (including it) will be removed (but if another options tells the opposite). 'to_keep' is a set of line to always keep. - 'to_remove' is a set of line to always remove (don't care about remove_bef_ + 'to_remove' is a set of line to always remove (don't care about remove_bef_ status but keep_switch acts.). 
- if 'allow_for_removal' is define only the command in that list can be + if 'allow_for_removal' is define only the command in that list can be remove of the history for older command that remove_bef_lb1. all parameter - present in to_remove are always remove even if they are not part of this + present in to_remove are always remove even if they are not part of this list. keep_switch force to keep the statement remove_bef_??? which changes starts the removal mode. @@ -900,8 +900,8 @@ def clean(self, to_keep=['set','add','load'], if __debug__ and allow_for_removal: for arg in to_keep: assert arg not in allow_for_removal - - + + nline = -1 removal = False #looping backward @@ -912,7 +912,7 @@ def clean(self, to_keep=['set','add','load'], if not removal and remove_bef_last: if self[nline].startswith(remove_bef_last): removal = True - switch = True + switch = True # if this is the switch and is protected pass to the next element if switch and keep_switch: @@ -923,12 +923,12 @@ def clean(self, to_keep=['set','add','load'], if any([self[nline].startswith(arg) for arg in to_remove]): self.pop(nline) continue - + # Only if removal mode is active! 
if removal: if allow_for_removal: # Only a subset of command can be removed - if any([self[nline].startswith(arg) + if any([self[nline].startswith(arg) for arg in allow_for_removal]): self.pop(nline) continue @@ -936,10 +936,10 @@ def clean(self, to_keep=['set','add','load'], # All command have to be remove but protected self.pop(nline) continue - + # update the counter to pass to the next element nline -= 1 - + def get(self, tag, default=None): if isinstance(tag, int): list.__getattr__(self, tag) @@ -954,32 +954,32 @@ def get(self, tag, default=None): except ValueError: name, content = line[7:].split(None,1) out.append((name, content)) - return out + return out else: return self.info[tag] - + def write(self, path): """write the proc_card to a given path""" - + fsock = open(path, 'w') fsock.write(self.history_header) for line in self: while len(line) > 70: - sub, line = line[:70]+"\\" , line[70:] + sub, line = line[:70]+"\\" , line[70:] fsock.write(sub+"\n") else: fsock.write(line+"\n") - -class InvalidCardEdition(InvalidCmd): pass - + +class InvalidCardEdition(InvalidCmd): pass + class ConfigFile(dict): """ a class for storing/dealing with input file. - """ + """ def __init__(self, finput=None, **opt): """initialize a new instance. input can be an instance of MadLoopParam, - a file, a path to a file, or simply Nothing""" - + a file, a path to a file, or simply Nothing""" + if isinstance(finput, self.__class__): dict.__init__(self) for key in finput.__dict__: @@ -989,7 +989,7 @@ def __init__(self, finput=None, **opt): return else: dict.__init__(self) - + # Initialize it with all the default value self.user_set = set() self.auto_set = set() @@ -1000,15 +1000,15 @@ def __init__(self, finput=None, **opt): self.comments = {} # comment associated to parameters. can be display via help message # store the valid options for a given parameter. 
self.allowed_value = {} - + self.default_setup() self.plugin_input(finput) - + # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, **opt) - + @@ -1028,7 +1028,7 @@ def __add__(self, other): base = self.__class__(self) #base = copy.copy(self) base.update((key.lower(),value) for key, value in other.items()) - + return base def __radd__(self, other): @@ -1036,26 +1036,26 @@ def __radd__(self, other): new = copy.copy(other) new.update((key, value) for key, value in self.items()) return new - + def __contains__(self, key): return dict.__contains__(self, key.lower()) def __iter__(self): - + for name in super(ConfigFile, self).__iter__(): yield self.lower_to_case[name.lower()] - - + + #iter = super(ConfigFile, self).__iter__() #misc.sprint(iter) #return (self.lower_to_case[name] for name in iter) - + def keys(self): return [name for name in self] - + def items(self): return [(name,self[name]) for name in self] - + @staticmethod def warn(text, level, raiseerror=False): """convenient proxy to raiseerror/print warning""" @@ -1071,11 +1071,11 @@ def warn(text, level, raiseerror=False): log = lambda t: logger.log(level, t) elif level: log = level - + return log(text) def post_set(self, name, value, change_userdefine, raiseerror): - + if value is None: value = self[name] @@ -1087,25 +1087,25 @@ def post_set(self, name, value, change_userdefine, raiseerror): return getattr(self, 'post_set_%s' % name)(value, change_userdefine, raiseerror) else: raise - + def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): """set the attribute and set correctly the type if the value is a string. change_userdefine on True if we have to add the parameter in user_set """ - + if not len(self): #Should never happen but when deepcopy/pickle self.__init__() - + name = name.strip() - lower_name = name.lower() - + lower_name = name.lower() + # 0. 
check if this parameter is a system only one if change_userdefine and lower_name in self.system_only: text='%s is a private entry which can not be modify by the user. Keep value at %s' % (name,self[name]) self.warn(text, 'critical', raiseerror) return - + #1. check if the parameter is set to auto -> pass it to special if lower_name in self: targettype = type(dict.__getitem__(self, lower_name)) @@ -1115,22 +1115,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): self.user_set.remove(lower_name) #keep old value. self.post_set(lower_name, 'auto', change_userdefine, raiseerror) - return + return elif lower_name in self.auto_set: self.auto_set.remove(lower_name) - + # 2. Find the type of the attribute that we want if lower_name in self.list_parameter: targettype = self.list_parameter[lower_name] - - - + + + if isinstance(value, str): # split for each comma/space value = value.strip() if value.startswith('[') and value.endswith(']'): value = value[1:-1] - #do not perform split within a " or ' block + #do not perform split within a " or ' block data = re.split(r"((? bad input dropped.append(val) - + if not new_values: text= "value '%s' for entry '%s' is not valid. Preserving previous value: '%s'.\n" \ % (value, name, self[lower_name]) text += "allowed values are any list composed of the following entries: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) - return self.warn(text, 'warning', raiseerror) - elif dropped: + return self.warn(text, 'warning', raiseerror) + elif dropped: text = "some value for entry '%s' are not valid. 
Invalid items are: '%s'.\n" \ % (name, dropped) text += "value will be set to %s" % new_values - text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) + text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) self.warn(text, 'warning') values = new_values # make the assignment - dict.__setitem__(self, lower_name, values) + dict.__setitem__(self, lower_name, values) if change_userdefine: self.user_set.add(lower_name) #check for specific action - return self.post_set(lower_name, None, change_userdefine, raiseerror) + return self.post_set(lower_name, None, change_userdefine, raiseerror) elif lower_name in self.dict_parameter: - targettype = self.dict_parameter[lower_name] + targettype = self.dict_parameter[lower_name] full_reset = True #check if we just update the current dict or not - + if isinstance(value, str): value = value.strip() # allowed entry: @@ -1209,7 +1209,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): # name , value => just add the entry # name value => just add the entry # {name1:value1, name2:value2} => full reset - + # split for each comma/space if value.startswith('{') and value.endswith('}'): new_value = {} @@ -1219,23 +1219,23 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): x, y = pair.split(':') x, y = x.strip(), y.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] new_value[x] = y value = new_value elif ',' in value: x,y = value.split(',') value = {x.strip():y.strip()} full_reset = False - + elif ':' in value: x,y = value.split(':') value = {x.strip():y.strip()} - full_reset = False + full_reset = False else: x,y = value.split() value = {x:y} - full_reset = False - + full_reset = False + if isinstance(value, dict): for key in value: value[key] = self.format_variable(value[key], targettype, name=name) @@ -1248,7 +1248,7 @@ def __setitem__(self, 
name, value, change_userdefine=False,raiseerror=False): if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - elif name in self: + elif name in self: targettype = type(self[name]) else: logger.debug('Trying to add argument %s in %s. ' % (name, self.__class__.__name__) +\ @@ -1256,22 +1256,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): suggestions = [k for k in self.keys() if k.startswith(name[0].lower())] if len(suggestions)>0: logger.debug("Did you mean one of the following: %s"%suggestions) - self.add_param(lower_name, self.format_variable(UnknownType(value), + self.add_param(lower_name, self.format_variable(UnknownType(value), UnknownType, name)) self.lower_to_case[lower_name] = name if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - + value = self.format_variable(value, targettype, name=name) #check that the value is allowed: if lower_name in self.allowed_value and '*' not in self.allowed_value[lower_name]: valid = False allowed = self.allowed_value[lower_name] - + # check if the current value is allowed or not (set valid to True) if value in allowed: - valid=True + valid=True elif isinstance(value, str): value = value.lower().strip() allowed = [str(v).lower() for v in allowed] @@ -1279,7 +1279,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): i = allowed.index(value) value = self.allowed_value[lower_name][i] valid=True - + if not valid: # act if not valid: text = "value '%s' for entry '%s' is not valid. 
Preserving previous value: '%s'.\n" \ @@ -1303,7 +1303,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, if __debug__: if lower_name in self: raise Exception("Duplicate case for %s in %s" % (name,self.__class__)) - + dict.__setitem__(self, lower_name, value) self.lower_to_case[lower_name] = name if isinstance(value, list): @@ -1318,12 +1318,12 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, elif isinstance(value, dict): allvalues = list(value.values()) if any([type(allvalues[0]) != type(v) for v in allvalues]): - raise Exception("All entry should have the same type") - self.dict_parameter[lower_name] = type(allvalues[0]) + raise Exception("All entry should have the same type") + self.dict_parameter[lower_name] = type(allvalues[0]) if '__type__' in value: del value['__type__'] dict.__setitem__(self, lower_name, value) - + if allowed and allowed != ['*']: self.allowed_value[lower_name] = allowed if lower_name in self.list_parameter: @@ -1333,8 +1333,8 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, assert value in allowed or '*' in allowed #elif isinstance(value, bool) and allowed != ['*']: # self.allowed_value[name] = [True, False] - - + + if system: self.system_only.add(lower_name) if comment: @@ -1342,7 +1342,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, def do_help(self, name): """return a minimal help for the parameter""" - + out = "## Information on parameter %s from class %s\n" % (name, self.__class__.__name__) if name.lower() in self: out += "## current value: %s (parameter should be of type %s)\n" % (self[name], type(self[name])) @@ -1351,7 +1351,7 @@ def do_help(self, name): else: out += "## Unknown for this class\n" if name.lower() in self.user_set: - out += "## This value is considered as being set by the user\n" + out += "## This value is considered as being set by the user\n" else: out += "## This value is considered as 
being set by the system\n" if name.lower() in self.allowed_value: @@ -1359,17 +1359,17 @@ def do_help(self, name): out += "Allowed value are: %s\n" % ','.join([str(p) for p in self.allowed_value[name.lower()]]) else: out += "Suggested value are : %s\n " % ','.join([str(p) for p in self.allowed_value[name.lower()] if p!='*']) - + logger.info(out) return out @staticmethod def guess_type_from_value(value): "try to guess the type of the string --do not use eval as it might not be safe" - + if not isinstance(value, str): return str(value.__class__.__name__) - + #use ast.literal_eval to be safe since value is untrusted # add a timeout to mitigate infinite loop, memory stack attack with misc.stdchannel_redirected(sys.stdout, os.devnull): @@ -1388,7 +1388,7 @@ def guess_type_from_value(value): @staticmethod def format_variable(value, targettype, name="unknown"): """assign the value to the attribute for the given format""" - + if isinstance(targettype, str): if targettype in ['str', 'int', 'float', 'bool']: targettype = eval(targettype) @@ -1412,7 +1412,7 @@ def format_variable(value, targettype, name="unknown"): (name, type(value), targettype, value)) else: raise InvalidCmd("Wrong input type for %s found %s and expecting %s for value %s" %\ - (name, type(value), targettype, value)) + (name, type(value), targettype, value)) else: if targettype != UnknownType: value = value.strip() @@ -1441,8 +1441,8 @@ def format_variable(value, targettype, name="unknown"): value = int(value) elif value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} - value =int(value[:-1]) * convert[value[-1]] - elif '/' in value or '*' in value: + value =int(value[:-1]) * convert[value[-1]] + elif '/' in value or '*' in value: try: split = re.split('(\*|/)',value) v = float(split[0]) @@ -1461,7 +1461,7 @@ def format_variable(value, targettype, name="unknown"): try: value = float(value.replace('d','e')) except ValueError: - raise InvalidCmd("%s can not be mapped to an 
integer" % value) + raise InvalidCmd("%s can not be mapped to an integer" % value) try: new_value = int(value) except ValueError: @@ -1471,7 +1471,7 @@ def format_variable(value, targettype, name="unknown"): value = new_value else: raise InvalidCmd("incorect input: %s need an integer for %s" % (value,name)) - + elif targettype == float: if value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} @@ -1496,33 +1496,33 @@ def format_variable(value, targettype, name="unknown"): value = v else: raise InvalidCmd("type %s is not handle by the card" % targettype) - + return value - - + + def __getitem__(self, name): - + lower_name = name.lower() if __debug__: if lower_name not in self: if lower_name in [key.lower() for key in self] : raise Exception("Some key are not lower case %s. Invalid use of the class!"\ % [key for key in self if key.lower() != key]) - + if lower_name in self.auto_set: return 'auto' - + return dict.__getitem__(self, name.lower()) - + get = __getitem__ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): """convenient way to change attribute. changeifuserset=False means that the value is NOT change is the value is not on default. - user=True, means that the value will be marked as modified by the user - (potentially preventing future change to the value) + user=True, means that the value will be marked as modified by the user + (potentially preventing future change to the value) """ # changeifuserset=False -> we need to check if the user force a value. 
@@ -1530,8 +1530,8 @@ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): if name.lower() in self.user_set: #value modified by the user -> do nothing return - self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) - + self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) + class RivetCard(ConfigFile): @@ -1706,7 +1706,7 @@ def setRelevantParamCard(self, f_params, f_relparams): yexec_dict = {} yexec_line = exec_line + "yaxis_relvar = " + self['yaxis_relvar'] exec(yexec_line, locals(), yexec_dict) - if self['yaxis_label'] == "": + if self['yaxis_label'] == "": self['yaxis_label'] = "yaxis_relvar" f_relparams.write("{0} = {1}\n".format(self['yaxis_label'], yexec_dict['yaxis_relvar'])) else: @@ -1715,11 +1715,11 @@ def setRelevantParamCard(self, f_params, f_relparams): class ProcCharacteristic(ConfigFile): """A class to handle information which are passed from MadGraph to the madevent - interface.""" - + interface.""" + def default_setup(self): """initialize the directory to the default value""" - + self.add_param('loop_induced', False) self.add_param('has_isr', False) self.add_param('has_fsr', False) @@ -1735,16 +1735,16 @@ def default_setup(self): self.add_param('pdg_initial1', [0]) self.add_param('pdg_initial2', [0]) self.add_param('splitting_types',[], typelist=str) - self.add_param('perturbation_order', [], typelist=str) - self.add_param('limitations', [], typelist=str) - self.add_param('hel_recycling', False) + self.add_param('perturbation_order', [], typelist=str) + self.add_param('limitations', [], typelist=str) + self.add_param('hel_recycling', False) self.add_param('single_color', True) - self.add_param('nlo_mixed_expansion', True) + self.add_param('nlo_mixed_expansion', True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if 
isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1752,49 +1752,49 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: if '#' in line: line = line.split('#',1)[0] if not line: continue - + if '=' in line: key, value = line.split('=',1) self[key.strip()] = value - + def write(self, outputpath): """write the file""" template ="# Information about the process #\n" template +="#########################################\n" - + fsock = open(outputpath, 'w') fsock.write(template) - + for key, value in self.items(): fsock.write(" %s = %s \n" % (key, value)) - - fsock.close() - + + fsock.close() + class GridpackCard(ConfigFile): """an object for the GridpackCard""" - + def default_setup(self): """default value for the GridpackCard""" - + self.add_param("GridRun", True) self.add_param("gevents", 2500) self.add_param("gseed", 1) - self.add_param("ngran", -1) - + self.add_param("ngran", -1) + def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1802,7 +1802,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -1812,19 +1812,19 @@ def read(self, finput): self[line[1].strip()] = line[0].replace('\'','').strip() def write(self, output_file, template=None): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'grid_card_default.dat') else: template = pjoin(MEDIR, 'Cards', 'grid_card_default.dat') - + text = 
"" - for line in open(template,'r'): + for line in open(template,'r'): nline = line.split('#')[0] nline = nline.split('!')[0] comment = line[len(nline):] @@ -1832,19 +1832,19 @@ def write(self, output_file, template=None): if len(nline) != 2: text += line elif nline[1].strip() in self: - text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) + text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) else: logger.info('Adding missing parameter %s to current run_card (with default value)' % nline[1].strip()) - text += line - + text += line + if isinstance(output_file, str): fsock = open(output_file,'w') else: fsock = output_file - + fsock.write(text) fsock.close() - + class PY8Card(ConfigFile): """ Implements the Pythia8 card.""" @@ -1868,7 +1868,7 @@ def add_default_subruns(self, type): def default_setup(self): """ Sets up the list of available PY8 parameters.""" - + # Visible parameters # ================== self.add_param("Main:numberOfEvents", -1) @@ -1877,11 +1877,11 @@ def default_setup(self): self.add_param("JetMatching:qCut", -1.0, always_write_to_card=False) self.add_param("JetMatching:doShowerKt",False,always_write_to_card=False) # -1 means that it is automatically set. - self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) + self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) # for CKKWL merging self.add_param("Merging:TMS", -1.0, always_write_to_card=False) self.add_param("Merging:Process", '', always_write_to_card=False) - # -1 means that it is automatically set. + # -1 means that it is automatically set. self.add_param("Merging:nJetMax", -1, always_write_to_card=False) # for both merging, chose whether to also consider different merging # scale values for the extra weights related to scale and PDF variations. 
@@ -1918,10 +1918,10 @@ def default_setup(self): comment='This allows to turn on/off hadronization alltogether.') self.add_param("partonlevel:mpi", True, hidden=True, always_write_to_card=False, comment='This allows to turn on/off MPI alltogether.') - self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, + self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, always_write_to_card=False, comment='This parameter is automatically set to True by MG5aMC when doing MLM merging with PY8.') - + # for MLM merging self.add_param("JetMatching:merge", False, hidden=True, always_write_to_card=False, comment='Specifiy if we are merging sample of different multiplicity.') @@ -1931,9 +1931,9 @@ def default_setup(self): comment='Value of the merging scale below which one does not even write the HepMC event.') self.add_param("JetMatching:doVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. in SysCalc).') - self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) + self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) self.add_param("JetMatching:setMad", False, hidden=True, always_write_to_card=False, - comment='Specify one must read inputs from the MadGraph banner.') + comment='Specify one must read inputs from the MadGraph banner.') self.add_param("JetMatching:coneRadius", 1.0, hidden=True, always_write_to_card=False) self.add_param("JetMatching:nQmatch",4,hidden=True, always_write_to_card=False) # for CKKWL merging (common with UMEPS, UNLOPS) @@ -1946,7 +1946,7 @@ def default_setup(self): self.add_param("Merging:applyVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. 
in SysCalc).') self.add_param("Merging:includeWeightInXsection", True, hidden=True, always_write_to_card=False, - comment='If turned off, then the option belows forces PY8 to keep the original weight.') + comment='If turned off, then the option belows forces PY8 to keep the original weight.') self.add_param("Merging:muRen", 91.188, hidden=True, always_write_to_card=False, comment='Set renormalization scales of the 2->2 process.') self.add_param("Merging:muFacInME", 91.188, hidden=True, always_write_to_card=False, @@ -1958,7 +1958,7 @@ def default_setup(self): # To be added in subruns for CKKWL self.add_param("Merging:mayRemoveDecayProducts", False, hidden=True, always_write_to_card=False) self.add_param("Merging:doKTMerging", False, hidden=True, always_write_to_card=False) - self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) + self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) self.add_param("Merging:doPTLundMerging", False, hidden=True, always_write_to_card=False) # Special Pythia8 paremeters useful to simplify the shower. @@ -1975,33 +1975,33 @@ def default_setup(self): # Add parameters controlling the subruns execution flow. # These parameters should not be part of PY8SubRun daughter. self.add_default_subruns('parameters') - + def __init__(self, *args, **opts): - # Parameters which are not printed in the card unless they are - # 'user_set' or 'system_set' or part of the + # Parameters which are not printed in the card unless they are + # 'user_set' or 'system_set' or part of the # self.hidden_params_to_always_print set. self.hidden_param = [] self.hidden_params_to_always_write = set() self.visible_params_to_always_write = set() # List of parameters that should never be written out given the current context. self.params_to_never_write = set() - + # Parameters which have been set by the system (i.e. 
MG5 itself during # the regular course of the shower interface) self.system_set = set() - + # Add attributes controlling the subruns execution flow. # These attributes should not be part of PY8SubRun daughter. self.add_default_subruns('attributes') - - # Parameters which have been set by the + + # Parameters which have been set by the super(PY8Card, self).__init__(*args, **opts) - def add_param(self, name, value, hidden=False, always_write_to_card=True, + def add_param(self, name, value, hidden=False, always_write_to_card=True, comment=None): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. The option 'hidden' decides whether the parameter should be visible to the user. The option 'always_write_to_card' decides whether it should @@ -2017,7 +2017,7 @@ def add_param(self, name, value, hidden=False, always_write_to_card=True, self.hidden_params_to_always_write.add(name) else: if always_write_to_card: - self.visible_params_to_always_write.add(name) + self.visible_params_to_always_write.add(name) if not comment is None: if not isinstance(comment, str): raise MadGraph5Error("Option 'comment' must be a string, not"+\ @@ -2036,7 +2036,7 @@ def add_subrun(self, py8_subrun): self.subruns[py8_subrun['Main:subrun']] = py8_subrun if not 'LHEFInputs:nSubruns' in self.user_set: self['LHEFInputs:nSubruns'] = max(self.subruns.keys()) - + def userSet(self, name, value, **opts): """Set an attribute of this card, following a user_request""" self.__setitem__(name, value, change_userdefine=True, **opts) @@ -2044,10 +2044,10 @@ def userSet(self, name, value, **opts): self.system_set.remove(name.lower()) def vetoParamWriteOut(self, name): - """ Forbid the writeout of a specific parameter of this card when the + """ Forbid the writeout of a specific parameter of this card when the "write" function will be invoked.""" 
self.params_to_never_write.add(name.lower()) - + def systemSet(self, name, value, **opts): """Set an attribute of this card, independently of a specific user request and only if not already user_set.""" @@ -2058,7 +2058,7 @@ def systemSet(self, name, value, **opts): if force or name.lower() not in self.user_set: self.__setitem__(name, value, change_userdefine=False, **opts) self.system_set.add(name.lower()) - + def MadGraphSet(self, name, value, **opts): """ Sets a card attribute, but only if it is absent or not already user_set.""" @@ -2068,18 +2068,18 @@ def MadGraphSet(self, name, value, **opts): force = False if name.lower() not in self or (force or name.lower() not in self.user_set): self.__setitem__(name, value, change_userdefine=False, **opts) - self.system_set.add(name.lower()) - + self.system_set.add(name.lower()) + def defaultSet(self, name, value, **opts): self.__setitem__(name, value, change_userdefine=False, **opts) - + @staticmethod def pythia8_formatting(value, formatv=None): """format the variable into pythia8 card convention. 
The type is detected by default""" if not formatv: if isinstance(value,UnknownType): - formatv = 'unknown' + formatv = 'unknown' elif isinstance(value, bool): formatv = 'bool' elif isinstance(value, int): @@ -2095,7 +2095,7 @@ def pythia8_formatting(value, formatv=None): formatv = 'str' else: assert formatv - + if formatv == 'unknown': # No formatting then return str(value) @@ -2116,7 +2116,7 @@ def pythia8_formatting(value, formatv=None): elif formatv == 'float': return '%.10e' % float(value) elif formatv == 'shortfloat': - return '%.3f' % float(value) + return '%.3f' % float(value) elif formatv == 'str': return "%s" % value elif formatv == 'list': @@ -2124,9 +2124,9 @@ def pythia8_formatting(value, formatv=None): return ','.join([PY8Card.pythia8_formatting(arg, 'shortfloat') for arg in value]) else: return ','.join([PY8Card.pythia8_formatting(arg) for arg in value]) - - def write(self, output_file, template, read_subrun=False, + + def write(self, output_file, template, read_subrun=False, print_only_visible=False, direct_pythia_input=False, add_missing=True): """ Write the card to output_file using a specific template. 
> 'print_only_visible' specifies whether or not the hidden parameters @@ -2143,28 +2143,28 @@ def write(self, output_file, template, read_subrun=False, or p.lower() in self.user_set] # Filter against list of parameters vetoed for write-out visible_param = [p for p in visible_param if p.lower() not in self.params_to_never_write] - + # Now the hidden param which must be written out if print_only_visible: hidden_output_param = [] else: hidden_output_param = [p for p in self if p.lower() in self.hidden_param and not p.lower() in self.user_set and - (p.lower() in self.hidden_params_to_always_write or + (p.lower() in self.hidden_params_to_always_write or p.lower() in self.system_set)] # Filter against list of parameters vetoed for write-out hidden_output_param = [p for p in hidden_output_param if p not in self.params_to_never_write] - + if print_only_visible: subruns = [] else: if not read_subrun: subruns = sorted(self.subruns.keys()) - + # Store the subruns to write in a dictionary, with its ID in key # and the corresponding stringstream in value subruns_to_write = {} - + # Sort these parameters nicely so as to put together parameters # belonging to the same group (i.e. prefix before the ':' in their name). def group_params(params): @@ -2191,7 +2191,7 @@ def group_params(params): # First dump in a temporary_output (might need to have a second pass # at the very end to update 'LHEFInputs:nSubruns') output = StringIO.StringIO() - + # Setup template from which to read if isinstance(template, str): if os.path.isfile(template): @@ -2199,7 +2199,7 @@ def group_params(params): elif '\n' in template: tmpl = StringIO.StringIO(template) else: - raise Exception("File input '%s' not found." % file_input) + raise Exception("File input '%s' not found." 
% file_input) elif template is None: # Then use a dummy empty StringIO, hence skipping the reading tmpl = StringIO.StringIO() @@ -2257,8 +2257,8 @@ def group_params(params): # Remove all of its variables (so that nothing is overwritten) DummySubrun.clear() DummySubrun.write(subruns_to_write[int(value)], - tmpl, read_subrun=True, - print_only_visible=print_only_visible, + tmpl, read_subrun=True, + print_only_visible=print_only_visible, direct_pythia_input=direct_pythia_input) logger.info('Adding new unknown subrun with ID %d.'% @@ -2267,7 +2267,7 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - + # Change parameters which must be output if param in visible_param: new_value = PY8Card.pythia8_formatting(self[param]) @@ -2286,10 +2286,10 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - - # Substitute the value. + + # Substitute the value. # If it is directly the pytia input, then don't write the param if it - # is not in the list of visible_params_to_always_write and was + # is not in the list of visible_params_to_always_write and was # not user_set or system_set if ((not direct_pythia_input) or (param.lower() in self.visible_params_to_always_write) or @@ -2304,16 +2304,16 @@ def group_params(params): output.write(template%(param_entry, value_entry.replace(value,new_value))) - + # Proceed to next line last_pos = tmpl.tell() line = tmpl.readline() - + # If add_missing is False, make sure to empty the list of remaining parameters if not add_missing: visible_param = [] hidden_output_param = [] - + # Now output the missing parameters. Warn about visible ones. if len(visible_param)>0 and not template is None: output.write( @@ -2343,12 +2343,12 @@ def group_params(params): """%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else '')) for param in hidden_output_param: if param.lower() in self.comments: - comment = '\n'.join('! %s'%c for c in + comment = '\n'.join('! 
%s'%c for c in self.comments[param.lower()].split('\n')) output.write(comment+'\n') output.write('%s=%s\n'%(param,PY8Card.pythia8_formatting(self[param]))) - - # Don't close the file if we were reading a subrun, but simply write + + # Don't close the file if we were reading a subrun, but simply write # output and return now if read_subrun: output_file.write(output.getvalue()) @@ -2382,12 +2382,12 @@ def group_params(params): out.close() else: output_file.write(output.getvalue()) - + def read(self, file_input, read_subrun=False, setter='default'): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file. - The setter option choses the authority that sets potential - modified/new parameters. It can be either: + The setter option choses the authority that sets potential + modified/new parameters. It can be either: 'default' or 'user' or 'system'""" if isinstance(file_input, str): if "\n" in file_input: @@ -2423,8 +2423,8 @@ def read(self, file_input, read_subrun=False, setter='default'): raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%\ line) if '!' in value: - value,_ = value.split('!',1) - + value,_ = value.split('!',1) + # Read a subrun if detected: if param=='Main:subrun': if read_subrun: @@ -2451,7 +2451,7 @@ def read(self, file_input, read_subrun=False, setter='default'): last_pos = finput.tell() line = finput.readline() continue - + # Read parameter. The case of a parameter not defined in the card is # handled directly in ConfigFile. @@ -2478,7 +2478,7 @@ def add_default_subruns(self, type): def __init__(self, *args, **opts): """ Initialize a subrun """ - + # Force user to set it manually. 
subrunID = -1 if 'subrun_id' in opts: @@ -2489,7 +2489,7 @@ def __init__(self, *args, **opts): def default_setup(self): """Sets up the list of available PY8SubRun parameters.""" - + # Add all default PY8Card parameters super(PY8SubRun, self).default_setup() # Make sure they are all hidden @@ -2501,33 +2501,33 @@ def default_setup(self): self.add_param("Main:subrun", -1) self.add_param("Beams:LHEF", "events.lhe.gz") - + class RunBlock(object): """ Class for a series of parameter in the run_card that can be either visible or hidden. - name: allow to set in the default run_card $name to set where that + name: allow to set in the default run_card $name to set where that block need to be inserted template_on: information to include is block is active template_off: information to include is block is not active on_fields/off_fields: paramater associated to the block - can be specify but are otherwise automatically but + can be specify but are otherwise automatically but otherwise determined from the template. - + function: status(self,run_card) -> return which template need to be used check_validity(self, runcard) -> sanity check - create_default_for_process(self, run_card, proc_characteristic, - history, proc_def) + create_default_for_process(self, run_card, proc_characteristic, + history, proc_def) post_set_XXXX(card, value, change_userdefine, raiseerror) -> fct called when XXXXX is set post_set(card, value, change_userdefine, raiseerror, **opt) -> fct called when a parameter is changed - -> no access to parameter name + -> no access to parameter name -> not called if post_set_XXXX is defined """ - - + + def __init__(self, name, template_on, template_off, on_fields=False, off_fields=False): self.name = name @@ -2550,7 +2550,7 @@ def fields(self): def find_fields_from_template(template): """ return the list of fields from a template. 
checking line like %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ - + return re.findall(r"^\s*%\((.*)\)s\s*=\s*\1", template, re.M) def get_template(self, card): @@ -2565,7 +2565,7 @@ def get_unused_template(self, card): if self.status(card): return self.template_off else: - return self.template_on + return self.template_on def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -2594,20 +2594,20 @@ def manage_parameters(self, card, written, to_write): written.add(name) if name in to_write: to_write.remove(name) - + def check_validity(self, runcard): """run self consistency check here --avoid to use runcard[''] = xxx here since it can trigger post_set function""" return def create_default_for_process(self, run_card, proc_characteristic, history, proc_def): - return + return # @staticmethod # def post_set(card, value, change_userdefine, raiseerror, **opt): # """default action to run when a parameter of the block is defined. # Here we do not know which parameter is modified. if this is needed. 
# then one need to define post_set_XXXXX(card, value, change_userdefine, raiseerror) -# and then only that function is used +# and then only that function is used # """ # # if 'pdlabel' in card.user_set: @@ -2621,7 +2621,7 @@ class RunCard(ConfigFile): blocks = [] parameter_in_block = {} - allowed_lep_densities = {} + allowed_lep_densities = {} default_include_file = 'run_card.inc' default_autodef_file = 'run.inc' donewarning = [] @@ -2637,7 +2637,7 @@ def plugin_input(self, finput): curr_dir = os.path.dirname(os.path.dirname(finput.name)) elif isinstance(finput, str): curr_dir = os.path.dirname(os.path.dirname(finput)) - + if curr_dir: if os.path.exists(pjoin(curr_dir, 'bin', 'internal', 'plugin_run_card')): # expected format {} passing everything as optional argument @@ -2646,7 +2646,7 @@ def plugin_input(self, finput): continue opts = dict(eval(line)) self.add_param(**opts) - + @classmethod def fill_post_set_from_blocks(cls): """set the post_set function for any parameter defined in a run_block""" @@ -2659,8 +2659,8 @@ def fill_post_set_from_blocks(cls): elif hasattr(block, 'post_set'): setattr(cls, 'post_set_%s' % parameter, block.post_set) cls.parameter_in_block[parameter] = block - - + + def __new__(cls, finput=None, **opt): cls.fill_post_set_from_blocks() @@ -2718,9 +2718,9 @@ def __new__(cls, finput=None, **opt): return super(RunCard, cls).__new__(cls, finput, **opt) def __init__(self, *args, **opts): - + # The following parameter are updated in the defaultsetup stage. - + #parameter for which no warning should be raised if not define self.hidden_param = [] # in which include file the parameer should be written @@ -2739,11 +2739,11 @@ def __init__(self, *args, **opts): self.cuts_parameter = {} # parameter added where legacy requires an older value. 
self.system_default = {} - + self.display_block = [] # set some block to be displayed self.fct_mod = {} # {param: (fct_pointer, *argument, **opts)} - self.cut_class = {} + self.cut_class = {} self.warned=False @@ -2776,11 +2776,11 @@ def get_lepton_densities(cls): else: cls.allowed_lep_densities[identity].append(name) - def add_param(self, name, value, fortran_name=None, include=True, + def add_param(self, name, value, fortran_name=None, include=True, hidden=False, legacy=False, cut=False, system=False, sys_default=None, autodef=False, fct_mod=None, **opts): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. fortran_name: defines what is the associate name in the f77 code include: defines if we have to put the value in the include file @@ -2795,7 +2795,7 @@ def add_param(self, name, value, fortran_name=None, include=True, fct_mod: defines a function to run if the parameter is modify in the include file options of **opts: - allowed: list of valid options. '*' means anything else should be allowed. - empty list means anything possible as well. + empty list means anything possible as well. 
- comment: add comment for writing/help - typelist: type of the list if default is empty """ @@ -2823,9 +2823,9 @@ def add_param(self, name, value, fortran_name=None, include=True, self.fct_mod[name] = fct_mod def read(self, finput, consistency=True, unknown_warning=True, **opt): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -2836,7 +2836,7 @@ def read(self, finput, consistency=True, unknown_warning=True, **opt): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -2864,8 +2864,8 @@ def add_unknown_entry(self, name, value, unknow_warning): This is based on the guess_entry_fromname for the various syntax providing input. This then call add_param accordingly. - This function does not returns anything. - """ + This function does not returns anything. + """ if name == "dsqrt_q2fact1" and not self.LO: raise InvalidRunCard("Looks like you passed a LO run_card for a NLO run. Please correct") @@ -2903,7 +2903,7 @@ def add_unknown_entry(self, name, value, unknow_warning): " The type was assigned to %s. 
\n"+\ " The definition of that variable will %sbe automatically added to fortran file %s\n"+\ " The value of that variable will %sbe passed to the fortran code via fortran file %s",\ - name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, + name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, "" if opts.get('autodef', False) else "not", "" if opts.get('autodef', False) in [True,False] else opts.get('autodef'), "" if opts.get('include', True) else "not", "" if opts.get('include', True) in [True,False] else opts.get('include')) RunCard.donewarning.append(name) @@ -2923,19 +2923,19 @@ def valid_line(self, line, tmp): return False elif line.strip().startswith('%'): parameter = line[line.find('(')+1:line.find(')')] - + try: cond = self.cuts_parameter[parameter] except KeyError: return True - - + + if template_options.get(cond, default) or cond is True: return True else: - return False + return False else: - return True + return True def reset_simd(self, old_value, new_value, name, *args, **opts): @@ -2946,28 +2946,28 @@ def make_clean(self,old_value, new_value, name, dir): raise Exception('pass make clean for ', dir) def make_Ptouch(self,old_value, new_value, name, reset): - raise Exception('pass Ptouch for ', reset) - + raise Exception('pass Ptouch for ', reset) + def write(self, output_file, template=None, python_template=False, write_hidden=False, template_options=None, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" - to_write = set(self.user_set) + to_write = set(self.user_set) written = set() if not template: raise Exception if not template_options: template_options = collections.defaultdict(str) - + if python_template: text = open(template,'r').read() - text = text.split('\n') + text = text.split('\n') # remove if templating - text = [l if not l.startswith('#IF') else 
l[l.find(')# ')+2:] + text = [l if not l.startswith('#IF') else l[l.find(')# ')+2:] for l in text if self.valid_line(l, template_options)] text ='\n'.join(text) - + if python_template and not to_write: import string if self.blocks: @@ -2981,14 +2981,14 @@ def write(self, output_file, template=None, python_template=False, if not self.list_parameter: text = text % self else: - data = dict((key.lower(),value) for key, value in self.items()) + data = dict((key.lower(),value) for key, value in self.items()) for name in self.list_parameter: if self.list_parameter[name] != str: data[name] = ', '.join(str(v) for v in data[name]) else: data[name] = "['%s']" % "', '".join(str(v) for v in data[name]) text = text % data - else: + else: text = "" for line in open(template,'r'): nline = line.split('#')[0] @@ -3005,11 +3005,11 @@ def write(self, output_file, template=None, python_template=False, this_group = this_group[0] text += this_group.get_template(self) % self this_group.manage_parameters(self, written, to_write) - + elif len(nline) != 2: text += line elif nline[1].strip() in self: - + name = nline[1].strip().lower() value = self[name] if name in self.list_parameter: @@ -3026,15 +3026,15 @@ def write(self, output_file, template=None, python_template=False, else: endline = '' text += ' %s\t= %s %s%s' % (value, name, comment, endline) - written.add(name) + written.add(name) if name in to_write: to_write.remove(name) else: logger.info('Adding missing parameter %s to current %s (with default value)', (name, self.filename)) - written.add(name) - text += line + written.add(name) + text += line for b in self.blocks: if b.status(self): @@ -3057,7 +3057,7 @@ def write(self, output_file, template=None, python_template=False, else: #partial writting -> add only what is needed to_add = [] - for line in b.get_template(self).split('\n'): + for line in b.get_template(self).split('\n'): nline = line.split('#')[0] nline = nline.split('!')[0] nline = nline.split('=') @@ -3072,8 +3072,8 @@ 
def write(self, output_file, template=None, python_template=False, continue #already include before else: to_add.append(line % {nline[1].strip():value, name:value}) - written.add(name) - + written.add(name) + if name in to_write: to_write.remove(name) else: @@ -3095,13 +3095,13 @@ def write(self, output_file, template=None, python_template=False, text += '\n'.join(to_add) if to_write or write_hidden: - text+="""#********************************************************************* + text+="""#********************************************************************* # Additional hidden parameters #********************************************************************* -""" +""" if write_hidden: # - # do not write hidden parameter not hidden for this template + # do not write hidden parameter not hidden for this template # if python_template: written = written.union(set(re.findall('\%\((\w*)\)s', open(template,'r').read(), re.M))) @@ -3129,7 +3129,7 @@ def get_last_value_include(self, output_dir): if inc file does not exist we will return the current value (i.e. set has no change) """ - #remember that + #remember that # default_include_file is a class variable # self.includepath is on the form include_path : [list of param ] out = {} @@ -3165,7 +3165,7 @@ def get_value_from_include(self, path, list_of_params, output_dir): with open(pjoin(output_dir,path), 'r') as fsock: text = fsock.read() - + for name in list_of_params: misc.sprint(name, name in self.fortran_name) misc.sprint(self.fortran_name[name] if name in self.fortran_name[name] else name) @@ -3191,11 +3191,11 @@ def get_value_from_include(self, path, list_of_params, output_dir): misc.sprint(self.fortran_name) misc.sprint(text) raise Exception - return out + return out def get_default(self, name, default=None, log_level=None): - """return self[name] if exist otherwise default. log control if we + """return self[name] if exist otherwise default. 
log control if we put a warning or not if we use the default value""" lower_name = name.lower() @@ -3216,13 +3216,13 @@ def get_default(self, name, default=None, log_level=None): log_level = 20 if not default: default = dict.__getitem__(self, name.lower()) - + logger.log(log_level, '%s missed argument %s. Takes default: %s' % (self.filename, name, default)) self[name] = default return default else: - return self[name] + return self[name] def mod_inc_pdlabel(self, value): """flag pdlabel has 'dressed' if one of the special lepton PDF with beamstralung. @@ -3237,16 +3237,16 @@ def edit_dummy_fct_from_file(self, filelist, outdir): filelist is a list of input files (given by the user) containing a series of function to be placed in replacement of standard (typically dummy) functions of the code. - This use LO/NLO class attribute that defines which function name need to - be placed in which file. + This use LO/NLO class attribute that defines which function name need to + be placed in which file. First time this is used, a backup of the original file is done in order to - recover if the user remove some of those files. + recover if the user remove some of those files. The function present in the file are determined automatically via regular expression. and only that function is replaced in the associated file. 
- function in the filelist starting with user_ will also be include within the + function in the filelist starting with user_ will also be include within the dummy_fct.f file """ @@ -3269,7 +3269,7 @@ def edit_dummy_fct_from_file(self, filelist, outdir): fsock = file_writers.FortranWriter(tmp,'w') function_text = fsock.remove_routine(text, fct) fsock.close() - test = open(tmp,'r').read() + test = open(tmp,'r').read() if fct not in self.dummy_fct_file: if fct.startswith('user_'): self.dummy_fct_file[fct] = self.dummy_fct_file['user_'] @@ -3315,22 +3315,22 @@ def guess_entry_fromname(self, name, value): - vartype: type of the variable - name: name of the variable (stripped from metadata) - options: additional options for the add_param - rules: - - if name starts with str_, int_, float_, bool_, list_, dict_ then + rules: + - if name starts with str_, int_, float_, bool_, list_, dict_ then - vartype is set accordingly - name is strip accordingly - otherwise guessed from value (which is string) - if name contains min/max - vartype is set to float - options has an added {'cut':True} - - suffixes like + - suffixes like - will be removed from named - will be added in options (for add_param) as {'cut':True} see add_param documentation for the list of supported options - if include is on False set autodef to False (i.e. 
enforce it False for future change) """ - # local function + # local function def update_typelist(value, name, opts): """convert a string to a list and update opts to keep track of the type """ value = value.strip() @@ -3358,7 +3358,7 @@ def update_typelist(value, name, opts): opts[key] = val name = name.replace("<%s=%s>" %(key,val), '') - # get vartype + # get vartype # first check that name does not force it supported_type = ["str", "float", "int", "bool", "list", "dict"] if "_" in name and name.split("_")[0].lower() in supported_type: @@ -3406,13 +3406,13 @@ def f77_formatting(value, formatv=None): value = str(value).lower() else: assert formatv - + if formatv == 'bool': if str(value) in ['1','T','.true.','True']: return '.true.' else: return '.false.' - + elif formatv == 'int': try: return str(int(value)) @@ -3422,12 +3422,12 @@ def f77_formatting(value, formatv=None): return str(int(fl)) else: raise - + elif formatv == 'float': if isinstance(value, str): value = value.replace('d','e') return ('%.10e' % float(value)).replace('e','d') - + elif formatv == 'str': # Check if it is a list if value.strip().startswith('[') and value.strip().endswith(']'): @@ -3437,20 +3437,20 @@ def f77_formatting(value, formatv=None): enumerate(elements)] else: return "'%s'" % value - - + + def check_validity(self, log_level=30): """check that parameter missing in the card are set to the expected value""" for name, value in self.system_default.items(): self.set(name, value, changeifuserset=False) - + for name in self.includepath[False]: to_bypass = self.hidden_param + list(self.legacy_parameter.keys()) if name not in to_bypass: - self.get_default(name, log_level=log_level) + self.get_default(name, log_level=log_level) for name in self.legacy_parameter: if self[name] != self.legacy_parameter[name]: @@ -3458,28 +3458,28 @@ def check_validity(self, log_level=30): for block in self.blocks: block.check_validity(self) - + def update_system_parameter_for_include(self): - """update hidden 
system only parameter for the correct writtin in the + """update hidden system only parameter for the correct writtin in the include""" return - + def write_include_file(self, output_dir, output_file=None): """Write the various include file in output_dir. The entry True of self.includepath will be written in run_card.inc The entry False will not be written anywhere output_file allows testing by providing stream. - This also call the function to add variable definition for the - variable with autodef=True (handle by write_autodef function) + This also call the function to add variable definition for the + variable with autodef=True (handle by write_autodef function) """ - + # ensure that all parameter are coherent and fix those if needed self.check_validity() - + #ensusre that system only parameter are correctly set self.update_system_parameter_for_include() @@ -3490,10 +3490,10 @@ def write_include_file(self, output_dir, output_file=None): self.write_autodef(output_dir, output_file=None) # check/fix status of customised functions self.edit_dummy_fct_from_file(self["custom_fcts"], os.path.dirname(output_dir)) - + for incname in self.includepath: self.write_one_include_file(output_dir, incname, output_file) - + for name,value in value_in_old_include.items(): if value != self[name]: self.fct_mod[name][0](value, self[name], name, *self.fct_mod[name][1],**self.fct_mod[name][2]) @@ -3515,13 +3515,13 @@ def write_one_include_file(self, output_dir, incname, output_file=None): fsock = file_writers.FortranWriter(pjoin(output_dir,pathinc+'.tmp')) - for key in self.includepath[incname]: + for key in self.includepath[incname]: #define the fortran name if key in self.fortran_name: fortran_name = self.fortran_name[key] else: fortran_name = key - + if incname in self.include_as_parameter: fsock.writelines('INTEGER %s\n' % fortran_name) #get the value with warning if the user didn't set it @@ -3534,7 +3534,7 @@ def write_one_include_file(self, output_dir, incname, 
output_file=None): # in case of a list, add the length of the list as 0th # element in fortran. Only in case of integer or float # list (not for bool nor string) - targettype = self.list_parameter[key] + targettype = self.list_parameter[key] if targettype is bool: pass elif targettype is int: @@ -3550,7 +3550,7 @@ def write_one_include_file(self, output_dir, incname, output_file=None): elif isinstance(value, dict): for fortran_name, onevalue in value.items(): line = '%s = %s \n' % (fortran_name, self.f77_formatting(onevalue)) - fsock.writelines(line) + fsock.writelines(line) elif isinstance(incname,str) and 'compile' in incname: if incname in self.include_as_parameter: line = 'PARAMETER (%s=%s)' %( fortran_name, value) @@ -3585,7 +3585,7 @@ def write_autodef(self, output_dir, output_file=None): filetocheck = dict(self.definition_path) if True not in self.definition_path: filetocheck[True] = [] - + for incname in filetocheck: if incname is True: @@ -3598,7 +3598,7 @@ def write_autodef(self, output_dir, output_file=None): if output_file: fsock = output_file input = fsock.getvalue() - + else: input = open(pjoin(output_dir,pathinc),'r').read() # do not define fsock here since we might not need to overwrite it @@ -3608,7 +3608,7 @@ def write_autodef(self, output_dir, output_file=None): previous = re.findall(re_pat, input, re.M) # now check which one needed to be added (and remove those identicaly defined) to_add = [] - for key in filetocheck[incname]: + for key in filetocheck[incname]: curr_type = self[key].__class__.__name__ length = "" if curr_type in [list, "list"]: @@ -3640,10 +3640,10 @@ def write_autodef(self, output_dir, output_file=None): fsock.truncate(0) fsock.seek(0) - # remove outdated lines + # remove outdated lines lines = input.split('\n') if previous: - out = [line for line in lines if not re.search(re_pat, line, re.M) or + out = [line for line in lines if not re.search(re_pat, line, re.M) or re.search(re_pat, line, re.M).groups() not in previous] else: 
out = lines @@ -3662,7 +3662,7 @@ def write_autodef(self, output_dir, output_file=None): stop = out.index('C STOP USER COMMON BLOCK') out = out[:start]+ out[stop+1:] #add new common-block - if self.definition_path[incname]: + if self.definition_path[incname]: out.append("C START USER COMMON BLOCK") if isinstance(pathinc , str): filename = os.path.basename(pathinc).split('.',1)[0] @@ -3675,10 +3675,10 @@ def write_autodef(self, output_dir, output_file=None): filename = filename.upper() out.append(" COMMON/USER_CUSTOM_%s/%s" %(filename,','.join( self.definition_path[incname]))) out.append('C STOP USER COMMON BLOCK') - + if not output_file: fsock.writelines(out) - fsock.close() + fsock.close() else: # for iotest out = ["%s\n" %l for l in out] @@ -3702,7 +3702,7 @@ def get_idbmup(lpp): def get_banner_init_information(self): """return a dictionary with the information needed to write the first line of the block of the lhe file.""" - + output = {} output["idbmup1"] = self.get_idbmup(self['lpp1']) output["idbmup2"] = self.get_idbmup(self['lpp2']) @@ -3713,7 +3713,7 @@ def get_banner_init_information(self): output["pdfsup1"] = self.get_pdf_id(self["pdlabel"]) output["pdfsup2"] = self.get_pdf_id(self["pdlabel"]) return output - + def get_pdf_id(self, pdf): if pdf == "lhapdf": lhaid = self["lhaid"] @@ -3721,19 +3721,19 @@ def get_pdf_id(self, pdf): return lhaid[0] else: return lhaid - else: + else: try: return {'none': 0, 'iww': 0, 'eva':0, 'edff':0, 'chff':0, 'cteq6_m':10000,'cteq6_l':10041,'cteq6l1':10042, 'nn23lo':246800,'nn23lo1':247000,'nn23nlo':244800 - }[pdf] + }[pdf] except: - return 0 - + return 0 + def get_lhapdf_id(self): return self.get_pdf_id(self['pdlabel']) - def remove_all_cut(self): + def remove_all_cut(self): """remove all the cut""" for name in self.cuts_parameter: @@ -3749,7 +3749,7 @@ def remove_all_cut(self): elif 'eta' in name: self[name] = -1 else: - self[name] = 0 + self[name] = 0 
################################################################################################ ### Define various template subpart for the LO Run_card @@ -3767,11 +3767,11 @@ def remove_all_cut(self): %(nb_proton1)s = nb_proton1 # number of proton for the first beam %(nb_neutron1)s = nb_neutron1 # number of neutron for the first beam %(mass_ion1)s = mass_ion1 # mass of the heavy ion (first beam) -# Note that seting differently the two beams only work if you use +# Note that seting differently the two beams only work if you use # group_subprocess=False when generating your matrix-element %(nb_proton2)s = nb_proton2 # number of proton for the second beam %(nb_neutron2)s = nb_neutron2 # number of neutron for the second beam - %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) + %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ template_off = "# To see heavy ion options: type \"update ion_pdf\"" @@ -3834,11 +3834,11 @@ def remove_all_cut(self): # Frame for polarization ------------------------------------------------------------------------------------ template_on = \ """#********************************************************************* -# Frame where to evaluate the matrix-element (not the cut!) for polarization +# Frame where to evaluate the matrix-element (not the cut!) for polarization #********************************************************************* %(me_frame)s = me_frame ! list of particles to sum-up to define the rest-frame ! in which to evaluate the matrix-element - ! [1,2] means the partonic center of mass + ! 
[1,2] means the partonic center of mass """ template_off = "" frame_block = RunBlock('frame', template_on=template_on, template_off=template_off) @@ -3891,7 +3891,7 @@ def remove_all_cut(self): # CONTROL The extra running scale (not QCD) * # Such running is NOT include in systematics computation * #*********************************************************************** - %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale + %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale %(mue_ref_fixed)s = mue_ref_fixed ! scale to use if fixed scale mode %(mue_over_ref)s = mue_over_ref ! ratio to mur if dynamical scale """ @@ -3908,10 +3908,10 @@ def remove_all_cut(self): %(tmin_for_channel)s = tmin_for_channel ! limit the non-singular reach of --some-- channel of integration related to T-channel diagram (value between -1 and 0), -1 is no impact %(survey_splitting)s = survey_splitting ! for loop-induced control how many core are used at survey for the computation of a single iteration. %(survey_nchannel_per_job)s = survey_nchannel_per_job ! control how many Channel are integrated inside a single job on cluster/multicore - %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) + %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) +#********************************************************************* +# Compilation flag. #********************************************************************* -# Compilation flag. -#********************************************************************* %(global_flag)s = global_flag ! fortran optimization flag use for the all code. %(aloha_flag)s = aloha_flag ! fortran optimization flag for aloha function. Suggestions: '-ffast-math' %(matrix_flag)s = matrix_flag ! fortran optimization flag for matrix.f function. 
Suggestions: '-O3' @@ -3948,7 +3948,7 @@ def check_validity(self, card): if card['pdlabel'] != card['pdlabel1']: dict.__setitem__(card, 'pdlabel', card['pdlabel1']) elif card['pdlabel1'] in sum(card.allowed_lep_densities.values(),[]): - raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") + raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel2'] in sum(card.allowed_lep_densities.values(),[]): raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel1'] == 'none': @@ -3962,7 +3962,7 @@ def check_validity(self, card): dict.__setitem__(card, 'pdlabel2', card['pdlabel']) if abs(card['lpp1']) == 1 == abs(card['lpp2']) and card['pdlabel1'] != card['pdlabel2']: - raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") + raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -4028,7 +4028,7 @@ def post_set(card, value, change_userdefine, raiseerror, name='unknown', **opt): if name == 'fixed_fac_scale2' and 'fixed_fac_scale1' not in card.user_set: dict.__setitem__(card, 'fixed_fac_scale1', card['fixed_fac_scale']) if name == 'fixed_fac_scale1' and 'fixed_fac_scale2' not in card.user_set: - dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) + dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) def status(self, card): @@ -4061,32 +4061,32 @@ def status(self, card): class RunCardLO(RunCard): """an object to handle in a nice way the run_card information""" - + blocks = [heavy_ion_block, beam_pol_block, syscalc_block, ecut_block, frame_block, eva_scale_block, mlm_block, ckkw_block, psoptim_block, pdlabel_block, fixedfacscale, running_block] dummy_fct_file = {"dummy_cuts": 
pjoin("SubProcesses","dummy_fct.f"), "get_dummy_x1": pjoin("SubProcesses","dummy_fct.f"), - "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), + "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), "dummy_boostframe": pjoin("SubProcesses","dummy_fct.f"), "user_dynamical_scale": pjoin("SubProcesses","dummy_fct.f"), "bias_wgt_custom": pjoin("SubProcesses","dummy_fct.f"), "user_": pjoin("SubProcesses","dummy_fct.f") # all function starting by user will be added to that file } - + include_as_parameter = ['vector.inc'] if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_lo.dat") - + def default_setup(self): """default value for the run_card.dat""" - + self.add_param("run_tag", "tag_1", include=False) self.add_param("gridpack", False) self.add_param("time_of_flight", -1.0, include=False) - self.add_param("nevents", 10000) + self.add_param("nevents", 10000) self.add_param("iseed", 0) self.add_param("python_seed", -2, include=False, hidden=True, comment="controlling python seed [handling in particular the final unweighting].\n -1 means use default from random module.\n -2 means set to same value as iseed") self.add_param("lpp1", 1, fortran_name="lpp(1)", allowed=[-1,1,0,2,3,9,-2,-3,4,-4], @@ -4106,7 +4106,7 @@ def default_setup(self): self.add_param('nb_neutron1', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(1)", comment='For heavy ion physics nb of neutron in the ion (for both beam but if group_subprocess was False)') self.add_param('nb_neutron2', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(2)", - comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') + comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') self.add_param('mass_ion1', -1.0, hidden=True, fortran_name="mass_ion(1)", allowed=[-1,0, 0.938, 207.9766521*0.938, 0.000511, 0.105, '*'], comment='For heavy ion physics mass in GeV of the ion (of beam 1)') 
@@ -4133,11 +4133,11 @@ def default_setup(self): self.add_param("mue_over_ref", 1.0, hidden=True, comment='ratio mu_other/mu for dynamical scale') self.add_param("ievo_eva",0,hidden=True, allowed=[0,1],fortran_name="ievo_eva", comment='eva: 0 for EW pdf muf evolution by q^2; 1 for evo by pT^2') - + # Bias module options self.add_param("bias_module", 'None', include=False, hidden=True) self.add_param('bias_parameters', {'__type__':1.0}, include='BIAS/bias.inc', hidden=True) - + #matching self.add_param("scalefact", 1.0) self.add_param("ickkw", 0, allowed=[0,1], hidden=True, comment="\'0\' for standard fixed order computation.\n\'1\' for MLM merging activates alphas and pdf re-weighting according to a kt clustering of the QCD radiation.") @@ -4221,7 +4221,7 @@ def default_setup(self): self.add_param("mmaa", 0.0, cut='aa') self.add_param("mmll", 0.0, cut='ll') self.add_param("mmjjmax", -1.0, cut='jj') - self.add_param("mmbbmax", -1.0, cut='bb') + self.add_param("mmbbmax", -1.0, cut='bb') self.add_param("mmaamax", -1.0, cut='aa') self.add_param("mmllmax", -1.0, cut='ll') self.add_param("mmnl", 0.0, cut='LL') @@ -4231,9 +4231,9 @@ def default_setup(self): self.add_param("ptllmax", -1.0, cut='ll') self.add_param("xptj", 0.0, cut='jj') self.add_param("xptb", 0.0, cut='bb') - self.add_param("xpta", 0.0, cut='aa') + self.add_param("xpta", 0.0, cut='aa') self.add_param("xptl", 0.0, cut='ll') - # ordered pt jet + # ordered pt jet self.add_param("ptj1min", 0.0, cut='jj') self.add_param("ptj1max", -1.0, cut='jj') self.add_param("ptj2min", 0.0, cut='jj') @@ -4241,7 +4241,7 @@ def default_setup(self): self.add_param("ptj3min", 0.0, cut='jjj') self.add_param("ptj3max", -1.0, cut='jjj') self.add_param("ptj4min", 0.0, cut='j'*4) - self.add_param("ptj4max", -1.0, cut='j'*4) + self.add_param("ptj4max", -1.0, cut='j'*4) self.add_param("cutuse", 0, cut='jj') # ordered pt lepton self.add_param("ptl1min", 0.0, cut='l'*2) @@ -4249,7 +4249,7 @@ def default_setup(self): 
self.add_param("ptl2min", 0.0, cut='l'*2) self.add_param("ptl2max", -1.0, cut='l'*2) self.add_param("ptl3min", 0.0, cut='l'*3) - self.add_param("ptl3max", -1.0, cut='l'*3) + self.add_param("ptl3max", -1.0, cut='l'*3) self.add_param("ptl4min", 0.0, cut='l'*4) self.add_param("ptl4max", -1.0, cut='l'*4) # Ht sum of jets @@ -4257,7 +4257,7 @@ def default_setup(self): self.add_param("htjmax", -1.0, cut='j'*2) self.add_param("ihtmin", 0.0, cut='J'*2) self.add_param("ihtmax", -1.0, cut='J'*2) - self.add_param("ht2min", 0.0, cut='J'*3) + self.add_param("ht2min", 0.0, cut='J'*3) self.add_param("ht3min", 0.0, cut='J'*3) self.add_param("ht4min", 0.0, cut='J'*4) self.add_param("ht2max", -1.0, cut='J'*3) @@ -4267,7 +4267,7 @@ def default_setup(self): self.add_param("ptgmin", 0.0, cut='aj') self.add_param("r0gamma", 0.4, hidden=True) self.add_param("xn", 1.0, hidden=True) - self.add_param("epsgamma", 1.0, hidden=True) + self.add_param("epsgamma", 1.0, hidden=True) self.add_param("isoem", True, hidden=True) self.add_param("xetamin", 0.0, cut='jj') self.add_param("deltaeta", 0.0, cut='j'*2) @@ -4280,7 +4280,7 @@ def default_setup(self): self.add_param("use_syst", True) self.add_param('systematics_program', 'systematics', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics, syscalc') self.add_param('systematics_arguments', ['--mur=0.5,1,2', '--muf=0.5,1,2', '--pdf=errorset'], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. 
Look at the help of the systematics function for more details.') - + self.add_param("sys_scalefact", "0.5 1 2", include=False, hidden=True) self.add_param("sys_alpsfact", "None", include=False, hidden=True) self.add_param("sys_matchscale", "auto", include=False, hidden=True) @@ -4315,8 +4315,8 @@ def default_setup(self): self.add_param('aloha_flag', '', include=False, hidden=True, comment='global fortran compilation flag, suggestion: -ffast-math', fct_mod=(self.make_clean, ('Source/DHELAS'),{})) self.add_param('matrix_flag', '', include=False, hidden=True, comment='fortran compilation flag for the matrix-element files, suggestion -O3', - fct_mod=(self.make_Ptouch, ('matrix'),{})) - self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', + fct_mod=(self.make_Ptouch, ('matrix'),{})) + self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', fortran_name='VECSIZE_MEMMAX', fct_mod=(self.reset_simd,(),{})) # parameter allowing to define simple cut via the pdg @@ -4329,24 +4329,24 @@ def default_setup(self): self.add_param('eta_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False) - + self.add_param('pdg_cut',[0], system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], system=True) # store pt min self.add_param('ptmax4pdg',[-1.], system=True) self.add_param('Emin4pdg',[0.], system=True) # store pt min - self.add_param('Emax4pdg',[-1.], system=True) + self.add_param('Emax4pdg',[-1.], system=True) self.add_param('etamin4pdg',[0.], system=True) # store pt min - self.add_param('etamax4pdg',[-1.], system=True) + self.add_param('etamax4pdg',[-1.], system=True) self.add_param('mxxmin4pdg',[-1.], system=True) self.add_param('mxxpart_antipart', [False], system=True) - - - + + + def check_validity(self): """ """ 
- + super(RunCardLO, self).check_validity() - + #Make sure that nhel is only either 0 (i.e. no MC over hel) or #1 (MC over hel with importance sampling). In particular, it can #no longer be > 1. @@ -4357,12 +4357,12 @@ def check_validity(self): "not %s." % self['nhel']) if int(self['maxjetflavor']) > 6: raise InvalidRunCard('maxjetflavor should be lower than 5! (6 is partly supported)') - + if len(self['pdgs_for_merging_cut']) > 1000: raise InvalidRunCard("The number of elements in "+\ "'pdgs_for_merging_cut' should not exceed 1000.") - + # some cut need to be deactivated in presence of isolation if self['ptgmin'] > 0: if self['pta'] > 0: @@ -4370,18 +4370,18 @@ def check_validity(self): self['pta'] = 0.0 if self['draj'] > 0: logger.warning('draj cut discarded since photon isolation is used') - self['draj'] = 0.0 - - # special treatment for gridpack use the gseed instead of the iseed + self['draj'] = 0.0 + + # special treatment for gridpack use the gseed instead of the iseed if self['gridrun']: self['iseed'] = self['gseed'] - + #Some parameter need to be fixed when using syscalc #if self['use_syst']: # if self['scalefact'] != 1.0: # logger.warning('Since use_syst=T, changing the value of \'scalefact\' to 1') # self['scalefact'] = 1.0 - + # CKKW Treatment if self['ickkw'] > 0: if self['ickkw'] != 1: @@ -4399,7 +4399,7 @@ def check_validity(self): raise InvalidRunCard('maxjetflavor at 6 is NOT supported for matching!') if self['ickkw'] == 2: # add warning if ckkw selected but the associate parameter are empty - self.get_default('highestmult', log_level=20) + self.get_default('highestmult', log_level=20) self.get_default('issgridfile', 'issudgrid.dat', log_level=20) if self['xqcut'] > 0: if self['ickkw'] == 0: @@ -4412,13 +4412,13 @@ def check_validity(self): if self['drjl'] != 0: if 'drjl' in self.user_set: logger.warning('Since icckw>0, changing the value of \'drjl\' to 0') - self['drjl'] = 0 - if not self['auto_ptj_mjj']: + self['drjl'] = 0 + if not 
self['auto_ptj_mjj']: if self['mmjj'] > self['xqcut']: logger.warning('mmjj > xqcut (and auto_ptj_mjj = F). MMJJ set to 0') - self['mmjj'] = 0.0 - - # check validity of the pdf set + self['mmjj'] = 0.0 + + # check validity of the pdf set # note that pdlabel is automatically set to lhapdf if pdlabel1 or pdlabel2 is set to lhapdf if self['pdlabel'] == 'lhapdf': #add warning if lhaid not define @@ -4426,7 +4426,7 @@ def check_validity(self): mod = False for i in [1,2]: - lpp = 'lpp%i' %i + lpp = 'lpp%i' %i pdlabelX = 'pdlabel%i' % i if self[lpp] == 0: # nopdf if self[pdlabelX] != 'none': @@ -4459,12 +4459,12 @@ def check_validity(self): raise InvalidRunCard( "Heavy ion mode is only supported for lpp1=1/2") if self['lpp2'] not in [1,2]: if self['nb_proton2'] !=1 or self['nb_neutron2'] !=0: - raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") + raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") # check that fixed_fac_scale(1/2) is setting as expected # if lpp=2/3/4 -> default is that beam in fixed scale - # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are + # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are # check that both fixed_fac_scale1/2 are defined together # ensure that fixed_fac_scale1 and fixed_fac_scale2 are setup as needed if 'fixed_fac_scale1' in self.user_set: @@ -4475,13 +4475,13 @@ def check_validity(self): elif 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale1 are defined but not fixed_fac_scale2. The value of fixed_fac_scale2 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale2'] = self['fixed_fac_scale'] - elif self['lpp2'] !=0: + elif self['lpp2'] !=0: raise Exception('fixed_fac_scale2 not defined while fixed_fac_scale1 is. Please fix your run_card.') elif 'fixed_fac_scale2' in self.user_set: if 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale2 are defined but not fixed_fac_scale1. 
The value of fixed_fac_scale1 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale1'] = self['fixed_fac_scale'] - elif self['lpp1'] !=0: + elif self['lpp1'] !=0: raise Exception('fixed_fac_scale1 not defined while fixed_fac_scale2 is. Please fix your run_card.') else: if 'fixed_fac_scale' in self.user_set: @@ -4500,12 +4500,12 @@ def check_validity(self): logger.warning('fixed_fac_scale1 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale1']) logger.warning('fixed_fac_scale2 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale2']) - # check if lpp = + # check if lpp = if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]): for i in [1,2]: if abs(self['lpp%s' % i ]) in [3,4] and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: logger.warning("Vector boson from lepton PDF is using fixed scale value of muf [dsqrt_q2fact%s]. Looks like you kept the default value (Mz). Is this really the cut-off that you want to use?" % i) - + if abs(self['lpp%s' % i ]) == 2 and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: if self['pdlabel'] in ['edff','chff']: logger.warning("Since 3.5.0 exclusive photon-photon processes in ultraperipheral proton and nuclear collisions from gamma-UPC (arXiv:2207.03012) will ignore the factorisation scale.") @@ -4515,10 +4515,10 @@ def check_validity(self): if six.PY2 and self['hel_recycling']: self['hel_recycling'] = False - logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. + logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. In general this optimization speeds up the computation by a factor of two.""") - + # check that ebeam is bigger than the associated mass. 
for i in [1,2]: if self['lpp%s' % i ] not in [1,2]: @@ -4529,13 +4529,13 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") - elif self['ebeam%i' % i] < self['mass_ion%i' % i]: + raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + elif self['ebeam%i' % i] < self['mass_ion%i' % i]: if self['ebeam%i' %i] == 0: logger.warning("At rest ion mode set: Energy beam set to %s" % self['mass_ion%i' % i]) self.set('ebeam%i' %i, self['mass_ion%i' % i]) - - + + # check the tmin_for_channel is negative if self['tmin_for_channel'] == 0: raise InvalidRunCard('tmin_for_channel can not be set to 0.') @@ -4543,15 +4543,15 @@ def check_validity(self): logger.warning('tmin_for_channel should be negative. Will be using -%f instead' % self['tmin_for_channel']) self.set('tmin_for_channel', -self['tmin_for_channel']) - + def update_system_parameter_for_include(self): """system parameter need to be setupe""" - + # polarization self['frame_id'] = sum(2**(n) for n in self['me_frame']) - + # set the pdg_for_cut fortran parameter - pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + + pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + list(self['e_min_pdg'].keys()) +list(self['e_max_pdg'].keys()) + list(self['eta_min_pdg'].keys()) +list(self['eta_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys()) + list(self['mxx_only_part_antipart'].keys())) @@ -4559,15 +4559,15 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different pdgs are allowed for pdg specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. 
Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative pdg code') - - + + if any(pdg in pdg_to_cut for pdg in [1,2,3,4,5,21,22,11,13,15]): raise Exception("Can not use PDG related cut for light quark/b quark/lepton/gluon/photon") - + if pdg_to_cut: self['pdg_cut'] = list(pdg_to_cut) self['ptmin4pdg'] = [] @@ -4595,7 +4595,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -4605,11 +4605,11 @@ def update_system_parameter_for_include(self): self['ptmax4pdg'] = [-1.] self['Emax4pdg'] = [-1.] self['etamax4pdg'] =[-1.] - self['mxxmin4pdg'] =[0.] + self['mxxmin4pdg'] =[0.] self['mxxpart_antipart'] = [False] - - - + + + def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules process 1->N all cut set on off. 
@@ -4626,7 +4626,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if proc_characteristic['loop_induced']: self['nhel'] = 1 self['pdgs_for_merging_cut'] = proc_characteristic['colored_pdgs'] - + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() @@ -4636,7 +4636,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): # check for beam_id beam_id = set() beam_id_split = [set(), set()] - for proc in proc_def: + for proc in proc_def: for oneproc in proc: for i,leg in enumerate(oneproc['legs']): if not leg['state']: @@ -4654,20 +4654,20 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): maxjetflavor = max([4]+[abs(i) for i in beam_id if -7< i < 7]) self['maxjetflavor'] = maxjetflavor self['asrwgtflavor'] = maxjetflavor - + if any(i in beam_id for i in [1,-1,2,-2,3,-3,4,-4,5,-5,21,22]): # check for e p collision if any(id in beam_id for id in [11,-11,13,-13]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [11,-11,13,-13]): - self['lpp1'] = 0 - self['lpp2'] = 1 - self['ebeam1'] = '1k' - self['ebeam2'] = '6500' + self['lpp1'] = 0 + self['lpp2'] = 1 + self['ebeam1'] = '1k' + self['ebeam2'] = '6500' else: - self['lpp1'] = 1 - self['lpp2'] = 0 - self['ebeam1'] = '6500' + self['lpp1'] = 1 + self['lpp2'] = 0 + self['ebeam1'] = '6500' self['ebeam2'] = '1k' # UPC for p p collision @@ -4677,7 +4677,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam1'] = '6500' self['ebeam2'] = '6500' self['pdlabel'] = 'edff' - + elif any(id in beam_id for id in [11,-11,13,-13]): self['lpp1'] = 0 self['lpp2'] = 0 @@ -4688,7 +4688,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('ecut') self.display_block.append('beam_pol') - + # check for possibility of eva eva_in_b1 = any(i in beam_id_split[0] for i in [23,24,-24]) #,12,-12,14,-14]) @@ -4701,10 
+4701,10 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['nhel'] = 1 self['pdlabel'] = 'eva' self['fixed_fac_scale'] = True - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') elif eva_in_b1: - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') self['pdlabel1'] = 'eva' self['fixed_fac_scale1'] = True self['nhel'] = 1 @@ -4724,7 +4724,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['pdlabel2'] = 'eva' self['fixed_fac_scale2'] = True self['nhel'] = 1 - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') for i in beam_id_split[0]: if abs(i) == 11: self['lpp1'] = math.copysign(3,i) @@ -4740,34 +4740,34 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if any(i in beam_id for i in [22,23,24,-24,12,-12,14,-14]): self.display_block.append('eva_scale') - # automatic polarisation of the beam if neutrino beam + # automatic polarisation of the beam if neutrino beam if any(id in beam_id for id in [12,-12,14,-14,16,-16]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [12,14,16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = -100 if not all(id in [12,14,16] for id in beam_id_split[0]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1]. %s') elif any(id in beam_id_split[0] for id in [-12,-14,-16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[0]): - logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1].') + logger.warning('Issue with default beam setup of neutrino in the run_card. 
Please check it up [polbeam1].') if any(id in beam_id_split[1] for id in [12,14,16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = -100 if not all(id in [12,14,16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') elif any(id in beam_id_split[1] for id in [-12,-14,-16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') - + # Check if need matching min_particle = 99 max_particle = 0 @@ -4798,12 +4798,12 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - + break + if matching: self['ickkw'] = 1 self['xqcut'] = 30 - #self['use_syst'] = False + #self['use_syst'] = False self['drjj'] = 0 self['drjl'] = 0 self['sys_alpsfact'] = "0.5 1 2" @@ -4811,8 +4811,8 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('mlm') self.display_block.append('ckkw') self['dynamical_scale_choice'] = -1 - - + + # For interference module, the systematics are wrong. 
# automatically set use_syst=F and set systematics_program=none no_systematics = False @@ -4826,14 +4826,14 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): continue break - + if interference or no_systematics: self['use_syst'] = False self['systematics_program'] = 'none' if interference: self['dynamical_scale_choice'] = 3 self['sde_strategy'] = 2 - + # set default integration strategy # interference case is already handle above # here pick strategy 2 if only one QCD color flow @@ -4852,7 +4852,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if pure_lepton and proton_initial: self['sde_strategy'] = 1 else: - # check if multi-jet j + # check if multi-jet j is_multijet = True for proc in proc_def: if any(abs(j.get('id')) not in jet_id for j in proc[0]['legs']): @@ -4860,7 +4860,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): break if is_multijet: self['sde_strategy'] = 2 - + # if polarization is used, set the choice of the frame in the run_card # But only if polarization is used for massive particles for plist in proc_def: @@ -4870,7 +4870,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): model = proc.get('model') particle = model.get_particle(l.get('id')) if particle.get('mass').lower() != 'zero': - self.display_block.append('frame') + self.display_block.append('frame') break else: continue @@ -4894,15 +4894,15 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): proc = proc_list[0] if proc['forbidden_onsh_s_channels']: self['sde_strategy'] = 1 - + if 'fix_scale' in proc_characteristic['limitations']: self['fixed_ren_scale'] = 1 self['fixed_fac_scale'] = 1 if self['ickkw'] == 1: logger.critical("MLM matching/merging not compatible with the model! 
You need to use another method to remove the double counting!") self['ickkw'] = 0 - - # define class of particles present to hide all the cuts associated to + + # define class of particles present to hide all the cuts associated to # not present class cut_class = collections.defaultdict(int) for proc in proc_def: @@ -4925,41 +4925,41 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): one_proc_cut['L'] += 1 elif abs(pdg) in [12,14,16]: one_proc_cut['n'] += 1 - one_proc_cut['L'] += 1 + one_proc_cut['L'] += 1 elif str(oneproc.get('model').get_particle(pdg)['mass']) != 'ZERO': one_proc_cut['H'] += 1 - + for key, nb in one_proc_cut.items(): cut_class[key] = max(cut_class[key], nb) self.cut_class = dict(cut_class) self.cut_class[''] = True #avoid empty - + # If model has running functionality add the additional parameter model = proc_def[0][0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Read file input/default_run_card_lo.dat # This has to be LAST !! 
if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'run_card.dat') python_template = True else: template = pjoin(MEDIR, 'Cards', 'run_card_default.dat') python_template = False - + hid_lines = {'default':True}#collections.defaultdict(itertools.repeat(True).next) if isinstance(output_file, str): @@ -4975,9 +4975,9 @@ def write(self, output_file, template=None, python_template=False, hid_lines[k1+k2] = True super(RunCardLO, self).write(output_file, template=template, - python_template=python_template, + python_template=python_template, template_options=hid_lines, - **opt) + **opt) class InvalidMadAnalysis5Card(InvalidCmd): @@ -4986,19 +4986,19 @@ class InvalidMadAnalysis5Card(InvalidCmd): class MadAnalysis5Card(dict): """ A class to store a MadAnalysis5 card. 
Very basic since it is basically free format.""" - + _MG5aMC_escape_tag = '@MG5aMC' - + _default_hadron_inputs = ['*.hepmc', '*.hep', '*.stdhep', '*.lhco','*.root'] _default_parton_inputs = ['*.lhe'] _skip_analysis = False - + @classmethod def events_can_be_reconstructed(cls, file_path): """ Checks from the type of an event file whether it can be reconstructed or not.""" return not (file_path.endswith('.lhco') or file_path.endswith('.lhco.gz') or \ file_path.endswith('.root') or file_path.endswith('.root.gz')) - + @classmethod def empty_analysis(cls): """ A method returning the structure of an empty analysis """ @@ -5012,7 +5012,7 @@ def empty_reconstruction(cls): 'reco_output':'lhe'} def default_setup(self): - """define the default value""" + """define the default value""" self['mode'] = 'parton' self['inputs'] = [] # None is the default stdout level, it will be set automatically by MG5aMC @@ -5025,8 +5025,8 @@ def default_setup(self): # of this class and some other property could be added to this dictionary # in the future. self['analyses'] = {} - # The recasting structure contains on set of commands and one set of - # card lines. + # The recasting structure contains on set of commands and one set of + # card lines. 
self['recasting'] = {'commands':[],'card':[]} # Add the default trivial reconstruction to use an lhco input # This is just for the structure @@ -5035,7 +5035,7 @@ def default_setup(self): 'root_input': MadAnalysis5Card.empty_reconstruction()} self['reconstruction']['lhco_input']['reco_output']='lhco' - self['reconstruction']['root_input']['reco_output']='root' + self['reconstruction']['root_input']['reco_output']='root' # Specify in which order the analysis/recasting were specified self['order'] = [] @@ -5049,7 +5049,7 @@ def __init__(self, finput=None,mode=None): return else: dict.__init__(self) - + # Initialize it with all the default value self.default_setup() if not mode is None: @@ -5058,15 +5058,15 @@ def __init__(self, finput=None,mode=None): # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, mode=mode) - + def read(self, input, mode=None): """ Read an MA5 card""" - + if mode not in [None,'parton','hadron']: raise MadGraph5Error('A MadAnalysis5Card can be read online the modes'+ "'parton' or 'hadron'") card_mode = mode - + if isinstance(input, (file, StringIO.StringIO)): input_stream = input elif isinstance(input, str): @@ -5099,10 +5099,10 @@ def read(self, input, mode=None): except ValueError: option = line[len(self._MG5aMC_escape_tag):] option = option.strip() - + if option=='inputs': self['inputs'].extend([v.strip() for v in value.split(',')]) - + elif option == 'skip_analysis': self._skip_analysis = True @@ -5118,7 +5118,7 @@ def read(self, input, mode=None): except: raise InvalidMadAnalysis5Card( "MA5 output level specification '%s' is incorrect."%str(value)) - + elif option=='analysis_name': current_type = 'analyses' current_name = value @@ -5127,7 +5127,7 @@ def read(self, input, mode=None): "Analysis '%s' already defined in MadAnalysis5 card"%current_name) else: self[current_type][current_name] = MadAnalysis5Card.empty_analysis() - + elif option=='set_reconstructions': try: reconstructions = 
eval(value) @@ -5142,7 +5142,7 @@ def read(self, input, mode=None): "analysis in a MadAnalysis5 card.") self[current_type][current_name]['reconstructions']=reconstructions continue - + elif option=='reconstruction_name': current_type = 'reconstruction' current_name = value @@ -5161,7 +5161,7 @@ def read(self, input, mode=None): raise InvalidMadAnalysis5Card( "Option '%s' can only take the values 'lhe' or 'root'"%option) self['reconstruction'][current_name]['reco_output'] = value.lower() - + elif option.startswith('recasting'): current_type = 'recasting' try: @@ -5171,11 +5171,11 @@ def read(self, input, mode=None): if len(self['recasting'][current_name])>0: raise InvalidMadAnalysis5Card( "Only one recasting can be defined in MadAnalysis5 hadron card") - + else: raise InvalidMadAnalysis5Card( "Unreckognized MG5aMC instruction in MadAnalysis5 card: '%s'"%option) - + if option in ['analysis_name','reconstruction_name'] or \ option.startswith('recasting'): self['order'].append((current_type,current_name)) @@ -5209,7 +5209,7 @@ def read(self, input, mode=None): self['inputs'] = self._default_hadron_inputs else: self['inputs'] = self._default_parton_inputs - + # Make sure at least one reconstruction is specified for each hadron # level analysis and that it exists. if self['mode']=='hadron': @@ -5221,7 +5221,7 @@ def read(self, input, mode=None): analysis['reconstructions']): raise InvalidMadAnalysis5Card('A reconstructions specified in'+\ " analysis '%s' is not defined."%analysis_name) - + def write(self, output): """ Write an MA5 card.""" @@ -5232,7 +5232,7 @@ def write(self, output): else: raise MadGraph5Error('Incorrect input for the write function of'+\ ' the MadAnalysis5Card card. 
Received argument type is: %s'%str(type(output))) - + output_lines = [] if self._skip_analysis: output_lines.append('%s skip_analysis'%self._MG5aMC_escape_tag) @@ -5240,11 +5240,11 @@ def write(self, output): if not self['stdout_lvl'] is None: output_lines.append('%s stdout_lvl=%s'%(self._MG5aMC_escape_tag,self['stdout_lvl'])) for definition_type, name in self['order']: - + if definition_type=='analyses': output_lines.append('%s analysis_name = %s'%(self._MG5aMC_escape_tag,name)) output_lines.append('%s set_reconstructions = %s'%(self._MG5aMC_escape_tag, - str(self['analyses'][name]['reconstructions']))) + str(self['analyses'][name]['reconstructions']))) elif definition_type=='reconstruction': output_lines.append('%s reconstruction_name = %s'%(self._MG5aMC_escape_tag,name)) elif definition_type=='recasting': @@ -5254,23 +5254,23 @@ def write(self, output): output_lines.extend(self[definition_type][name]) elif definition_type in ['reconstruction']: output_lines.append('%s reco_output = %s'%(self._MG5aMC_escape_tag, - self[definition_type][name]['reco_output'])) + self[definition_type][name]['reco_output'])) output_lines.extend(self[definition_type][name]['commands']) elif definition_type in ['analyses']: - output_lines.extend(self[definition_type][name]['commands']) - + output_lines.extend(self[definition_type][name]['commands']) + output_stream.write('\n'.join(output_lines)) - + return - - def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, + + def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, UFO_model_path=None, run_tag=''): - """ Returns a list of tuples ('AnalysisTag',['commands']) specifying - the commands of the MadAnalysis runs required from this card. - At parton-level, the number of such commands is the number of analysis + """ Returns a list of tuples ('AnalysisTag',['commands']) specifying + the commands of the MadAnalysis runs required from this card. 
+ At parton-level, the number of such commands is the number of analysis asked for. In the future, the idea is that the entire card can be processed in one go from MA5 directly.""" - + if isinstance(inputs_arg, list): inputs = inputs_arg elif isinstance(inputs_arg, str): @@ -5278,21 +5278,21 @@ def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, else: raise MadGraph5Error("The function 'get_MA5_cmds' can only take "+\ " a string or a list for the argument 'inputs_arg'") - + if len(inputs)==0: raise MadGraph5Error("The function 'get_MA5_cmds' must have "+\ " at least one input specified'") - + if run_dir_path is None: run_dir_path = os.path.dirname(inputs_arg) - + cmds_list = [] - + UFO_load = [] # first import the UFO if provided if UFO_model_path: UFO_load.append('import %s'%UFO_model_path) - + def get_import(input, type=None): """ Generates the MA5 import commands for that event file. """ dataset_name = os.path.basename(input).split('.')[0] @@ -5304,7 +5304,7 @@ def get_import(input, type=None): if not type is None: res.append('set %s.type = %s'%(dataset_name, type)) return res - + fifo_status = {'warned_fifo':False,'fifo_used_up':False} def warn_fifo(input): if not input.endswith('.fifo'): @@ -5317,7 +5317,7 @@ def warn_fifo(input): logger.warning('Only the first MA5 analysis/reconstructions can be run on a fifo. Subsequent runs will skip fifo inputs.') fifo_status['warned_fifo'] = True return True - + # Then the event file(s) input(s) inputs_load = [] for input in inputs: @@ -5325,16 +5325,16 @@ def warn_fifo(input): if len(inputs) > 1: inputs_load.append('set main.stacking_method = superimpose') - + submit_command = 'submit %s'%submit_folder+'_%s' - + # Keep track of the reconstruction outpus in the MA5 workflow # Keys are reconstruction names and values are .lhe.gz reco file paths. 
# We put by default already the lhco/root ones present reconstruction_outputs = { - 'lhco_input':[f for f in inputs if + 'lhco_input':[f for f in inputs if f.endswith('.lhco') or f.endswith('.lhco.gz')], - 'root_input':[f for f in inputs if + 'root_input':[f for f in inputs if f.endswith('.root') or f.endswith('.root.gz')]} # If a recasting card has to be written out, chose here its path @@ -5343,7 +5343,7 @@ def warn_fifo(input): # Make sure to only run over one analysis over each fifo. for definition_type, name in self['order']: - if definition_type == 'reconstruction': + if definition_type == 'reconstruction': analysis_cmds = list(self['reconstruction'][name]['commands']) reco_outputs = [] for i_input, input in enumerate(inputs): @@ -5365,8 +5365,8 @@ def warn_fifo(input): analysis_cmds.append( submit_command%('reco_%s_%d'%(name,i_input+1))) analysis_cmds.append('remove reco_events') - - reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) + + reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) for rec_out in reco_outputs] if len(reco_outputs)>0: cmds_list.append(('_reco_%s'%name,analysis_cmds)) @@ -5386,7 +5386,7 @@ def warn_fifo(input): analysis_cmds = ['set main.mode = parton'] else: analysis_cmds = [] - analysis_cmds.extend(sum([get_import(rec_out) for + analysis_cmds.extend(sum([get_import(rec_out) for rec_out in reconstruction_outputs[reco]],[])) analysis_cmds.extend(self['analyses'][name]['commands']) analysis_cmds.append(submit_command%('%s_%s'%(name,reco))) @@ -5427,12 +5427,12 @@ def warn_fifo(input): %(mue_ref_fixed)s = mue_ref_fixed ! 
scale to use if fixed scale mode """ running_block_nlo = RunBlock('RUNNING', template_on=template_on, template_off="") - + class RunCardNLO(RunCard): """A class object for the run_card for a (aMC@)NLO pocess""" - + LO = False - + blocks = [running_block_nlo] dummy_fct_file = {"dummy_cuts": pjoin("SubProcesses","dummy_fct.f"), @@ -5443,11 +5443,11 @@ class RunCardNLO(RunCard): if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_nlo.dat") - - + + def default_setup(self): """define the default value""" - + self.add_param('run_tag', 'tag_1', include=False) self.add_param('nevents', 10000) self.add_param('req_acc', -1.0, include=False) @@ -5455,27 +5455,27 @@ def default_setup(self): self.add_param("time_of_flight", -1.0, include=False) self.add_param('event_norm', 'average') #FO parameter - self.add_param('req_acc_fo', 0.01, include=False) + self.add_param('req_acc_fo', 0.01, include=False) self.add_param('npoints_fo_grid', 5000, include=False) self.add_param('niters_fo_grid', 4, include=False) - self.add_param('npoints_fo', 10000, include=False) + self.add_param('npoints_fo', 10000, include=False) self.add_param('niters_fo', 6, include=False) #seed and collider self.add_param('iseed', 0) - self.add_param('lpp1', 1, fortran_name='lpp(1)') - self.add_param('lpp2', 1, fortran_name='lpp(2)') + self.add_param('lpp1', 1, fortran_name='lpp(1)') + self.add_param('lpp2', 1, fortran_name='lpp(2)') self.add_param('ebeam1', 6500.0, fortran_name='ebeam(1)') - self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') + self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') self.add_param('pdlabel', 'nn23nlo', allowed=['lhapdf', 'emela', 'cteq6_m','cteq6_d','cteq6_l','cteq6l1', 'nn23lo','nn23lo1','nn23nlo','ct14q00','ct14q07','ct14q14','ct14q21'] +\ - sum(self.allowed_lep_densities.values(),[]) ) + sum(self.allowed_lep_densities.values(),[]) ) self.add_param('lhaid', [244600],fortran_name='lhaPDFid') self.add_param('pdfscheme', 0) # whether to include or not 
photon-initiated processes in lepton collisions self.add_param('photons_from_lepton', True) self.add_param('lhapdfsetname', ['internal_use_only'], system=True) - # stuff for lepton collisions - # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set - # whether the current PDF set has or not beamstrahlung + # stuff for lepton collisions + # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set + # whether the current PDF set has or not beamstrahlung self.add_param('has_bstrahl', False, system=True) # renormalisation scheme of alpha self.add_param('alphascheme', 0, system=True) @@ -5486,31 +5486,31 @@ def default_setup(self): # w contribution included or not in the running of alpha self.add_param('w_run', 1, system=True) #shower and scale - self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') + self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') self.add_param('shower_scale_factor',1.0) self.add_param('mcatnlo_delta', False) self.add_param('fixed_ren_scale', False) self.add_param('fixed_fac_scale', False) self.add_param('fixed_extra_scale', True, hidden=True, system=True) # set system since running from Ellis-Sexton scale not implemented - self.add_param('mur_ref_fixed', 91.118) + self.add_param('mur_ref_fixed', 91.118) self.add_param('muf1_ref_fixed', -1.0, hidden=True) - self.add_param('muf_ref_fixed', 91.118) + self.add_param('muf_ref_fixed', 91.118) self.add_param('muf2_ref_fixed', -1.0, hidden=True) - self.add_param('mue_ref_fixed', 91.118, hidden=True) - self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', + self.add_param('mue_ref_fixed', 91.118, hidden=True) + self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', allowed = [-2,-1,0,1,2,3,10], comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n 
'3' is HT/2, '0' allows to use the user_hook definition (need to be defined via custom_fct entry) ") self.add_param('fixed_qes_scale', False, hidden=True) self.add_param('qes_ref_fixed', -1.0, hidden=True) self.add_param('mur_over_ref', 1.0) - self.add_param('muf_over_ref', 1.0) - self.add_param('muf1_over_ref', -1.0, hidden=True) + self.add_param('muf_over_ref', 1.0) + self.add_param('muf1_over_ref', -1.0, hidden=True) self.add_param('muf2_over_ref', -1.0, hidden=True) self.add_param('mue_over_ref', 1.0, hidden=True, system=True) # forbid the user to modigy due to incorrect handling of the Ellis-Sexton scale self.add_param('qes_over_ref', -1.0, hidden=True) self.add_param('reweight_scale', [True], fortran_name='lscalevar') - self.add_param('rw_rscale_down', -1.0, hidden=True) + self.add_param('rw_rscale_down', -1.0, hidden=True) self.add_param('rw_rscale_up', -1.0, hidden=True) - self.add_param('rw_fscale_down', -1.0, hidden=True) + self.add_param('rw_fscale_down', -1.0, hidden=True) self.add_param('rw_fscale_up', -1.0, hidden=True) self.add_param('rw_rscale', [1.0,2.0,0.5], fortran_name='scalevarR') self.add_param('rw_fscale', [1.0,2.0,0.5], fortran_name='scalevarF') @@ -5523,60 +5523,60 @@ def default_setup(self): #technical self.add_param('folding', [1,1,1], include=False) - + #merging self.add_param('ickkw', 0, allowed=[-1,0,3,4], comment=" - 0: No merging\n - 3: FxFx Merging : http://amcatnlo.cern.ch/FxFx_merging.htm\n - 4: UNLOPS merging (No interface within MG5aMC)\n - -1: NNLL+NLO jet-veto computation. 
See arxiv:1412.8408 [hep-ph]") self.add_param('bwcutoff', 15.0) - #cuts + #cuts self.add_param('jetalgo', 1.0) - self.add_param('jetradius', 0.7) + self.add_param('jetradius', 0.7) self.add_param('ptj', 10.0 , cut=True) - self.add_param('etaj', -1.0, cut=True) - self.add_param('gamma_is_j', True) + self.add_param('etaj', -1.0, cut=True) + self.add_param('gamma_is_j', True) self.add_param('ptl', 0.0, cut=True) - self.add_param('etal', -1.0, cut=True) + self.add_param('etal', -1.0, cut=True) self.add_param('drll', 0.0, cut=True) - self.add_param('drll_sf', 0.0, cut=True) + self.add_param('drll_sf', 0.0, cut=True) self.add_param('mll', 0.0, cut=True) - self.add_param('mll_sf', 30.0, cut=True) - self.add_param('rphreco', 0.1) - self.add_param('etaphreco', -1.0) - self.add_param('lepphreco', True) - self.add_param('quarkphreco', True) + self.add_param('mll_sf', 30.0, cut=True) + self.add_param('rphreco', 0.1) + self.add_param('etaphreco', -1.0) + self.add_param('lepphreco', True) + self.add_param('quarkphreco', True) self.add_param('ptgmin', 20.0, cut=True) - self.add_param('etagamma', -1.0) + self.add_param('etagamma', -1.0) self.add_param('r0gamma', 0.4) - self.add_param('xn', 1.0) + self.add_param('xn', 1.0) self.add_param('epsgamma', 1.0) - self.add_param('isoem', True) + self.add_param('isoem', True) self.add_param('maxjetflavor', 4, hidden=True) - self.add_param('pineappl', False) + self.add_param('pineappl', False) self.add_param('lhe_version', 3, hidden=True, include=False) - + # customization self.add_param("custom_fcts",[],typelist="str", include=False, comment="list of files containing function that overwritte dummy function of the code (like adding cuts/...)") #internal variable related to FO_analyse_card self.add_param('FO_LHE_weight_ratio',1e-3, hidden=True, system=True) - self.add_param('FO_LHE_postprocessing',['grouping','random'], + self.add_param('FO_LHE_postprocessing',['grouping','random'], hidden=True, system=True, include=False) - + # parameter 
allowing to define simple cut via the pdg self.add_param('pt_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('pt_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True) - + #hidden parameter that are transfer to the fortran code self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min self.add_param('ptmax4pdg',[-1.], hidden=True, system=True) self.add_param('mxxmin4pdg',[0.], hidden=True, system=True) self.add_param('mxxpart_antipart', [False], hidden=True, system=True) - + def check_validity(self): """check the validity of the various input""" - + super(RunCardNLO, self).check_validity() # for lepton-lepton collisions, ignore 'pdlabel' and 'lhaid' @@ -5588,12 +5588,12 @@ def check_validity(self): # for dressed lepton collisions, check that the lhaid is a valid one if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]) + ['emela']: raise InvalidRunCard('pdlabel %s not allowed for dressed-lepton collisions' % self['pdlabel']) - + elif self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' self['reweight_pdf']=[False] logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') - + if self['lpp1'] == 0 == self['lpp2']: if self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' @@ -5601,8 +5601,8 @@ def check_validity(self): logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') # For FxFx merging, make sure that the following parameters are set correctly: - if self['ickkw'] == 3: - # 1. Renormalization and factorization (and ellis-sexton scales) are not fixed + if self['ickkw'] == 3: + # 1. 
Renormalization and factorization (and ellis-sexton scales) are not fixed scales=['fixed_ren_scale','fixed_fac_scale','fixed_QES_scale'] for scale in scales: if self[scale]: @@ -5615,7 +5615,7 @@ def check_validity(self): self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency in FxFx merging, dynamical_scale_choice has been set to -1 (default)''' ,'$MG:BOLD') - + # 2. Use kT algorithm for jets with pseudo-code size R=1.0 jetparams=['jetradius','jetalgo'] for jetparam in jetparams: @@ -5628,8 +5628,8 @@ def check_validity(self): self["dynamical_scale_choice"] = [-1] self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency with the jet veto, the scale which will be used is ptj. dynamical_scale_choice will be set at -1.''' - ,'$MG:BOLD') - + ,'$MG:BOLD') + # For interface to PINEAPPL, need to use LHAPDF and reweighting to get scale uncertainties if self['pineappl'] and self['pdlabel'].lower() != 'lhapdf': raise InvalidRunCard('PineAPPL generation only possible with the use of LHAPDF') @@ -5661,7 +5661,7 @@ def check_validity(self): if (self['rw_fscale_down'] != -1.0 and ['rw_fscale_down'] not in self['rw_fscale']) or\ (self['rw_fscale_up'] != -1.0 and ['rw_fscale_up'] not in self['rw_fscale']): self['rw_fscale']=[1.0,self['rw_fscale_up'],self['rw_fscale_down']] - + # PDF reweighting check if any(self['reweight_pdf']): # check that we use lhapdf if reweighting is ON @@ -5672,7 +5672,7 @@ def check_validity(self): if self['pdlabel'] != "lhapdf": self['reweight_pdf']=[self['reweight_pdf'][0]] self['lhaid']=[self['lhaid'][0]] - + # make sure set have reweight_scale and dyn_scale_choice of length 1 when fixed scales: if self['fixed_ren_scale'] and self['fixed_fac_scale']: self['reweight_scale']=[self['reweight_scale'][0]] @@ -5685,7 +5685,7 @@ def check_validity(self): self['reweight_pdf']=self['reweight_pdf']*len(self['lhaid']) logger.warning("Setting 'reweight_pdf' for all 'lhaid' to %s" % self['reweight_pdf'][0]) 
if len(self['reweight_scale']) == 1 and len(self['dynamical_scale_choice']) != 1: - self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) + self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) logger.warning("Setting 'reweight_scale' for all 'dynamical_scale_choice' to %s" % self['reweight_pdf'][0]) # Check that there are no identical elements in lhaid or dynamical_scale_choice @@ -5693,7 +5693,7 @@ def check_validity(self): raise InvalidRunCard("'lhaid' has two or more identical entries. They have to be all different for the code to work correctly.") if len(self['dynamical_scale_choice']) != len(set(self['dynamical_scale_choice'])): raise InvalidRunCard("'dynamical_scale_choice' has two or more identical entries. They have to be all different for the code to work correctly.") - + # Check that lenght of lists are consistent if len(self['reweight_pdf']) != len(self['lhaid']): raise InvalidRunCard("'reweight_pdf' and 'lhaid' lists should have the same length") @@ -5730,7 +5730,7 @@ def check_validity(self): if len(self['folding']) != 3: raise InvalidRunCard("'folding' should contain exactly three integers") for ifold in self['folding']: - if ifold not in [1,2,4,8]: + if ifold not in [1,2,4,8]: raise InvalidRunCard("The three 'folding' parameters should be equal to 1, 2, 4, or 8.") # Check MC@NLO-Delta if self['mcatnlo_delta'] and not self['parton_shower'].lower() == 'pythia8': @@ -5746,11 +5746,11 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938 GeV") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + raise InvalidRunCard("Energy for beam %i lower than proton mass. 
Please fix this") def update_system_parameter_for_include(self): - + # set the pdg_for_cut fortran parameter pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys())+ list(self['mxx_only_part_antipart'].keys())) @@ -5758,12 +5758,12 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different PDGs are allowed for PDG specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative PDG codes') - - + + if any(pdg in pdg_to_cut for pdg in [21,22,11,13,15]+ list(range(self['maxjetflavor']+1))): # Note that this will double check in the fortran code raise Exception("Can not use PDG related cuts for massless SM particles/leptons") @@ -5790,7 +5790,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -5800,12 +5800,12 @@ def update_system_parameter_for_include(self): self['mxxpart_antipart'] = [False] def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', 'run_card.dat') python_template = True else: @@ -5818,7 +5818,7 @@ def 
write(self, output_file, template=None, python_template=False, **opt): def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules - e+ e- beam -> lpp:0 ebeam:500 + e+ e- beam -> lpp:0 ebeam:500 p p beam -> set maxjetflavor automatically process with tagged photons -> gamma_is_j = false process without QED splittings -> gamma_is_j = false, recombination = false @@ -5844,19 +5844,19 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam2'] = 500 else: self['lpp1'] = 0 - self['lpp2'] = 0 - + self['lpp2'] = 0 + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() # check for tagged photons tagged_particles = set() - + # If model has running functionality add the additional parameter model = proc_def[0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Check if need matching min_particle = 99 @@ -5885,7 +5885,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: idsmin = [l['id'] for l in procmin['legs']] break - + for procmax in proc_def: if len(procmax['legs']) != max_particle: continue @@ -5901,9 +5901,9 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - - if matching: + break + + if matching: self['ickkw'] = 3 self['fixed_ren_scale'] = False self["fixed_fac_scale"] = False @@ -5911,17 +5911,17 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self["jetalgo"] = 1 self["jetradius"] = 1 self["parton_shower"] = "PYTHIA8" - + # Read file input/default_run_card_nlo.dat # This has to be LAST !! if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + class MadLoopParam(ConfigFile): """ a class for storing/dealing with the file MadLoopParam.dat contains a parser to read it, facilities to write a new file,... 
""" - + _ID_reduction_tool_map = {1:'CutTools', 2:'PJFry++', 3:'IREGI', @@ -5929,10 +5929,10 @@ class MadLoopParam(ConfigFile): 5:'Samurai', 6:'Ninja', 7:'COLLIER'} - + def default_setup(self): """initialize the directory to the default value""" - + self.add_param("MLReductionLib", "6|7|1") self.add_param("IREGIMODE", 2) self.add_param("IREGIRECY", True) @@ -5954,7 +5954,7 @@ def default_setup(self): self.add_param("HelicityFilterLevel", 2) self.add_param("LoopInitStartOver", False) self.add_param("HelInitStartOver", False) - self.add_param("UseQPIntegrandForNinja", True) + self.add_param("UseQPIntegrandForNinja", True) self.add_param("UseQPIntegrandForCutTools", True) self.add_param("COLLIERMode", 1) self.add_param("COLLIERComputeUVpoles", True) @@ -5966,9 +5966,9 @@ def default_setup(self): self.add_param("COLLIERUseInternalStabilityTest",True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -5976,7 +5976,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % input) - + previous_line= '' for line in finput: if previous_line.startswith('#'): @@ -5985,20 +5985,20 @@ def read(self, finput): if len(value) and value[0] not in ['#', '!']: self.__setitem__(name, value, change_userdefine=True) previous_line = line - - + + def write(self, outputpath, template=None,commentdefault=False): - + if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', + template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', 'Cards', 'MadLoopParams.dat') else: template = pjoin(MEDIR, 'Cards', 'MadLoopParams_default.dat') fsock = open(template, 'r') template = fsock.readlines() fsock.close() - + if isinstance(outputpath, str): output = open(outputpath, 'w') else: @@ -6019,7 
+6019,7 @@ def f77format(value): return value else: raise Exception("Can not format input %s" % type(value)) - + name = '' done = set() for line in template: @@ -6034,12 +6034,12 @@ def f77format(value): elif line.startswith('#'): name = line[1:].split()[0] output.write(line) - - - - - -class eMELA_info(ConfigFile): + + + + + +class eMELA_info(ConfigFile): """ a class for eMELA (LHAPDF-like) info files """ path = '' @@ -6053,7 +6053,7 @@ def __init__(self, finput, me_dir): def read(self, finput): - if isinstance(finput, file): + if isinstance(finput, file): lines = finput.open().read().split('\n') self.path = finput.name else: @@ -6066,7 +6066,7 @@ def read(self, finput): k, v = l.split(':', 1) # ignore further occurrences of : try: self[k.strip()] = eval(v) - except (NameError, SyntaxError): + except (NameError, SyntaxError): self[k.strip()] = v def default_setup(self): @@ -6091,7 +6091,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): +"powers of alpha should be reweighted a posteriori") - logger.info('Updating variables according to %s' % self.path) + logger.info('Updating variables according to %s' % self.path) # Flavours in the running of alpha nd, nu, nl = self['eMELA_ActiveFlavoursAlpha'] self.log_and_update(banner, 'run_card', 'ndnq_run', nd) @@ -6130,8 +6130,8 @@ def update_epdf_emela_variables(self, banner, uvscheme): logger.warning('Cannot treat the following renormalisation schemes for ME and PDFs: %d, %d' \ % (uvscheme, uvscheme_pdf)) - # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref - # also check that the com energy is equal to qref, otherwise print a + # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref + # also check that the com energy is equal to qref, otherwise print a # warning if uvscheme_pdf == 1: qref = self['eMELA_AlphaQref'] @@ -6144,23 +6144,23 @@ def update_epdf_emela_variables(self, banner, uvscheme): # LL / NLL PDF (0/1) pdforder = self['eMELA_PerturbativeOrder'] - # pdfscheme = 
0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) + # pdfscheme = 0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) # 4->mixed (leptonic); 5-> nobeta (leptonic); 6->delta (leptonic) # if LL, use nobeta scheme unless LEGACYLLPDF > 0 if pdforder == 0: if 'eMELA_LEGACYLLPDF' not in self.keys() or self['eMELA_LEGACYLLPDF'] in [-1, 0]: self.log_and_update(banner, 'run_card', 'pdfscheme', 5) - elif self['eMELA_LEGACYLLPDF'] == 1: + elif self['eMELA_LEGACYLLPDF'] == 1: # mixed self.log_and_update(banner, 'run_card', 'pdfscheme', 4) - elif self['eMELA_LEGACYLLPDF'] == 2: + elif self['eMELA_LEGACYLLPDF'] == 2: # eta self.log_and_update(banner, 'run_card', 'pdfscheme', 2) - elif self['eMELA_LEGACYLLPDF'] == 3: + elif self['eMELA_LEGACYLLPDF'] == 3: # beta self.log_and_update(banner, 'run_card', 'pdfscheme', 3) elif pdforder == 1: - # for NLL, use eMELA_FactorisationSchemeInt = 0/1 + # for NLL, use eMELA_FactorisationSchemeInt = 0/1 # for delta/MSbar if self['eMELA_FactorisationSchemeInt'] == 0: # MSbar @@ -6177,7 +6177,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): - + def log_and_update(self, banner, card, par, v): """update the card parameter par to value v diff --git a/epochX/cudacpp/gg_ttggg.mad/bin/internal/gen_ximprove.py b/epochX/cudacpp/gg_ttggg.mad/bin/internal/gen_ximprove.py index 5fd170d18d..cc842aa50f 100755 --- a/epochX/cudacpp/gg_ttggg.mad/bin/internal/gen_ximprove.py +++ b/epochX/cudacpp/gg_ttggg.mad/bin/internal/gen_ximprove.py @@ -2,18 +2,18 @@ # # Copyright (c) 2014 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. 
# -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch # ################################################################################ """ A python file to replace the fortran script gen_ximprove. - This script analyses the result of the survey/ previous refine and + This script analyses the result of the survey/ previous refine and creates the jobs for the following script. """ from __future__ import division @@ -66,77 +66,77 @@ class gensym(object): """a class to call the fortran gensym executable and handle it's output in order to create the various job that are needed for the survey""" - + #convenient shortcut for the formatting of variable @ staticmethod def format_variable(*args): return bannermod.ConfigFile.format_variable(*args) - + combining_job = 2 # number of channel by ajob - splitted_grid = False + splitted_grid = False min_iterations = 3 mode= "survey" - + def __init__(self, cmd, opt=None): - + try: super(gensym, self).__init__(cmd, opt) except TypeError: pass - - # Run statistics, a dictionary of RunStatistics(), with + + # Run statistics, a dictionary of RunStatistics(), with self.run_statistics = {} - + self.cmd = cmd self.run_card = cmd.run_card self.me_dir = cmd.me_dir - - + + # dictionary to keep track of the precision when combining iteration self.cross = collections.defaultdict(int) self.abscross = collections.defaultdict(int) self.sigma = collections.defaultdict(int) self.chi2 = collections.defaultdict(int) - + self.splitted_grid = False if self.cmd.proc_characteristics['loop_induced']: nexternal = self.cmd.proc_characteristics['nexternal'] self.splitted_grid = max(2, (nexternal-2)**2) if hasattr(self.cmd, "opts") and self.cmd.opts['accuracy'] == 0.1: self.cmd.opts['accuracy'] = 0.02 - + if isinstance(cmd.cluster, cluster.MultiCore) and 
self.splitted_grid > 1: self.splitted_grid = int(cmd.cluster.nb_core**0.5) if self.splitted_grid == 1 and cmd.cluster.nb_core >1: self.splitted_grid = 2 - + #if the user defines it in the run_card: if self.run_card['survey_splitting'] != -1: self.splitted_grid = self.run_card['survey_splitting'] if self.run_card['survey_nchannel_per_job'] != 1 and 'survey_nchannel_per_job' in self.run_card.user_set: - self.combining_job = self.run_card['survey_nchannel_per_job'] + self.combining_job = self.run_card['survey_nchannel_per_job'] elif self.run_card['hard_survey'] > 1: self.combining_job = 1 - - + + self.splitted_Pdir = {} self.splitted_for_dir = lambda x,y: self.splitted_grid self.combining_job_for_Pdir = lambda x: self.combining_job self.lastoffset = {} - + done_warning_zero_coupling = False def get_helicity(self, to_submit=True, clean=True): """launch a single call to madevent to get the list of non zero helicity""" - - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc P_zero_result = [] nb_tot_proc = len(subproc) - job_list = {} - - + job_list = {} + + for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.' 
% \ (nb_proc+1,nb_tot_proc), level=None) @@ -154,7 +154,7 @@ def get_helicity(self, to_submit=True, clean=True): p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts - + (stdout, _) = p.communicate(''.encode()) stdout = stdout.decode('ascii',errors='ignore') if stdout: @@ -166,11 +166,11 @@ def get_helicity(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir, 'error')): os.remove(pjoin(self.me_dir, 'error')) continue # bypass bad process - + self.cmd.compile(['madevent_forhel'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'madevent_forhel')): - raise Exception('Error make madevent_forhel not successful') - + raise Exception('Error make madevent_forhel not successful') + if not os.path.exists(pjoin(Pdir, 'Hel')): os.mkdir(pjoin(Pdir, 'Hel')) ff = open(pjoin(Pdir, 'Hel', 'input_app.txt'),'w') @@ -180,15 +180,15 @@ def get_helicity(self, to_submit=True, clean=True): try: os.remove(pjoin(Pdir, 'Hel','results.dat')) except Exception: - pass + pass # Launch gensym - p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, + p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=pjoin(Pdir,'Hel'), shell=True) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(" ".encode()) stdout = stdout.decode('ascii',errors='ignore') if os.path.exists(pjoin(self.me_dir, 'error')): - raise Exception(pjoin(self.me_dir,'error')) + raise Exception(pjoin(self.me_dir,'error')) # note a continue is not enough here, we have in top to link # the matrixX_optim.f to matrixX_orig.f to let the code to work # after this error. 
@@ -203,7 +203,7 @@ def get_helicity(self, to_submit=True, clean=True): zero_gc = list() all_zampperhel = set() all_bad_amps_perhel = set() - + for line in stdout.splitlines(): if "=" not in line and ":" not in line: continue @@ -229,22 +229,22 @@ def get_helicity(self, to_submit=True, clean=True): "%s\n" % (' '.join(zero_gc)) +\ "This will slow down the computation. Please consider using restricted model:\n" +\ "https://answers.launchpad.net/mg5amcnlo/+faq/2312") - - + + all_good_hels = collections.defaultdict(list) for me_index, hel in all_hel: - all_good_hels[me_index].append(int(hel)) - + all_good_hels[me_index].append(int(hel)) + #print(all_hel) if self.run_card['hel_zeroamp']: all_bad_amps = collections.defaultdict(list) for me_index, amp in all_zamp: all_bad_amps[me_index].append(int(amp)) - + all_bad_amps_perhel = collections.defaultdict(list) for me_index, hel, amp in all_zampperhel: - all_bad_amps_perhel[me_index].append((int(hel),int(amp))) - + all_bad_amps_perhel[me_index].append((int(hel),int(amp))) + elif all_zamp: nb_zero = sum(int(a[1]) for a in all_zamp) if zero_gc: @@ -254,7 +254,7 @@ def get_helicity(self, to_submit=True, clean=True): else: logger.warning("The optimization detected that you have %i zero matrix-element for this SubProcess: %s.\n" % nb_zero +\ "This part can optimize if you set the flag hel_zeroamp to True in the run_card.") - + #check if we need to do something and write associate information" data = [all_hel, all_zamp, all_bad_amps_perhel] if not self.run_card['hel_zeroamp']: @@ -266,14 +266,14 @@ def get_helicity(self, to_submit=True, clean=True): old_data = open(pjoin(Pdir,'Hel','selection')).read() if old_data == data: continue - - + + with open(pjoin(Pdir,'Hel','selection'),'w') as fsock: - fsock.write(data) - - + fsock.write(data) + + for matrix_file in misc.glob('matrix*orig.f', Pdir): - + split_file = matrix_file.split('/') me_index = split_file[-1][len('matrix'):-len('_orig.f')] @@ -289,11 +289,11 @@ def 
get_helicity(self, to_submit=True, clean=True): #good_hels = sorted(list(good_hels)) good_hels = [str(x) for x in sorted(all_good_hels[me_index])] if self.run_card['hel_zeroamp']: - + bad_amps = [str(x) for x in sorted(all_bad_amps[me_index])] bad_amps_perhel = [x for x in sorted(all_bad_amps_perhel[me_index])] else: - bad_amps = [] + bad_amps = [] bad_amps_perhel = [] if __debug__: mtext = open(matrix_file).read() @@ -310,7 +310,7 @@ def get_helicity(self, to_submit=True, clean=True): recycler.set_input(matrix_file) recycler.set_output(out_file) - recycler.set_template(templ_file) + recycler.set_template(templ_file) recycler.generate_output_file() del recycler @@ -321,19 +321,19 @@ def get_helicity(self, to_submit=True, clean=True): return {}, P_zero_result - + def launch(self, to_submit=True, clean=True): """ """ if not hasattr(self, 'subproc'): - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc - + P_zero_result = [] # check the number of times where they are no phase-space - + nb_tot_proc = len(subproc) - job_list = {} + job_list = {} for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.
(previous processes already running)' % \ (nb_proc+1,nb_tot_proc), level=None) @@ -341,7 +341,7 @@ def launch(self, to_submit=True, clean=True): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) logger.info(' %s ' % subdir) - + # clean previous run if clean: for match in misc.glob('*ajob*', Pdir): @@ -349,17 +349,17 @@ def launch(self, to_submit=True, clean=True): os.remove(match) for match in misc.glob('G*', Pdir): if os.path.exists(pjoin(match,'results.dat')): - os.remove(pjoin(match, 'results.dat')) + os.remove(pjoin(match, 'results.dat')) if os.path.exists(pjoin(match, 'ftn25')): - os.remove(pjoin(match, 'ftn25')) - + os.remove(pjoin(match, 'ftn25')) + #compile gensym self.cmd.compile(['gensym'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'gensym')): - raise Exception('Error make gensym not successful') - + raise Exception('Error make gensym not successful') + # Launch gensym - p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, + p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(''.encode()) @@ -367,8 +367,8 @@ def launch(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir,'error')): files.mv(pjoin(self.me_dir,'error'), pjoin(Pdir,'ajob.no_ps.log')) P_zero_result.append(subdir) - continue - + continue + jobs = stdout.split() job_list[Pdir] = jobs try: @@ -386,8 +386,8 @@ def launch(self, to_submit=True, clean=True): continue else: if done: - raise Exception('Parsing error in gensym: %s' % stdout) - job_list[Pdir] = l.split() + raise Exception('Parsing error in gensym: %s' % stdout) + job_list[Pdir] = l.split() done = True if not done: raise Exception('Parsing error in gensym: %s' % stdout) @@ -408,16 +408,16 @@ def launch(self, to_submit=True, clean=True): if to_submit: self.submit_to_cluster(job_list) job_list = {} - + return job_list, P_zero_result - + def resubmit(self, 
min_precision=1.0, resubmit_zero=False): """collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - + job_list, P_zero_result = self.launch(to_submit=False, clean=False) - + for P , jobs in dict(job_list).items(): misc.sprint(jobs) to_resub = [] @@ -434,7 +434,7 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): elif max(one_result.xerru, one_result.xerrc)/one_result.xsec > min_precision: to_resub.append(job) else: - to_resub.append(job) + to_resub.append(job) if to_resub: for G in to_resub: try: @@ -442,19 +442,19 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): except Exception as error: misc.sprint(error) pass - misc.sprint(to_resub) + misc.sprint(to_resub) self.submit_to_cluster({P: to_resub}) - - - - - - - - - - - + + + + + + + + + + + def submit_to_cluster(self, job_list): """ """ @@ -467,7 +467,7 @@ def submit_to_cluster(self, job_list): nexternal = self.cmd.proc_characteristics['nexternal'] current = open(pjoin(path, "nexternal.inc")).read() ext = re.search(r"PARAMETER \(NEXTERNAL=(\d+)\)", current).group(1) - + if self.run_card['job_strategy'] == 2: self.splitted_grid = 2 if nexternal == int(ext): @@ -498,18 +498,18 @@ def submit_to_cluster(self, job_list): return self.submit_to_cluster_no_splitting(job_list) else: return self.submit_to_cluster_splitted(job_list) - - + + def submit_to_cluster_no_splitting(self, job_list): """submit the survey without the parralelization. 
This is the old mode which is still usefull in single core""" - - # write the template file for the parameter file + + # write the template file for the parameter file self.write_parameter(parralelization=False, Pdirs=list(job_list.keys())) - - + + # launch the job with the appropriate grouping - for Pdir, jobs in job_list.items(): + for Pdir, jobs in job_list.items(): jobs = list(jobs) i=0 while jobs: @@ -518,16 +518,16 @@ def submit_to_cluster_no_splitting(self, job_list): for _ in range(self.combining_job_for_Pdir(Pdir)): if jobs: to_submit.append(jobs.pop(0)) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=to_submit, cwd=pjoin(self.me_dir,'SubProcesses' , Pdir)) - + def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): """prepare the input_file for submitting the channel""" - + if 'SubProcesses' not in Pdir: Pdir = pjoin(self.me_dir, 'SubProcesses', Pdir) @@ -535,8 +535,8 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): self.splitted_Pdir[(Pdir, G)] = int(nb_job) - # 1. write the new input_app.txt - run_card = self.cmd.run_card + # 1. write the new input_app.txt + run_card = self.cmd.run_card options = {'event' : submit_ps, 'maxiter': 1, 'miniter': 1, @@ -545,29 +545,29 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): else run_card['nhel'], 'gridmode': -2, 'channel' : G - } - + } + Gdir = pjoin(Pdir, 'G%s' % G) - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + # 2. check that ftn25 exists. - assert os.path.exists(pjoin(Gdir, "ftn25")) - - + assert os.path.exists(pjoin(Gdir, "ftn25")) + + # 3. 
Submit the new jobs #call back function - packet = cluster.Packet((Pdir, G, step+1), + packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, (Pdir, G, step+1)) - + if step ==0: - self.lastoffset[(Pdir, G)] = 0 - - # resubmit the new jobs + self.lastoffset[(Pdir, G)] = 0 + + # resubmit the new jobs for i in range(int(nb_job)): name = "G%s_%s" % (G,i+1) self.lastoffset[(Pdir, G)] += 1 - offset = self.lastoffset[(Pdir, G)] + offset = self.lastoffset[(Pdir, G)] self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'refine_splitted.sh'), argument=[name, 'G%s'%G, offset], cwd= Pdir, @@ -575,9 +575,9 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): def submit_to_cluster_splitted(self, job_list): - """ submit the version of the survey with splitted grid creation - """ - + """ submit the version of the survey with splitted grid creation + """ + #if self.splitted_grid <= 1: # return self.submit_to_cluster_no_splitting(job_list) @@ -592,7 +592,7 @@ def submit_to_cluster_splitted(self, job_list): for job in jobs: packet = cluster.Packet((Pdir, job, 1), self.combine_iteration, (Pdir, job, 1)) - for i in range(self.splitted_for_dir(Pdir, job)): + for i in range(self.splitted_for_dir(Pdir, job)): self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[i+1, job], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), @@ -601,15 +601,15 @@ def submit_to_cluster_splitted(self, job_list): def combine_iteration(self, Pdir, G, step): grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - - # Compute the number of events used for this run. + + # Compute the number of events used for this run. nb_events = grid_calculator.target_evt Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) - + # 4. 
make the submission of the next iteration # Three cases - less than 3 iteration -> continue # - more than 3 and less than 5 -> check error @@ -627,15 +627,15 @@ def combine_iteration(self, Pdir, G, step): need_submit = False else: need_submit = True - + elif step >= self.cmd.opts['iterations']: need_submit = False elif self.cmd.opts['accuracy'] < 0: #check for luminosity raise Exception("Not Implemented") elif self.abscross[(Pdir,G)] == 0: - need_submit = False - else: + need_submit = False + else: across = self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) tot_across = self.get_current_axsec() if across == 0: @@ -646,20 +646,20 @@ def combine_iteration(self, Pdir, G, step): need_submit = True else: need_submit = False - - + + if cross: grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_events,mode=self.mode, conservative_factor=5.0) - - xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) - if float(cross)!=0.0 and float(error)!=0.0 else 8) + + xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) + if float(cross)!=0.0 and float(error)!=0.0 else 8) if need_submit: message = "%%s/G%%s is at %%%s +- %%.3g pb. 
Now submitting iteration #%s."%(xsec_format, step+1) logger.info(message%\ - (os.path.basename(Pdir), G, float(cross), + (os.path.basename(Pdir), G, float(cross), float(error)*float(cross))) self.resubmit_survey(Pdir,G, Gdirs, step) elif cross: @@ -670,26 +670,26 @@ def combine_iteration(self, Pdir, G, step): newGpath = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(newGpath): os.mkdir(newGpath) - + # copy the new grid: - files.cp(pjoin(Gdirs[0], 'ftn25'), + files.cp(pjoin(Gdirs[0], 'ftn25'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, 'ftn26')) - + # copy the events fsock = open(pjoin(newGpath, 'events.lhe'), 'w') for Gdir in Gdirs: - fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) - + fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) + # copy one log - files.cp(pjoin(Gdirs[0], 'log.txt'), + files.cp(pjoin(Gdirs[0], 'log.txt'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G)) - - + + # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) else: logger.info("Survey finished for %s/G%s [0 cross]", os.path.basename(Pdir),G) - + Gdir = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(Gdir): os.mkdir(Gdir) @@ -697,21 +697,21 @@ def combine_iteration(self, Pdir, G, step): files.cp(pjoin(Gdirs[0], 'log.txt'), Gdir) # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) - + return 0 def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): """ exclude_sub_jobs is to remove some of the subjobs if a numerical issue is detected in one of them. Warning is issue when this occurs. """ - + # 1. 
create an object to combine the grid information and fill it grid_calculator = combine_grid.grid_information(self.run_card['nhel']) - + for i in range(self.splitted_for_dir(Pdir, G)): if i in exclude_sub_jobs: continue - path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) fsock = misc.mult_try_open(pjoin(path, 'results.dat')) one_result = grid_calculator.add_results_information(fsock) fsock.close() @@ -723,9 +723,9 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): fsock.close() os.remove(pjoin(path, 'results.dat')) #os.remove(pjoin(path, 'grid_information')) - - - + + + #2. combine the information about the total crossection / error # start by keep the interation in memory cross, across, sigma = grid_calculator.get_cross_section() @@ -736,12 +736,12 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): if maxwgt: nunwgt = grid_calculator.get_nunwgt(maxwgt) # Make sure not to apply the security below during the first step of the - # survey. Also, disregard channels with a contribution relative to the + # survey. Also, disregard channels with a contribution relative to the # total cross-section smaller than 1e-8 since in this case it is unlikely # that this channel will need more than 1 event anyway. 
apply_instability_security = False rel_contrib = 0.0 - if (self.__class__ != gensym or step > 1): + if (self.__class__ != gensym or step > 1): Pdir_across = 0.0 Gdir_across = 0.0 for (mPdir,mG) in self.abscross.keys(): @@ -750,7 +750,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): (self.sigma[(mPdir,mG)]+1e-99)) if mG == G: Gdir_across += (self.abscross[(mPdir,mG)]/ - (self.sigma[(mPdir,mG)]+1e-99)) + (self.sigma[(mPdir,mG)]+1e-99)) rel_contrib = abs(Gdir_across/(Pdir_across+1e-99)) if rel_contrib > (1.0e-8) and \ nunwgt < 2 and len(grid_calculator.results) > 1: @@ -770,14 +770,14 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): exclude_sub_jobs = list(exclude_sub_jobs) exclude_sub_jobs.append(th_maxwgt[-1][1]) grid_calculator.results.run_statistics['skipped_subchannel'] += 1 - + # Add some monitoring of the problematic events - gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) + gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) if os.path.isfile(pjoin(gPath,'events.lhe')): lhe_file = lhe_parser.EventFile(pjoin(gPath,'events.lhe')) discardedPath = pjoin(Pdir,'DiscardedUnstableEvents') if not os.path.exists(discardedPath): - os.mkdir(discardedPath) + os.mkdir(discardedPath) if os.path.isdir(discardedPath): # Keep only the event with a maximum weight, as it surely # is the problematic one. 
@@ -790,10 +790,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): lhe_file.close() evtRecord.write(pjoin(gPath,'events.lhe').read()) evtRecord.close() - + return self.combine_grid(Pdir, G, step, exclude_sub_jobs) - + if across !=0: if sigma != 0: self.cross[(Pdir,G)] += cross**3/sigma**2 @@ -814,10 +814,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): self.chi2[(Pdir,G)] = 0 cross = self.cross[(Pdir,G)] error = 0 - + else: error = 0 - + grid_calculator.results.compute_values(update_statistics=True) if (str(os.path.basename(Pdir)), G) in self.run_statistics: self.run_statistics[(str(os.path.basename(Pdir)), G)]\ @@ -825,8 +825,8 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): else: self.run_statistics[(str(os.path.basename(Pdir)), G)] = \ grid_calculator.results.run_statistics - - self.warnings_from_statistics(G, grid_calculator.results.run_statistics) + + self.warnings_from_statistics(G, grid_calculator.results.run_statistics) stats_msg = grid_calculator.results.run_statistics.nice_output( '/'.join([os.path.basename(Pdir),'G%s'%G])) @@ -836,7 +836,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): # Clean up grid_information to avoid border effects in case of a crash for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) - try: + try: os.remove(pjoin(path, 'grid_information')) except OSError as oneerror: if oneerror.errno != 2: @@ -850,7 +850,7 @@ def warnings_from_statistics(self,G,stats): return EPS_fraction = float(stats['exceptional_points'])/stats['n_madloop_calls'] - + msg = "Channel %s has encountered a fraction of %.3g\n"+ \ "of numerically unstable loop matrix element computations\n"+\ "(which could not be rescued using quadruple precision).\n"+\ @@ -861,16 +861,16 @@ def warnings_from_statistics(self,G,stats): elif EPS_fraction > 0.01: logger.critical((msg%(G,EPS_fraction)).replace('might', 'can')) raise Exception((msg%(G,EPS_fraction)).replace('might', 'can')) 
- + def get_current_axsec(self): - + across = 0 for (Pdir,G) in self.abscross: across += self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) return across - + def write_results(self, grid_calculator, cross, error, Pdir, G, step): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -888,7 +888,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step): maxwgt = grid_calculator.get_max_wgt() nunwgt = grid_calculator.get_nunwgt() luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -897,20 +897,20 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s %s 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross), fstr(maxwgt)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - + def resubmit_survey(self, Pdir, G, Gdirs, step): """submit the next iteration of the survey""" # 1. write the new input_app.txt to double the number of points - run_card = self.cmd.run_card + run_card = self.cmd.run_card options = {'event' : 2**(step) * self.cmd.opts['points'] / self.splitted_grid, 'maxiter': 1, 'miniter': 1, @@ -919,18 +919,18 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): else run_card['nhel'], 'gridmode': -2, 'channel' : '' - } - + } + if int(options['helicity']) == 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + for Gdir in Gdirs: - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + + #2. 
resubmit the new jobs packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, \ - (Pdir, G, step+1)) + (Pdir, G, step+1)) nb_step = len(Gdirs) * (step+1) for i,subdir in enumerate(Gdirs): subdir = subdir.rsplit('_',1)[1] @@ -938,34 +938,34 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): offset = nb_step+i+1 offset=str(offset) tag = "%s.%s" % (subdir, offset) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[tag, G], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), packet_member=packet) - + def write_parameter_file(self, path, options): """ """ - + template =""" %(event)s %(maxiter)s %(miniter)s !Number of events and max and min iterations %(accuracy)s !Accuracy %(gridmode)s !Grid Adjustment 0=none, 2=adjust 1 !Suppress Amplitude 1=yes %(helicity)s !Helicity Sum/event 0=exact - %(channel)s """ + %(channel)s """ options['event'] = int(options['event']) open(path, 'w').write(template % options) - - + + def write_parameter(self, parralelization, Pdirs=None): """Write the parameter of the survey run""" run_card = self.cmd.run_card - + options = {'event' : self.cmd.opts['points'], 'maxiter': self.cmd.opts['iterations'], 'miniter': self.min_iterations, @@ -975,36 +975,36 @@ def write_parameter(self, parralelization, Pdirs=None): 'gridmode': 2, 'channel': '' } - + if int(options['helicity'])== 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + if parralelization: options['gridmode'] = -2 options['maxiter'] = 1 #this is automatic in dsample anyway options['miniter'] = 1 #this is automatic in dsample anyway options['event'] /= self.splitted_grid - + if not Pdirs: Pdirs = self.subproc - + for Pdir in Pdirs: - path =pjoin(Pdir, 'input_app.txt') + path =pjoin(Pdir, 'input_app.txt') self.write_parameter_file(path, options) - - -class gen_ximprove(object): - - + + +class gen_ximprove(object): + + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the 
number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job @@ -1022,7 +1022,7 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_gridpack) elif cls.force_class == 'loop_induced': return super(gen_ximprove, cls).__new__(gen_ximprove_share) - + if cmd.proc_characteristics['loop_induced']: return super(gen_ximprove, cls).__new__(gen_ximprove_share) elif gen_ximprove.format_variable(cmd.run_card['gridpack'], bool): @@ -1031,31 +1031,31 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_share) else: return super(gen_ximprove, cls).__new__(gen_ximprove_v4) - - + + def __init__(self, cmd, opt=None): - + try: super(gen_ximprove, self).__init__(cmd, opt) except TypeError: pass - + self.run_statistics = {} self.cmd = cmd self.run_card = cmd.run_card run_card = self.run_card self.me_dir = cmd.me_dir - + #extract from the run_card the information that we need. 
self.gridpack = run_card['gridpack'] self.nhel = run_card['nhel'] if "nhel_refine" in run_card: self.nhel = run_card["nhel_refine"] - + if self.run_card['refine_evt_by_job'] != -1: self.max_request_event = run_card['refine_evt_by_job'] - - + + # Default option for the run self.gen_events = True self.parralel = False @@ -1066,7 +1066,7 @@ def __init__(self, cmd, opt=None): # parameter for the gridpack run self.nreq = 2000 self.iseed = 4321 - + # placeholder for information self.results = 0 #updated in launch/update_html @@ -1074,16 +1074,16 @@ def __init__(self, cmd, opt=None): self.configure(opt) elif isinstance(opt, bannermod.GridpackCard): self.configure_gridpack(opt) - + def __call__(self): return self.launch() - + def launch(self): - """running """ - + """running """ + #start the run self.handle_seed() - self.results = sum_html.collect_result(self.cmd, + self.results = sum_html.collect_result(self.cmd, main_dir=pjoin(self.cmd.me_dir,'SubProcesses')) #main_dir is for gridpack readonly mode if self.gen_events: # We run to provide a given number of events @@ -1095,15 +1095,15 @@ def launch(self): def configure(self, opt): """Defines some parameter of the run""" - + for key, value in opt.items(): if key in self.__dict__: targettype = type(getattr(self, key)) setattr(self, key, self.format_variable(value, targettype, key)) else: raise Exception('%s not define' % key) - - + + # special treatment always do outside the loop to avoid side effect if 'err_goal' in opt: if self.err_goal < 1: @@ -1113,24 +1113,24 @@ def configure(self, opt): logger.info("Generating %s unweighted events." 
% self.err_goal) self.gen_events = True self.err_goal = self.err_goal * self.gen_events_security # security - + def handle_seed(self): """not needed but for gridpack --which is not handle here for the moment""" return - - + + def find_job_for_event(self): """return the list of channel that need to be improved""" - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) - - goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 + + goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) - all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) - + all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) + to_refine = [] for C in all_channels: if C.get('axsec') == 0: @@ -1141,61 +1141,61 @@ def find_job_for_event(self): elif C.get('xerr') > max(C.get('axsec'), (1/(100*math.sqrt(self.err_goal)))*all_channels[-1].get('axsec')): to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 
'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru + - class gen_ximprove_v4(gen_ximprove): - + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + super(gen_ximprove_v4, self).__init__(cmd, opt) - + if cmd.opts['accuracy'] < cmd._survey_options['accuracy'][1]: self.increase_precision(cmd._survey_options['accuracy'][1]/cmd.opts['accuracy']) @@ -1203,7 +1203,7 @@ def reset_multijob(self): for path in misc.glob(pjoin('*', '*','multijob.dat'), pjoin(self.me_dir, 'SubProcesses')): open(path,'w').write('0\n') - + def write_multijob(self, Channel, nb_split): """ """ if nb_split <=1: @@ -1211,7 +1211,7 @@ def write_multijob(self, Channel, nb_split): f = open(pjoin(self.me_dir, 'SubProcesses', Channel.get('name'), 'multijob.dat'), 'w') f.write('%i\n' % nb_split) f.close() - + def increase_precision(self, rate=3): #misc.sprint(rate) if rate < 3: @@ -1222,25 +1222,25 @@ def increase_precision(self, rate=3): rate = rate -2 self.max_event_in_iter = int((rate+1) * 10000) self.min_events = int(rate+2) * 2500 - self.gen_events_security = 1 + 0.1 * (rate+2) - + 
self.gen_events_security = 1 + 0.1 * (rate+2) + if int(self.nhel) == 1: self.min_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//3) self.max_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//2) - - + + alphabet = "abcdefghijklmnopqrstuvwxyz" def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() #reset the potential multijob of previous run self.reset_multijob() - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. @@ -1257,17 +1257,17 @@ def get_job_for_event(self): else: for i in range(len(to_refine) //3): new_order.append(to_refine[i]) - new_order.append(to_refine[-2*i-1]) + new_order.append(to_refine[-2*i-1]) new_order.append(to_refine[-2*i-2]) if len(to_refine) % 3 == 1: - new_order.append(to_refine[i+1]) + new_order.append(to_refine[i+1]) elif len(to_refine) % 3 == 2: - new_order.append(to_refine[i+2]) + new_order.append(to_refine[i+2]) #ensure that the reordering is done nicely assert set([id(C) for C in to_refine]) == set([id(C) for C in new_order]) - to_refine = new_order - - + to_refine = new_order + + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target @@ -1279,7 +1279,7 @@ def get_job_for_event(self): nb_split = self.max_splitting nb_split=max(1, nb_split) - + #2. estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1296,21 +1296,21 @@ def get_job_for_event(self): nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. 
Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + # write the multi-job information self.write_multijob(C, nb_split) - + packet = cluster.Packet((C.parent_name, C.name), combine_runs.CombineRuns, (pjoin(self.me_dir, 'SubProcesses', C.parent_name)), {"subproc": C.name, "nb_split":nb_split}) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1321,7 +1321,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': packet, + 'packet': packet, } if nb_split == 1: @@ -1334,19 +1334,19 @@ def get_job_for_event(self): if self.keep_grid_for_refine: new_info['base_directory'] = info['directory'] jobs.append(new_info) - - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def create_ajob(self, template, jobs, write_dir=None): """create the ajob""" - + if not jobs: return if not write_dir: write_dir = pjoin(self.me_dir, 'SubProcesses') - + #filter the job according to their SubProcess directory # no mix submition P2job= collections.defaultdict(list) for j in jobs: @@ -1355,11 +1355,11 @@ def create_ajob(self, template, jobs, write_dir=None): for P in P2job.values(): self.create_ajob(template, P, write_dir) return - - + + #Here we can assume that all job are for the same directory. 
path = pjoin(write_dir, jobs[0]['P_dir']) - + template_text = open(template, 'r').read() # special treatment if needed to combine the script # computes how many submition miss one job @@ -1384,8 +1384,8 @@ def create_ajob(self, template, jobs, write_dir=None): skip1=0 combining_job =1 nb_sub = len(jobs) - - + + nb_use = 0 for i in range(nb_sub): script_number = i+1 @@ -1404,14 +1404,14 @@ def create_ajob(self, template, jobs, write_dir=None): info["base_directory"] = "./" fsock.write(template_text % info) nb_use += nb_job - + fsock.close() return script_number def get_job_for_precision(self): """create the ajob to achieve a give precision on the total cross-section""" - + assert self.err_goal <=1 xtot = abs(self.results.xsec) logger.info("Working on precision: %s %%" %(100*self.err_goal)) @@ -1428,46 +1428,46 @@ def get_job_for_precision(self): rerr *=rerr if not len(to_refine): return - - # change limit since most don't contribute + + # change limit since most don't contribute limit = math.sqrt((self.err_goal * xtot)**2 - rerr/math.sqrt(len(to_refine))) for C in to_refine[:]: cerr = C.mfactor*(C.xerru + len(to_refine)*C.xerrc) if cerr < limit: to_refine.remove(C) - + # all the channel are now selected. create the channel information logger.info('need to improve %s channels' % len(to_refine)) - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. # loop over the channel to refine for C in to_refine: - + #1. 
Determine how many events we need in each iteration yerr = C.mfactor*(C.xerru+len(to_refine)*C.xerrc) nevents = 0.2*C.nevents*(yerr/limit)**2 - + nb_split = int((nevents*(C.nunwgt/C.nevents)/self.max_request_event/ (2**self.min_iter-1))**(2/3)) nb_split = max(nb_split, 1) - # **(2/3) to slow down the increase in number of jobs + # **(2/3) to slow down the increase in number of jobs if nb_split > self.max_splitting: nb_split = self.max_splitting - + if nb_split >1: nevents = nevents / nb_split self.write_multijob(C, nb_split) # forbid too low/too large value nevents = min(self.min_event_in_iter, max(self.max_event_in_iter, nevents)) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1487,38 +1487,38 @@ def get_job_for_precision(self): new_info['offset'] = i+1 new_info['directory'] += self.alphabet[i % 26] + str((i+1)//26) jobs.append(new_info) - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 
'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru @@ -1528,27 +1528,27 @@ class gen_ximprove_v4_nogridupdate(gen_ximprove_v4): # some hardcoded value which impact the generation gen_events_security = 1.1 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 400 # split jobs if a channel if it needs more than that + max_request_event = 400 # split jobs if a channel if it needs more than that max_event_in_iter = 500 min_event_in_iter = 250 - max_splitting = 260 # maximum duplication of a given channel - min_iter = 2 + max_splitting = 260 # maximum duplication of a given channel + min_iter = 2 max_iter = 6 keep_grid_for_refine = True - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + gen_ximprove.__init__(cmd, opt) - + if cmd.proc_characteristics['loopinduced'] and \ cmd.proc_characteristics['nexternal'] > 2: self.increase_parralelization(cmd.proc_characteristics['nexternal']) - + def increase_parralelization(self, nexternal): - self.max_splitting = 1000 - + self.max_splitting = 1000 + if self.run_card['refine_evt_by_job'] != -1: pass elif nexternal == 3: @@ -1563,27 +1563,27 @@ def increase_parralelization(self, nexternal): class gen_ximprove_share(gen_ximprove, gensym): """Doing the refine in multicore. Each core handle a couple of PS point.""" - nb_ps_by_job = 2000 + nb_ps_by_job = 2000 mode = "refine" gen_events_security = 1.15 # Note the real security is lower since we stop the jobs if they are at 96% # of this target. 
def __init__(self, *args, **opts): - + super(gen_ximprove_share, self).__init__(*args, **opts) self.generated_events = {} self.splitted_for_dir = lambda x,y : self.splitted_Pdir[(x,y)] - + def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - + goal_lum, to_refine = self.find_job_for_event() self.goal_lum = goal_lum - + # loop over the channel to refine to find the number of PS point to launch total_ps_points = 0 channel_to_ps_point = [] @@ -1593,7 +1593,7 @@ def get_job_for_event(self): os.remove(pjoin(self.me_dir, "SubProcesses",C.parent_name, C.name, "events.lhe")) except: pass - + #1. Compute the number of points are needed to reach target needed_event = goal_lum*C.get('axsec') if needed_event == 0: @@ -1609,18 +1609,18 @@ def get_job_for_event(self): nb_split = 1 if nb_split > self.max_splitting: nb_split = self.max_splitting - nevents = self.max_event_in_iter * self.max_splitting + nevents = self.max_event_in_iter * self.max_splitting else: nevents = self.max_event_in_iter * nb_split if nevents > self.max_splitting*self.max_event_in_iter: logger.warning("Channel %s/%s has a very low efficiency of unweighting. 
Might not be possible to reach target" % \ (C.name, C.parent_name)) - nevents = self.max_event_in_iter * self.max_splitting - - total_ps_points += nevents - channel_to_ps_point.append((C, nevents)) - + nevents = self.max_event_in_iter * self.max_splitting + + total_ps_points += nevents + channel_to_ps_point.append((C, nevents)) + if self.cmd.options["run_mode"] == 1: if self.cmd.options["cluster_size"]: nb_ps_by_job = total_ps_points /int(self.cmd.options["cluster_size"]) @@ -1634,7 +1634,7 @@ def get_job_for_event(self): nb_ps_by_job = total_ps_points / self.cmd.options["nb_core"] else: nb_ps_by_job = self.nb_ps_by_job - + nb_ps_by_job = int(max(nb_ps_by_job, 500)) for C, nevents in channel_to_ps_point: @@ -1648,20 +1648,20 @@ def get_job_for_event(self): self.create_resubmit_one_iter(C.parent_name, C.name[1:], submit_ps, nb_job, step=0) needed_event = goal_lum*C.get('xsec') logger.debug("%s/%s : need %s event. Need %s split job of %s points", C.parent_name, C.name, needed_event, nb_job, submit_ps) - - + + def combine_iteration(self, Pdir, G, step): - + grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - + # collect all the generated_event Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) assert len(grid_calculator.results) == len(Gdirs) == self.splitted_for_dir(Pdir, G) - - + + # Check how many events are going to be kept after un-weighting. needed_event = cross * self.goal_lum if needed_event == 0: @@ -1671,19 +1671,19 @@ def combine_iteration(self, Pdir, G, step): if self.err_goal >=1: if needed_event > self.gen_events_security * self.err_goal: needed_event = int(self.gen_events_security * self.err_goal) - + if (Pdir, G) in self.generated_events: old_nunwgt, old_maxwgt = self.generated_events[(Pdir, G)] else: old_nunwgt, old_maxwgt = 0, 0 - + if old_nunwgt == 0 and os.path.exists(pjoin(Pdir,"G%s" % G, "events.lhe")): # possible for second refine. 
lhe = lhe_parser.EventFile(pjoin(Pdir,"G%s" % G, "events.lhe")) old_nunwgt = lhe.unweight(None, trunc_error=0.005, log_level=0) old_maxwgt = lhe.max_wgt - - + + maxwgt = max(grid_calculator.get_max_wgt(), old_maxwgt) new_evt = grid_calculator.get_nunwgt(maxwgt) @@ -1695,35 +1695,35 @@ def combine_iteration(self, Pdir, G, step): one_iter_nb_event = max(grid_calculator.get_nunwgt(),1) drop_previous_iteration = False # compare the number of events to generate if we discard the previous iteration - n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) n_target_combined = (needed_event-nunwgt) / efficiency if n_target_one_iter < n_target_combined: # the last iteration alone has more event that the combine iteration. - # it is therefore interesting to drop previous iteration. + # it is therefore interesting to drop previous iteration. drop_previous_iteration = True nunwgt = one_iter_nb_event maxwgt = grid_calculator.get_max_wgt() new_evt = nunwgt - efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) - + efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + try: if drop_previous_iteration: raise IOError output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'a') except IOError: output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'w') - + misc.call(["cat"] + [pjoin(d, "events.lhe") for d in Gdirs], stdout=output_file) output_file.close() # For large number of iteration. check the number of event by doing the # real unweighting. 
- if nunwgt < 0.6 * needed_event and step > self.min_iter: + if nunwgt < 0.6 * needed_event and step > self.min_iter: lhe = lhe_parser.EventFile(output_file.name) old_nunwgt =nunwgt nunwgt = lhe.unweight(None, trunc_error=0.01, log_level=0) - - + + self.generated_events[(Pdir, G)] = (nunwgt, maxwgt) # misc.sprint("Adding %s event to %s. Currently at %s" % (new_evt, G, nunwgt)) @@ -1742,21 +1742,21 @@ def combine_iteration(self, Pdir, G, step): nevents = grid_calculator.results[0].nevents if nevents == 0: # possible if some integral returns 0 nevents = max(g.nevents for g in grid_calculator.results) - + need_ps_point = (needed_event - nunwgt)/(efficiency+1e-99) - need_job = need_ps_point // nevents + 1 - + need_job = need_ps_point // nevents + 1 + if step < self.min_iter: # This is normal but check if we are on the good track - job_at_first_iter = nb_split_before/2**(step-1) + job_at_first_iter = nb_split_before/2**(step-1) expected_total_job = job_at_first_iter * (2**self.min_iter-1) done_job = job_at_first_iter * (2**step-1) expected_remaining_job = expected_total_job - done_job - logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) + logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) # increase if needed but not too much need_job = min(need_job, expected_remaining_job*1.25) - + nb_job = (need_job-0.5)//(2**(self.min_iter-step)-1) + 1 nb_job = max(1, nb_job) grid_calculator.write_grid_for_submission(Pdir,G, @@ -1768,7 +1768,7 @@ def combine_iteration(self, Pdir, G, step): nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) #self.create_job(Pdir, G, nb_job, nevents, step) - + elif step < self.max_iter: if step + 1 == self.max_iter: need_job = 1.20 * need_job # avoid to have just too few event. 
@@ -1777,21 +1777,21 @@ def combine_iteration(self, Pdir, G, step): grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_job*nevents ,mode=self.mode, conservative_factor=self.max_iter) - - + + logger.info("%s/G%s is at %i/%i ('%.2g%%') event. Resubmit %i job at iteration %i." \ % (os.path.basename(Pdir), G, int(nunwgt),int(needed_event)+1, (float(nunwgt)/needed_event)*100.0 if needed_event>0.0 else 0.0, nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) - - + + return 0 - - + + def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -1807,7 +1807,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency nevents = nunwgt # make the unweighting to compute the number of events: luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -1816,23 +1816,23 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s 0.0 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - - - + + + class gen_ximprove_gridpack(gen_ximprove_v4): - - min_iter = 1 + + min_iter = 1 max_iter = 13 - max_request_event = 1e12 # split jobs if a channel if it needs more than that + max_request_event = 1e12 # split jobs if a channel if it needs more than that max_event_in_iter = 4000 min_event_in_iter = 500 combining_job = sys.maxsize @@ -1844,7 +1844,7 @@ def __new__(cls, *args, **opts): return super(gen_ximprove_gridpack, cls).__new__(cls, *args, **opts) def __init__(self, *args, **opts): - + self.ngran = -1 self.gscalefact = {} self.readonly = False 
@@ -1855,23 +1855,23 @@ def __init__(self, *args, **opts): self.readonly = opts['readonly'] super(gen_ximprove_gridpack,self).__init__(*args, **opts) if self.ngran == -1: - self.ngran = 1 - + self.ngran = 1 + def find_job_for_event(self): """return the list of channel that need to be improved""" import random - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) self.gscalefact = {} - + xtot = self.results.axsec - goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 + goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 # logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) all_channels.sort(key=lambda x : x.get('luminosity'), reverse=True) - + to_refine = [] for C in all_channels: tag = C.get('name') @@ -1885,27 +1885,27 @@ def find_job_for_event(self): #need to generate events logger.debug('request events for ', C.get('name'), 'cross=', C.get('axsec'), 'needed events = ', goal_lum * C.get('axsec')) - to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + to_refine.append(C) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. - + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target needed_event = max(goal_lum*C.get('axsec'), self.ngran) nb_split = 1 - + #2. 
estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1920,13 +1920,13 @@ def get_job_for_event(self): # forbid too low/too large value nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': os.path.basename(C.parent_name), + 'P_dir': os.path.basename(C.parent_name), 'offset': 1, # need to be change for splitted job 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'nevents': nevents, #int(nevents*self.gen_events_security)+1, @@ -1938,7 +1938,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': None, + 'packet': None, } if self.readonly: @@ -1946,11 +1946,11 @@ def get_job_for_event(self): info['base_directory'] = basedir jobs.append(info) - - write_dir = '.' if self.readonly else None - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) - + + write_dir = '.' if self.readonly else None + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) + done = [] for j in jobs: if j['P_dir'] in done: @@ -1967,22 +1967,22 @@ def get_job_for_event(self): write_dir = '.' 
if self.readonly else pjoin(self.me_dir, 'SubProcesses') self.check_events(goal_lum, to_refine, jobs, write_dir) - + def check_events(self, goal_lum, to_refine, jobs, Sdir): """check that we get the number of requested events if not resubmit.""" - + new_jobs = [] - + for C, job_info in zip(to_refine, jobs): - P = job_info['P_dir'] + P = job_info['P_dir'] G = job_info['channel'] axsec = C.get('axsec') - requested_events= job_info['requested_event'] - + requested_events= job_info['requested_event'] + new_results = sum_html.OneResult((P,G)) new_results.read_results(pjoin(Sdir,P, 'G%s'%G, 'results.dat')) - + # need to resubmit? if new_results.get('nunwgt') < requested_events: pwd = pjoin(os.getcwd(),job_info['P_dir'],'G%s'%G) if self.readonly else \ @@ -1992,10 +1992,10 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): job_info['offset'] += 1 new_jobs.append(job_info) files.mv(pjoin(pwd, 'events.lhe'), pjoin(pwd, 'events.lhe.previous')) - + if new_jobs: - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) + done = [] for j in new_jobs: if j['P_dir'] in done: @@ -2015,9 +2015,9 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): files.put_at_end(pjoin(pwd, 'events.lhe'),pjoin(pwd, 'events.lhe.previous')) return self.check_events(goal_lum, to_refine, new_jobs, Sdir) - - - - + + + + diff --git a/epochX/cudacpp/gg_ttggg.mad/bin/internal/madevent_interface.py b/epochX/cudacpp/gg_ttggg.mad/bin/internal/madevent_interface.py index cb6bf4ca57..8abba3f33f 100755 --- a/epochX/cudacpp/gg_ttggg.mad/bin/internal/madevent_interface.py +++ b/epochX/cudacpp/gg_ttggg.mad/bin/internal/madevent_interface.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application 
which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,10 +53,10 @@ # Special logger for the Cmd Interface logger = logging.getLogger('madevent.stdout') # -> stdout logger_stderr = logging.getLogger('madevent.stderr') # ->stderr - + try: import madgraph -except ImportError as error: +except ImportError as error: # import from madevent directory MADEVENT = True import internal.extended_cmd as cmd @@ -92,7 +92,7 @@ import madgraph.various.lhe_parser as lhe_parser # import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code import models.check_param_card as check_param_card - from madgraph.iolibs.files import ln + from madgraph.iolibs.files import ln from madgraph import InvalidCmd, MadGraph5Error, MG5DIR, ReadWrite @@ -113,10 +113,10 @@ class CmdExtended(common_run.CommonRunCmd): next_possibility = { 'start': [], } - + debug_output = 'ME5_debug' error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n' - error_debug += 'More information is found in \'%(debug)s\'.\n' + error_debug += 'More information is found in \'%(debug)s\'.\n' error_debug += 'Please attach this file to your report.' 
config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n' @@ -124,18 +124,18 @@ class CmdExtended(common_run.CommonRunCmd): keyboard_stop_msg = """stopping all operation in order to quit MadGraph5_aMC@NLO please enter exit""" - + # Define the Error InvalidCmd = InvalidCmd ConfigurationError = MadGraph5Error def __init__(self, me_dir, options, *arg, **opt): """Init history and line continuation""" - + # Tag allowing/forbiding question self.force = False - - # If possible, build an info line with current version number + + # If possible, build an info line with current version number # and date, from the VERSION text file info = misc.get_pkg_info() info_line = "" @@ -150,7 +150,7 @@ def __init__(self, me_dir, options, *arg, **opt): else: version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip() info_line = "#* VERSION %s %s *\n" % \ - (version, (24 - len(version)) * ' ') + (version, (24 - len(version)) * ' ') # Create a header for the history file. # Remember to fill in time at writeout time! 
@@ -177,7 +177,7 @@ def __init__(self, me_dir, options, *arg, **opt): '#* run as ./bin/madevent.py filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - + if info_line: info_line = info_line[1:] @@ -203,11 +203,11 @@ def __init__(self, me_dir, options, *arg, **opt): "* *\n" + \ "************************************************************") super(CmdExtended, self).__init__(me_dir, options, *arg, **opt) - + def get_history_header(self): - """return the history header""" + """return the history header""" return self.history_header % misc.get_time_info() - + def stop_on_keyboard_stop(self): """action to perform to close nicely on a keyboard interupt""" try: @@ -219,20 +219,20 @@ def stop_on_keyboard_stop(self): self.add_error_log_in_html(KeyboardInterrupt) except: pass - + def postcmd(self, stop, line): """ Update the status of the run for finishing interactive command """ - - stop = super(CmdExtended, self).postcmd(stop, line) + + stop = super(CmdExtended, self).postcmd(stop, line) # relaxing the tag forbidding question self.force = False - + if not self.use_rawinput: return stop - + if self.results and not self.results.current: return stop - + arg = line.split() if len(arg) == 0: return stop @@ -240,41 +240,41 @@ def postcmd(self, stop, line): return stop if isinstance(self.results.status, str) and self.results.status == 'Stop by the user': self.update_status('%s Stop by the user' % arg[0], level=None, error=True) - return stop + return stop elif not self.results.status: return stop elif str(arg[0]) in ['exit','quit','EOF']: return stop - + try: - self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], + self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], level=None, error=True) except Exception: misc.sprint('update_status fails') pass - - + + def nice_user_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() - return cmd.Cmd.nice_user_error(self, error, line) - + return cmd.Cmd.nice_user_error(self, error, line) + def nice_config_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() stop = cmd.Cmd.nice_config_error(self, error, line) - - + + try: debug_file = open(self.debug_output, 'a') debug_file.write(open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat'))) debug_file.close() except: - pass + pass return stop - + def nice_error_handling(self, error, line): """If a ME run is currently running add a link in the html output""" @@ -294,7 +294,7 @@ def nice_error_handling(self, error, line): proc_card = pjoin(self.me_dir,'Cards','proc_card_mg5.dat') if os.path.exists(proc_card): self.banner.add(proc_card) - + out_dir = pjoin(self.me_dir, 'Events', self.run_name) if not os.path.isdir(out_dir): os.mkdir(out_dir) @@ -307,7 +307,7 @@ def nice_error_handling(self, error, line): else: pass else: - self.add_error_log_in_html() + self.add_error_log_in_html() stop = cmd.Cmd.nice_error_handling(self, error, line) try: debug_file = open(self.debug_output, 'a') @@ -316,14 +316,14 @@ def nice_error_handling(self, error, line): except: pass return stop - - + + #=============================================================================== # HelpToCmd #=============================================================================== class HelpToCmd(object): """ The Series of help routine for the MadEventCmd""" - + def help_pythia(self): logger.info("syntax: pythia [RUN] [--run_options]") logger.info("-- run pythia on RUN (current one by default)") @@ -352,29 +352,29 @@ def help_banner_run(self): logger.info(" Path should be the path of a valid banner.") 
logger.info(" RUN should be the name of a run of the current directory") self.run_options_help([('-f','answer all question by default'), - ('--name=X', 'Define the name associated with the new run')]) - + ('--name=X', 'Define the name associated with the new run')]) + def help_open(self): logger.info("syntax: open FILE ") logger.info("-- open a file with the appropriate editor.") logger.info(' If FILE belongs to index.html, param_card.dat, run_card.dat') logger.info(' the path to the last created/used directory is used') logger.info(' The program used to open those files can be chosen in the') - logger.info(' configuration file ./input/mg5_configuration.txt') - - + logger.info(' configuration file ./input/mg5_configuration.txt') + + def run_options_help(self, data): if data: logger.info('-- local options:') for name, info in data: logger.info(' %s : %s' % (name, info)) - + logger.info("-- session options:") - logger.info(" Note that those options will be kept for the current session") + logger.info(" Note that those options will be kept for the current session") logger.info(" --cluster : Submit to the cluster. 
Current cluster: %s" % self.options['cluster_type']) logger.info(" --multicore : Run in multi-core configuration") logger.info(" --nb_core=X : limit the number of core to use to X.") - + def help_generate_events(self): logger.info("syntax: generate_events [run_name] [options]",) @@ -398,16 +398,16 @@ def help_initMadLoop(self): logger.info(" -f : Bypass the edition of MadLoopParams.dat.",'$MG:color:BLUE') logger.info(" -r : Refresh of the existing filters (erasing them if already present).",'$MG:color:BLUE') logger.info(" --nPS= : Specify how many phase-space points should be tried to set up the filters.",'$MG:color:BLUE') - + def help_calculate_decay_widths(self): - + if self.ninitial != 1: logger.warning("This command is only valid for processes of type A > B C.") logger.warning("This command can not be run in current context.") logger.warning("") - + logger.info("syntax: calculate_decay_widths [run_name] [options])") logger.info("-- Calculate decay widths and enter widths and BRs in param_card") logger.info(" for a series of processes of type A > B C ...") @@ -428,8 +428,8 @@ def help_survey(self): logger.info("-- evaluate the different channel associate to the process") self.run_options_help([("--" + key,value[-1]) for (key,value) in \ self._survey_options.items()]) - - + + def help_restart_gridpack(self): logger.info("syntax: restart_gridpack --precision= --restart_zero") @@ -439,14 +439,14 @@ def help_launch(self): logger.info("syntax: launch [run_name] [options])") logger.info(" --alias for either generate_events/calculate_decay_widths") logger.info(" depending of the number of particles in the initial state.") - + if self.ninitial == 1: logger.info("For this directory this is equivalent to calculate_decay_widths") self.help_calculate_decay_widths() else: logger.info("For this directory this is equivalent to $generate_events") self.help_generate_events() - + def help_refine(self): logger.info("syntax: refine require_precision [max_channel] [--run_options]") 
logger.info("-- refine the LAST run to achieve a given precision.") @@ -454,14 +454,14 @@ def help_refine(self): logger.info(' or the required relative error') logger.info(' max_channel:[5] maximal number of channel per job') self.run_options_help([]) - + def help_combine_events(self): """ """ logger.info("syntax: combine_events [run_name] [--tag=tag_name] [--run_options]") logger.info("-- Combine the last run in order to write the number of events") logger.info(" asked in the run_card.") self.run_options_help([]) - + def help_store_events(self): """ """ logger.info("syntax: store_events [--run_options]") @@ -481,7 +481,7 @@ def help_import(self): logger.info("syntax: import command PATH") logger.info("-- Execute the command present in the file") self.run_options_help([]) - + def help_syscalc(self): logger.info("syntax: syscalc [RUN] [%s] [-f | --tag=]" % '|'.join(self._plot_mode)) logger.info("-- calculate systematics information for the RUN (current run by default)") @@ -506,18 +506,18 @@ class AskRun(cmd.ControlSwitch): ('madspin', 'Decay onshell particles'), ('reweight', 'Add weights to events for new hypp.') ] - + def __init__(self, question, line_args=[], mode=None, force=False, *args, **opt): - + self.check_available_module(opt['mother_interface'].options) self.me_dir = opt['mother_interface'].me_dir super(AskRun,self).__init__(self.to_control, opt['mother_interface'], *args, **opt) - - + + def check_available_module(self, options): - + self.available_module = set() if options['pythia-pgs_path']: self.available_module.add('PY6') @@ -540,32 +540,32 @@ def check_available_module(self, options): self.available_module.add('Rivet') else: logger.warning("Rivet program installed but no parton shower with hepmc output detected.\n Please install pythia8") - + if not MADEVENT or ('mg5_path' in options and options['mg5_path']): self.available_module.add('MadSpin') if misc.has_f2py() or options['f2py_compiler']: self.available_module.add('reweight') -# old mode to 
activate the shower +# old mode to activate the shower def ans_parton(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if value is None: self.set_all_off() else: logger.warning('Invalid command: parton=%s' % value) - - + + # -# HANDLING SHOWER +# HANDLING SHOWER # def get_allowed_shower(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_shower'): return self.allowed_shower - + self.allowed_shower = [] if 'PY6' in self.available_module: self.allowed_shower.append('Pythia6') @@ -574,9 +574,9 @@ def get_allowed_shower(self): if self.allowed_shower: self.allowed_shower.append('OFF') return self.allowed_shower - + def set_default_shower(self): - + if 'PY6' in self.available_module and\ os.path.exists(pjoin(self.me_dir,'Cards','pythia_card.dat')): self.switch['shower'] = 'Pythia6' @@ -590,10 +590,10 @@ def set_default_shower(self): def check_value_shower(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_shower(): return True - + value =value.lower() if value in ['py6','p6','pythia_6'] and 'PY6' in self.available_module: return 'Pythia6' @@ -601,13 +601,13 @@ def check_value_shower(self, value): return 'Pythia8' else: return False - - -# old mode to activate the shower + + +# old mode to activate the shower def ans_pythia(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if 'PY6' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return @@ -621,13 +621,13 @@ def ans_pythia(self, value=None): self.set_switch('shower', 'OFF') else: logger.warning('Invalid command: pythia=%s' % value) - - + + def consistency_shower_detector(self, vshower, vdetector): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower == 'OFF': @@ -635,35 +635,35 @@ def consistency_shower_detector(self, vshower, vdetector): return 'OFF' if vshower == 'Pythia8' and vdetector == 'PGS': return 'OFF' - + return None - + # # HANDLING DETECTOR # def get_allowed_detector(self): """return valid entry for the switch""" - + if hasattr(self, 'allowed_detector'): - return self.allowed_detector - + return self.allowed_detector + self.allowed_detector = [] if 'PGS' in self.available_module: self.allowed_detector.append('PGS') if 'Delphes' in self.available_module: self.allowed_detector.append('Delphes') - + if self.allowed_detector: self.allowed_detector.append('OFF') - return self.allowed_detector + return self.allowed_detector def set_default_detector(self): - + self.set_default_shower() #ensure that this one is called first! - + if 'PGS' in self.available_module and self.switch['shower'] == 'Pythia6'\ and os.path.exists(pjoin(self.me_dir,'Cards','pgs_card.dat')): self.switch['detector'] = 'PGS' @@ -674,16 +674,16 @@ def set_default_detector(self): self.switch['detector'] = 'OFF' else: self.switch['detector'] = 'Not Avail.' - -# old mode to activate pgs + +# old mode to activate pgs def ans_pgs(self, value=None): """None: means that the user type 'pgs' - value: means that the user type pgs=value""" - + value: means that the user type pgs=value""" + if 'PGS' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return - + if value is None: self.set_all_off() self.switch['shower'] = 'Pythia6' @@ -696,16 +696,16 @@ def ans_pgs(self, value=None): else: logger.warning('Invalid command: pgs=%s' % value) - + # old mode to activate Delphes def ans_delphes(self, value=None): """None: means that the user type 'delphes' - value: means that the user type delphes=value""" - + value: means that the user type delphes=value""" + if 'Delphes' not in self.available_module: logger.warning('Delphes not available. Ignore commmand') return - + if value is None: self.set_all_off() if 'PY6' in self.available_module: @@ -718,15 +718,15 @@ def ans_delphes(self, value=None): elif value == 'off': self.set_switch('detector', 'OFF') else: - logger.warning('Invalid command: pgs=%s' % value) + logger.warning('Invalid command: pgs=%s' % value) def consistency_detector_shower(self,vdetector, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ - + if vdetector == 'PGS' and vshower != 'Pythia6': return 'Pythia6' if vdetector == 'Delphes' and vshower not in ['Pythia6', 'Pythia8']: @@ -744,28 +744,28 @@ def consistency_detector_shower(self,vdetector, vshower): # def get_allowed_analysis(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_analysis'): return self.allowed_analysis - + self.allowed_analysis = [] if 'ExRoot' in self.available_module: self.allowed_analysis.append('ExRoot') if 'MA4' in self.available_module: self.allowed_analysis.append('MadAnalysis4') if 'MA5' in self.available_module: - self.allowed_analysis.append('MadAnalysis5') + self.allowed_analysis.append('MadAnalysis5') if 'Rivet' in self.available_module: - self.allowed_analysis.append('Rivet') - + self.allowed_analysis.append('Rivet') + if self.allowed_analysis: self.allowed_analysis.append('OFF') - + return 
self.allowed_analysis - + def check_analysis(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_analysis(): return True if value.lower() in ['ma4', 'madanalysis4', 'madanalysis_4','4']: @@ -786,30 +786,30 @@ def consistency_shower_analysis(self, vshower, vanalysis): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'OFF' #new value for analysis - + return None - + def consistency_analysis_shower(self, vanalysis, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'Pythia8' #new value for analysis - + return None def set_default_analysis(self): """initialise the switch for analysis""" - + if 'MA4' in self.available_module and \ os.path.exists(pjoin(self.me_dir,'Cards','plot_card.dat')): self.switch['analysis'] = 'MadAnalysis4' @@ -818,46 +818,46 @@ def set_default_analysis(self): or os.path.exists(pjoin(self.me_dir,'Cards', 'madanalysis5_hadron_card.dat'))): self.switch['analysis'] = 'MadAnalysis5' elif 'ExRoot' in self.available_module: - self.switch['analysis'] = 'ExRoot' - elif self.get_allowed_analysis(): + self.switch['analysis'] = 'ExRoot' + elif self.get_allowed_analysis(): self.switch['analysis'] = 'OFF' else: self.switch['analysis'] = 'Not Avail.' 
- + # # MADSPIN handling # def get_allowed_madspin(self): """ ON|OFF|onshell """ - + if hasattr(self, 'allowed_madspin'): return self.allowed_madspin - + self.allowed_madspin = [] if 'MadSpin' in self.available_module: self.allowed_madspin = ['OFF',"ON",'onshell',"full"] return self.allowed_madspin - + def check_value_madspin(self, value): """handle alias and valid option not present in get_allowed_madspin""" - + if value.upper() in self.get_allowed_madspin(): return True elif value.lower() in self.get_allowed_madspin(): return True - + if 'MadSpin' not in self.available_module: return False - + if value.lower() in ['madspin', 'full']: return 'full' elif value.lower() in ['none']: return 'none' - - + + def set_default_madspin(self): """initialise the switch for madspin""" - + if 'MadSpin' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): self.switch['madspin'] = 'ON' @@ -865,10 +865,10 @@ def set_default_madspin(self): self.switch['madspin'] = 'OFF' else: self.switch['madspin'] = 'Not Avail.' 
- + def get_cardcmd_for_madspin(self, value): """set some command to run before allowing the user to modify the cards.""" - + if value == 'onshell': return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"] elif value in ['full', 'madspin']: @@ -877,36 +877,36 @@ def get_cardcmd_for_madspin(self, value): return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"] else: return [] - + # # ReWeight handling # def get_allowed_reweight(self): """ return the list of valid option for reweight=XXX """ - + if hasattr(self, 'allowed_reweight'): return getattr(self, 'allowed_reweight') - + if 'reweight' not in self.available_module: self.allowed_reweight = [] return self.allowed_reweight = ['OFF', 'ON'] - + # check for plugin mode plugin_path = self.mother_interface.plugin_path opts = misc.from_plugin_import(plugin_path, 'new_reweight', warning=False) self.allowed_reweight += opts - + def set_default_reweight(self): """initialise the switch for reweight""" - + if 'reweight' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')): self.switch['reweight'] = 'ON' else: self.switch['reweight'] = 'OFF' else: - self.switch['reweight'] = 'Not Avail.' + self.switch['reweight'] = 'Not Avail.' 
#=============================================================================== # CheckValidForCmd @@ -916,14 +916,14 @@ class CheckValidForCmd(object): def check_banner_run(self, args): """check the validity of line""" - + if len(args) == 0: self.help_banner_run() raise self.InvalidCmd('banner_run requires at least one argument.') - + tag = [a[6:] for a in args if a.startswith('--tag=')] - - + + if os.path.exists(args[0]): type ='banner' format = self.detect_card_type(args[0]) @@ -931,7 +931,7 @@ def check_banner_run(self, args): raise self.InvalidCmd('The file is not a valid banner.') elif tag: args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) + (args[0], tag)) if not os.path.exists(args[0]): raise self.InvalidCmd('No banner associates to this name and tag.') else: @@ -939,7 +939,7 @@ def check_banner_run(self, args): type = 'run' banners = misc.glob('*_banner.txt', pjoin(self.me_dir,'Events', args[0])) if not banners: - raise self.InvalidCmd('No banner associates to this name.') + raise self.InvalidCmd('No banner associates to this name.') elif len(banners) == 1: args[0] = banners[0] else: @@ -947,8 +947,8 @@ def check_banner_run(self, args): tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners] tag = self.ask('which tag do you want to use?', tags[0], tags) args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) - + (args[0], tag)) + run_name = [arg[7:] for arg in args if arg.startswith('--name=')] if run_name: try: @@ -970,14 +970,14 @@ def check_banner_run(self, args): except Exception: pass self.set_run_name(name) - + def check_history(self, args): """check the validity of line""" - + if len(args) > 1: self.help_history() raise self.InvalidCmd('\"history\" command takes at most one argument') - + if not len(args): return elif args[0] != 'clean': @@ -985,16 +985,16 @@ def check_history(self, args): if dirpath and not os.path.exists(dirpath) or \ os.path.isdir(args[0]): raise 
self.InvalidCmd("invalid path %s " % dirpath) - + def check_save(self, args): """ check the validity of the line""" - + if len(args) == 0: args.append('options') if args[0] not in self._save_opts: raise self.InvalidCmd('wrong \"save\" format') - + if args[0] != 'options' and len(args) != 2: self.help_save() raise self.InvalidCmd('wrong \"save\" format') @@ -1003,7 +1003,7 @@ def check_save(self, args): if not os.path.exists(basename): raise self.InvalidCmd('%s is not a valid path, please retry' % \ args[1]) - + if args[0] == 'options': has_path = None for arg in args[1:]: @@ -1024,9 +1024,9 @@ def check_save(self, args): has_path = True if not has_path: if '--auto' in arg and self.options['mg5_path']: - args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) + args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) else: - args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) + args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) def check_set(self, args): """ check the validity of the line""" @@ -1039,20 +1039,20 @@ def check_set(self, args): self.help_set() raise self.InvalidCmd('Possible options for set are %s' % \ self._set_options) - + if args[0] in ['stdout_level']: if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \ and not args[1].isdigit(): raise self.InvalidCmd('output_level needs ' + \ - 'a valid level') - + 'a valid level') + if args[0] in ['timeout']: if not args[1].isdigit(): - raise self.InvalidCmd('timeout values should be a integer') - + raise self.InvalidCmd('timeout values should be a integer') + def check_open(self, args): """ check the validity of the line """ - + if len(args) != 1: self.help_open() raise self.InvalidCmd('OPEN command requires exactly one argument') @@ -1069,7 +1069,7 @@ def check_open(self, args): raise self.InvalidCmd('No MadEvent path defined. 
Unable to associate this name to a file') else: return True - + path = self.me_dir if os.path.isfile(os.path.join(path,args[0])): args[0] = os.path.join(path,args[0]) @@ -1078,7 +1078,7 @@ def check_open(self, args): elif os.path.isfile(os.path.join(path,'HTML',args[0])): args[0] = os.path.join(path,'HTML',args[0]) # special for card with _default define: copy the default and open it - elif '_card.dat' in args[0]: + elif '_card.dat' in args[0]: name = args[0].replace('_card.dat','_card_default.dat') if os.path.isfile(os.path.join(path,'Cards', name)): files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0])) @@ -1086,13 +1086,13 @@ def check_open(self, args): else: raise self.InvalidCmd('No default path for this file') elif not os.path.isfile(args[0]): - raise self.InvalidCmd('No default path for this file') - + raise self.InvalidCmd('No default path for this file') + def check_initMadLoop(self, args): """ check initMadLoop command arguments are valid.""" - + opt = {'refresh': False, 'nPS': None, 'force': False} - + for arg in args: if arg in ['-r','--refresh']: opt['refresh'] = True @@ -1105,14 +1105,14 @@ def check_initMadLoop(self, args): except ValueError: raise InvalidCmd("The number of attempts specified "+ "'%s' is not a valid integer."%n_attempts) - + return opt - + def check_treatcards(self, args): """check that treatcards arguments are valid [param|run|all] [--output_dir=] [--param_card=] [--run_card=] """ - + opt = {'output_dir':pjoin(self.me_dir,'Source'), 'param_card':pjoin(self.me_dir,'Cards','param_card.dat'), 'run_card':pjoin(self.me_dir,'Cards','run_card.dat'), @@ -1129,14 +1129,14 @@ def check_treatcards(self, args): if os.path.isfile(value): card_name = self.detect_card_type(value) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' % (card_name, key)) opt[key] = value elif 
os.path.isfile(pjoin(self.me_dir,value)): card_name = self.detect_card_type(pjoin(self.me_dir,value)) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' - % (card_name, key)) + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + % (card_name, key)) opt[key] = value else: raise self.InvalidCmd('No such file: %s ' % value) @@ -1154,14 +1154,14 @@ def check_treatcards(self, args): else: self.help_treatcards() raise self.InvalidCmd('Unvalid argument %s' % arg) - - return mode, opt - - + + return mode, opt + + def check_survey(self, args, cmd='survey'): """check that the argument for survey are valid""" - - + + self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) @@ -1183,41 +1183,41 @@ def check_survey(self, args, cmd='survey'): self.help_survey() raise self.InvalidCmd('Too many argument for %s command' % cmd) elif not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], None,'parton', True) args.pop(0) - + return True def check_generate_events(self, args): """check that the argument for generate_events are valid""" - + run = None if args and args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['auto','parton', 'pythia', 'pgs', 'delphes']: self.help_generate_events() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. 
Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + #if len(args) > 1: # self.help_generate_events() # raise self.InvalidCmd('Too many argument for generate_events command: %s' % cmd) - + return run def check_calculate_decay_widths(self, args): """check that the argument for calculate_decay_widths are valid""" - + if self.ninitial != 1: raise self.InvalidCmd('Can only calculate decay widths for decay processes A > B C ...') @@ -1232,7 +1232,7 @@ def check_calculate_decay_widths(self, args): if len(args) > 1: self.help_calculate_decay_widths() raise self.InvalidCmd('Too many argument for calculate_decay_widths command: %s' % cmd) - + return accuracy @@ -1241,25 +1241,25 @@ def check_multi_run(self, args): """check that the argument for survey are valid""" run = None - + if not len(args): self.help_multi_run() raise self.InvalidCmd("""multi_run command requires at least one argument for the number of times that it call generate_events command""") - + if args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['parton', 'pythia', 'pgs', 'delphes']: self.help_multi_run() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. 
To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + elif not args[0].isdigit(): self.help_multi_run() @@ -1267,7 +1267,7 @@ def check_multi_run(self, args): #pass nb run to an integer nb_run = args.pop(0) args.insert(0, int(nb_run)) - + return run @@ -1284,7 +1284,7 @@ def check_refine(self, args): self.help_refine() raise self.InvalidCmd('require_precision argument is require for refine cmd') - + if not self.run_name: if self.results.lastrun: self.set_run_name(self.results.lastrun) @@ -1296,17 +1296,17 @@ def check_refine(self, args): else: try: [float(arg) for arg in args] - except ValueError: - self.help_refine() + except ValueError: + self.help_refine() raise self.InvalidCmd('refine arguments are suppose to be number') - + return True - + def check_combine_events(self, arg): """ Check the argument for the combine events command """ - + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] elif not self.run_tag: @@ -1314,53 +1314,53 @@ def check_combine_events(self, arg): else: tag = self.run_tag self.run_tag = tag - + if len(arg) > 1: self.help_combine_events() raise self.InvalidCmd('Too many argument for combine_events command') - + if len(arg) == 1: self.set_run_name(arg[0], self.run_tag, 'parton', True) - + if not self.run_name: if not self.results.lastrun: raise self.InvalidCmd('No run_name currently define. 
Unable to run combine') else: self.set_run_name(self.results.lastrun) - + return True - + def check_pythia(self, args): """Check the argument for pythia command - syntax: pythia [NAME] + syntax: pythia [NAME] Note that other option are already removed at this point """ - + mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia', 'pgs', 'delphes']: self.help_pythia() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') - + if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' raise self.InvalidCmd(error_msg) - - - + + + tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1368,8 +1368,8 @@ def check_pythia(self, args): if self.results.lastrun: args.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): @@ -1388,21 +1388,21 @@ def check_pythia(self, args): files.ln(input_file, os.path.dirname(output_file)) else: misc.gunzip(input_file, keep=True, stdout=output_file) - + args.append(mode) - + def check_pythia8(self, args): """Check the argument for pythia command - syntax: pythia8 [NAME] + syntax: pythia8 [NAME] Note that other option are already removed at this point - """ + """ mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia','pythia8','delphes']: self.help_pythia8() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') @@ -1410,7 +1410,7 @@ def check_pythia8(self, args): if not self.options['pythia8_path']: logger.info('Retry reading configuration file to find pythia8 path') self.set_configuration() - + if not self.options['pythia8_path'] or not \ os.path.exists(pjoin(self.options['pythia8_path'],'bin','pythia8-config')): error_msg = 'No valid pythia8 path set.\n' @@ -1421,7 +1421,7 @@ def check_pythia8(self, args): raise self.InvalidCmd(error_msg) tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1430,11 +1430,11 @@ def check_pythia8(self, args): args.insert(0, self.results.lastrun) else: raise self.InvalidCmd('No run name currently define. '+ - 'Please add this information.') - + 'Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ - not os.path.exists(pjoin(self.me_dir,'Events',args[0], + not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): raise self.InvalidCmd('No events file corresponding to %s run. 
' % args[0]) @@ -1451,9 +1451,9 @@ def check_pythia8(self, args): else: raise self.InvalidCmd('No event file corresponding to %s run. ' % self.run_name) - + args.append(mode) - + def check_remove(self, args): """Check that the remove command is valid""" @@ -1484,33 +1484,33 @@ def check_plot(self, args): madir = self.options['madanalysis_path'] td = self.options['td_path'] - + if not madir or not td: logger.info('Retry to read configuration file to find madanalysis/td') self.set_configuration() madir = self.options['madanalysis_path'] - td = self.options['td_path'] - + td = self.options['td_path'] + if not madir: error_msg = 'No valid MadAnalysis path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) + raise self.InvalidCmd(error_msg) if not td: error_msg = 'No valid td path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') + raise self.InvalidCmd('No run name currently define. Please add this information.') args.append('all') return - + if args[0] not in self._plot_mode: self.set_run_name(args[0], level='plot') del args[0] @@ -1518,45 +1518,45 @@ def check_plot(self, args): args.append('all') elif not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + for arg in args: if arg not in self._plot_mode and arg != self.run_name: self.help_plot() - raise self.InvalidCmd('unknown options %s' % arg) - + raise self.InvalidCmd('unknown options %s' % arg) + def check_syscalc(self, args): """Check the argument for the syscalc command syscalc run_name modes""" scdir = self.options['syscalc_path'] - + if not scdir: logger.info('Retry to read configuration file to find SysCalc') self.set_configuration() scdir = self.options['syscalc_path'] - + if not scdir: error_msg = 'No valid SysCalc path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' error_msg += 'Please note that you need to compile SysCalc first.' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. Please add this information.') args.append('all') return #deal options tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] - + if args[0] not in self._syscalc_mode: self.set_run_name(args[0], tag=tag, level='syscalc') del args[0] @@ -1564,61 +1564,61 @@ def check_syscalc(self, args): args.append('all') elif not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. 
Please add this information.') elif tag and tag != self.run_tag: self.set_run_name(self.run_name, tag=tag, level='syscalc') - + for arg in args: if arg not in self._syscalc_mode and arg != self.run_name: self.help_syscalc() - raise self.InvalidCmd('unknown options %s' % arg) + raise self.InvalidCmd('unknown options %s' % arg) if self.run_card['use_syst'] not in self.true: raise self.InvalidCmd('Run %s does not include ' % self.run_name + \ 'systematics information needed for syscalc.') - - + + def check_pgs(self, arg, no_default=False): """Check the argument for pythia command - syntax is "pgs [NAME]" + syntax is "pgs [NAME]" Note that other option are already remove at this point """ - + # If not pythia-pgs path if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] - - + + if len(arg) == 0 and not self.run_name: if self.results.lastrun: arg.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(arg) == 1 and self.run_name == arg[0]: arg.pop(0) - + if not len(arg) and \ not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): if not no_default: self.help_pgs() raise self.InvalidCmd('''No file file pythia_events.hep currently available Please specify a valid run_name''') - - lock = None + + lock = None if len(arg) == 1: prev_tag = self.set_run_name(arg[0], tag, 'pgs') if not os.path.exists(pjoin(self.me_dir,'Events',self.run_name,'%s_pythia_events.hep.gz' % prev_tag)): @@ -1626,25 +1626,25 @@ def check_pgs(self, arg, no_default=False): else: input_file = pjoin(self.me_dir,'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag) output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep') - lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), + lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), argument=['-c', input_file]) else: - if tag: + if tag: self.run_card['run_tag'] = tag self.set_run_name(self.run_name, tag, 'pgs') - - return lock + + return lock def check_display(self, args): """check the validity of line syntax is "display XXXXX" """ - + if len(args) < 1 or args[0] not in self._display_opts: self.help_display() raise self.InvalidCmd - + if args[0] == 'variable' and len(args) !=2: raise self.InvalidCmd('variable need a variable name') @@ -1654,39 +1654,39 @@ def check_display(self, args): def check_import(self, args): """check the validity of line""" - + if not args: self.help_import() raise self.InvalidCmd('wrong \"import\" format') - + if args[0] != 'command': args.insert(0,'command') - - + + if not len(args) == 2 or not os.path.exists(args[1]): raise self.InvalidCmd('PATH is mandatory for import command\n') - + #=============================================================================== # CompleteForCmd #=============================================================================== class CompleteForCmd(CheckValidForCmd): """ The Series of help 
routine for the MadGraphCmd""" - - + + def complete_banner_run(self, text, line, begidx, endidx, formatting=True): "Complete the banner run command" try: - - + + args = self.split_arg(line[0:begidx], error=False) - + if args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args \ - if a.endswith(os.path.sep)])) - - + if a.endswith(os.path.sep)])) + + if len(args) > 1: # only options are possible tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events' , args[1])) @@ -1697,9 +1697,9 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): else: return self.list_completion(text, tags) return self.list_completion(text, tags +['--name=','-f'], line) - + # First argument - possibilites = {} + possibilites = {} comp = self.path_completion(text, os.path.join('.',*[a for a in args \ if a.endswith(os.path.sep)])) @@ -1711,10 +1711,10 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): run_list = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) run_list = [n.rsplit('/',2)[1] for n in run_list] possibilites['RUN Name'] = self.list_completion(text, run_list) - + return self.deal_multiple_categories(possibilites, formatting) - - + + except Exception as error: print(error) @@ -1732,12 +1732,12 @@ def complete_history(self, text, line, begidx, endidx): if len(args) == 1: return self.path_completion(text) - - def complete_open(self, text, line, begidx, endidx): + + def complete_open(self, text, line, begidx, endidx): """ complete the open command """ args = self.split_arg(line[0:begidx]) - + # Directory continuation if os.path.sep in args[-1] + text: return self.path_completion(text, @@ -1751,10 +1751,10 @@ def complete_open(self, text, line, begidx, endidx): if os.path.isfile(os.path.join(path,'README')): possibility.append('README') if os.path.isdir(os.path.join(path,'Cards')): - possibility += [f for f in os.listdir(os.path.join(path,'Cards')) + 
possibility += [f for f in os.listdir(os.path.join(path,'Cards')) if f.endswith('.dat')] if os.path.isdir(os.path.join(path,'HTML')): - possibility += [f for f in os.listdir(os.path.join(path,'HTML')) + possibility += [f for f in os.listdir(os.path.join(path,'HTML')) if f.endswith('.html') and 'default' not in f] else: possibility.extend(['./','../']) @@ -1763,7 +1763,7 @@ def complete_open(self, text, line, begidx, endidx): if os.path.exists('MG5_debug'): possibility.append('MG5_debug') return self.list_completion(text, possibility) - + def complete_set(self, text, line, begidx, endidx): "Complete the set command" @@ -1784,27 +1784,27 @@ def complete_set(self, text, line, begidx, endidx): elif len(args) >2 and args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args if a.endswith(os.path.sep)]), - only_dirs = True) - + only_dirs = True) + def complete_survey(self, text, line, begidx, endidx): """ Complete the survey command """ - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + return self.list_completion(text, self._run_options, line) - + complete_refine = complete_survey complete_combine_events = complete_survey complite_store = complete_survey complete_generate_events = complete_survey complete_create_gridpack = complete_survey - + def complete_generate_events(self, text, line, begidx, endidx): """ Complete the generate events""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() @@ -1813,17 +1813,17 @@ def complete_generate_events(self, text, line, begidx, endidx): return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) def 
complete_initMadLoop(self, text, line, begidx, endidx): "Complete the initMadLoop command" - + numbers = [str(i) for i in range(10)] opts = ['-f','-r','--nPS='] - + args = self.split_arg(line[0:begidx], error=False) if len(line) >=6 and line[begidx-6:begidx]=='--nPS=': return self.list_completion(text, numbers, line) @@ -1840,18 +1840,18 @@ def complete_launch(self, *args, **opts): def complete_calculate_decay_widths(self, text, line, begidx, endidx): """ Complete the calculate_decay_widths command""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + opts = self._run_options + self._calculate_decay_options return self.list_completion(text, opts, line) - + def complete_display(self, text, line, begidx, endidx): - """ Complete the display command""" - + """ Complete the display command""" + args = self.split_arg(line[0:begidx], error=False) if len(args) >= 2 and args[1] =='results': start = line.find('results') @@ -1860,44 +1860,44 @@ def complete_display(self, text, line, begidx, endidx): def complete_multi_run(self, text, line, begidx, endidx): """complete multi run command""" - + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: data = [str(i) for i in range(0,20)] return self.list_completion(text, data, line) - + if line.endswith('run=') and not text: return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - - - + + + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - + def complete_plot(self, text, line, begidx, endidx): """ Complete 
the plot command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1: return self.list_completion(text, self._plot_mode) else: return self.list_completion(text, self._plot_mode + list(self.results.keys())) - + def complete_syscalc(self, text, line, begidx, endidx, formatting=True): """ Complete the syscalc command """ - + output = {} args = self.split_arg(line[0:begidx], error=False) - + if len(args) <=1: output['RUN_NAME'] = self.list_completion(list(self.results.keys())) output['MODE'] = self.list_completion(text, self._syscalc_mode) @@ -1907,12 +1907,12 @@ def complete_syscalc(self, text, line, begidx, endidx, formatting=True): if run in self.results: tags = ['--tag=%s' % tag['tag'] for tag in self.results[run]] output['options'] += tags - + return self.deal_multiple_categories(output, formatting) - + def complete_remove(self, text, line, begidx, endidx): """Complete the remove command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1 and (text.startswith('--t')): run = args[1] @@ -1932,8 +1932,8 @@ def complete_remove(self, text, line, begidx, endidx): data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1] for n in data] return self.list_completion(text, ['all'] + data) - - + + def complete_shower(self,text, line, begidx, endidx): "Complete the shower command" args = self.split_arg(line[0:begidx], error=False) @@ -1941,7 +1941,7 @@ def complete_shower(self,text, line, begidx, endidx): return self.list_completion(text, self._interfaced_showers) elif len(args)>1 and args[1] in self._interfaced_showers: return getattr(self, 'complete_%s' % text)\ - (text, args[1],line.replace(args[0]+' ',''), + (text, args[1],line.replace(args[0]+' ',''), begidx-len(args[0])-1, endidx-len(args[0])-1) def complete_pythia8(self,text, line, begidx, endidx): @@ -1955,11 +1955,11 @@ def complete_pythia8(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = 
self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_madanalysis5_parton(self,text, line, begidx, endidx): @@ -1978,19 +1978,19 @@ def complete_madanalysis5_parton(self,text, line, begidx, endidx): else: tmp2 = self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) - return tmp1 + tmp2 + return tmp1 + tmp2 elif '--MA5_stdout_lvl=' in line and not any(arg.startswith( '--MA5_stdout_lvl=') for arg in args): - return self.list_completion(text, - ['--MA5_stdout_lvl=%s'%opt for opt in + return self.list_completion(text, + ['--MA5_stdout_lvl=%s'%opt for opt in ['logging.INFO','logging.DEBUG','logging.WARNING', 'logging.CRITICAL','90']], line) else: - return self.list_completion(text, ['-f', + return self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) def complete_pythia(self,text, line, begidx, endidx): - "Complete the pythia command" + "Complete the pythia command" args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: @@ -2001,16 +2001,16 @@ def complete_pythia(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_pgs(self,text, line, begidx, endidx): "Complete the pythia command" - args = self.split_arg(line[0:begidx], error=False) + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: 
#return valid run_name data = misc.glob(pjoin('*', '*_pythia_events.hep.gz'), pjoin(self.me_dir, 'Events')) @@ -2019,23 +2019,23 @@ def complete_pgs(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--tag=' ,'--no_default'], line) - return tmp1 + tmp2 + return tmp1 + tmp2 else: - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--tag=','--no_default'], line) - complete_delphes = complete_pgs - complete_rivet = complete_pgs + complete_delphes = complete_pgs + complete_rivet = complete_pgs #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCmd): - """The command line processor of Mad Graph""" - + """The command line processor of Mad Graph""" + LO = True # Truth values @@ -2063,7 +2063,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm cluster_mode = 0 queue = 'madgraph' nb_core = None - + next_possibility = { 'start': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]', 'calculate_decay_widths [OPTIONS]', @@ -2080,9 +2080,9 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm 'pgs': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'], 'delphes' : ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'] } - + asking_for_run = AskRun - + ############################################################################ def __init__(self, me_dir = None, options={}, *completekey, **stdin): """ add information to the cmd """ @@ -2095,16 +2095,16 @@ def __init__(self, me_dir = None, options={}, *completekey, **stdin): if self.web: os.system('touch %s' % pjoin(self.me_dir,'Online')) - 
self.load_results_db() + self.load_results_db() self.results.def_web_mode(self.web) self.Gdirs = None - + self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) self.configured = 0 # time for reading the card self._options = {} # for compatibility with extended_cmd - - + + def pass_in_web_mode(self): """configure web data""" self.web = True @@ -2113,22 +2113,22 @@ def pass_in_web_mode(self): if os.environ['MADGRAPH_BASE']: self.options['mg5_path'] = pjoin(os.environ['MADGRAPH_BASE'],'MG5') - ############################################################################ + ############################################################################ def check_output_type(self, path): """ Check that the output path is a valid madevent directory """ - + bin_path = os.path.join(path,'bin') if os.path.isfile(os.path.join(bin_path,'generate_events')): return True - else: + else: return False ############################################################################ def set_configuration(self, amcatnlo=False, final=True, **opt): - """assign all configuration variable from file + """assign all configuration variable from file loop over the different config file if config_file not define """ - - super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, + + super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, final=final, **opt) if not final: @@ -2171,24 +2171,24 @@ def set_configuration(self, amcatnlo=False, final=True, **opt): if not os.path.exists(pjoin(path, 'sys_calc')): logger.info("No valid SysCalc path found") continue - # No else since the next line reinitialize the option to the + # No else since the next line reinitialize the option to the #previous value anyway self.options[key] = os.path.realpath(path) continue else: self.options[key] = None - - + + return self.options ############################################################################ - def do_banner_run(self, line): + def do_banner_run(self, line): """Make a run from the banner file""" - + 
args = self.split_arg(line) #check the validity of the arguments - self.check_banner_run(args) - + self.check_banner_run(args) + # Remove previous cards for name in ['delphes_trigger.dat', 'delphes_card.dat', 'pgs_card.dat', 'pythia_card.dat', 'madspin_card.dat', @@ -2197,20 +2197,20 @@ def do_banner_run(self, line): os.remove(pjoin(self.me_dir, 'Cards', name)) except Exception: pass - + banner_mod.split_banner(args[0], self.me_dir, proc_card=False) - + # Check if we want to modify the run if not self.force: ans = self.ask('Do you want to modify the Cards?', 'n', ['y','n']) if ans == 'n': self.force = True - + # Call Generate events self.exec_cmd('generate_events %s %s' % (self.run_name, self.force and '-f' or '')) - - - + + + ############################################################################ def do_display(self, line, output=sys.stdout): """Display current internal status""" @@ -2223,7 +2223,7 @@ def do_display(self, line, output=sys.stdout): #return valid run_name data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1:] for n in data] - + if data: out = {} for name, tag in data: @@ -2235,11 +2235,11 @@ def do_display(self, line, output=sys.stdout): print('the runs available are:') for run_name, tags in out.items(): print(' run: %s' % run_name) - print(' tags: ', end=' ') + print(' tags: ', end=' ') print(', '.join(tags)) else: print('No run detected.') - + elif args[0] == 'options': outstr = " Run Options \n" outstr += " ----------- \n" @@ -2260,8 +2260,8 @@ def do_display(self, line, output=sys.stdout): if value == default: outstr += " %25s \t:\t%s\n" % (key,value) else: - outstr += " %25s \t:\t%s (user set)\n" % (key,value) - outstr += "\n" + outstr += " %25s \t:\t%s (user set)\n" % (key,value) + outstr += "\n" outstr += " Configuration Options \n" outstr += " --------------------- \n" for key, default in self.options_configuration.items(): @@ -2275,15 +2275,15 @@ def do_display(self, line, 
output=sys.stdout): self.do_print_results(' '.join(args[1:])) else: super(MadEventCmd, self).do_display(line, output) - + def do_save(self, line, check=True, to_keep={}): - """Not in help: Save information to file""" + """Not in help: Save information to file""" args = self.split_arg(line) # Check argument validity if check: self.check_save(args) - + if args[0] == 'options': # First look at options which should be put in MG5DIR/input to_define = {} @@ -2295,7 +2295,7 @@ def do_save(self, line, check=True, to_keep={}): for key, default in self.options_madevent.items(): if self.options[key] != self.options_madevent[key]: to_define[key] = self.options[key] - + if '--all' in args: for key, default in self.options_madgraph.items(): if self.options[key] != self.options_madgraph[key]: @@ -2312,12 +2312,12 @@ def do_save(self, line, check=True, to_keep={}): filepath = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basefile = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basedir = self.me_dir - + if to_keep: to_define = to_keep self.write_configuration(filepath, basefile, basedir, to_define) - - + + def do_edit_cards(self, line): @@ -2326,80 +2326,80 @@ def do_edit_cards(self, line): # Check argument's validity mode = self.check_generate_events(args) self.ask_run_configuration(mode) - + return ############################################################################ - + ############################################################################ def do_restart_gridpack(self, line): """ syntax restart_gridpack --precision=1.0 --restart_zero collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) - + # initialize / remove lhapdf mode #self.run_card = 
banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) #self.configure_directory() - + gensym = gen_ximprove.gensym(self) - + min_precision = 1.0 resubmit_zero=False if '--precision=' in line: s = line.index('--precision=') + len('--precision=') arg=line[s:].split(1)[0] min_precision = float(arg) - + if '--restart_zero' in line: resubmit_zero = True - - + + gensym.resubmit(min_precision, resubmit_zero) self.monitor(run_type='All jobs submitted for gridpack', html=True) #will be done during the refine (more precisely in gen_ximprove) cross, error = sum_html.make_all_html_results(self) self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(gensym.run_statistics)) - + #self.exec_cmd('combine_events', postcmd=False) #self.exec_cmd('store_events', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) self.exec_cmd('create_gridpack', postcmd=False) - - - ############################################################################ + + + ############################################################################ ############################################################################ def do_generate_events(self, line): """Main Commands: launch the full chain """ - + self.banner = None self.Gdirs = None - + args = self.split_arg(line) # Check argument's validity mode = self.check_generate_events(args) switch_mode = self.ask_run_configuration(mode, args) if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir), None, 'parton') else: self.set_run_name(args[0], None, 'parton', True) args.pop(0) - + 
self.run_generate_events(switch_mode, args) self.postprocessing() @@ -2420,8 +2420,8 @@ def postprocessing(self): def rivet_postprocessing(self, rivet_config, postprocess_RIVET, postprocess_CONTUR): - # Check number of Rivet jobs to run - run_dirs = [pjoin(self.me_dir, 'Events',run_name) + # Check number of Rivet jobs to run + run_dirs = [pjoin(self.me_dir, 'Events',run_name) for run_name in self.postprocessing_dirs] nb_rivet = len(run_dirs) @@ -2550,10 +2550,10 @@ def wait_monitoring(Idle, Running, Done): wrapper = open(pjoin(self.me_dir, "Analysis", "contur", "run_contur.sh"), "w") wrapper.write(set_env) - + wrapper.write('{0}\n'.format(contur_cmd)) wrapper.close() - + misc.call(["run_contur.sh"], cwd=(pjoin(self.me_dir, "Analysis", "contur"))) logger.info("Contur outputs are stored in {0}".format(pjoin(self.me_dir, "Analysis", "contur","conturPlot"))) @@ -2572,7 +2572,7 @@ def run_generate_events(self, switch_mode, args): self.do_set('run_mode 2') self.do_set('nb_core 1') - if self.run_card['gridpack'] in self.true: + if self.run_card['gridpack'] in self.true: # Running gridpack warmup gridpack_opts=[('accuracy', 0.01), ('points', 2000), @@ -2593,7 +2593,7 @@ def run_generate_events(self, switch_mode, args): # Regular run mode logger.info('Generating %s events with run name %s' % (self.run_card['nevents'], self.run_name)) - + self.exec_cmd('survey %s %s' % (self.run_name,' '.join(args)), postcmd=False) nb_event = self.run_card['nevents'] @@ -2601,7 +2601,7 @@ def run_generate_events(self, switch_mode, args): self.exec_cmd('refine %s' % nb_event, postcmd=False) if not float(self.results.current['cross']): # Zero cross-section. Try to guess why - text = '''Survey return zero cross section. + text = '''Survey return zero cross section. Typical reasons are the following: 1) A massive s-channel particle has a width set to zero. 
2) The pdf are zero for at least one of the initial state particles @@ -2613,17 +2613,17 @@ def run_generate_events(self, switch_mode, args): raise ZeroResult('See https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/FAQ-General-14') else: bypass_run = True - + #we can bypass the following if scan and first result is zero if not bypass_run: self.exec_cmd('refine %s --treshold=%s' % (nb_event,self.run_card['second_refine_treshold']) , postcmd=False) - + self.exec_cmd('combine_events', postcmd=False,printcmd=False) self.print_results_in_shell(self.results.current) if self.run_card['use_syst']: - if self.run_card['systematics_program'] == 'auto': + if self.run_card['systematics_program'] == 'auto': scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): to_use = 'systematics' @@ -2634,26 +2634,26 @@ def run_generate_events(self, switch_mode, args): else: logger.critical('Unvalid options for systematics_program: bypass computation of systematics variations.') to_use = 'none' - + if to_use == 'systematics': if self.run_card['systematics_arguments'] != ['']: self.exec_cmd('systematics %s %s ' % (self.run_name, - ' '.join(self.run_card['systematics_arguments'])), + ' '.join(self.run_card['systematics_arguments'])), postcmd=False, printcmd=False) else: self.exec_cmd('systematics %s --from_card' % self.run_name, - postcmd=False,printcmd=False) + postcmd=False,printcmd=False) elif to_use == 'syscalc': self.run_syscalc('parton') - - - self.create_plot('parton') - self.exec_cmd('store_events', postcmd=False) + + + self.create_plot('parton') + self.exec_cmd('store_events', postcmd=False) if self.run_card['boost_event'].strip() and self.run_card['boost_event'] != 'False': self.boost_events() - - - self.exec_cmd('reweight -from_cards', postcmd=False) + + + self.exec_cmd('reweight -from_cards', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) if self.run_card['time_of_flight']>=0: self.exec_cmd("add_time_of_flight --threshold=%s" % 
self.run_card['time_of_flight'] ,postcmd=False) @@ -2664,43 +2664,43 @@ def run_generate_events(self, switch_mode, args): self.create_root_file(input , output) self.exec_cmd('madanalysis5_parton --no_default', postcmd=False, printcmd=False) - # shower launches pgs/delphes if needed + # shower launches pgs/delphes if needed self.exec_cmd('shower --no_default', postcmd=False, printcmd=False) self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) self.exec_cmd('rivet --no_default', postcmd=False, printcmd=False) self.store_result() - - if self.allow_notification_center: - misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), - '%s: %s +- %s ' % (self.results.current['run_name'], + + if self.allow_notification_center: + misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), + '%s: %s +- %s ' % (self.results.current['run_name'], self.results.current['cross'], self.results.current['error'])) - + def boost_events(self): - + if not self.run_card['boost_event']: return - + if self.run_card['boost_event'].startswith('lambda'): if not isinstance(self, cmd.CmdShell): raise Exception("boost not allowed online") filter = eval(self.run_card['boost_event']) else: raise Exception - + path = [pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')] - + for p in path: if os.path.exists(p): event_path = p break else: raise Exception("fail to find event file for the boost") - - + + lhe = lhe_parser.EventFile(event_path) with misc.TMP_directory() as tmp_dir: output = lhe_parser.EventFile(pjoin(tmp_dir, os.path.basename(event_path)), 'w') @@ -2711,28 +2711,28 @@ def boost_events(self): event.boost(filter) #write this modify event output.write(str(event)) - output.write('\n') + output.write('\n') lhe.close() - 
files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) - - - - - + files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) + + + + + def do_initMadLoop(self,line): - """Compile and run MadLoop for a certain number of PS point so as to + """Compile and run MadLoop for a certain number of PS point so as to initialize MadLoop (setup the zero helicity and loop filter.)""" - + args = line.split() # Check argument's validity options = self.check_initMadLoop(args) - + if not options['force']: self.ask_edit_cards(['MadLoopParams.dat'], mode='fixed', plot=False) self.exec_cmd('treatcards loop --no_MadLoopInit') if options['refresh']: - for filter in misc.glob('*Filter*', + for filter in misc.glob('*Filter*', pjoin(self.me_dir,'SubProcesses','MadLoop5_resources')): logger.debug("Resetting filter '%s'."%os.path.basename(filter)) os.remove(filter) @@ -2753,14 +2753,14 @@ def do_initMadLoop(self,line): def do_launch(self, line, *args, **opt): """Main Commands: exec generate_events for 2>N and calculate_width for 1>N""" - + if self.ninitial == 1: logger.info("Note that since 2.3. 
The launch for 1>N pass in event generation\n"+ " To have the previous behavior use the calculate_decay_widths function") # self.do_calculate_decay_widths(line, *args, **opt) #else: self.do_generate_events(line, *args, **opt) - + def print_results_in_shell(self, data): """Have a nice results prints in the shell, data should be of type: gen_crossxhtml.OneTagResults""" @@ -2770,7 +2770,7 @@ def print_results_in_shell(self, data): if data['run_statistics']: globalstat = sum_html.RunStatistics() - + logger.info(" " ) logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2786,13 +2786,13 @@ def print_results_in_shell(self, data): logger.warning(globalstat.get_warning_text()) logger.info(" ") - + logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) - + total_time = int(sum(_['cumulative_timing'] for _ in data['run_statistics'].values())) if total_time > 0: logger.info(" Cumulative sequential time for this run: %s"%misc.format_time(total_time)) - + if self.ninitial == 1: logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) else: @@ -2810,18 +2810,18 @@ def print_results_in_shell(self, data): if len(split)!=3: continue scale, cross, error = split - cross_sections[float(scale)] = (float(cross), float(error)) + cross_sections[float(scale)] = (float(cross), float(error)) if len(cross_sections)>0: logger.info(' Pythia8 merged cross-sections are:') for scale in sorted(cross_sections.keys()): logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ (scale,cross_sections[scale][0],cross_sections[scale][1])) - + else: if self.ninitial == 1: logger.info(" Matched width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) else: - logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) + logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) logger.info(" 
Nb of events after matching/merging : %d" % int(data['nb_event_pythia'])) if self.run_card['use_syst'] in self.true and \ (int(self.run_card['ickkw'])==1 or self.run_card['ktdurham']>0.0 @@ -2838,9 +2838,9 @@ def print_results_in_file(self, data, path, mode='w', format='full'): data should be of type: gen_crossxhtml.OneTagResults""" if not data: return - + fsock = open(path, mode) - + if data['run_statistics']: logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2851,7 +2851,7 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if format == "full": fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ (data['run_name'],data['tag'], os.path.basename(self.me_dir))) - + if self.ninitial == 1: fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) else: @@ -2861,20 +2861,20 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if self.ninitial == 1: fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) else: - fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) + fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) fsock.write(" \n" ) elif format == "short": if mode == "w": fsock.write("# run_name tag cross error Nb_event cross_after_matching nb_event_after matching\n") - + if data['cross_pythia'] and data['nb_event_pythia']: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s %(cross_pythia)s %(nb_event_pythia)s\n" else: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s\n" fsock.write(text % data) - - ############################################################################ + + ############################################################################ def do_calculate_decay_widths(self, 
line): """Main Commands: launch decay width calculation and automatic inclusion of calculated widths and BRs in the param_card.""" @@ -2887,21 +2887,21 @@ def do_calculate_decay_widths(self, line): self.Gdirs = None if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], reload_card=True) args.pop(0) self.configure_directory() - + # Running gridpack warmup opts=[('accuracy', accuracy), # default 0.01 ('points', 1000), ('iterations',9)] logger.info('Calculating decay widths with run name %s' % self.run_name) - + self.exec_cmd('survey %s %s' % \ (self.run_name, " ".join(['--' + opt + '=' + str(val) for (opt,val) \ @@ -2910,26 +2910,26 @@ def do_calculate_decay_widths(self, line): self.refine_mode = "old" # specify how to combine event self.exec_cmd('combine_events', postcmd=False) self.exec_cmd('store_events', postcmd=False) - + self.collect_decay_widths() self.print_results_in_shell(self.results.current) - self.update_status('calculate_decay_widths done', - level='parton', makehtml=False) + self.update_status('calculate_decay_widths done', + level='parton', makehtml=False) + - ############################################################################ def collect_decay_widths(self): - """ Collect the decay widths and calculate BRs for all particles, and put - in param_card form. + """ Collect the decay widths and calculate BRs for all particles, and put + in param_card form. """ - + particle_dict = {} # store the results run_name = self.run_name # Looping over the Subprocesses for P_path in SubProcesses.get_subP(self.me_dir): ids = SubProcesses.get_subP_ids(P_path) - # due to grouping we need to compute the ratio factor for the + # due to grouping we need to compute the ratio factor for the # ungroup resutls (that we need here). 
Note that initial particles # grouping are not at the same stage as final particle grouping nb_output = len(ids) / (len(set([p[0] for p in ids]))) @@ -2940,30 +2940,30 @@ def collect_decay_widths(self): particle_dict[particles[0]].append([particles[1:], result/nb_output]) except KeyError: particle_dict[particles[0]] = [[particles[1:], result/nb_output]] - + self.update_width_in_param_card(particle_dict, initial = pjoin(self.me_dir, 'Cards', 'param_card.dat'), output=pjoin(self.me_dir, 'Events', run_name, "param_card.dat")) - + @staticmethod def update_width_in_param_card(decay_info, initial=None, output=None): # Open the param_card.dat and insert the calculated decays and BRs - + if not output: output = initial - + param_card_file = open(initial) param_card = param_card_file.read().split('\n') param_card_file.close() decay_lines = [] line_number = 0 - # Read and remove all decays from the param_card + # Read and remove all decays from the param_card while line_number < len(param_card): line = param_card[line_number] if line.lower().startswith('decay'): - # Read decay if particle in decay_info - # DECAY 6 1.455100e+00 + # Read decay if particle in decay_info + # DECAY 6 1.455100e+00 line = param_card.pop(line_number) line = line.split() particle = 0 @@ -2996,7 +2996,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): break line=param_card[line_number] if particle and particle not in decay_info: - # No decays given, only total width + # No decays given, only total width decay_info[particle] = [[[], width]] else: # Not decay line_number += 1 @@ -3004,7 +3004,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): while not param_card[-1] or param_card[-1].startswith('#'): param_card.pop(-1) - # Append calculated and read decays to the param_card + # Append calculated and read decays to the param_card param_card.append("#\n#*************************") param_card.append("# Decay widths *") 
param_card.append("#*************************") @@ -3018,7 +3018,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): param_card.append("# BR NDA ID1 ID2 ...") brs = [[(val[1]/width).real, val[0]] for val in decay_info[key] if val[1]] for val in sorted(brs, reverse=True): - param_card.append(" %e %i %s # %s" % + param_card.append(" %e %i %s # %s" % (val[0].real, len(val[1]), " ".join([str(v) for v in val[1]]), val[0] * width @@ -3031,7 +3031,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): ############################################################################ def do_multi_run(self, line): - + args = self.split_arg(line) # Check argument's validity mode = self.check_multi_run(args) @@ -3047,7 +3047,7 @@ def do_multi_run(self, line): self.check_param_card(path, run=False) #store it locally to avoid relaunch param_card_iterator, self.param_card_iterator = self.param_card_iterator, [] - + crossoversig = 0 inv_sq_err = 0 nb_event = 0 @@ -3055,8 +3055,8 @@ def do_multi_run(self, line): self.nb_refine = 0 self.exec_cmd('generate_events %s_%s -f' % (main_name, i), postcmd=False) # Update collected value - nb_event += int(self.results[self.run_name][-1]['nb_event']) - self.results.add_detail('nb_event', nb_event , run=main_name) + nb_event += int(self.results[self.run_name][-1]['nb_event']) + self.results.add_detail('nb_event', nb_event , run=main_name) cross = self.results[self.run_name][-1]['cross'] error = self.results[self.run_name][-1]['error'] + 1e-99 crossoversig+=cross/error**2 @@ -3070,7 +3070,7 @@ def do_multi_run(self, line): os.mkdir(pjoin(self.me_dir,'Events', self.run_name)) except Exception: pass - os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' + os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' % {'bin': 
self.dirbin, 'event': pjoin(self.me_dir,'Events'), 'name': self.run_name}) @@ -3084,19 +3084,19 @@ def do_multi_run(self, line): self.create_root_file('%s/unweighted_events.lhe' % self.run_name, '%s/unweighted_events.root' % self.run_name) - - path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") + + path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") self.create_plot('parton', path, pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') ) - - if not os.path.exists('%s.gz' % path): + + if not os.path.exists('%s.gz' % path): misc.gzip(path) self.update_status('', level='parton') - self.print_results_in_shell(self.results.current) - + self.print_results_in_shell(self.results.current) + cpath = pjoin(self.me_dir,'Cards','param_card.dat') if param_card_iterator: @@ -3112,21 +3112,21 @@ def do_multi_run(self, line): path = pjoin(self.me_dir, 'Events','scan_%s.txt' % scan_name) logger.info("write all cross-section results in %s" % path, '$MG:BOLD') param_card_iterator.write_summary(path) - - ############################################################################ + + ############################################################################ def do_treatcards(self, line, mode=None, opt=None): """Advanced commands: create .inc files from param_card.dat/run_card.dat""" if not mode and not opt: args = self.split_arg(line) mode, opt = self.check_treatcards(args) - + # To decide whether to refresh MadLoop's helicity filters, it is necessary # to check if the model parameters where modified or not, before doing - # anything else. + # anything else. need_MadLoopFilterUpdate = False - # Just to record what triggered the reinitialization of MadLoop for a + # Just to record what triggered the reinitialization of MadLoop for a # nice debug message. 
type_of_change = '' if not opt['forbid_MadLoopInit'] and self.proc_characteristics['loop_induced'] \ @@ -3137,10 +3137,10 @@ def do_treatcards(self, line, mode=None, opt=None): (os.path.getmtime(paramDat)-os.path.getmtime(paramInc)) > 0.0: need_MadLoopFilterUpdate = True type_of_change = 'model' - + ML_in = pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat') ML_out = pjoin(self.me_dir,"SubProcesses", - "MadLoop5_resources", "MadLoopParams.dat") + "MadLoop5_resources", "MadLoopParams.dat") if (not os.path.isfile(ML_in)) or (not os.path.isfile(ML_out)) or \ (os.path.getmtime(ML_in)-os.path.getmtime(ML_out)) > 0.0: need_MadLoopFilterUpdate = True @@ -3148,7 +3148,7 @@ def do_treatcards(self, line, mode=None, opt=None): #check if no 'Auto' are present in the file self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) - + if mode in ['param', 'all']: model = self.find_model_name() tmp_model = os.path.basename(model) @@ -3160,9 +3160,9 @@ def do_treatcards(self, line, mode=None, opt=None): check_param_card.check_valid_param_card(mg5_param) opt['param_card'] = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') else: - check_param_card.check_valid_param_card(opt['param_card']) - - logger.debug('write compile file for card: %s' % opt['param_card']) + check_param_card.check_valid_param_card(opt['param_card']) + + logger.debug('write compile file for card: %s' % opt['param_card']) param_card = check_param_card.ParamCard(opt['param_card']) outfile = pjoin(opt['output_dir'], 'param_card.inc') ident_card = pjoin(self.me_dir,'Cards','ident_card.dat') @@ -3185,10 +3185,10 @@ def do_treatcards(self, line, mode=None, opt=None): devnull.close() default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat') - need_mp = self.proc_characteristics['loop_induced'] + need_mp = self.proc_characteristics['loop_induced'] param_card.write_inc_file(outfile, ident_card, default, need_mp=need_mp) - - + + if mode in ['run', 'all']: if not hasattr(self, 'run_card'): 
run_card = banner_mod.RunCard(opt['run_card'], path=pjoin(self.me_dir, 'Cards', 'run_card.dat')) @@ -3202,7 +3202,7 @@ def do_treatcards(self, line, mode=None, opt=None): run_card['lpp2'] = 0 run_card['ebeam1'] = 0 run_card['ebeam2'] = 0 - + # Ensure that the bias parameters has all the required input from the # run_card if run_card['bias_module'].lower() not in ['dummy','none']: @@ -3219,7 +3219,7 @@ def do_treatcards(self, line, mode=None, opt=None): mandatory_file,run_card['bias_module'])) misc.copytree(run_card['bias_module'], pjoin(self.me_dir,'Source','BIAS', os.path.basename(run_card['bias_module']))) - + #check expected parameters for the module. default_bias_parameters = {} start, last = False,False @@ -3244,50 +3244,50 @@ def do_treatcards(self, line, mode=None, opt=None): for pair in line.split(','): if not pair.strip(): continue - x,y =pair.split(':') + x,y =pair.split(':') x=x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y elif ':' in line: x,y = line.split(':') x = x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y for key,value in run_card['bias_parameters'].items(): if key not in default_bias_parameters: logger.warning('%s not supported by the bias module. We discard this entry.', key) else: default_bias_parameters[key] = value - run_card['bias_parameters'] = default_bias_parameters - - - # Finally write the include file + run_card['bias_parameters'] = default_bias_parameters + + + # Finally write the include file run_card.write_include_file(opt['output_dir']) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: - self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, + self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat')) # The writing out of MadLoop filter is potentially dangerous # when running in multi-core with a central disk. 
So it is turned - # off here. If these filters were not initialized then they will + # off here. If these filters were not initialized then they will # have to be re-computed at the beginning of each run. if 'WriteOutFilters' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('WriteOutFilters'): logger.info( -"""You chose to have MadLoop writing out filters. +"""You chose to have MadLoop writing out filters. Beware that this can be dangerous for local multicore runs.""") self.MadLoopparam.set('WriteOutFilters',False, changeifuserset=False) - + # The conservative settings below for 'CTModeInit' and 'ZeroThres' # help adress issues for processes like g g > h z, and g g > h g - # where there are some helicity configuration heavily suppressed - # (by several orders of magnitude) so that the helicity filter + # where there are some helicity configuration heavily suppressed + # (by several orders of magnitude) so that the helicity filter # needs high numerical accuracy to correctly handle this spread in # magnitude. Also, because one cannot use the Born as a reference - # scale, it is better to force quadruple precision *for the + # scale, it is better to force quadruple precision *for the # initialization points only*. This avoids numerical accuracy issues # when setting up the helicity filters and does not significantly # slow down the run. @@ -3298,21 +3298,21 @@ def do_treatcards(self, line, mode=None, opt=None): # It is a bit superficial to use the level 2 which tries to numerically # map matching helicities (because of CP symmetry typically) together. -# It is useless in the context of MC over helicities and it can +# It is useless in the context of MC over helicities and it can # potentially make the helicity double checking fail. self.MadLoopparam.set('HelicityFilterLevel',1, changeifuserset=False) # To be on the safe side however, we ask for 4 consecutive matching # helicity filters. 
self.MadLoopparam.set('CheckCycle',4, changeifuserset=False) - + # For now it is tricky to have each channel performing the helicity # double check. What we will end up doing is probably some kind # of new initialization round at the beginning of each launch - # command, to reset the filters. + # command, to reset the filters. self.MadLoopparam.set('DoubleCheckHelicityFilter',False, changeifuserset=False) - + # Thanks to TIR recycling, TIR is typically much faster for Loop-induced # processes when not doing MC over helicities, so that we place OPP last. if not hasattr(self, 'run_card'): @@ -3349,7 +3349,7 @@ def do_treatcards(self, line, mode=None, opt=None): logger.warning( """You chose to also use a lorentz rotation for stability tests (see parameter NRotations_[DP|QP]). Beware that, for optimization purposes, MadEvent uses manual TIR cache clearing which is not compatible - with the lorentz rotation stability test. The number of these rotations to be used will be reset to + with the lorentz rotation stability test. The number of these rotations to be used will be reset to zero by MadLoop. You can avoid this by changing the parameter 'FORCE_ML_HELICITY_SUM' int he matrix.f files to be .TRUE. so that the sum over helicity configurations is performed within MadLoop (in which case the helicity of final state particles cannot be speicfied in the LHE file.""") @@ -3363,15 +3363,15 @@ def do_treatcards(self, line, mode=None, opt=None): # self.MadLoopparam.set('NRotations_DP',0,changeifuserset=False) # Revert to the above to be slightly less robust but twice faster. 
self.MadLoopparam.set('NRotations_DP',1,changeifuserset=False) - self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) - + self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) + # Finally, the stability tests are slightly less reliable for process - # with less or equal than 4 final state particles because the + # with less or equal than 4 final state particles because the # accessible kinematic is very limited (i.e. lorentz rotations don't # shuffle invariants numerics much). In these cases, we therefore # increase the required accuracy to 10^-7. # This is important for getting g g > z z [QCD] working with a - # ptheavy cut as low as 1 GeV. + # ptheavy cut as low as 1 GeV. if self.proc_characteristics['nexternal']<=4: if ('MLStabThres' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('MLStabThres')>1.0e-7): @@ -3381,12 +3381,12 @@ def do_treatcards(self, line, mode=None, opt=None): than four external legs, so this is not recommended (especially not for g g > z z).""") self.MadLoopparam.set('MLStabThres',1.0e-7,changeifuserset=False) else: - self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) + self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) #write the output file self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses","MadLoop5_resources", "MadLoopParams.dat")) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: # Now Update MadLoop filters if necessary (if modifications were made to # the model parameters). 
@@ -3403,12 +3403,12 @@ def do_treatcards(self, line, mode=None, opt=None): elif not opt['forbid_MadLoopInit'] and \ MadLoopInitializer.need_MadLoopInit(self.me_dir): self.exec_cmd('initMadLoop -f') - - ############################################################################ + + ############################################################################ def do_survey(self, line): """Advanced commands: launch survey for the current process """ - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) @@ -3416,7 +3416,7 @@ def do_survey(self, line): if os.path.exists(pjoin(self.me_dir,'error')): os.remove(pjoin(self.me_dir,'error')) - + self.configure_directory() # Save original random number self.random_orig = self.random @@ -3435,9 +3435,9 @@ def do_survey(self, line): P_zero_result = [] # check the number of times where they are no phase-space # File for the loop (for loop induced) - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources')) and cluster.need_transfer(self.options): - tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', + tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', 'MadLoop5_resources.tar.gz'), 'w:gz', dereference=True) tf.add(pjoin(self.me_dir,'SubProcesses','MadLoop5_resources'), arcname='MadLoop5_resources') @@ -3467,7 +3467,7 @@ def do_survey(self, line): except Exception as error: logger.debug(error) pass - + jobs, P_zero_result = ajobcreator.launch() # Check if all or only some fails if P_zero_result: @@ -3481,60 +3481,60 @@ def do_survey(self, line): self.get_Gdir() for P in P_zero_result: self.Gdirs[0][pjoin(self.me_dir,'SubProcesses',P)] = [] - + self.monitor(run_type='All jobs submitted for survey', html=True) if not self.history or 'survey' in self.history[-1] or self.ninitial ==1 or \ self.run_card['gridpack']: #will be done during the refine (more precisely in gen_ximprove) cross, error = self.make_make_all_html_results() 
self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(ajobcreator.run_statistics)) self.update_status('End survey', 'parton', makehtml=False) ############################################################################ def pass_in_difficult_integration_mode(self, rate=1): """be more secure for the integration to not miss it due to strong cut""" - + # improve survey options if default if self.opts['points'] == self._survey_options['points'][1]: self.opts['points'] = (rate+2) * self._survey_options['points'][1] if self.opts['iterations'] == self._survey_options['iterations'][1]: self.opts['iterations'] = 1 + rate + self._survey_options['iterations'][1] if self.opts['accuracy'] == self._survey_options['accuracy'][1]: - self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) - + self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) + # Modify run_config.inc in order to improve the refine conf_path = pjoin(self.me_dir, 'Source','run_config.inc') files.cp(conf_path, conf_path + '.bk') # text = open(conf_path).read() - min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) - + min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) + text = re.sub('''\(min_events = \d+\)''', '(min_events = %i )' % min_evt, text) text = re.sub('''\(max_events = \d+\)''', '(max_events = %i )' % max_evt, text) fsock = open(conf_path, 'w') fsock.write(text) fsock.close() - + # Compile for name in ['../bin/internal/gen_ximprove', 'all']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) - - - ############################################################################ + + + ############################################################################ def 
do_refine(self, line): """Advanced commands: launch survey for the current process """ - devnull = open(os.devnull, 'w') + devnull = open(os.devnull, 'w') self.nb_refine += 1 args = self.split_arg(line) treshold=None - - + + for a in args: if a.startswith('--treshold='): treshold = float(a.split('=',1)[1]) @@ -3548,8 +3548,8 @@ def do_refine(self, line): break # Check argument's validity self.check_refine(args) - - refine_opt = {'err_goal': args[0], 'split_channels': True} + + refine_opt = {'err_goal': args[0], 'split_channels': True} precision = args[0] if len(args) == 2: refine_opt['max_process']= args[1] @@ -3560,15 +3560,15 @@ def do_refine(self, line): # Update random number self.update_random() self.save_random() - + if self.cluster_mode: logger.info('Creating Jobs') self.update_status('Refine results to %s' % precision, level=None) - + self.total_jobs = 0 - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + # cleanning the previous job for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() @@ -3589,14 +3589,14 @@ def do_refine(self, line): level = 5 if value.has_warning(): level = 10 - logger.log(level, + logger.log(level, value.nice_output(str('/'.join([key[0],'G%s'%key[1]]))). replace(' statistics','')) logger.debug(globalstat.nice_output('combined', no_warning=True)) - + if survey_statistics: x_improve.run_statistics = survey_statistics - + x_improve.launch() # create the ajob for the refinment. 
if not self.history or 'refine' not in self.history[-1]: cross, error = x_improve.update_html() #update html results for survey @@ -3610,9 +3610,9 @@ def do_refine(self, line): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) - + if os.path.exists(pjoin(Pdir, 'ajob1')): cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py @@ -3629,7 +3629,7 @@ def do_refine(self, line): ###self.compile(['all'], cwd=Pdir) alljobs = misc.glob('ajob*', Pdir) - + #remove associated results.dat (ensure to not mix with all data) Gre = re.compile("\s*j=(G[\d\.\w]+)") for job in alljobs: @@ -3637,49 +3637,49 @@ def do_refine(self, line): for Gdir in Gdirs: if os.path.exists(pjoin(Pdir, Gdir, 'results.dat')): os.remove(pjoin(Pdir, Gdir,'results.dat')) - - nb_tot = len(alljobs) + + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), - run_type='Refine number %s on %s (%s/%s)' % + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) - self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, html=True) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except Exception: pass - + if isinstance(x_improve, gen_ximprove.gen_ximprove_v4): # the merge of the events.lhe is handle in the x_improve class - # for splitted runs. (and partly in store_events). + # for splitted runs. (and partly in store_events). 
combine_runs.CombineRuns(self.me_dir) self.refine_mode = "old" else: self.refine_mode = "new" - + cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - self.results.add_detail('run_statistics', + self.results.add_detail('run_statistics', dict(self.results.get_detail('run_statistics'))) self.update_status('finish refine', 'parton', makehtml=False) devnull.close() - - ############################################################################ + + ############################################################################ def do_comine_iteration(self, line): """Not in help: Combine a given iteration combine_iteration Pdir Gdir S|R step - S is for survey + S is for survey R is for refine - step is the iteration number (not very critical)""" + step is the iteration number (not very critical)""" self.set_run_name("tmp") self.configure_directory(html_opening=False) @@ -3695,12 +3695,12 @@ def do_comine_iteration(self, line): gensym.combine_iteration(Pdir, Gdir, int(step)) elif mode == "R": refine = gen_ximprove.gen_ximprove_share(self) - refine.combine_iteration(Pdir, Gdir, int(step)) - - + refine.combine_iteration(Pdir, Gdir, int(step)) - - ############################################################################ + + + + ############################################################################ def do_combine_events(self, line): """Advanced commands: Launch combine events""" start=time.time() @@ -3710,11 +3710,11 @@ def do_combine_events(self, line): self.check_combine_events(args) self.update_status('Combining Events', level='parton') - + if self.run_card['gridpack'] and isinstance(self, GridPackCmd): return GridPackCmd.do_combine_events(self, line) - + # Define The Banner tag = self.run_card['run_tag'] # Update the banner with the pythia card @@ -3727,14 +3727,14 @@ def do_combine_events(self, line): self.banner.change_seed(self.random_orig) if not os.path.exists(pjoin(self.me_dir, 
'Events', self.run_name)): os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) - self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, + self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -3751,12 +3751,12 @@ def do_combine_events(self, line): os.remove(pjoin(Gdir, 'events.lhe')) continue - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec'), result.get('xerru'), result.get('axsec') ) - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.run_card['nevents']) @@ -3765,13 +3765,13 @@ def do_combine_events(self, line): AllEvent.add(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() if len(AllEvent) == 0: - nb_event = 0 + nb_event = 0 else: nb_event = AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.run_card['nevents'], @@ -3791,22 +3791,22 @@ def do_combine_events(self, line): os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) - + if self.run_card['bias_module'].lower() not in ['dummy', 'none'] and nb_event: self.correct_bias() elif 
self.run_card['custom_fcts']: self.correct_bias() logger.info("combination of events done in %s s ", time.time()-start) - + self.to_store.append('event') - - ############################################################################ + + ############################################################################ def correct_bias(self): - """check the first event and correct the weight by the bias + """check the first event and correct the weight by the bias and correct the cross-section. - If the event do not have the bias tag it means that the bias is + If the event do not have the bias tag it means that the bias is one modifying the cross-section/shape so we have nothing to do """ @@ -3834,7 +3834,7 @@ def correct_bias(self): output.write('') output.close() lhe.close() - + # MODIFY THE BANNER i.e. INIT BLOCK # ensure information compatible with normalisation choice total_cross = sum(cross[key] for key in cross) @@ -3846,8 +3846,8 @@ def correct_bias(self): elif self.run_card['event_norm'] == 'unity': total_cross = self.results.current['cross'] * total_cross / nb_event for key in cross: - cross[key] *= total_cross / nb_event - + cross[key] *= total_cross / nb_event + bannerfile = lhe_parser.EventFile(pjoin(self.me_dir, 'Events', self.run_name, '.banner.tmp.gz'),'w') banner = banner_mod.Banner(lhe.banner) banner.modify_init_cross(cross) @@ -3862,12 +3862,12 @@ def correct_bias(self): os.remove(lhe.name) os.remove(bannerfile.name) os.remove(output.name) - - + + self.results.current['cross'] = total_cross self.results.current['error'] = 0 - - ############################################################################ + + ############################################################################ def do_store_events(self, line): """Advanced commands: Launch store events""" @@ -3883,16 +3883,16 @@ def do_store_events(self, line): if not os.path.exists(pjoin(self.me_dir, 'Events', run)): os.mkdir(pjoin(self.me_dir, 'Events', run)) if not 
os.path.exists(pjoin(self.me_dir, 'HTML', run)): - os.mkdir(pjoin(self.me_dir, 'HTML', run)) - + os.mkdir(pjoin(self.me_dir, 'HTML', run)) + # 1) Store overall process information #input = pjoin(self.me_dir, 'SubProcesses', 'results.dat') #output = pjoin(self.me_dir, 'SubProcesses', '%s_results.dat' % run) - #files.cp(input, output) + #files.cp(input, output) # 2) Treat the files present in the P directory - # Ensure that the number of events is different of 0 + # Ensure that the number of events is different of 0 if self.results.current['nb_event'] == 0 and not self.run_card['gridpack']: logger.warning("No event detected. No cleaning performed! This should allow to run:\n" + " cd Subprocesses; ../bin/internal/combine_events\n"+ @@ -3910,18 +3910,18 @@ def do_store_events(self, line): # if os.path.exists(pjoin(G_path, 'results.dat')): # input = pjoin(G_path, 'results.dat') # output = pjoin(G_path, '%s_results.dat' % run) - # files.cp(input, output) + # files.cp(input, output) #except Exception: - # continue + # continue # Store log try: if os.path.exists(pjoin(G_path, 'log.txt')): input = pjoin(G_path, 'log.txt') output = pjoin(G_path, '%s_log.txt' % run) - files.mv(input, output) + files.mv(input, output) except Exception: continue - #try: + #try: # # Grid # for name in ['ftn26']: # if os.path.exists(pjoin(G_path, name)): @@ -3930,7 +3930,7 @@ def do_store_events(self, line): # input = pjoin(G_path, name) # output = pjoin(G_path, '%s_%s' % (run,name)) # files.mv(input, output) - # misc.gzip(pjoin(G_path, output), error=None) + # misc.gzip(pjoin(G_path, output), error=None) #except Exception: # continue # Delete ftn25 to ensure reproducible runs @@ -3940,11 +3940,11 @@ def do_store_events(self, line): # 3) Update the index.html self.gen_card_html() - + # 4) Move the Files present in Events directory E_path = pjoin(self.me_dir, 'Events') O_path = pjoin(self.me_dir, 'Events', run) - + # The events file for name in ['events.lhe', 'unweighted_events.lhe']: finput = 
pjoin(E_path, name) @@ -3960,30 +3960,30 @@ def do_store_events(self, line): # os.remove(pjoin(O_path, '%s.gz' % name)) # input = pjoin(E_path, name) ## output = pjoin(O_path, name) - + self.update_status('End Parton', level='parton', makehtml=False) devnull.close() - - - ############################################################################ + + + ############################################################################ def do_create_gridpack(self, line): """Advanced commands: Create gridpack from present run""" self.update_status('Creating gridpack', level='parton') # compile gen_ximprove misc.compile(['../bin/internal/gen_ximprove'], cwd=pjoin(self.me_dir, "Source")) - + Gdir = self.get_Gdir() Pdir = set([os.path.dirname(G) for G in Gdir]) - for P in Pdir: + for P in Pdir: allG = misc.glob('G*', path=P) for G in allG: if pjoin(P, G) not in Gdir: logger.debug('removing %s', pjoin(P,G)) shutil.rmtree(pjoin(P,G)) - - + + args = self.split_arg(line) self.check_combine_events(args) if not self.run_tag: self.run_tag = 'tag_1' @@ -3996,13 +3996,13 @@ def do_create_gridpack(self, line): cwd=self.me_dir) misc.call(['./bin/internal/clean'], cwd=self.me_dir) misc.call(['./bin/internal/make_gridpack'], cwd=self.me_dir) - files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), + files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), pjoin(self.me_dir, '%s_gridpack.tar.gz' % self.run_name)) os.system("sed -i.bak \"s/\s*.true.*=.*GridRun/ .false. 
= GridRun/g\" %s/Cards/grid_card.dat" \ % self.me_dir) self.update_status('gridpack created', level='gridpack') - - ############################################################################ + + ############################################################################ def do_shower(self, line): """launch the shower""" @@ -4010,7 +4010,7 @@ def do_shower(self, line): if len(args)>1 and args[0] in self._interfaced_showers: chosen_showers = [args.pop(0)] elif '--no_default' in line: - # If '--no_default' was specified in the arguments, then only one + # If '--no_default' was specified in the arguments, then only one # shower will be run, depending on which card is present. # but we each of them are called. (each of them check if the file exists) chosen_showers = list(self._interfaced_showers) @@ -4021,9 +4021,9 @@ def do_shower(self, line): shower_priority = ['pythia8','pythia'] chosen_showers = [sorted(chosen_showers,key=lambda sh: shower_priority.index(sh) if sh in shower_priority else len(shower_priority)+1)[0]] - + for shower in chosen_showers: - self.exec_cmd('%s %s'%(shower,' '.join(args)), + self.exec_cmd('%s %s'%(shower,' '.join(args)), postcmd=False, printcmd=False) def do_madanalysis5_parton(self, line): @@ -4039,11 +4039,11 @@ def do_madanalysis5_parton(self, line): def mg5amc_py8_interface_consistency_warning(options): """ Check the consistency of the mg5amc_py8_interface installed with the current MG5 and Pythia8 versions. 
""" - + # All this is only relevant is Pythia8 is interfaced to MG5 if not options['pythia8_path']: return None - + if not options['mg5amc_py8_interface_path']: return \ """ @@ -4053,7 +4053,7 @@ def mg5amc_py8_interface_consistency_warning(options): Consider installing the MG5_aMC-PY8 interface with the following command: MG5_aMC>install mg5amc_py8_interface """ - + mg5amc_py8_interface_path = options['mg5amc_py8_interface_path'] py8_path = options['pythia8_path'] # If the specified interface path is relative, make it absolut w.r.t MGDIR if @@ -4062,7 +4062,7 @@ def mg5amc_py8_interface_consistency_warning(options): mg5amc_py8_interface_path = pjoin(MG5DIR,mg5amc_py8_interface_path) py8_path = pjoin(MG5DIR,py8_path) - # Retrieve all the on-install and current versions + # Retrieve all the on-install and current versions fsock = open(pjoin(mg5amc_py8_interface_path, 'MG5AMC_VERSION_ON_INSTALL')) MG5_version_on_install = fsock.read().replace('\n','') fsock.close() @@ -4074,7 +4074,7 @@ def mg5amc_py8_interface_consistency_warning(options): MG5_curr_version =misc.get_pkg_info()['version'] try: p = subprocess.Popen(['./get_pythia8_version.py',py8_path], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mg5amc_py8_interface_path) (out, err) = p.communicate() out = out.decode(errors='ignore').replace('\n','') @@ -4084,37 +4084,37 @@ def mg5amc_py8_interface_consistency_warning(options): float(out) except: PY8_curr_version = None - + if not MG5_version_on_install is None and not MG5_curr_version is None: if MG5_version_on_install != MG5_curr_version: return \ """ The current version of MG5_aMC (v%s) is different than the one active when - installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). + installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). 
Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(MG5_curr_version, MG5_version_on_install) - + if not PY8_version_on_install is None and not PY8_curr_version is None: if PY8_version_on_install != PY8_curr_version: return \ """ The current version of Pythia8 (v%s) is different than the one active when - installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). + installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(PY8_curr_version,PY8_version_on_install) - + return None def setup_Pythia8RunAndCard(self, PY8_Card, run_type): """ Setup the Pythia8 Run environment and card. In particular all the process and run specific parameters of the card are automatically set here. This function returns the path where HEPMC events will be output, if any.""" - + HepMC_event_output = None tag = self.run_tag - + PY8_Card.subruns[0].systemSet('Beams:LHEF',"unweighted_events.lhe.gz") hepmc_format = PY8_Card['HEPMCoutput:file'].lower() @@ -4185,7 +4185,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): misc.mkfifo(fifo_path) # Use defaultSet not to overwrite the current userSet status PY8_Card.defaultSet('HEPMCoutput:file',fifo_path) - HepMC_event_output=fifo_path + HepMC_event_output=fifo_path elif hepmc_format in ['','/dev/null','None']: logger.warning('User disabled the HepMC output of Pythia8.') HepMC_event_output = None @@ -4206,7 +4206,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): # only if it is not already user_set. 
if PY8_Card['JetMatching:qCut']==-1.0: PY8_Card.MadGraphSet('JetMatching:qCut',1.5*self.run_card['xqcut'], force=True) - + if PY8_Card['JetMatching:qCut']<(1.5*self.run_card['xqcut']): logger.error( 'The MLM merging qCut parameter you chose (%f) is less than '%PY8_Card['JetMatching:qCut']+ @@ -4233,7 +4233,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): if PY8_Card['JetMatching:qCut'] not in qCutList: qCutList.append(PY8_Card['JetMatching:qCut']) PY8_Card.MadGraphSet('SysCalc:qCutList', qCutList, force=True) - + if PY8_Card['SysCalc:qCutList']!='auto': for scale in PY8_Card['SysCalc:qCutList']: @@ -4244,7 +4244,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): "'sys_matchscale' in the run_card) is less than 1.5*xqcut, where xqcut is"+ ' the run_card parameter (=%f)\n'%self.run_card['xqcut']+ 'It would be better/safer to use a larger qCut or a smaller xqcut.') - + # Specific MLM settings # PY8 should not implement the MLM veto since the driver should do it # if merging scale variation is turned on @@ -4294,18 +4294,18 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): CKKW_cut = 'ktdurham' elif self.run_card['ptlund']>0.0 and self.run_card['ktdurham']<=0.0: PY8_Card.subruns[0].MadGraphSet('Merging:doPTLundMerging',True) - CKKW_cut = 'ptlund' + CKKW_cut = 'ptlund' else: raise InvalidCmd("*Either* the 'ptlund' or 'ktdurham' cut in "+\ " the run_card must be turned on to activate CKKW(L) merging"+ " with Pythia8, but *both* cuts cannot be turned on at the same time."+ "\n ptlund=%f, ktdurham=%f."%(self.run_card['ptlund'],self.run_card['ktdurham'])) - + # Automatically set qWeed to the CKKWL cut if not defined by the user. if PY8_Card['SysCalc:qWeed']==-1.0: PY8_Card.MadGraphSet('SysCalc:qWeed',self.run_card[CKKW_cut], force=True) - + # MadGraphSet sets the corresponding value (in system mode) # only if it is not already user_set. 
if PY8_Card['Merging:TMS']==-1.0: @@ -4319,7 +4319,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): 'The CKKWl merging scale you chose (%f) is less than '%PY8_Card['Merging:TMS']+ 'the %s cut specified in the run_card parameter (=%f).\n'%(CKKW_cut,self.run_card[CKKW_cut])+ 'It is incorrect to use a smaller CKKWl scale than the generation-level %s cut!'%CKKW_cut) - + PY8_Card.MadGraphSet('TimeShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:rapidityOrder',False) @@ -4381,7 +4381,7 @@ def do_pythia8(self, line): try: import madgraph - except ImportError: + except ImportError: import internal.histograms as histograms else: import madgraph.various.histograms as histograms @@ -4400,16 +4400,16 @@ def do_pythia8(self, line): self.check_pythia8(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) - self.check_pythia8(args) + self.check_pythia8(args) # Update the banner with the pythia card if not self.banner or len(self.banner) <=1: # Here the level keyword 'pythia' must not be changed to 'pythia8'. self.banner = banner_mod.recover_banner(self.results, 'pythia') - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1], pythia_version=8, banner=self.banner) @@ -4425,7 +4425,7 @@ def do_pythia8(self, line): #"Please use 'event_norm = average' in the run_card to avoid this problem.") - + if not self.options['mg5amc_py8_interface_path'] or not \ os.path.exists(pjoin(self.options['mg5amc_py8_interface_path'], 'MG5aMC_PY8_interface')): @@ -4444,16 +4444,16 @@ def do_pythia8(self, line): # Again here 'pythia' is just a keyword for the simulation level. 
self.update_status('\033[92mRunning Pythia8 [arXiv:1410.3012]\033[0m', 'pythia8') - - tag = self.run_tag + + tag = self.run_tag # Now write Pythia8 card # Start by reading, starting from the default one so that the 'user_set' # tag are correctly set. - PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', + PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', 'pythia8_card_default.dat')) PY8_Card.read(pjoin(self.me_dir, 'Cards', 'pythia8_card.dat'), setter='user') - + run_type = 'default' merged_run_types = ['MLM','CKKW'] if int(self.run_card['ickkw'])==1: @@ -4471,7 +4471,7 @@ def do_pythia8(self, line): cmd_card = StringIO.StringIO() PY8_Card.write(cmd_card,pjoin(self.me_dir,'Cards','pythia8_card_default.dat'), direct_pythia_input=True) - + # Now setup the preamble to make sure that everything will use the locally # installed tools (if present) even if the user did not add it to its # environment variables. @@ -4486,13 +4486,13 @@ def do_pythia8(self, line): preamble = misc.get_HEPTools_location_setter( pjoin(MG5DIR,'HEPTools'),'lib') preamble += "\n unset PYTHIA8DATA\n" - + open(pythia_cmd_card,'w').write("""! ! It is possible to run this card manually with: ! %s %s ! 
"""%(preamble+pythia_main,os.path.basename(pythia_cmd_card))+cmd_card.getvalue()) - + # launch pythia8 pythia_log = pjoin(self.me_dir , 'Events', self.run_name , '%s_pythia8.log' % tag) @@ -4504,13 +4504,13 @@ def do_pythia8(self, line): shell_exe = None if os.path.exists('/usr/bin/env'): shell_exe = '/usr/bin/env %s'%shell - else: + else: shell_exe = misc.which(shell) if not shell_exe: raise self.InvalidCmd('No s hell could be found in your environment.\n'+ "Make sure that either '%s' is in your path or that the"%shell+\ " command '/usr/bin/env %s' exists and returns a valid path."%shell) - + exe_cmd = "#!%s\n%s"%(shell_exe,' '.join( [preamble+pythia_main, os.path.basename(pythia_cmd_card)])) @@ -4528,7 +4528,7 @@ def do_pythia8(self, line): ( os.path.exists(HepMC_event_output) and \ stat.S_ISFIFO(os.stat(HepMC_event_output).st_mode)) startPY8timer = time.time() - + # Information that will be extracted from this PY8 run PY8_extracted_information={ 'sigma_m':None, 'Nacc':None, 'Ntry':None, 'cross_sections':{} } @@ -4556,7 +4556,7 @@ def do_pythia8(self, line): n_cores = max(int(self.options['cluster_size']),1) elif self.options['run_mode']==2: n_cores = max(int(self.cluster.nb_core),1) - + lhe_file_name = os.path.basename(PY8_Card.subruns[0]['Beams:LHEF']) lhe_file = lhe_parser.EventFile(pjoin(self.me_dir,'Events', self.run_name,PY8_Card.subruns[0]['Beams:LHEF'])) @@ -4574,7 +4574,7 @@ def do_pythia8(self, line): if self.options['run_mode']==2: min_n_events_per_job = 100 elif self.options['run_mode']==1: - min_n_events_per_job = 1000 + min_n_events_per_job = 1000 min_n_core = n_events//min_n_events_per_job n_cores = max(min(min_n_core,n_cores),1) @@ -4584,8 +4584,8 @@ def do_pythia8(self, line): logger.info('Follow Pythia8 shower by running the '+ 'following command (in a separate terminal):\n tail -f %s'%pythia_log) - if self.options['run_mode']==2 and self.options['nb_core']>1: - ret_code = self.cluster.launch_and_wait(wrapper_path, + if 
self.options['run_mode']==2 and self.options['nb_core']>1: + ret_code = self.cluster.launch_and_wait(wrapper_path, argument= [], stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events',self.run_name)) else: @@ -4630,10 +4630,10 @@ def do_pythia8(self, line): wrapper = open(wrapper_path,'w') if self.options['cluster_temp_path'] is None: exe_cmd = \ -"""#!%s +"""#!%s ./%s PY8Card.dat >& PY8_log.txt """ - else: + else: exe_cmd = \ """#!%s ln -s ./events_$1.lhe.gz ./events.lhe.gz @@ -4663,21 +4663,21 @@ def do_pythia8(self, line): # Set it as executable st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) - + # Split the .lhe event file, create event partition partition=[n_available_events//n_cores]*n_cores for i in range(n_available_events%n_cores): partition[i] += 1 - + # Splitting according to the total number of events requested by the user # Will be used to determine the number of events to indicate in the PY8 split cards. partition_for_PY8=[n_events//n_cores]*n_cores for i in range(n_events%n_cores): partition_for_PY8[i] += 1 - - logger.info('Splitting .lhe event file for PY8 parallelization...') - n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) - + + logger.info('Splitting .lhe event file for PY8 parallelization...') + n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) + if n_splits!=len(partition): raise MadGraph5Error('Error during lhe file splitting. Expected %d files but obtained %d.' 
%(len(partition),n_splits)) @@ -4690,7 +4690,7 @@ def do_pythia8(self, line): # Add the necessary run content shutil.move(pjoin(parallelization_dir,lhe_file.name+'_%d.lhe.gz'%split_id), pjoin(parallelization_dir,split_files[-1])) - + logger.info('Submitting Pythia8 jobs...') for i, split_file in enumerate(split_files): # We must write a PY8Card tailored for each split so as to correct the normalization @@ -4706,7 +4706,7 @@ def do_pythia8(self, line): split_PY8_Card.write(pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,'PY8Card.dat'), add_missing=False) in_files = [pjoin(parallelization_dir,os.path.basename(pythia_main)), - pjoin(parallelization_dir,'PY8Card_%d.dat'%i), + pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,split_file)] if self.options['cluster_temp_path'] is None: out_files = [] @@ -4718,35 +4718,35 @@ def do_pythia8(self, line): if os.path.basename(in_file)==split_file: ln(in_file,selected_cwd,name='events.lhe.gz') elif os.path.basename(in_file).startswith('PY8Card'): - ln(in_file,selected_cwd,name='PY8Card.dat') + ln(in_file,selected_cwd,name='PY8Card.dat') else: - ln(in_file,selected_cwd) + ln(in_file,selected_cwd) in_files = [] wrapper_path = os.path.basename(wrapper_path) else: out_files = ['split_%d.tar.gz'%i] selected_cwd = parallelization_dir - self.cluster.submit2(wrapper_path, - argument=[str(i)], cwd=selected_cwd, + self.cluster.submit2(wrapper_path, + argument=[str(i)], cwd=selected_cwd, input_files=in_files, output_files=out_files, required_output=out_files) - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return logger.info('Pythia8 shower jobs: %d Idle, %d Running, %d Done [%s]'\ %(Idle, Running, Done, misc.format_time(time.time() - startPY8timer))) self.cluster.wait(parallelization_dir,wait_monitoring) - + logger.info('Merging results from the split PY8 runs...') if self.options['cluster_temp_path']: # Decompressing the output for i, split_file in 
enumerate(split_files): misc.call(['tar','-xzf','split_%d.tar.gz'%i],cwd=parallelization_dir) os.remove(pjoin(parallelization_dir,'split_%d.tar.gz'%i)) - + # Now merge logs pythia_log_file = open(pythia_log,'w') n_added = 0 @@ -4778,7 +4778,7 @@ def wait_monitoring(Idle, Running, Done): if n_added>0: PY8_extracted_information['sigma_m'] /= float(n_added) pythia_log_file.close() - + # djr plots djr_HwU = None n_added = 0 @@ -4845,7 +4845,7 @@ def wait_monitoring(Idle, Running, Done): if not os.path.isfile(hepmc_file): continue all_hepmc_files.append(hepmc_file) - + if len(all_hepmc_files)>0: hepmc_output = pjoin(self.me_dir,'Events',self.run_name,HepMC_event_output) with misc.TMP_directory() as tmp_dir: @@ -4860,8 +4860,8 @@ def wait_monitoring(Idle, Running, Done): break header.close() tail = open(pjoin(tmp_dir,'tail.hepmc'),'w') - n_tail = 0 - + n_tail = 0 + for line in misc.reverse_readline(all_hepmc_files[-1]): if line.startswith('HepMC::'): n_tail += 1 @@ -4871,7 +4871,7 @@ def wait_monitoring(Idle, Running, Done): tail.close() if n_tail>1: raise MadGraph5Error('HEPMC files should only have one trailing command.') - ###################################################################### + ###################################################################### # This is the most efficient way of putting together HEPMC's, *BUT* # # WARNING: NEED TO RENDER THE CODE BELOW SAFE TOWARDS INJECTION # ###################################################################### @@ -4888,12 +4888,12 @@ def wait_monitoring(Idle, Running, Done): elif sys.platform == 'darwin': # sed on MAC has slightly different synthax than on os.system(' '.join(['sed','-i',"''","'%s;$d'"% - (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) - else: - # other UNIX systems + (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) + else: + # other UNIX systems os.system(' '.join(['sed','-i']+["-e '%id'"%(i+1) for i in range(n_head)]+ ["-e '$d'",hepmc_file])) - + os.system(' 
'.join(['cat',pjoin(tmp_dir,'header.hepmc')]+all_hepmc_files+ [pjoin(tmp_dir,'tail.hepmc'),'>',hepmc_output])) @@ -4915,12 +4915,12 @@ def wait_monitoring(Idle, Running, Done): 'Inclusive cross section:' not in '\n'.join(open(pythia_log,'r').readlines()[-20:]): logger.warning('Fail to produce a pythia8 output. More info in \n %s'%pythia_log) return - + # Plot for Pythia8 successful = self.create_plot('Pythia8') if not successful: logger.warning('Failed to produce Pythia8 merging plots.') - + self.to_store.append('pythia8') # Study matched cross-sections @@ -4931,7 +4931,7 @@ def wait_monitoring(Idle, Running, Done): if self.options['run_mode']==0 or (self.options['run_mode']==2 and self.options['nb_core']==1): PY8_extracted_information['sigma_m'],PY8_extracted_information['Nacc'],\ PY8_extracted_information['Ntry'] = self.parse_PY8_log_file( - pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) + pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) else: logger.warning('Pythia8 cross-section could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1. 
YYYYY') @@ -4944,8 +4944,8 @@ def wait_monitoring(Idle, Running, Done): Ntry = PY8_extracted_information['Ntry'] sigma_m = PY8_extracted_information['sigma_m'] # Compute pythia error - error = self.results[self.run_name].return_tag(self.run_tag)['error'] - try: + error = self.results[self.run_name].return_tag(self.run_tag)['error'] + try: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) except ZeroDivisionError: # Cannot compute error @@ -4966,31 +4966,31 @@ def wait_monitoring(Idle, Running, Done): else: logger.warning('Pythia8 merged cross-sections could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1.XXXXX') - PY8_extracted_information['cross_sections'] = {} - + PY8_extracted_information['cross_sections'] = {} + cross_sections = PY8_extracted_information['cross_sections'] if cross_sections: - # Filter the cross_sections specified an keep only the ones + # Filter the cross_sections specified an keep only the ones # with central parameters and a different merging scale a_float_re = '[\+|-]?\d+(\.\d*)?([EeDd][\+|-]?\d+)?' 
central_merging_re = re.compile( '^\s*Weight_MERGING\s*=\s*(?P%s)\s*$'%a_float_re, - re.IGNORECASE) + re.IGNORECASE) cross_sections = dict( (float(central_merging_re.match(xsec).group('merging')),value) - for xsec, value in cross_sections.items() if not + for xsec, value in cross_sections.items() if not central_merging_re.match(xsec) is None) central_scale = PY8_Card['JetMatching:qCut'] if \ int(self.run_card['ickkw'])==1 else PY8_Card['Merging:TMS'] if central_scale in cross_sections: self.results.add_detail('cross_pythia8', cross_sections[central_scale][0]) self.results.add_detail('error_pythia8', cross_sections[central_scale][1]) - + #logger.info('Pythia8 merged cross-sections are:') #for scale in sorted(cross_sections.keys()): # logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ - # (scale,cross_sections[scale][0],cross_sections[scale][1])) - + # (scale,cross_sections[scale][0],cross_sections[scale][1])) + xsecs_file = open(pjoin(self.me_dir,'Events',self.run_name, '%s_merged_xsecs.txt'%tag),'w') if cross_sections: @@ -5003,9 +5003,9 @@ def wait_monitoring(Idle, Running, Done): xsecs_file.write('Cross-sections could not be read from the'+\ "XML node 'xsection' of the .dat file produced by Pythia8.") xsecs_file.close() - + #Update the banner - # We add directly the pythia command card because it has the full + # We add directly the pythia command card because it has the full # information self.banner.add(pythia_cmd_card) @@ -5022,13 +5022,13 @@ def wait_monitoring(Idle, Running, Done): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + def parse_PY8_log_file(self, log_file_path): """ Parse a log file to extract number of event and cross-section. 
""" pythiare = re.compile("Les Houches User Process\(es\)\s*\d+\s*\|\s*(?P\d+)\s*(?P\d+)\s*(?P\d+)\s*\|\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") pythia_xsec_re = re.compile("Inclusive cross section\s*:\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") sigma_m, Nacc, Ntry = None, None, None - for line in misc.BackRead(log_file_path): + for line in misc.BackRead(log_file_path): info = pythiare.search(line) if not info: # Also try to obtain the cross-section and error from the final xsec line of pythia8 log @@ -5058,7 +5058,7 @@ def parse_PY8_log_file(self, log_file_path): raise self.InvalidCmd("Could not find cross-section and event number information "+\ "in Pythia8 log\n '%s'."%log_file_path) - + def extract_cross_sections_from_DJR(self,djr_output): """Extract cross-sections from a djr XML output.""" import xml.dom.minidom as minidom @@ -5075,11 +5075,11 @@ def extract_cross_sections_from_DJR(self,djr_output): [float(xsec.childNodes[0].data.split()[0]), float(xsec.childNodes[0].data.split()[1])]) for xsec in xsections) - + def do_pythia(self, line): """launch pythia""" - - + + # Check argument's validity args = self.split_arg(line) if '--no_default' in args: @@ -5089,12 +5089,12 @@ def do_pythia(self, line): args.remove('--no_default') else: no_default = False - + if not self.run_name: self.check_pythia(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) self.check_pythia(args) @@ -5102,7 +5102,7 @@ def do_pythia(self, line): logger.error('pythia-pgs require event_norm to be on sum. 
Do not run pythia6') return - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1]) if self.options['automatic_html_opening']: @@ -5114,35 +5114,35 @@ def do_pythia(self, line): self.banner = banner_mod.recover_banner(self.results, 'pythia') pythia_src = pjoin(self.options['pythia-pgs_path'],'src') - + self.results.add_detail('run_mode', 'madevent') self.update_status('Running Pythia', 'pythia') try: os.remove(pjoin(self.me_dir,'Events','pythia.done')) except Exception: - pass - + pass + ## LAUNCHING PYTHIA # check that LHAPATH is define. if not re.search(r'^\s*LHAPATH=%s/PDFsets' % pythia_src, - open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), + open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), re.M): f = open(pjoin(self.me_dir,'Cards','pythia_card.dat'),'a') f.write('\n LHAPATH=%s/PDFsets' % pythia_src) f.close() tag = self.run_tag pythia_log = pjoin(self.me_dir, 'Events', self.run_name , '%s_pythia.log' % tag) - #self.cluster.launch_and_wait('../bin/internal/run_pythia', + #self.cluster.launch_and_wait('../bin/internal/run_pythia', # argument= [pythia_src], stdout= pythia_log, # stderr=subprocess.STDOUT, # cwd=pjoin(self.me_dir,'Events')) output_files = ['pythia_events.hep'] if self.run_card['use_syst']: output_files.append('syst.dat') - if self.run_card['ickkw'] == 1: + if self.run_card['ickkw'] == 1: output_files += ['beforeveto.tree', 'xsecs.tree', 'events.tree'] - + os.environ['PDG_MASS_TBL'] = pjoin(pythia_src,'mass_width_2004.mc') self.cluster.launch_and_wait(pjoin(pythia_src, 'pythia'), input_files=[pjoin(self.me_dir, "Events", "unweighted_events.lhe"), @@ -5152,23 +5152,23 @@ def do_pythia(self, line): stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events')) - + os.remove(pjoin(self.me_dir, "Events", "unweighted_events.lhe")) if not 
os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): logger.warning('Fail to produce pythia output. More info in \n %s' % pythia_log) return - + self.to_store.append('pythia') - + # Find the matched cross-section if int(self.run_card['ickkw']): # read the line from the bottom of the file - #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, + #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, # '%s_pythia.log' % tag)) - pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") - for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, + pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") + for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, '%s_pythia.log' % tag)): info = pythiare.search(line) if not info: @@ -5188,16 +5188,16 @@ def do_pythia(self, line): self.results.add_detail('nb_event_pythia', Nacc) #compute pythia error error = self.results[self.run_name].return_tag(self.run_tag)['error'] - if Nacc: + if Nacc: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) else: error_m = 10000 * sigma_m # works both for fixed number of generated events and fixed accepted events self.results.add_detail('error_pythia', error_m) - break + break #pythia_log.close() - + pydir = pjoin(self.options['pythia-pgs_path'], 'src') eradir = self.options['exrootanalysis_path'] madir = self.options['madanalysis_path'] @@ -5216,12 +5216,12 @@ def do_pythia(self, line): # Creating LHE file self.run_hep2lhe(banner_path) - + if int(self.run_card['ickkw']): misc.gzip(pjoin(self.me_dir,'Events','beforeveto.tree'), - stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + - if self.run_card['use_syst'] in self.true: # Calculate syscalc info based on 
syst.dat try: @@ -5233,7 +5233,7 @@ def do_pythia(self, line): # Store syst.dat misc.gzip(pjoin(self.me_dir,'Events', 'syst.dat'), stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_syst.dat.gz')) - + # Store syscalc.dat if os.path.exists(pjoin(self.me_dir, 'Events', 'syscalc.dat')): filename = pjoin(self.me_dir, 'Events' ,self.run_name, @@ -5253,7 +5253,7 @@ def do_pythia(self, line): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + ################################################################################ def do_remove(self, line): @@ -5263,8 +5263,8 @@ def do_remove(self, line): run, tag, mode = self.check_remove(args) if 'banner' in mode: mode.append('all') - - + + if run == 'all': # Check first if they are not a run with a name run. if os.path.exists(pjoin(self.me_dir, 'Events', 'all')): @@ -5280,7 +5280,7 @@ def do_remove(self, line): logger.info(error) pass # run already clear return - + # Check that run exists if not os.path.exists(pjoin(self.me_dir, 'Events', run)): raise self.InvalidCmd('No run \'%s\' detected' % run) @@ -5294,7 +5294,7 @@ def do_remove(self, line): # Found the file to delete - + to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) to_delete += misc.glob('*', pjoin(self.me_dir, 'HTML', run)) # forbid the banner to be removed @@ -5314,7 +5314,7 @@ def do_remove(self, line): if os.path.exists(pjoin(self.me_dir, 'Events', run, 'unweighted_events.lhe.gz')): to_delete.append('unweighted_events.lhe.gz') if os.path.exists(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')): - to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) + to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) if nb_rm != len(to_delete): logger.warning('Be carefull that partonic information are on the point to be removed.') if 'all' in mode: @@ -5327,8 +5327,8 @@ def do_remove(self, line): if 'delphes' not 
in mode: to_delete = [f for f in to_delete if 'delphes' not in f] if 'parton' not in mode: - to_delete = [f for f in to_delete if 'delphes' in f - or 'pgs' in f + to_delete = [f for f in to_delete if 'delphes' in f + or 'pgs' in f or 'pythia' in f] if not self.force and len(to_delete): question = 'Do you want to delete the following files?\n %s' % \ @@ -5336,7 +5336,7 @@ def do_remove(self, line): ans = self.ask(question, 'y', choices=['y','n']) else: ans = 'y' - + if ans == 'y': for file2rm in to_delete: if os.path.exists(pjoin(self.me_dir, 'Events', run, file2rm)): @@ -5374,7 +5374,7 @@ def do_remove(self, line): if ans == 'y': for file2rm in to_delete: os.remove(file2rm) - + if 'banner' in mode: to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) if tag: @@ -5389,8 +5389,8 @@ def do_remove(self, line): return elif any(['banner' not in os.path.basename(p) for p in to_delete]): if to_delete: - raise MadGraph5Error('''Some output still exists for this run. - Please remove those output first. Do for example: + raise MadGraph5Error('''Some output still exists for this run. + Please remove those output first. Do for example: remove %s all banner ''' % run) else: @@ -5400,7 +5400,7 @@ def do_remove(self, line): return else: logger.info('''The banner is not removed. In order to remove it run: - remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) + remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) # update database. 
self.results.clean(mode, run, tag) @@ -5420,7 +5420,7 @@ def do_plot(self, line): logger.info('plot for run %s' % self.run_name) if not self.force: self.ask_edit_cards(['plot_card.dat'], args, plot=True) - + if any([arg in ['all','parton'] for arg in args]): filename = pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe') if os.path.exists(filename+'.gz'): @@ -5438,8 +5438,8 @@ def do_plot(self, line): except Exception: pass else: - logger.info('No valid files for partonic plot') - + logger.info('No valid files for partonic plot') + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_events.lhe' % self.run_tag) @@ -5452,10 +5452,10 @@ def do_plot(self, line): stdout= "%s.gz" % filename) else: logger.info('No valid files for pythia plot') - - + + if any([arg in ['all','pgs'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) @@ -5464,15 +5464,15 @@ def do_plot(self, line): misc.gzip(filename) else: logger.info('No valid files for pgs plot') - + if any([arg in ['all','delphes'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) if os.path.exists(filename): self.create_plot('Delphes') - misc.gzip(filename) + misc.gzip(filename) else: logger.info('No valid files for delphes plot') @@ -5488,9 +5488,9 @@ def do_syscalc(self, line): if self.ninitial == 1: logger.error('SysCalc can\'t be run for decay processes') return - + logger.info('Calculating systematics for run %s' % self.run_name) - + self.ask_edit_cards(['run_card.dat'], args, plot=False) self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) 
if any([arg in ['all','parton'] for arg in args]): @@ -5504,7 +5504,7 @@ def do_syscalc(self, line): stdout="%s.gz" % filename) else: logger.info('No valid files for parton level systematics run.') - + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_syst.dat' % self.run_tag) @@ -5525,17 +5525,17 @@ def do_syscalc(self, line): else: logger.info('No valid files for pythia level') - + def store_result(self): - """ tar the pythia results. This is done when we are quite sure that + """ tar the pythia results. This is done when we are quite sure that the pythia output will not be use anymore """ if not self.run_name: return - + if not self.to_store: - return - + return + tag = self.run_card['run_tag'] self.update_status('storing files of previous run', level=None,\ error=True) @@ -5546,14 +5546,14 @@ def store_result(self): misc.gzip(pjoin(self.me_dir,'Events',self.run_name,"unweighted_events.lhe")) if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) - + if 'pythia' in self.to_store: self.update_status('Storing Pythia files of previous run', level='pythia', error=True) p = pjoin(self.me_dir,'Events') n = self.run_name t = tag self.to_store.remove('pythia') - misc.gzip(pjoin(p,'pythia_events.hep'), + misc.gzip(pjoin(p,'pythia_events.hep'), stdout=pjoin(p, str(n),'%s_pythia_events.hep' % t),forceexternal=True) if 'pythia8' in self.to_store: @@ -5581,26 +5581,26 @@ def store_result(self): os.system("mv " + file_path + hepmc_fileformat + " " + move_hepmc_path) self.update_status('Done', level='pythia',makehtml=False,error=True) - self.results.save() - + self.results.save() + self.to_store = [] - def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, + def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, run_type='', mode=None, **opt): """ """ argument = [str(arg) for arg in argument] if mode is 
None: mode = self.cluster_mode - + # ensure that exe is executable if os.path.exists(exe) and not os.access(exe, os.X_OK): os.system('chmod +x %s ' % exe) elif (cwd and os.path.exists(pjoin(cwd, exe))) and not \ os.access(pjoin(cwd, exe), os.X_OK): os.system('chmod +x %s ' % pjoin(cwd, exe)) - + if mode == 0: - self.update_status((remaining, 1, + self.update_status((remaining, 1, self.total_jobs - remaining -1, run_type), level=None, force=False) start = time.time() #os.system('cd %s; ./%s' % (cwd,exe)) @@ -5613,24 +5613,24 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, elif mode in [1,2]: exename = os.path.basename(exe) # For condor cluster, create the input/output files - if 'ajob' in exename: + if 'ajob' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat','dname.mg', pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) - + output_files = [] required_output = [] - + #Find the correct PDF input file input_files.append(self.get_pdf_input_filename()) - + #Find the correct ajob Gre = re.compile("\s*j=(G[\d\.\w]+)") origre = re.compile("grid_directory=(G[\d\.\w]+)") - try : + try : fsock = open(exe) except Exception: fsock = open(pjoin(cwd,exe)) @@ -5648,21 +5648,21 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if os.path.isdir(pjoin(cwd,G)): input_files.append(G) required_output.append('%s/results.dat' % G) - + if origre.search(text): G_grid = origre.search(text).groups()[0] input_files.append(pjoin(G_grid, 'ftn26')) - + #submitting - self.cluster.submit2(exe, stdout=stdout, cwd=cwd, + self.cluster.submit2(exe, stdout=stdout, cwd=cwd, input_files=input_files, output_files=output_files, required_output=required_output) elif 
'survey' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5671,7 +5671,7 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [] required_output = [] - + #Find the correct ajob suffix = "_%s" % int(float(argument[0])) if suffix == '_0': @@ -5685,12 +5685,12 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if '.' in argument[0]: offset = int(str(argument[0]).split('.')[1]) else: - offset = 0 - + offset = 0 + if offset ==0 or offset == int(float(argument[0])): if os.path.exists(pjoin(cwd, G, 'input_app.txt')): os.remove(pjoin(cwd, G, 'input_app.txt')) - + if os.path.exists(os.path.realpath(pjoin(cwd, G, 'ftn25'))): if offset == 0 or offset == int(float(argument[0])): os.remove(pjoin(cwd, G, 'ftn25')) @@ -5706,16 +5706,16 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, pass #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, required_output=required_output, **opt) elif "refine_splitted.sh" in exename: input_files = ['madevent','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - 
input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5725,25 +5725,25 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [argument[0]] required_output = [] for G in output_files: - required_output.append('%s/results.dat' % G) + required_output.append('%s/results.dat' % G) input_files.append(pjoin(argument[1], "input_app.txt")) input_files.append(pjoin(argument[1], "ftn26")) - + #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, - required_output=required_output, **opt) + required_output=required_output, **opt) + + - - else: self.cluster.submit(exe, argument=argument, stdout=stdout, cwd=cwd, **opt) - + ############################################################################ def find_madevent_mode(self): """Find if Madevent is in Group mode or not""" - + # The strategy is too look in the files Source/run_configs.inc # if we found: ChanPerJob=3 then it's a group mode. 
file_path = pjoin(self.me_dir, 'Source', 'run_config.inc') @@ -5752,11 +5752,11 @@ def find_madevent_mode(self): return 'group' else: return 'v4' - + ############################################################################ def monitor(self, run_type='monitor', mode=None, html=False): """ monitor the progress of running job """ - + starttime = time.time() if mode is None: @@ -5772,8 +5772,8 @@ def monitor(self, run_type='monitor', mode=None, html=False): else: update_status = lambda idle, run, finish: None update_first = None - try: - self.cluster.wait(self.me_dir, update_status, update_first=update_first) + try: + self.cluster.wait(self.me_dir, update_status, update_first=update_first) except Exception as error: logger.info(error) if not self.force: @@ -5788,24 +5788,24 @@ def monitor(self, run_type='monitor', mode=None, html=False): raise except KeyboardInterrupt as error: self.cluster.remove() - raise - - + raise + - ############################################################################ + + ############################################################################ def configure_directory(self, html_opening=True): - """ All action require before any type of run """ + """ All action require before any type of run """ # Basic check assert os.path.exists(pjoin(self.me_dir,'SubProcesses')) # environmental variables to be included in make_opts self.make_opts_var = {} - + #see when the last file was modified time_mod = max([os.path.getmtime(pjoin(self.me_dir,'Cards','run_card.dat')), os.path.getmtime(pjoin(self.me_dir,'Cards','param_card.dat'))]) - + if self.configured >= time_mod and hasattr(self, 'random') and hasattr(self, 'run_card'): #just ensure that cluster specific are correctly handled if self.cluster: @@ -5820,7 +5820,7 @@ def configure_directory(self, html_opening=True): #open only once the web page # Change current working directory self.launching_dir = os.getcwd() - + # Check if we need the MSSM special treatment model = self.find_model_name() 
if model == 'mssm' or model.startswith('mssm-'): @@ -5828,14 +5828,14 @@ def configure_directory(self, html_opening=True): mg5_param = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') check_param_card.convert_to_mg5card(param_card, mg5_param) check_param_card.check_valid_param_card(mg5_param) - + # limit the number of event to 100k self.check_nb_events() # this is in order to avoid conflicts between runs with and without # lhapdf. not needed anymore the makefile handles it automaticallu #misc.compile(['clean4pdf'], cwd = pjoin(self.me_dir, 'Source')) - + self.make_opts_var['pdlabel1'] = '' self.make_opts_var['pdlabel2'] = '' if self.run_card['pdlabel1'] in ['eva', 'iww']: @@ -5866,7 +5866,7 @@ def configure_directory(self, html_opening=True): self.copy_lep_densities(self.run_card['pdlabel'], pjoin(self.me_dir, 'Source')) self.make_opts_var['pdlabel1'] = 'ee' self.make_opts_var['pdlabel2'] = 'ee' - + # set random number if self.run_card['iseed'] != 0: self.random = int(self.run_card['iseed']) @@ -5885,18 +5885,18 @@ def configure_directory(self, html_opening=True): break else: self.random = random.randint(1, 30107) - + #set random seed for python part of the code if self.run_card['python_seed'] == -2: #-2 means same as run_card import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] elif self.run_card['python_seed'] >= 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] if self.run_card['ickkw'] == 2: logger.info('Running with CKKW matching') self.treat_ckkw_matching() @@ -5905,12 +5905,12 @@ def configure_directory(self, html_opening=True): self.update_make_opts(self.run_card) # reset 
list of Gdirectory self.Gdirs = None - + # create param_card.inc and run_card.inc self.do_treatcards('') - + logger.info("compile Source Directory") - + # Compile for name in [ 'all']:#, '../bin/internal/combine_events']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) @@ -5933,7 +5933,7 @@ def configure_directory(self, html_opening=True): os.remove(pjoin(self.me_dir, 'lib','libbias.a')) force_subproc_clean = True - + # Finally compile the bias module as well if self.run_card['bias_module'] not in ['dummy',None]: logger.debug("Compiling the bias module '%s'"%bias_name) @@ -5945,7 +5945,7 @@ def configure_directory(self, html_opening=True): 'INVALID' in str(bias_module_valid).upper(): raise InvalidCmd("The bias module '%s' cannot be used because of:\n%s"% (bias_name,bias_module_valid)) - + self.compile(arg=[], cwd=os.path.join(self.me_dir, 'Source','BIAS',bias_name)) self.proc_characteristics['bias_module']=bias_name # Update the proc_characterstics file @@ -5954,7 +5954,7 @@ def configure_directory(self, html_opening=True): if force_subproc_clean: # Make sure that madevent will be recompiled - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] for nb_proc,subdir in enumerate(subproc): Pdir = pjoin(self.me_dir, 'SubProcesses',subdir.strip()) @@ -5971,20 +5971,20 @@ def configure_directory(self, html_opening=True): ############################################################################ @staticmethod def check_dir(path, default=''): - """check if the directory exists. if so return the path otherwise the + """check if the directory exists. 
if so return the path otherwise the default""" - + if os.path.isdir(path): return path else: return default - + ############################################################################ def get_Gdir(self, Pdir=None, symfact=None): """get the list of Gdirectory if not yet saved.""" - + if hasattr(self, "Gdirs") and self.Gdirs: if self.me_dir in self.Gdirs[0]: if Pdir is None: @@ -6000,8 +6000,8 @@ def get_Gdir(self, Pdir=None, symfact=None): Pdirs = self.get_Pdir() - Gdirs = {self.me_dir:[]} - mfactors = {} + Gdirs = {self.me_dir:[]} + mfactors = {} for P in Pdirs: Gdirs[P] = [] #for the next line do not use P, since in readonly mode it might not have symfact @@ -6012,7 +6012,7 @@ def get_Gdir(self, Pdir=None, symfact=None): mfactors[pjoin(P, "G%s" % tag)] = mfactor self.Gdirs = (Gdirs, mfactors) return self.get_Gdir(Pdir, symfact=symfact) - + ############################################################################ def set_run_name(self, name, tag=None, level='parton', reload_card=False, allow_new_tag=True): @@ -6030,8 +6030,8 @@ def get_last_tag(self, level): tagRun = self.results[self.run_name][i] if tagRun.pythia or tagRun.shower or tagRun.pythia8 : return tagRun['tag'] - - + + # when are we force to change the tag new_run:previous run requiring changes upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','madanalysis5_hadron','madanalysis5_parton', 'rivet'], 'pythia': ['pythia','pgs','delphes','madanalysis5_hadron'], @@ -6044,7 +6044,7 @@ def get_last_tag(self, level): 'syscalc':[], 'rivet':['rivet']} - if name == self.run_name: + if name == self.run_name: if reload_card: run_card = pjoin(self.me_dir, 'Cards','run_card.dat') self.run_card = banner_mod.RunCard(run_card) @@ -6064,13 +6064,13 @@ def get_last_tag(self, level): break return get_last_tag(self, level) - + # save/clean previous run if self.run_name: self.store_result() # store new name self.run_name = name - + new_tag = False # First call for this run -> set the banner self.banner = 
banner_mod.recover_banner(self.results, level, name) @@ -6079,8 +6079,8 @@ def get_last_tag(self, level): else: # Read run_card run_card = pjoin(self.me_dir, 'Cards','run_card.dat') - self.run_card = banner_mod.RunCard(run_card) - + self.run_card = banner_mod.RunCard(run_card) + if tag: self.run_card['run_tag'] = tag new_tag = True @@ -6093,7 +6093,7 @@ def get_last_tag(self, level): self.results.update('add run %s' % name, 'all', makehtml=False) else: for tag in upgrade_tag[level]: - + if getattr(self.results[self.run_name][-1], tag): # LEVEL is already define in the last tag -> need to switch tag tag = self.get_available_tag() @@ -6103,8 +6103,8 @@ def get_last_tag(self, level): if not new_tag: # We can add the results to the current run tag = self.results[self.run_name][-1]['tag'] - self.run_card['run_tag'] = tag # ensure that run_tag is correct - + self.run_card['run_tag'] = tag # ensure that run_tag is correct + if allow_new_tag and (name in self.results and not new_tag): self.results.def_current(self.run_name) else: @@ -6113,15 +6113,15 @@ def get_last_tag(self, level): self.run_tag = self.run_card['run_tag'] return get_last_tag(self, level) - - - + + + ############################################################################ def check_nb_events(self): - """Find the number of event in the run_card, and check that this is not + """Find the number of event in the run_card, and check that this is not too large""" - + nb_event = int(self.run_card['nevents']) if nb_event > 1000000: logger.warning("Attempting to generate more than 1M events") @@ -6133,20 +6133,20 @@ def check_nb_events(self): return - - ############################################################################ + + ############################################################################ def update_random(self): """ change random number""" - + self.random += 3 if self.random > 30081*30081: # can't use too big random number raise MadGraph5Error('Random seed too large ' + str(self.random) + ' 
> 30081*30081') - if self.run_card['python_seed'] == -2: + if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.random) + random.seed(self.random) random.mg_seedset = self.random - + ############################################################################ def save_random(self): """save random number in appropirate file""" @@ -6155,14 +6155,14 @@ def save_random(self): fsock.writelines('r=%s\n' % self.random) def do_quit(self, *args, **opts): - + return common_run.CommonRunCmd.do_quit(self, *args, **opts) #return CmdExtended.do_quit(self, *args, **opts) - + ############################################################################ def treat_CKKW_matching(self): """check for ckkw""" - + lpp1 = self.run_card['lpp1'] lpp2 = self.run_card['lpp2'] e1 = self.run_card['ebeam1'] @@ -6170,19 +6170,19 @@ def treat_CKKW_matching(self): pd = self.run_card['pdlabel'] lha = self.run_card['lhaid'] xq = self.run_card['xqcut'] - translation = {'e1': e1, 'e2':e2, 'pd':pd, + translation = {'e1': e1, 'e2':e2, 'pd':pd, 'lha':lha, 'xq':xq} if lpp1 or lpp2: - # Remove ':s from pd + # Remove ':s from pd if pd.startswith("'"): pd = pd[1:] if pd.endswith("'"): - pd = pd[:-1] + pd = pd[:-1] if xq >2 or xq ==2: xq = 2 - + # find data file if pd == "lhapdf": issudfile = 'lib/issudgrid-%(e1)s-%(e2)s-%(pd)s-%(lha)s-%(xq)s.dat.gz' @@ -6192,9 +6192,9 @@ def treat_CKKW_matching(self): issudfile = pjoin(self.webbin, issudfile % translation) else: issudfile = pjoin(self.me_dir, issudfile % translation) - + logger.info('Sudakov grid file: %s' % issudfile) - + # check that filepath exists if os.path.exists(issudfile): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') @@ -6203,20 +6203,20 @@ def treat_CKKW_matching(self): msg = 'No sudakov grid file for parameter choice. Start to generate it. 
This might take a while' logger.info(msg) self.update_status('GENERATE SUDAKOV GRID', level='parton') - + for i in range(-2,6): - self.cluster.submit('%s/gensudgrid ' % self.dirbin, + self.cluster.submit('%s/gensudgrid ' % self.dirbin, argument = ['%d'%i], - cwd=self.me_dir, + cwd=self.me_dir, stdout=open(pjoin(self.me_dir, 'gensudgrid%s.log' % i),'w')) self.monitor() for i in range(-2,6): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') os.system('cat %s/gensudgrid%s.log >> %s' % (self.me_dir, path)) misc.gzip(path, stdout=issudfile) - + ############################################################################ - def create_root_file(self, input='unweighted_events.lhe', + def create_root_file(self, input='unweighted_events.lhe', output='unweighted_events.root' ): """create the LHE root file """ self.update_status('Creating root files', level='parton') @@ -6233,14 +6233,14 @@ def create_root_file(self, input='unweighted_events.lhe', totar = False torm = True input = input[:-3] - + try: - misc.call(['%s/ExRootLHEFConverter' % eradir, + misc.call(['%s/ExRootLHEFConverter' % eradir, input, output], cwd=pjoin(self.me_dir, 'Events')) except Exception: logger.warning('fail to produce Root output [problem with ExRootAnalysis]') - + if totar: if os.path.exists('%s.gz' % input): try: @@ -6251,13 +6251,13 @@ def create_root_file(self, input='unweighted_events.lhe', misc.gzip(input) if torm: os.remove(input) - + def run_syscalc(self, mode='parton', event_path=None, output=None): - """create the syscalc output""" + """create the syscalc output""" if self.run_card['use_syst'] not in self.true: return - + scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): return @@ -6265,12 +6265,12 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): if self.run_card['event_norm'] != 'sum': logger.critical('SysCalc works only when event_norm is on \'sum\'.') return - logger.info('running SysCalc on mode %s' % mode) - + logger.info('running 
SysCalc on mode %s' % mode) + # Restore the old default for SysCalc+PY6 if self.run_card['sys_matchscale']=='auto': self.run_card['sys_matchscale'] = "30 50" - + # Check that all pdfset are correctly installed lhaid = [self.run_card.get_lhapdf_id()] if '&&' in self.run_card['sys_pdf']: @@ -6285,20 +6285,20 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.debug(str(error)) logger.warning('Systematic computation requires lhapdf to run. Bypass SysCalc') return - + # Copy all the relevant PDF sets [self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid] - + to_syscalc={'sys_scalefact': self.run_card['sys_scalefact'], 'sys_alpsfact': self.run_card['sys_alpsfact'], 'sys_matchscale': self.run_card['sys_matchscale'], 'sys_scalecorrelation': self.run_card['sys_scalecorrelation'], 'sys_pdf': self.run_card['sys_pdf']} - - tag = self.run_card['run_tag'] + + tag = self.run_card['run_tag'] card = pjoin(self.me_dir, 'bin','internal', 'syscalc_card.dat') template = open(pjoin(self.me_dir, 'bin','internal', 'syscalc_template.dat')).read() - + if '&&' in to_syscalc['sys_pdf']: to_syscalc['sys_pdf'] = to_syscalc['sys_pdf'].split('#',1)[0].replace('&&',' \n ') else: @@ -6311,8 +6311,8 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): new.append(d) else: new[-1] += ' %s' % d - to_syscalc['sys_pdf'] = '\n'.join(new) - + to_syscalc['sys_pdf'] = '\n'.join(new) + if to_syscalc['sys_pdf'].lower() in ['', 'f', 'false', 'none', '.false.']: to_syscalc['sys_pdf'] = '' if to_syscalc['sys_alpsfact'].lower() in ['', 'f', 'false', 'none','.false.']: @@ -6320,17 +6320,17 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): - + # check if the scalecorrelation parameter is define: if not 'sys_scalecorrelation' in self.run_card: self.run_card['sys_scalecorrelation'] = -1 open(card,'w').write(template % self.run_card) - + if not os.path.exists(card): return False - - + + event_dir = pjoin(self.me_dir, 'Events') if not 
event_path: @@ -6353,19 +6353,19 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): raise SysCalcError('qcut value for sys_matchscale lower than qcut in pythia_card. Bypass syscalc') if float(value) < xqcut: raise SysCalcError('qcut value for sys_matchscale lower than xqcut in run_card. Bypass syscalc') - - + + event_path = pjoin(event_dir,'syst.dat') output = pjoin(event_dir, 'syscalc.dat') else: raise self.InvalidCmd('Invalid mode %s' % mode) - + if not os.path.exists(event_path): if os.path.exists(event_path+'.gz'): misc.gunzip(event_path+'.gz') else: raise SysCalcError('Events file %s does not exits' % event_path) - + self.update_status('Calculating systematics for %s level' % mode, level = mode.lower()) try: proc = misc.call([os.path.join(scdir, 'sys_calc'), @@ -6374,7 +6374,7 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): stderr = subprocess.STDOUT, cwd=event_dir) # Wait 5 s to make sure file is finished writing - time.sleep(5) + time.sleep(5) except OSError as error: logger.error('fail to run syscalc: %s. Please check that SysCalc is correctly installed.' % error) else: @@ -6382,11 +6382,11 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.warning('SysCalc Failed. Please read the associate log to see the reason. 
Did you install the associate PDF set?') elif mode == 'parton': files.mv(output, event_path) - + self.update_status('End syscalc for %s level' % mode, level = mode.lower(), makehtml=False) - - return True + + return True action_switcher = AskRun @@ -6399,23 +6399,23 @@ def ask_run_configuration(self, mode=None, args=[]): passing_cmd.append('reweight=ON') if '-M' in args or '--madspin' in args: passing_cmd.append('madspin=ON') - + switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, mode=mode, line_args=args, force=self.force, first_cmd=passing_cmd, return_instance=True) # - self.switch = switch # store the value of the switch for plugin purpose + self.switch = switch # store the value of the switch for plugin purpose if 'dynamical' in switch: mode = 'auto' - + # Now that we know in which mode we are check that all the card #exists (copy default if needed) - + cards = ['param_card.dat', 'run_card.dat'] if switch['shower'] == 'Pythia6': cards.append('pythia_card.dat') if switch['shower'] == 'Pythia8': - cards.append('pythia8_card.dat') + cards.append('pythia8_card.dat') if switch['detector'] in ['PGS','DELPHES+PGS']: cards.append('pgs_card.dat') if switch['detector'] in ['Delphes', 'DELPHES+PGS']: @@ -6438,29 +6438,29 @@ def ask_run_configuration(self, mode=None, args=[]): cards.append('rivet_card.dat') self.keep_cards(cards) - + first_cmd = cmd_switch.get_cardcmd() - + if os.path.isfile(pjoin(self.me_dir,'Cards','MadLoopParams.dat')): cards.append('MadLoopParams.dat') - + if self.force: self.check_param_card(pjoin(self.me_dir,'Cards','param_card.dat' )) return switch - + if 'dynamical' in switch and switch['dynamical']: self.ask_edit_cards(cards, plot=False, mode='auto', first_cmd=first_cmd) else: self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) return switch - + ############################################################################ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None): """Ask the 
question when launching pythia""" - + pythia_suffix = '' if pythia_version==6 else '%d'%pythia_version - + available_mode = ['0', '1'] if pythia_version==6: available_mode.append('2') @@ -6485,10 +6485,10 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = self.ask(question, '0', options) elif not mode: mode = 'auto' - + if mode.isdigit(): mode = name[mode] - + auto = False if mode == 'auto': auto = True @@ -6497,7 +6497,7 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = 'pgs' elif os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')): mode = 'delphes' - else: + else: mode = 'pythia%s'%pythia_suffix logger.info('Will run in mode %s' % mode) # Now that we know in which mode we are check that all the card @@ -6513,15 +6513,15 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) cards.append('delphes_trigger.dat') self.keep_cards(cards, ignore=['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat', 'plot_card.dat']) - + if self.force: return mode - + if not banner: banner = self.banner - + if auto: - self.ask_edit_cards(cards, from_banner=['param', 'run'], + self.ask_edit_cards(cards, from_banner=['param', 'run'], mode='auto', plot=(pythia_version==6), banner=banner ) else: @@ -6529,12 +6529,12 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) plot=(pythia_version==6), banner=banner) return mode - + #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmdShell(MadEventCmd, cmd.CmdShell): - """The command line processor of MadGraph""" + """The command line processor of MadGraph""" @@ -6548,11 +6548,11 @@ class SubProcesses(object): @classmethod def clean(cls): cls.name_to_pdg = {} - + @staticmethod def get_subP(me_dir): """return the list of Subprocesses""" - + out = [] for 
line in open(pjoin(me_dir,'SubProcesses', 'subproc.mg')): if not line: @@ -6560,9 +6560,9 @@ def get_subP(me_dir): name = line.strip() if os.path.exists(pjoin(me_dir, 'SubProcesses', name)): out.append(pjoin(me_dir, 'SubProcesses', name)) - + return out - + @staticmethod @@ -6623,9 +6623,9 @@ def get_subP_ids(path): particles = re.search("/([\d,-]+)/", line) all_ids.append([int(p) for p in particles.group(1).split(',')]) return all_ids - - -#=============================================================================== + + +#=============================================================================== class GridPackCmd(MadEventCmd): """The command for the gridpack --Those are not suppose to be use interactively--""" @@ -6639,7 +6639,7 @@ def __init__(self, me_dir = None, nb_event=0, seed=0, gran=-1, *completekey, **s self.random = seed self.random_orig = self.random self.granularity = gran - + self.options['automatic_html_opening'] = False #write the grid_card.dat on disk self.nb_event = int(nb_event) @@ -6680,7 +6680,7 @@ def write_RunWeb(self, me_dir): def write_gridcard(self, nb_event, seed, gran): """write the grid_card.dat file at appropriate location""" - + # first try to write grid_card within the gridpack. 
print("WRITE GRIDCARD", self.me_dir) if self.readonly: @@ -6689,35 +6689,35 @@ def write_gridcard(self, nb_event, seed, gran): fsock = open('grid_card.dat','w') else: fsock = open(pjoin(self.me_dir, 'Cards', 'grid_card.dat'),'w') - + gridpackcard = banner_mod.GridpackCard() gridpackcard['GridRun'] = True gridpackcard['gevents'] = nb_event gridpackcard['gseed'] = seed gridpackcard['ngran'] = gran - + gridpackcard.write(fsock) ############################################################################ def get_Pdir(self): """get the list of Pdirectory if not yet saved.""" - + if hasattr(self, "Pdirs"): if self.me_dir in self.Pdirs[0]: return self.Pdirs - + if not self.readonly: - self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) + self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] else: - self.Pdirs = [l.strip() - for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + self.Pdirs = [l.strip() + for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] + return self.Pdirs - + def prepare_local_dir(self): """create the P directory structure in the local directory""" - + if not self.readonly: os.chdir(self.me_dir) else: @@ -6726,7 +6726,7 @@ def prepare_local_dir(self): os.mkdir(p) files.cp(pjoin(self.me_dir,'SubProcesses',p,'symfact.dat'), pjoin(p, 'symfact.dat')) - + def launch(self, nb_event, seed): """ launch the generation for the grid """ @@ -6742,13 +6742,13 @@ def launch(self, nb_event, seed): if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(seed) + random.seed(seed) random.mg_seedset = seed elif self.run_card['python_seed'] > 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] # 2) Run the refine for the grid 
self.update_status('Generating Events', level=None) #misc.call([pjoin(self.me_dir,'bin','refine4grid'), @@ -6767,70 +6767,70 @@ def launch(self, nb_event, seed): self.exec_cmd('decay_events -from_cards', postcmd=False) elif self.run_card['use_syst'] and self.run_card['systematics_program'] == 'systematics': self.options['nb_core'] = 1 - self.exec_cmd('systematics %s --from_card' % + self.exec_cmd('systematics %s --from_card' % pjoin('Events', self.run_name, 'unweighted_events.lhe.gz'), postcmd=False,printcmd=False) - + def refine4grid(self, nb_event): """Special refine for gridpack run.""" self.nb_refine += 1 - + precision = nb_event self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) - + # initialize / remove lhapdf mode # self.configure_directory() # All this has been done before self.cluster_mode = 0 # force single machine # Store seed in randinit file, to be read by ranmar.f self.save_random() - + self.update_status('Refine results to %s' % precision, level=None) logger.info("Using random number seed offset = %s" % self.random) refine_opt = {'err_goal': nb_event, 'split_channels': False, - 'ngran':self.granularity, 'readonly': self.readonly} + 'ngran':self.granularity, 'readonly': self.readonly} x_improve = gen_ximprove.gen_ximprove_gridpack(self, refine_opt) x_improve.launch() # create the ajob for the refinment and run those! - self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack - - + self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack + + #bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) #print 'run combine!!!' 
#combine_runs.CombineRuns(self.me_dir) - + return #update html output Presults = sum_html.collect_result(self) cross, error = Presults.xsec, Presults.xerru self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + #self.update_status('finish refine', 'parton', makehtml=False) #devnull.close() - - - + + + return self.total_jobs = 0 - subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if + subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if P.startswith('P') and os.path.isdir(pjoin(self.me_dir,'SubProcesses', P))] devnull = open(os.devnull, 'w') for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) # clean previous run for match in misc.glob('*ajob*', Pdir): if os.path.basename(match)[:4] in ['ajob', 'wait', 'run.', 'done']: os.remove(pjoin(Pdir, match)) - + logfile = pjoin(Pdir, 'gen_ximprove.log') misc.call([pjoin(bindir, 'gen_ximprove')], @@ -6840,40 +6840,40 @@ def refine4grid(self, nb_event): if os.path.exists(pjoin(Pdir, 'ajob1')): alljobs = misc.glob('ajob*', Pdir) - nb_tot = len(alljobs) + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) if os.path.exists(pjoin(self.me_dir,'error')): self.monitor(html=True) raise MadEventError('Error detected in dir %s: %s' % \ (Pdir, open(pjoin(self.me_dir,'error')).read())) - self.monitor(run_type='All job submitted for refine number %s' % + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except 
Exception: pass - + bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) combine_runs.CombineRuns(self.me_dir) - + #update html output cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + self.update_status('finish refine', 'parton', makehtml=False) devnull.close() def do_combine_events(self, line): - """Advanced commands: Launch combine events""" + """Advanced commands: Launch combine events""" if self.readonly: outdir = 'Events' @@ -6895,17 +6895,17 @@ def do_combine_events(self, line): self.banner.add_generation_info(self.results.current['cross'], self.run_card['nevents']) if not hasattr(self, 'random_orig'): self.random_orig = 0 self.banner.change_seed(self.random_orig) - - + + if not os.path.exists(pjoin(outdir, self.run_name)): os.mkdir(pjoin(outdir, self.run_name)) - self.banner.write(pjoin(outdir, self.run_name, + self.banner.write(pjoin(outdir, self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -6915,7 +6915,7 @@ def do_combine_events(self, line): if os.path.exists(pjoin(Gdir, 'events.lhe')): result = sum_html.OneResult('') result.read_results(pjoin(Gdir, 'results.dat')) - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec')*gscalefact[Gdir], result.get('xerru')*gscalefact[Gdir], result.get('axsec')*gscalefact[Gdir] @@ -6924,7 +6924,7 @@ def do_combine_events(self, line): sum_xsec += result.get('xsec')*gscalefact[Gdir] sum_xerru.append(result.get('xerru')*gscalefact[Gdir]) sum_axsec += result.get('axsec')*gscalefact[Gdir] - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(outdir, 
self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.nb_event) @@ -6933,26 +6933,26 @@ def do_combine_events(self, line): AllEvent.add(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() - + self.banner.add_generation_info(sum_xsec, self.nb_event) nb_event = AllEvent.unweight(pjoin(outdir, self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.nb_event, log_level=logging.DEBUG, normalization=self.run_card['event_norm'], proc_charac=self.proc_characteristic) - - + + if partials: for i in range(partials): try: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) self.banner.add_generation_info(sum_xsec, nb_event) if self.run_card['bias_module'].lower() not in ['dummy', 'none']: @@ -6961,7 +6961,7 @@ def do_combine_events(self, line): class MadLoopInitializer(object): """ A container class for the various methods for initializing MadLoop. It is - placed in MadEventInterface because it is used by Madevent for loop-induced + placed in MadEventInterface because it is used by Madevent for loop-induced simulations. 
""" @staticmethod @@ -6974,7 +6974,7 @@ def make_and_run(dir_name,checkRam=False): if os.path.isfile(pjoin(dir_name,'check')): os.remove(pjoin(dir_name,'check')) os.remove(pjoin(dir_name,'check_sa.o')) - os.remove(pjoin(dir_name,'loop_matrix.o')) + os.remove(pjoin(dir_name,'loop_matrix.o')) # Now run make devnull = open(os.devnull, 'w') start=time.time() @@ -6996,7 +6996,7 @@ def make_and_run(dir_name,checkRam=False): stdout=devnull, stderr=devnull, close_fds=True) try: ptimer.execute() - #poll as often as possible; otherwise the subprocess might + #poll as often as possible; otherwise the subprocess might # "sneak" in some extra memory usage while you aren't looking # Accuracy of .2 seconds is enough for the timing. while ptimer.poll(): @@ -7028,7 +7028,7 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, If mu_r > 0.0, then the renormalization constant value will be hardcoded directly in check_sa.f, if is is 0 it will be set to Sqrt(s) and if it is < 0.0 the value in the param_card.dat is used. - If the split_orders target (i.e. the target squared coupling orders for + If the split_orders target (i.e. the target squared coupling orders for the computation) is != -1, it will be changed in check_sa.f via the subroutine CALL SET_COUPLINGORDERS_TARGET(split_orders).""" @@ -7043,12 +7043,12 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, file_path = pjoin(directories[0],'check_sa.f') if not os.path.isfile(file_path): raise MadGraph5Error('Could not find the location of check_sa.f'+\ - ' from the specified path %s.'%str(file_path)) + ' from the specified path %s.'%str(file_path)) file = open(file_path, 'r') check_sa = file.read() file.close() - + file = open(file_path, 'w') check_sa = re.sub(r"READPS = \S+\)","READPS = %s)"%('.TRUE.' 
if read_ps \ else '.FALSE.'), check_sa) @@ -7064,42 +7064,42 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, (("%.17e"%mu_r).replace('e','d')),check_sa) elif mu_r < 0.0: check_sa = re.sub(r"MU_R=SQRTS","",check_sa) - + if split_orders > 0: check_sa = re.sub(r"SET_COUPLINGORDERS_TARGET\(-?\d+\)", - "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) - + "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) + file.write(check_sa) file.close() - @staticmethod + @staticmethod def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ req_files = ['HelFilter.dat','LoopFilter.dat'], attempts = [4,15]): - """ Run the initialization of the process in 'run_dir' with success + """ Run the initialization of the process in 'run_dir' with success characterized by the creation of the files req_files in this directory. The directory containing the driving source code 'check_sa.f'. - The list attempt gives the successive number of PS points the + The list attempt gives the successive number of PS points the initialization should be tried with before calling it failed. Returns the number of PS points which were necessary for the init. Notice at least run_dir or SubProc_dir must be provided. A negative attempt number given in input means that quadprec will be forced for initialization.""" - + # If the user does not want detailed info, then set the dictionary # to a dummy one. 
if infos is None: infos={} - + if SubProc_dir is None and run_dir is None: raise MadGraph5Error('At least one of [SubProc_dir,run_dir] must'+\ ' be provided in run_initialization.') - + # If the user does not specify where is check_sa.f, then it is assumed # to be one levels above run_dir if SubProc_dir is None: SubProc_dir = os.path.abspath(pjoin(run_dir,os.pardir)) - + if run_dir is None: directories =[ dir for dir in misc.glob('P[0-9]*', SubProc_dir) if os.path.isdir(dir) ] @@ -7109,7 +7109,7 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find a valid running directory'+\ ' in %s.'%str(SubProc_dir)) - # Use the presence of the file born_matrix.f to decide if it is a + # Use the presence of the file born_matrix.f to decide if it is a # loop-induced process or not. It's not crucial, but just that because # of the dynamic adjustment of the ref scale used for deciding what are # the zero contributions, more points are neeeded for loop-induced. @@ -7128,9 +7128,9 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ %MLCardPath) else: - MLCard = banner_mod.MadLoopParam(MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) MLCard_orig = banner_mod.MadLoopParam(MLCard) - + # Make sure that LoopFilter really is needed. 
if not MLCard['UseLoopFilter']: try: @@ -7153,11 +7153,11 @@ def need_init(): proc_prefix+fname)) for fname in my_req_files]) or \ not os.path.isfile(pjoin(run_dir,'check')) or \ not os.access(pjoin(run_dir,'check'), os.X_OK) - + # Check if this is a process without born by checking the presence of the # file born_matrix.f is_loop_induced = os.path.exists(pjoin(run_dir,'born_matrix.f')) - + # For loop induced processes, always attempt quadruple precision if # double precision attempts fail and the user didn't specify himself # quadruple precision initializations attempts @@ -7166,11 +7166,11 @@ def need_init(): use_quad_prec = 1 curr_attempt = 1 - MLCard.set('WriteOutFilters',True) - + MLCard.set('WriteOutFilters',True) + while to_attempt!=[] and need_init(): curr_attempt = to_attempt.pop() - # if the attempt is a negative number it means we must force + # if the attempt is a negative number it means we must force # quadruple precision at initialization time if curr_attempt < 0: use_quad_prec = -1 @@ -7183,11 +7183,11 @@ def need_init(): MLCard.set('ZeroThres',1e-9) # Plus one because the filter are written on the next PS point after curr_attempt = abs(curr_attempt+1) - MLCard.set('MaxAttempts',curr_attempt) + MLCard.set('MaxAttempts',curr_attempt) MLCard.write(pjoin(SubProc_dir,'MadLoopParams.dat')) # initialization is performed. 
- MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, + MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, npoints = curr_attempt) compile_time, run_time, ram_usage = \ MadLoopInitializer.make_and_run(run_dir) @@ -7200,7 +7200,7 @@ def need_init(): infos['Process_compilation']==None: infos['Process_compilation'] = compile_time infos['Initialization'] = run_time - + MLCard_orig.write(pjoin(SubProc_dir,'MadLoopParams.dat')) if need_init(): return None @@ -7219,8 +7219,8 @@ def need_init(ML_resources_path, proc_prefix, r_files): MLCardPath = pjoin(proc_dir,'SubProcesses','MadLoopParams.dat') if not os.path.isfile(MLCardPath): raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ - %MLCardPath) - MLCard = banner_mod.MadLoopParam(MLCardPath) + %MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) req_files = ['HelFilter.dat','LoopFilter.dat'] # Make sure that LoopFilter really is needed. @@ -7234,9 +7234,9 @@ def need_init(ML_resources_path, proc_prefix, r_files): req_files.remove('HelFilter.dat') except ValueError: pass - + for v_folder in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)): + '%s*'%subproc_prefix)): # Make sure it is a valid MadLoop directory if not os.path.isdir(v_folder) or not os.path.isfile(\ pjoin(v_folder,'loop_matrix.f')): @@ -7247,7 +7247,7 @@ def need_init(ML_resources_path, proc_prefix, r_files): if need_init(pjoin(proc_dir,'SubProcesses','MadLoop5_resources'), proc_prefix, req_files): return True - + return False @staticmethod @@ -7265,7 +7265,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['treatCardsLoopNoInit'], cwd=pjoin(proc_dir,'Source')) else: interface.do_treatcards('all --no_MadLoopInit') - + # First make sure that IREGI and CUTTOOLS are compiled if needed if os.path.exists(pjoin(proc_dir,'Source','CutTools')): misc.compile(arg=['libcuttools'],cwd=pjoin(proc_dir,'Source')) @@ -7273,8 +7273,8 @@ def init_MadLoop(proc_dir, 
n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['libiregi'],cwd=pjoin(proc_dir,'Source')) # Then make sure DHELAS and MODEL are compiled misc.compile(arg=['libmodel'],cwd=pjoin(proc_dir,'Source')) - misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) - + misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) + # Now initialize the MadLoop outputs logger.info('Initializing MadLoop loop-induced matrix elements '+\ '(this can take some time)...') @@ -7283,7 +7283,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, if MG_options: if interface and hasattr(interface, 'cluster') and isinstance(interface.cluster, cluster.MultiCore): mcore = interface.cluster - else: + else: mcore = cluster.MultiCore(**MG_options) else: mcore = cluster.onecore @@ -7294,10 +7294,10 @@ def run_initialization_wrapper(run_dir, infos, attempts): run_dir=run_dir, infos=infos) else: n_PS = MadLoopInitializer.run_initialization( - run_dir=run_dir, infos=infos, attempts=attempts) + run_dir=run_dir, infos=infos, attempts=attempts) infos['nPS'] = n_PS return 0 - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return @@ -7307,21 +7307,21 @@ def wait_monitoring(Idle, Running, Done): init_info = {} # List all virtual folders while making sure they are valid MadLoop folders VirtualFolders = [f for f in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)) if (os.path.isdir(f) or + '%s*'%subproc_prefix)) if (os.path.isdir(f) or os.path.isfile(pjoin(f,'loop_matrix.f')))] logger.debug("Now Initializing MadLoop matrix element in %d folder%s:"%\ (len(VirtualFolders),'s' if len(VirtualFolders)>1 else '')) - logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in + logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in VirtualFolders)) for v_folder in VirtualFolders: init_info[v_folder] = {} - + # We try all multiples of n_PS from 1 to max_mult, first in DP and then # in QP before 
giving up, or use default values if n_PS is None. max_mult = 3 if n_PS is None: # Then use the default list of number of PS points to try - mcore.submit(run_initialization_wrapper, + mcore.submit(run_initialization_wrapper, [pjoin(v_folder), init_info[v_folder], None]) else: # Use specific set of PS points @@ -7348,8 +7348,8 @@ def wait_monitoring(Idle, Running, Done): '%d PS points (%s), in %.3g(compil.) + %.3g(init.) secs.'%( abs(init['nPS']),'DP' if init['nPS']>0 else 'QP', init['Process_compilation'],init['Initialization'])) - - logger.info('MadLoop initialization finished.') + + logger.info('MadLoop initialization finished.') AskforEditCard = common_run.AskforEditCard @@ -7364,16 +7364,16 @@ def wait_monitoring(Idle, Running, Done): import os import optparse - # Get the directory of the script real path (bin) - # and add it to the current PYTHONPATH + # Get the directory of the script real path (bin) + # and add it to the current PYTHONPATH #root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))) sys.path.insert(0, root_path) - class MyOptParser(optparse.OptionParser): + class MyOptParser(optparse.OptionParser): class InvalidOption(Exception): pass def error(self, msg=''): raise MyOptParser.InvalidOption(msg) - # Write out nice usage message if called with -h or --help + # Write out nice usage message if called with -h or --help usage = "usage: %prog [options] [FILE] " parser = MyOptParser(usage=usage) parser.add_option("-l", "--logging", default='INFO', @@ -7384,7 +7384,7 @@ def error(self, msg=''): help='force to launch debug mode') parser_error = '' done = False - + for i in range(len(sys.argv)-1): try: (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i]) @@ -7394,7 +7394,7 @@ def error(self, msg=''): else: args += sys.argv[len(sys.argv)-i:] if not done: - # raise correct error: + # raise correct error: try: (options, args) = parser.parse_args() except MyOptParser.InvalidOption as error: @@ -7407,8 +7407,8 @@ 
def error(self, msg=''): import subprocess import logging import logging.config - # Set logging level according to the logging level given by options - #logging.basicConfig(level=vars(logging)[options.logging]) + # Set logging level according to the logging level given by options + #logging.basicConfig(level=vars(logging)[options.logging]) import internal import internal.coloring_logging # internal.file = XXX/bin/internal/__init__.py @@ -7431,13 +7431,13 @@ def error(self, msg=''): raise pass - # Call the cmd interface main loop + # Call the cmd interface main loop try: if args: # a single command is provided if '--web' in args: - i = args.index('--web') - args.pop(i) + i = args.index('--web') + args.pop(i) cmd_line = MadEventCmd(me_dir, force_run=True) else: cmd_line = MadEventCmdShell(me_dir, force_run=True) @@ -7457,13 +7457,13 @@ def error(self, msg=''): pass - - - - - - - - + + + + + + + + diff --git a/epochX/cudacpp/gg_ttggg.mad/src/cudacpp_src.mk b/epochX/cudacpp/gg_ttggg.mad/src/cudacpp_src.mk index d4cc628aec..b4e446bc45 100644 --- a/epochX/cudacpp/gg_ttggg.mad/src/cudacpp_src.mk +++ b/epochX/cudacpp/gg_ttggg.mad/src/cudacpp_src.mk @@ -1,12 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) is used in the Subprocess and src directories - -THISMK = $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
#------------------------------------------------------------------------------- @@ -16,165 +11,24 @@ SHELL := /bin/bash #------------------------------------------------------------------------------- -#=== Configure common compiler flags for CUDA and C++ - -INCFLAGS = -I. -OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here - -#------------------------------------------------------------------------------- - #=== Configure the C++ compiler -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) $(USE_NVTX) -fPIC -Wall -Wshadow -Wextra +include ../Source/make_opts + +MG_CXXFLAGS += -fPIC -I. $(USE_NVTX) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS+= -ffast-math # see issue #117 +MG_CXXFLAGS += -ffast-math # see issue #117 endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY # Note: AR, CXX and FC are implicitly defined if not set externally # See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html ###RANLIB = ranlib -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -LDFLAGS = -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -LDFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler (note: NVCC is already exported including ccache) - -###$(info NVCC=$(NVCC)) - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ builds (note: NVCC is already exported including ccache) - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif - 
-#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for CUDA and C++ - -# Assuming uname is available, detect if architecture is PowerPC -UNAME_P := $(shell uname -p) - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # BUILD ERROR IF THIS ADDED IN SRC?! -else - ###AR=gcc-ar # needed by -flto - ###RANLIB=gcc-ranlib # needed by -flto - ###CXXFLAGS+= -flto # NB: build error from src/Makefile unless gcc-ar and gcc-ranlib are used - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -#------------------------------------------------------------------------------- - #=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the build flags appropriate to OMPFLAGS ###$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword 
registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -###$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -###$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -###$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -###$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - CXXFLAGS += -DMGONGPU_HAS_NO_CURAND -else ifneq ($(RNDGEN),hasCurand) - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- @@ -182,28 +36,18 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -###$(info Current directory is $(shell pwd)) -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIRREL = ../lib/$(BUILDDIR) - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR=1 is set)) -else - override BUILDDIR = . - override LIBDIRREL = ../lib - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) -endif -######$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) +# Build directory: +BUILDDIR := build.$(DIRTAG) +LIBDIRREL := ../lib/$(BUILDDIR) # Workaround for Mac #375 (I did not manage to fix rpath with @executable_path): use absolute paths for LIBDIR # (NB: this is quite ugly because it creates the directory if it does not exist - to avoid removing src by mistake) -UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Darwin) override LIBDIR = $(shell mkdir -p $(LIBDIRREL); cd $(LIBDIRREL); pwd) ifeq ($(wildcard $(LIBDIR)),) @@ -223,55 +67,35 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -# Target (and build options): debug -debug: OPTFLAGS = -g -O0 -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! 
-name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ #------------------------------------------------------------------------------- cxx_objects=$(addprefix $(BUILDDIR)/, Parameters_sm.o read_slha.o) -ifneq ($(NVCC),) +ifeq ($(AVX),cuda) +COMPILER=$(NVCC) cu_objects=$(addprefix $(BUILDDIR)/, Parameters_sm_cu.o) +else +COMPILER=$(CXX) +cu_objects= endif # Target (and build rules): common (src) library -ifneq ($(NVCC),) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) $(cu_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(NVCC) -shared -o $@ $(cxx_objects) $(cu_objects) $(LDFLAGS) -else -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(CXX) -shared -o $@ $(cxx_objects) $(LDFLAGS) -endif + mkdir -p $(LIBDIR) + $(COMPILER) -shared -o $@ $(cxx_objects) $(cu_objects) $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- @@ -279,19 +103,7 @@ endif .PHONY: clean clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) -else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe -endif - -cleanall: - @echo - $(MAKE) clean -f $(THISMK) - @echo - rm -rf $(LIBDIR)/build.* - rm -rf build.* + $(RM) -f ../lib/build.*/*.so + $(RM) -rf build.* #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gg_ttggg.mad/src/mgOnGpuCxtypes.h b/epochX/cudacpp/gg_ttggg.mad/src/mgOnGpuCxtypes.h index ca9a9f00c0..3290d314d6 100644 --- a/epochX/cudacpp/gg_ttggg.mad/src/mgOnGpuCxtypes.h +++ b/epochX/cudacpp/gg_ttggg.mad/src/mgOnGpuCxtypes.h @@ -21,10 +21,14 @@ // Complex type in cuda: thrust or cucomplex or cxsmpl #ifdef 
__CUDACC__ #if defined MGONGPU_CUCXTYPE_THRUST +#ifdef __CLANG__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" // for icpx2021/clang13 (https://stackoverflow.com/a/15864661) +#endif #include +#ifdef __CLANG__ #pragma clang diagnostic pop +#endif #elif defined MGONGPU_CUCXTYPE_CUCOMPLEX #include #elif not defined MGONGPU_CUCXTYPE_CXSMPL diff --git a/epochX/cudacpp/gg_ttggg.sa/src/cudacpp_src.mk b/epochX/cudacpp/gg_ttggg.sa/src/cudacpp_src.mk index d4cc628aec..c757875347 100644 --- a/epochX/cudacpp/gg_ttggg.sa/src/cudacpp_src.mk +++ b/epochX/cudacpp/gg_ttggg.sa/src/cudacpp_src.mk @@ -1,7 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. +# Further modified by: J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. #=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) #=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) 
is used in the Subprocess and src directories @@ -95,50 +95,52 @@ CXXFLAGS += $(OMPFLAGS) # Set the build flags appropriate to each AVX choice (example: "make AVX=none") # [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] # [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) +ifeq ($(NVCC),) + $(info AVX=$(AVX)) + ifeq ($(UNAME_P),ppc64le) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) + endif + else ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' 
are supported on ARM for the moment) + endif + else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + ifeq ($(AVX),none) + override AVXFLAGS = -march=x86-64 # no SIMD (see #588) + else ifeq ($(AVX),sse4) + override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # 
AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else ifneq ($(AVX),none) + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif endif + # For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? + CXXFLAGS+= $(AVXFLAGS) endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? -CXXFLAGS+= $(AVXFLAGS) # Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") ###$(info FPTYPE=$(FPTYPE)) @@ -182,11 +184,19 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +ifneq ($(NVCC),) + override DIRTAG = cuda_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +else + override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +endif # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +ifneq ($(NVCC),) + override TAG = cuda_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +else + override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +endif # Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 ###$(info Current directory is $(shell pwd)) @@ -223,35 +233,21 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so # Target (and build options): debug debug: OPTFLAGS = -g -O0 
debug: all.$(TAG) -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) - #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ @@ -278,20 +274,61 @@ endif # Target: clean the builds .PHONY: clean +BUILD_DIRS := $(wildcard build.*) +NUM_BUILD_DIRS := $(words $(BUILD_DIRS)) + clean: ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) +ifeq ($(NUM_BUILD_DIRS),1) + $(info USEBUILDDIR=1, only one src build directory found.) + rm -rf ../lib/$(BUILD_DIRS) + rm -rf $(BUILD_DIRS) +else ifeq ($(NUM_BUILD_DIRS),0) + $(error USEBUILDDIR=1, but no src build directories are found.) else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe + $(error Multiple src BUILDDIR's found! Use 'cleannone', 'cleansse4', 'cleanavx2', 'clean512y','clean512z', 'cleancuda' or 'cleanall'.) +endif +else + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + rm -f $(BUILDDIR)/*.o $(BUILDDIR)/*.exe endif cleanall: @echo - $(MAKE) clean -f $(THISMK) + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + rm -f $(BUILDDIR)/*.o $(BUILDDIR)/*.exe @echo - rm -rf $(LIBDIR)/build.* + rm -rf ../lib/build.* rm -rf build.* +# Target: clean different builds + +cleannone: + rm -rf ../lib/build.none_* + rm -rf build.none_* + +cleansse4: + rm -rf ../lib/build.sse4_* + rm -rf build.sse4_* + +cleanavx2: + rm -rf ../lib/build.avx2_* + rm -rf build.avx2_* + +clean512y: + rm -rf ../lib/build.512y_* + rm -rf build.512y_* + +clean512z: + rm -rf ../lib/build.512z_* + rm -rf build.512z_* + +cleancuda: + rm -rf ../lib/build.cuda_* + rm -rf build.cuda_* + +cleandir: + rm -f ./*.o ./*.exe + rm -f ../lib/lib$(MG5AMC_COMMONLIB).so + #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gq_ttq.mad/CODEGEN_mad_gq_ttq_log.txt b/epochX/cudacpp/gq_ttq.mad/CODEGEN_mad_gq_ttq_log.txt index 97f5e25170..e60a4a53d5 100644 --- a/epochX/cudacpp/gq_ttq.mad/CODEGEN_mad_gq_ttq_log.txt +++ 
b/epochX/cudacpp/gq_ttq.mad/CODEGEN_mad_gq_ttq_log.txt @@ -52,7 +52,7 @@ Note that you can still compile and run aMC@NLO with the built-in PDFs Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt import /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gq_ttq.mg The import format was not given, so we guess it as command set stdout_level DEBUG @@ -61,7 +61,7 @@ set zerowidth_tchannel F define q = u c d s u~ c~ d~ s~ INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005373477935791016  +DEBUG: model prefixing takes 0.005306720733642578  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -170,7 +170,7 @@ INFO: Crossed process found for g u~ > t t~ u~, reuse diagrams. INFO: Crossed process found for g c~ > t t~ c~, reuse diagrams. INFO: Crossed process found for g d~ > t t~ d~, reuse diagrams. INFO: Crossed process found for g s~ > t t~ s~, reuse diagrams. 
-8 processes with 40 diagrams generated in 0.077 s +8 processes with 40 diagrams generated in 0.078 s Total: 8 processes with 40 diagrams output madevent ../TMPOUT/CODEGEN_mad_gq_ttq --hel_recycling=False --vector_size=32 --me_exporter=standalone_cudacpp Load PLUGIN.CUDACPP_OUTPUT @@ -198,7 +198,7 @@ INFO: Combined process g d~ > t t~ d~ WEIGHTED<=3 @1 with process g u~ > t t~ u~ INFO: Combined process g s~ > t t~ s~ WEIGHTED<=3 @1 with process g u~ > t t~ u~ WEIGHTED<=3 @1 INFO: Creating files in directory P1_gu_ttxu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -215,7 +215,7 @@ INFO: Generating Feynman diagrams for Process: g u > t t~ u WEIGHTED<=3 @1 INFO: Finding symmetric diagrams for subprocess group gu_ttxu INFO: Creating files in directory P1_gux_ttxux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -230,17 +230,17 @@ INFO: Created files CPPProcess.h and CPPProcess.cc in directory ./. 
DEBUG: vector, subproc_group,self.opt['vector_size'] =  32 True 32 [export_v4.py at line 1872]  INFO: Generating Feynman diagrams for Process: g u~ > t t~ u~ WEIGHTED<=3 @1 INFO: Finding symmetric diagrams for subprocess group gux_ttxux -Generated helas calls for 2 subprocesses (10 diagrams) in 0.031 s -Wrote files for 32 helas calls in 0.219 s +Generated helas calls for 2 subprocesses (10 diagrams) in 0.030 s +Wrote files for 32 helas calls in 0.216 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVV1 routines -ALOHA: aloha creates 2 routines in 0.144 s +ALOHA: aloha creates 2 routines in 0.147 s DEBUG: Entering PLUGIN_ProcessExporter.convert_model (create the model) [output.py at line 202]  ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVV1 routines -ALOHA: aloha creates 4 routines in 0.132 s +ALOHA: aloha creates 4 routines in 0.129 s FFV1 FFV1 FFV1 @@ -260,12 +260,14 @@ save configuration file to /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CO INFO: Use Fortran compiler gfortran INFO: Use c++ compiler g++ INFO: Generate web pages +DEBUG: standardise /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gq_ttq/Source/make_opts (fix f2py3 and sort make_opts_variables) before applying patch.common DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gq_ttq; patch -p4 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common patching file Source/genps.inc +patching file Source/make_opts patching file Source/makefile patching file SubProcesses/makefile +patching file bin/internal/banner.py patching file bin/internal/gen_ximprove.py -Hunk #1 succeeded at 391 (offset 6 lines). 
patching file bin/internal/madevent_interface.py DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gq_ttq/SubProcesses/P1_gu_ttxu; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f @@ -295,8 +297,8 @@ Run "open index.html" to see more information about this process. quit real 0m1.929s -user 0m1.701s -sys 0m0.227s +user 0m1.681s +sys 0m0.247s ************************************************************ * * * W E L C O M E to * @@ -322,7 +324,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gq_ttq/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt treatcards run quit INFO: @@ -352,7 +354,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_gq_ttq/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". 
Set another one in ./input/mg5_configuration.txt treatcards param quit INFO: diff --git a/epochX/cudacpp/gq_ttq.mad/Source/make_opts b/epochX/cudacpp/gq_ttq.mad/Source/make_opts index e4b87ee6ad..435bed0dc7 100644 --- a/epochX/cudacpp/gq_ttq.mad/Source/make_opts +++ b/epochX/cudacpp/gq_ttq.mad/Source/make_opts @@ -1,7 +1,7 @@ DEFAULT_CPP_COMPILER=g++ DEFAULT_F2PY_COMPILER=f2py3 DEFAULT_F_COMPILER=gfortran -GLOBAL_FLAG=-O3 -ffast-math -fbounds-check +GLOBAL_FLAG=-O3 -ffast-math MACFLAG= MG5AMC_VERSION=SpecifiedByMG5aMCAtRunTime PYTHIA8_PATH=NotInstalled @@ -13,31 +13,53 @@ BIASLIBDIR=../../../lib/ BIASLIBRARY=libbias.$(libext) # Rest of the makefile -ifeq ($(origin FFLAGS),undefined) -FFLAGS= -w -fPIC -#FFLAGS+= -g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none -endif -FFLAGS += $(GLOBAL_FLAG) +#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) + +# Detect O/S kernel (Linux, Darwin...) +UNAME_S := $(shell uname -s) + +# Detect architecture (x86_64, ppc64le...) +UNAME_P := $(shell uname -p) + +#------------------------------------------------------------------------------- # REMOVE MACFLAG IF NOT ON MAC OR FOR F2PY -UNAME := $(shell uname -s) ifdef f2pymode MACFLAG= else -ifneq ($(UNAME), Darwin) +ifneq ($(UNAME_S), Darwin) MACFLAG= endif endif +############################################################ +# Default compiler flags +# To change optimisation level, override these as follows: +# make CXXFLAGS="-O0 -g" +# or export them as environment variables +# For debugging Fortran, one could e.g. 
use: +# FCFLAGS="-g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none" +############################################################ +FCFLAGS ?= $(GLOBAL_FLAG) -fbounds-check +CXXFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG +NVCCFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG -use_fast_math -lineinfo +LDFLAGS ?= $(STDLIB) -ifeq ($(origin CXXFLAGS),undefined) -CXXFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +ifneq ($(FFLAGS),) +# Madgraph used to use FFLAGS, so the user probably tries to change the flags specifically for madgraph: +FCFLAGS = $(FFLAGS) endif -ifeq ($(origin CFLAGS),undefined) -CFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +# Madgraph-specific flags: +WARNFLAGS = -Wall -Wshadow -Wextra +ifeq (,$(findstring -std=,$(CXXFLAGS))) +CXXSTANDARD= -std=c++17 endif +MG_FCFLAGS += -fPIC -w +MG_CXXFLAGS += -fPIC $(CXXSTANDARD) $(WARNFLAGS) $(MACFLAG) +MG_NVCCFLAGS += -fPIC $(CXXSTANDARD) --forward-unknown-to-host-compiler $(WARNFLAGS) +MG_LDFLAGS += $(MACFLAG) # Set FC unless it's defined by an environment variable ifeq ($(origin FC),default) @@ -49,45 +71,40 @@ endif # Increase the number of allowed charcters in a Fortran line ifeq ($(FC), ftn) -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler else VERS="$(shell $(FC) --version | grep ifort -i)" ifeq ($(VERS), "") -FFLAGS+= -ffixed-line-length-132 +MG_FCFLAGS += -ffixed-line-length-132 else -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler endif endif -UNAME := $(shell uname -s) -ifeq ($(origin LDFLAGS), undefined) -LDFLAGS=$(STDLIB) $(MACFLAG) -endif - # Options: dynamic, lhapdf # Option dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) dylibext=dylib else dylibext=so endif ifdef dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) libext=dylib -FFLAGS+= -fno-common -LDFLAGS += -bundle +MG_FCFLAGS += -fno-common +MG_LDFLAGS += -bundle define CREATELIB $(FC) -dynamiclib 
-undefined dynamic_lookup -o $(1) $(2) endef else libext=so -FFLAGS+= -fPIC -LDFLAGS += -shared +MG_FCFLAGS += -fPIC +MG_LDFLAGS += -shared define CREATELIB -$(FC) $(FFLAGS) $(LDFLAGS) -o $(1) $(2) +$(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MG_LDFLAGS) $(LDFLAGS) -o $(1) $(2) endef endif else @@ -101,17 +118,9 @@ endif # Option lhapdf ifneq ($(lhapdf),) -CXXFLAGS += $(shell $(lhapdf) --cppflags) +MG_CXXFLAGS += $(shell $(lhapdf) --cppflags) alfas_functions=alfas_functions_lhapdf llhapdf+= $(shell $(lhapdf) --cflags --libs) -lLHAPDF -# check if we need to activate c++11 (for lhapdf6.2) -ifeq ($(origin CXX),default) -ifeq ($lhapdfversion$lhapdfsubversion,62) -CXX=$(DEFAULT_CPP_COMPILER) -std=c++11 -else -CXX=$(DEFAULT_CPP_COMPILER) -endif -endif else alfas_functions=alfas_functions llhapdf= @@ -120,4 +129,207 @@ endif # Helper function to check MG5 version define CHECK_MG5AMC_VERSION python -c 'import re; from distutils.version import StrictVersion; print StrictVersion("$(MG5AMC_VERSION)") >= StrictVersion("$(1)") if re.match("^[\d\.]+$$","$(MG5AMC_VERSION)") else True;' -endef \ No newline at end of file +endef + +#------------------------------------------------------------------------------- + +# Set special cases for non-gcc/clang builds +# AVX below gets overridden from outside in architecture-specific builds +AVX ?= none +# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] +# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] +$(info AVX=$(AVX)) +ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + endif +else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for 
clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif +endif + +# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? +MG_CXXFLAGS+= $(AVXFLAGS) + +#------------------------------------------------------------------------------- + +#=== Configure the CUDA compiler if available + +# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) +# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below +ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside + $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") + override CUDA_HOME=disabled +endif + +# If CUDA_HOME is not set, try to set it from the location of nvcc +ifndef CUDA_HOME + CUDA_HOME = $(patsubst %bin/nvcc,%,$(shell which nvcc 2>/dev/null)) + $(info CUDA_HOME="$(CUDA_HOME)") +endif + +# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists +ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) + NVCC = $(CUDA_HOME)/bin/nvcc + USE_NVTX ?=-DUSE_NVTX + # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html + # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ + # Default: use compute capability 70 (Volta architecture), and embed PTX to support later architectures, too. + # Set MADGRAPH_CUDA_ARCHITECTURE to the desired value to change the default. + # Build for multiple architectures using a space-separated list, e.g. 
MADGRAPH_CUDA_ARCHITECTURE="70 80" + MADGRAPH_CUDA_ARCHITECTURE ?= 70 + # Generate PTX for the first architecture: + CUARCHFLAGS := --generate-code arch=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)),code=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)) + # Generate device code for all architectures: + CUARCHFLAGS += $(foreach arch,$(MADGRAPH_CUDA_ARCHITECTURE), --generate-code arch=compute_$(arch),code=sm_$(arch)) + + CUINC = -I$(CUDA_HOME)/include/ + CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! + MG_LDFLAGS += $(CURANDLIBFLAGS) + MG_NVCCFLAGS += $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) + +else ifeq ($(AVX),cuda) + $(error nvcc is not visible in PATH. Either add it to PATH or export CUDA_HOME to compile with cuda) + ifeq ($(AVX),cuda) + $(error Cannot compile for cuda without NVCC) + endif +endif + +# Set the host C++ compiler for nvcc via "-ccbin " +# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." is not supported) +MG_NVCCFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) + +# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) +ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) +MG_NVCCFLAGS += -allow-unsupported-compiler +endif + +#------------------------------------------------------------------------------- + +#=== Configure ccache for C++ and CUDA builds + +# Enable ccache if USECCACHE=1 +ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) + override CXX:=ccache $(CXX) +endif + +ifneq ($(NVCC),) + ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) + override NVCC:=ccache $(NVCC) + endif +endif + +#------------------------------------------------------------------------------- + +#=== Configure PowerPC-specific compiler flags for C++ and CUDA + +# PowerPC-specific CXX / CUDA compiler flags (being reviewed) +ifeq ($(UNAME_P),ppc64le) + MG_CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none 
and sse4 + MG_NVCCFLAGS+= -Xcompiler -mno-float128 + + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + endif +endif + +#------------------------------------------------------------------------------- +#=== Apple-specific compiler/linker options + +# Add -std=c++17 explicitly to avoid build errors on macOS +# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" +ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) +MG_CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 +endif + +ifeq ($(UNAME_S),Darwin) +STDLIB = -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) +MG_LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" +else +MG_LDFLAGS += -Xlinker --no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +endif + +#------------------------------------------------------------------------------- + +#=== C++/CUDA-specific flags for floating-point types and random generators to use + +# Set the default FPTYPE (floating point type) choice +FPTYPE ?= m + +# Set the default HELINL (inline helicities?) choice +HELINL ?= 0 + +# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice +HRDCOD ?= 0 + +# Set the default RNDGEN (random number generator) choice +ifeq ($(NVCC),) + RNDGEN ?= hasNoCurand +else + RNDGEN ?= hasCurand +endif + +# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so sub-makes don't go back to the defaults +export AVX +export AVXFLAGS +export FPTYPE +export HELINL +export HRDCOD +export RNDGEN + +#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN + +# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") +# $(info FPTYPE=$(FPTYPE)) +ifeq ($(FPTYPE),d) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE +else ifeq ($(FPTYPE),f) + COMMONFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT +else ifeq ($(FPTYPE),m) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT +else + $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) +endif + +# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") +# $(info HELINL=$(HELINL)) +ifeq ($(HELINL),1) + COMMONFLAGS += -DMGONGPU_INLINE_HELAMPS +else ifneq ($(HELINL),0) + $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") +# $(info HRDCOD=$(HRDCOD)) +ifeq ($(HRDCOD),1) + COMMONFLAGS += -DMGONGPU_HARDCODE_PARAM +else ifneq ($(HRDCOD),0) + $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") +$(info RNDGEN=$(RNDGEN)) +ifeq ($(RNDGEN),hasNoCurand) + override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND + override CURANDLIBFLAGS = +else ifeq ($(RNDGEN),hasCurand) + CXXFLAGSCURAND = $(CUINC) +else + $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) +endif + +MG_CXXFLAGS += $(COMMONFLAGS) +MG_NVCCFLAGS += $(COMMONFLAGS) + 
+#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gq_ttq.mad/Source/makefile b/epochX/cudacpp/gq_ttq.mad/Source/makefile index 00c73099a0..407b1b753e 100644 --- a/epochX/cudacpp/gq_ttq.mad/Source/makefile +++ b/epochX/cudacpp/gq_ttq.mad/Source/makefile @@ -10,8 +10,8 @@ include make_opts # Source files -PROCESS= hfill.o matrix.o myamp.o -DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o +PROCESS = hfill.o matrix.o myamp.o +DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o HBOOK = hfill.o hcurve.o hbook1.o hbook2.o GENERIC = $(alfas_functions).o transpole.o invarients.o hfill.o pawgraphs.o ran1.o \ rw_events.o rw_routines.o kin_functions.o open_file.o basecode.o setrun.o \ @@ -22,7 +22,7 @@ GENSUDGRID = gensudgrid.o is-sud.o setrun_gen.o rw_routines.o open_file.o # Locally compiled libraries -LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) +LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) # Binaries @@ -32,6 +32,9 @@ BINARIES = $(BINDIR)gen_ximprove $(BINDIR)gensudgrid $(BINDIR)combine_runs all: $(LIBRARIES) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(LIBDIR)libbias.$(libext) +%.o: %.f *.inc + $(FC) -I. $(MG_FCFLAGS) $(FCFLAGS) -c $< -o $@ + # Libraries $(LIBDIR)libdsample.$(libext): $(DSAMPLE) @@ -39,36 +42,35 @@ $(LIBDIR)libdsample.$(libext): $(DSAMPLE) $(LIBDIR)libgeneric.$(libext): $(GENERIC) $(call CREATELIB, $@, $^) $(LIBDIR)libdhelas.$(libext): DHELAS - cd DHELAS; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libpdf.$(libext): PDF make_opts - cd PDF; make; cd .. 
+ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" ifneq (,$(filter edff chff, $(pdlabel1) $(pdlabel2))) $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make ; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" else $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make -f makefile_dummy; cd ../../ -endif + $(MAKE) -C $< -f makefile_dummy FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" +endif $(LIBDIR)libcernlib.$(libext): CERNLIB - cd CERNLIB; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" # The bias library is here the dummy by default; compilation of other ones specified in the run_card will be done by MG5aMC directly. $(LIBDIR)libbias.$(libext): BIAS/dummy - cd BIAS/dummy; make; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libmodel.$(libext): MODEL param_card.inc - cd MODEL; make + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" param_card.inc: ../Cards/param_card.dat ../bin/madevent treatcards param + touch $@ # madevent doesn't update the time stamp if there's nothing to do -$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o - $(FC) $(LDFLAGS) -o $@ $^ -#$(BINDIR)combine_events: $(COMBINE) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) run_card.inc $(LIBDIR)libbias.$(libext) -# $(FC) -o $@ $(COMBINE) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC $(llhapdf) $(LDFLAGS) -lbias +$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o + $(FC) $(MG_LDFLAGS) $(LDFLAGS) -o $@ $^ $(BINDIR)gensudgrid: $(GENSUDGRID) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libcernlib.$(libext) - $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(LDFLAGS) + $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) 
-lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(MG_LDFLAGS) $(LDFLAGS) # Dependencies @@ -85,6 +87,7 @@ rw_events.o: rw_events.f run_config.inc run_card.inc: ../Cards/run_card.dat ../bin/madevent treatcards run + touch $@ # madevent doesn't update the time stamp if there's nothing to do clean4pdf: rm -f ../lib/libpdf.$(libext) @@ -120,7 +123,7 @@ $(LIBDIR)libiregi.a: $(IREGIDIR) cd $(IREGIDIR); make ln -sf ../Source/$(IREGIDIR)libiregi.a $(LIBDIR)libiregi.a -cleanSource: +clean: $(RM) *.o $(LIBRARIES) $(BINARIES) cd PDF; make clean; cd .. cd PDF/gammaUPC; make clean; cd ../../ @@ -132,11 +135,3 @@ cleanSource: cd BIAS/ptj_bias; make clean; cd ../.. if [ -d $(CUTTOOLSDIR) ]; then cd $(CUTTOOLSDIR); make clean; cd ..; fi if [ -d $(IREGIDIR) ]; then cd $(IREGIDIR); make clean; cd ..; fi - -clean: cleanSource - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make clean; cd -; done; - -cleanavx: - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; -cleanall: cleanSource # THIS IS THE ONE - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/Bridge.h b/epochX/cudacpp/gq_ttq.mad/SubProcesses/Bridge.h index bf8b5e024d..c263f39a62 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/Bridge.h +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/Bridge.h @@ -236,7 +236,7 @@ namespace mg5amcCpu #ifdef __CUDACC__ if( ( m_nevt < s_gputhreadsmin ) || ( m_nevt % s_gputhreadsmin != 0 ) ) throw std::runtime_error( "Bridge constructor: nevt should be a multiple of " + std::to_string( s_gputhreadsmin ) ); - while( m_nevt != m_gpublocks * m_gputhreads ) + while( m_nevt != static_cast( m_gpublocks * m_gputhreads ) ) { m_gputhreads /= 2; if( m_gputhreads < s_gputhreadsmin ) @@ -266,7 +266,7 @@ namespace mg5amcCpu template void Bridge::set_gpugrid( const int gpublocks, const int gputhreads ) { - if( m_nevt != gpublocks * gputhreads ) + if( m_nevt != static_cast( gpublocks * gputhreads ) ) throw 
std::runtime_error( "Bridge: gpublocks*gputhreads must equal m_nevt in set_gpugrid" ); m_gpublocks = gpublocks; m_gputhreads = gputhreads; diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/MadgraphTest.h b/epochX/cudacpp/gq_ttq.mad/SubProcesses/MadgraphTest.h index ef40624c88..b0f2250c25 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/MadgraphTest.h +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/MadgraphTest.h @@ -199,10 +199,6 @@ class MadgraphTest : public testing::TestWithParam } }; -// Since we link both the CPU-only and GPU tests into the same executable, we prevent -// a multiply defined symbol by only compiling this in the non-CUDA phase: -#ifndef __CUDACC__ - /// Compare momenta and matrix elements. /// This uses an implementation of TestDriverBase to run a madgraph workflow, /// and compares momenta and matrix elements with a reference file. @@ -307,6 +303,4 @@ TEST_P( MadgraphTest, CompareMomentaAndME ) } } -#endif // __CUDACC__ - #endif /* MADGRAPHTEST_H_ */ diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/MatrixElementKernels.cc b/epochX/cudacpp/gq_ttq.mad/SubProcesses/MatrixElementKernels.cc index 74b5239ebf..2d6f27cd5d 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/MatrixElementKernels.cc +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/MatrixElementKernels.cc @@ -196,6 +196,9 @@ namespace mg5amcGpu void MatrixElementKernelDevice::setGrid( const int gpublocks, const int gputhreads ) { + m_gpublocks = gpublocks; + m_gputhreads = gputhreads; + if( m_gpublocks == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gpublocks must be > 0 in setGrid" ); if( m_gputhreads == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gputhreads must be > 0 in setGrid" ); if( this->nevt() != m_gpublocks * m_gputhreads ) throw std::runtime_error( "MatrixElementKernelDevice: nevt mismatch in setGrid" ); diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gu_ttxu/check_sa.cc b/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gu_ttxu/check_sa.cc index 
3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gu_ttxu/check_sa.cc +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gu_ttxu/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gu_ttxu/counters.cc b/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gu_ttxu/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gu_ttxu/counters.cc +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gu_ttxu/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gux_ttxux/check_sa.cc b/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gux_ttxux/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gux_ttxux/check_sa.cc +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gux_ttxux/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gux_ttxux/counters.cc b/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gux_ttxux/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gux_ttxux/counters.cc +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/P1_gux_ttxux/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk index 509307506b..a522ddb335 100644 
--- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk @@ -1,56 +1,41 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: use ':=' to ensure that the value of CUDACPP_MAKEFILE is not modified further down after including make_opts -#=== NB: use 'override' to ensure that the value can not be modified from the outside -override CUDACPP_MAKEFILE := $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) -###$(info CUDACPP_MAKEFILE='$(CUDACPP_MAKEFILE)') - -#=== NB: different names (e.g. cudacpp.mk and cudacpp_src.mk) are used in the Subprocess and src directories -override CUDACPP_SRC_MAKEFILE = cudacpp_src.mk - -#------------------------------------------------------------------------------- - -#=== Use bash in the Makefile (https://www.gnu.org/software/make/manual/html_node/Choosing-the-Shell.html) - -SHELL := /bin/bash - -#------------------------------------------------------------------------------- - -#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) - -# Detect O/S kernel (Linux, Darwin...) -UNAME_S := $(shell uname -s) -###$(info UNAME_S='$(UNAME_S)') - -# Detect architecture (x86_64, ppc64le...) 
-UNAME_P := $(shell uname -p) -###$(info UNAME_P='$(UNAME_P)') - -#------------------------------------------------------------------------------- - -#=== Include the common MG5aMC Makefile options - -# OM: this is crucial for MG5aMC flag consistency/documentation -# AV: temporarely comment this out because it breaks cudacpp builds -ifneq ($(wildcard ../../Source/make_opts),) -include ../../Source/make_opts -endif +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. + +# This makefile extends the Fortran makefile called "makefile" + +CUDACPP_SRC_MAKEFILE = cudacpp_src.mk + +# Self-invocation with adapted flags: +cppnative: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=native AVXFLAGS="-march=native" cppbuild +cppnone: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=none AVXFLAGS= cppbuild +cppsse4: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=sse4 AVXFLAGS=-march=nehalem cppbuild +cppavx2: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=avx2 AVXFLAGS=-march=haswell cppbuild +cppavx512y: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512y AVXFLAGS="-march=skylake-avx512 -mprefer-vector-width=256" cppbuild +cppavx512z: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512z AVXFLAGS="-march=skylake-avx512 -DMGONGPU_PVW512" cppbuild +cuda: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=cuda cudabuild #------------------------------------------------------------------------------- #=== Configure common compiler flags for C++ and CUDA +# NB: The base flags are defined in the fortran "makefile" + +# Include directories +INCFLAGS = -I. -I../../src -INCFLAGS = -I. 
-OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here +MG_CXXFLAGS += $(INCFLAGS) +MG_NVCCFLAGS += $(INCFLAGS) # Dependency on src directory -MG5AMC_COMMONLIB = mg5amc_common -LIBFLAGS = -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -INCFLAGS += -I../../src +MG5AMC_COMMONLIB = mg5amc_common # Compiler-specific googletest build directory (#125 and #738) ifneq ($(shell $(CXX) --version | grep '^Intel(R) oneAPI DPC++/C++ Compiler'),) @@ -99,356 +84,42 @@ endif #------------------------------------------------------------------------------- -#=== Configure the C++ compiler - -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) -Wall -Wshadow -Wextra -ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS += -ffast-math # see issue #117 -endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY - -# Optionally add debug flags to display the full list of flags (eg on Darwin) -###CXXFLAGS+= -v - -# Note: AR, CXX and FC are implicitly defined if not set externally -# See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html - -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler - -# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) -# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below -ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside - $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") - override CUDA_HOME=disabled -endif - -# If CUDA_HOME is not set, try to set it from the location of nvcc -ifndef CUDA_HOME - CUDA_HOME = $(patsubst 
%bin/nvcc,%,$(shell which nvcc 2>/dev/null)) - $(warning CUDA_HOME was not set: using "$(CUDA_HOME)") -endif - -# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists -ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) - NVCC = $(CUDA_HOME)/bin/nvcc - USE_NVTX ?=-DUSE_NVTX - # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html - # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ - # Default: use compute capability 70 for V100 (CERN lxbatch, CERN itscrd, Juwels Cluster). - # Embed device code for 70, and PTX for 70+. - # Export MADGRAPH_CUDA_ARCHITECTURE (comma-separated list) to use another value or list of values (see #533). - # Examples: use 60 for P100 (Piz Daint), 80 for A100 (Juwels Booster, NVidia raplab/Curiosity). - MADGRAPH_CUDA_ARCHITECTURE ?= 70 - ###CUARCHFLAGS = -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=compute_$(MADGRAPH_CUDA_ARCHITECTURE) -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=sm_$(MADGRAPH_CUDA_ARCHITECTURE) # Older implementation (AV): go back to this one for multi-GPU support #533 - ###CUARCHFLAGS = --gpu-architecture=compute_$(MADGRAPH_CUDA_ARCHITECTURE) --gpu-code=sm_$(MADGRAPH_CUDA_ARCHITECTURE),compute_$(MADGRAPH_CUDA_ARCHITECTURE) # Newer implementation (SH): cannot use this as-is for multi-GPU support #533 - comma:=, - CUARCHFLAGS = $(foreach arch,$(subst $(comma), ,$(MADGRAPH_CUDA_ARCHITECTURE)),-gencode arch=compute_$(arch),code=compute_$(arch) -gencode arch=compute_$(arch),code=sm_$(arch)) - CUINC = -I$(CUDA_HOME)/include/ - ifeq ($(RNDGEN),hasNoCurand) - CURANDLIBFLAGS= - else - CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! 
- endif - CUOPTFLAGS = -lineinfo - CUFLAGS = $(foreach opt, $(OPTFLAGS), -Xcompiler $(opt)) $(CUOPTFLAGS) $(INCFLAGS) $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) -use_fast_math - ###CUFLAGS += -Xcompiler -Wall -Xcompiler -Wextra -Xcompiler -Wshadow - ###NVCC_VERSION = $(shell $(NVCC) --version | grep 'Cuda compilation tools' | cut -d' ' -f5 | cut -d, -f1) - CUFLAGS += -std=c++17 # need CUDA >= 11.2 (see #333): this is enforced in mgOnGpuConfig.h - # Without -maxrregcount: baseline throughput: 6.5E8 (16384 32 12) up to 7.3E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 160 # improves throughput: 6.9E8 (16384 32 12) up to 7.7E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 128 # improves throughput: 7.3E8 (16384 32 12) up to 7.6E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 96 # degrades throughput: 4.1E8 (16384 32 12) up to 4.5E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 64 # degrades throughput: 1.7E8 (16384 32 12) flat at 1.7E8 (65536 128 12) -else ifneq ($(origin REQUIRE_CUDA),undefined) - # If REQUIRE_CUDA is set but no cuda is found, stop here (e.g. for CI tests on GPU #443) - $(error No cuda installation found (set CUDA_HOME or make nvcc visible in PATH)) -else - # No cuda. Switch cuda compilation off and go to common random numbers in C++ - $(warning CUDA_HOME is not set or is invalid: export CUDA_HOME to compile with cuda) - override NVCC= - override USE_NVTX= - override CUINC= - override CURANDLIBFLAGS= -endif -export NVCC -export CUFLAGS - -# Set the host C++ compiler for nvcc via "-ccbin " -# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." 
is not supported) -CUFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) - -# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) -ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) -CUFLAGS += -allow-unsupported-compiler -endif - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ and CUDA builds - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif -ifneq ($(NVCC),) - ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) - override NVCC:=ccache $(NVCC) - endif -endif - -#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for C++ and CUDA - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # would increase to none=4.08-4.12E6, sse4=4.99-5.03E6! -else - ###CXXFLAGS+= -flto # also on Intel this would increase throughputs by a factor 2 to 4... - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -# PowerPC-specific CUDA compiler flags (to be reviewed!) 
-ifeq ($(UNAME_P),ppc64le) - CUFLAGS+= -Xcompiler -mno-float128 -endif - -#------------------------------------------------------------------------------- - #=== Configure defaults and check if user-defined choices exist for OMPFLAGS, AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the default OMPFLAGS choice -ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on Intel (was ok without nvcc but not ok with nvcc before #578) -else ifneq ($(shell $(CXX) --version | egrep '^(clang)'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on clang (was not ok without or with nvcc before #578) -###else ifneq ($(shell $(CXX) --version | egrep '^(Apple clang)'),) # AV for Mac (Apple clang compiler) -else ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) +OMPFLAGS ?= -fopenmp +ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) override OMPFLAGS = # AV disable OpenMP MT on Apple clang (builds fail in the CI #578) -###override OMPFLAGS = -fopenmp # OM reenable OpenMP MT on Apple clang? 
(AV Oct 2023: this still fails in the CI) -else -override OMPFLAGS = -fopenmp # enable OpenMP MT by default on all other platforms -###override OMPFLAGS = # disable OpenMP MT on all other platforms (default before #575) -endif - -# Set the default AVX (vectorization) choice -ifeq ($(AVX),) - ifeq ($(UNAME_P),ppc64le) - ###override AVX = none - override AVX = sse4 - else ifeq ($(UNAME_P),arm) - ###override AVX = none - override AVX = sse4 - else ifeq ($(wildcard /proc/cpuinfo),) - override AVX = none - $(warning Using AVX='$(AVX)' because host SIMD features cannot be read from /proc/cpuinfo) - else ifeq ($(shell grep -m1 -c avx512vl /proc/cpuinfo)$(shell $(CXX) --version | grep ^clang),1) - override AVX = 512y - ###$(info Using AVX='$(AVX)' as no user input exists) - else - override AVX = avx2 - ifneq ($(shell grep -m1 -c avx512vl /proc/cpuinfo),1) - $(warning Using AVX='$(AVX)' because host does not support avx512vl) - else - $(warning Using AVX='$(AVX)' because this is faster than avx512vl for clang) - endif - endif -else - ###$(info Using AVX='$(AVX)' according to user input) -endif - -# Set the default FPTYPE (floating point type) choice -ifeq ($(FPTYPE),) - override FPTYPE = d -endif - -# Set the default HELINL (inline helicities?) choice -ifeq ($(HELINL),) - override HELINL = 0 -endif - -# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice -ifeq ($(HRDCOD),) - override HRDCOD = 0 -endif - -# Set the default RNDGEN (random number generator) choice -ifeq ($(RNDGEN),) - ifeq ($(NVCC),) - override RNDGEN = hasNoCurand - else ifeq ($(RNDGEN),) - override RNDGEN = hasCurand - endif endif -# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so that it is not necessary to pass them to the src Makefile too -export AVX -export FPTYPE -export HELINL -export HRDCOD -export RNDGEN +# Export here, so sub makes don't fall back to the defaults: export OMPFLAGS -#------------------------------------------------------------------------------- - -#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN - -# Set the build flags appropriate to OMPFLAGS -$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override 
AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS - CUFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM - CUFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND -else ifeq ($(RNDGEN),hasCurand) - override CXXFLAGSCURAND = -else - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- #=== Configure build directories and build lockfiles === -# Build directory "short" tag (defines target and path to the optional build directory) -# (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) - -# Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) -# (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) - -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIR = ../../lib/$(BUILDDIR) - override LIBDIRRPATH = '$$ORIGIN/../$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is set = 1)) -else - override BUILDDIR = . - override LIBDIR = ../../lib - override LIBDIRRPATH = '$$ORIGIN/$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) +# Build directory "short" tag (defines target and path to the build directory) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +CUDACPP_BUILDDIR = build.$(DIRTAG) +CUDACPP_LIBDIR := ../../lib/$(CUDACPP_BUILDDIR) +LIBDIRRPATH := '$$ORIGIN:$$ORIGIN/../$(CUDACPP_LIBDIR)' +ifneq ($(AVX),) + $(info Building CUDACPP in CUDACPP_BUILDDIR=$(CUDACPP_BUILDDIR). 
Libs in $(CUDACPP_LIBDIR)) endif -###override INCDIR = ../../include -###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) -# On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH +# On Linux, set rpath to CUDACPP_LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables or shared libraries ($ORIGIN on Linux) -# On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary +# On Darwin, building libraries with absolute paths in CUDACPP_LIBDIR makes this unnecessary ifeq ($(UNAME_S),Darwin) override CXXLIBFLAGSRPATH = override CULIBFLAGSRPATH = - override CXXLIBFLAGSRPATH2 = - override CULIBFLAGSRPATH2 = else # RPATH to cuda/cpp libs when linking executables override CXXLIBFLAGSRPATH = -Wl,-rpath,$(LIBDIRRPATH) override CULIBFLAGSRPATH = -Xlinker -rpath,$(LIBDIRRPATH) - # RPATH to common lib when linking cuda/cpp libs - override CXXLIBFLAGSRPATH2 = -Wl,-rpath,'$$ORIGIN' - override CULIBFLAGSRPATH2 = -Xlinker -rpath,'$$ORIGIN' endif # Setting LD_LIBRARY_PATH or DYLD_LIBRARY_PATH in the RUNTIME is no longer necessary (neither on Linux nor on Mac) @@ -458,107 +129,68 @@ override RUNTIME = #=== Makefile TARGETS and build rules below #=============================================================================== -cxx_main=$(BUILDDIR)/check.exe -fcxx_main=$(BUILDDIR)/fcheck.exe +cxx_main=$(CUDACPP_BUILDDIR)/check.exe +fcxx_main=$(CUDACPP_BUILDDIR)/fcheck.exe -ifneq ($(NVCC),) -cu_main=$(BUILDDIR)/gcheck.exe -fcu_main=$(BUILDDIR)/fgcheck.exe -else -cu_main= -fcu_main= -endif - -testmain=$(BUILDDIR)/runTest.exe +cu_main=$(CUDACPP_BUILDDIR)/gcheck.exe +fcu_main=$(CUDACPP_BUILDDIR)/fgcheck.exe ifneq ($(GTESTLIBS),) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) $(testmain) -else -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) 
+testmain=$(CUDACPP_BUILDDIR)/runTest.exe +cutestmain=$(CUDACPP_BUILDDIR)/runTest_cuda.exe endif -# Target (and build options): debug -MAKEDEBUG= -debug: OPTFLAGS = -g -O0 -debug: CUOPTFLAGS = -G -debug: MAKEDEBUG := debug -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -$(BUILDDIR)/.build.$(TAG): - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @if [ "$(oldtagsb)" != "" ]; then echo "Cannot build for tag=$(TAG) as old builds exist for other tags:"; echo " $(oldtagsb)"; echo "Please run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @touch $(BUILDDIR)/.build.$(TAG) +cppbuild: $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(cxx_main) $(fcxx_main) $(testmain) +cudabuild: $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(cu_main) $(fcu_main) $(cutestmain) # Generic target and build rules: objects from CUDA compilation -ifneq ($(NVCC),) -$(BUILDDIR)/%.o : %.cu *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cu *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c $< -o $@ -$(BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ -endif +$(CUDACPP_BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ # Generic target and build rules: objects from C++ compilation # (NB do not include CUINC here! 
add it only for NVTX or curand #679) -$(BUILDDIR)/%.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Apply special build flags only to CrossSectionKernel.cc and gCrossSectionKernel.cu (no fast math, see #117 and #516) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS := $(filter-out -ffast-math,$(CXXFLAGS)) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math +$(CUDACPP_BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math ifneq ($(NVCC),) -$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -fno-fast-math +$(CUDACPP_BUILDDIR)/gCrossSectionKernels.o: NVCCFLAGS += -Xcompiler -fno-fast-math endif endif # Apply special build flags only to check_sa.o and gcheck_sa.o (NVTX in timermap.h, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) -$(BUILDDIR)/gcheck_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) # Apply special build flags only to check_sa and CurandRandomNumberKernel (curand headers, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gcheck_sa.o: CUFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gCurandRandomNumberKernel.o: CUFLAGS += $(CXXFLAGSCURAND) -ifeq ($(RNDGEN),hasCurand) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CUINC) -endif +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) 
+$(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) + # Avoid "warning: builtin __has_trivial_... is deprecated; use __is_trivially_... instead" in nvcc with icx2023 (#592) ifneq ($(shell $(CXX) --version | egrep '^(Intel)'),) ifneq ($(NVCC),) -CUFLAGS += -Xcompiler -Wno-deprecated-builtins +MG_NVCCFLAGS += -Xcompiler -Wno-deprecated-builtins endif endif -# Avoid clang warning "overriding '-ffp-contract=fast' option with '-ffp-contract=on'" (#516) -# This patch does remove the warning, but I prefer to keep it disabled for the moment... -###ifneq ($(shell $(CXX) --version | egrep '^(clang|Apple clang|Intel)'),) -###$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -Wno-overriding-t-option -###ifneq ($(NVCC),) -###$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -Wno-overriding-t-option -###endif -###endif - #### Apply special build flags only to CPPProcess.cc (-flto) ###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += -flto -#### Apply special build flags only to CPPProcess.cc (AVXFLAGS) -###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += $(AVXFLAGS) - #------------------------------------------------------------------------------- -# Target (and build rules): common (src) library -commonlib : $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc $(BUILDDIR)/.build.$(TAG) - $(MAKE) -C ../../src $(MAKEDEBUG) -f $(CUDACPP_SRC_MAKEFILE) +$(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc + $(MAKE) AVX=$(AVX) AVXFLAGS="$(AVXFLAGS)" -C ../../src -f $(CUDACPP_SRC_MAKEFILE) #------------------------------------------------------------------------------- @@ -566,162 +198,123 @@ processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') ###$(info processid_short=$(processid_short)) MG5AMC_CXXLIB = mg5amc_$(processid_short)_cpp -cxx_objects_lib=$(BUILDDIR)/CPPProcess.o $(BUILDDIR)/MatrixElementKernels.o $(BUILDDIR)/BridgeKernels.o $(BUILDDIR)/CrossSectionKernels.o 
-cxx_objects_exe=$(BUILDDIR)/CommonRandomNumberKernel.o $(BUILDDIR)/RamboSamplingKernels.o +cxx_objects_lib=$(CUDACPP_BUILDDIR)/CPPProcess.o $(CUDACPP_BUILDDIR)/MatrixElementKernels.o $(CUDACPP_BUILDDIR)/BridgeKernels.o $(CUDACPP_BUILDDIR)/CrossSectionKernels.o +cxx_objects_exe=$(CUDACPP_BUILDDIR)/CommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/RamboSamplingKernels.o -ifneq ($(NVCC),) MG5AMC_CULIB = mg5amc_$(processid_short)_cuda -cu_objects_lib=$(BUILDDIR)/gCPPProcess.o $(BUILDDIR)/gMatrixElementKernels.o $(BUILDDIR)/gBridgeKernels.o $(BUILDDIR)/gCrossSectionKernels.o -cu_objects_exe=$(BUILDDIR)/gCommonRandomNumberKernel.o $(BUILDDIR)/gRamboSamplingKernels.o -endif +cu_objects_lib=$(CUDACPP_BUILDDIR)/gCPPProcess.o $(CUDACPP_BUILDDIR)/gMatrixElementKernels.o $(CUDACPP_BUILDDIR)/gBridgeKernels.o $(CUDACPP_BUILDDIR)/gCrossSectionKernels.o +cu_objects_exe=$(CUDACPP_BUILDDIR)/gCommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/gRamboSamplingKernels.o # Target (and build rules): C++ and CUDA shared libraries -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) - $(CXX) -shared -o $@ $(cxx_objects_lib) $(CXXLIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) - -ifneq ($(NVCC),) -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) - $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -endif +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) + $(CXX) -shared -o $@ 
$(cxx_objects_lib) $(CXXLIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) $(MG_LDFLAGS) $(LDFLAGS) -#------------------------------------------------------------------------------- - -# Target (and build rules): Fortran include files -###$(INCDIR)/%.inc : ../%.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### \cp $< $@ +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) + $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) #------------------------------------------------------------------------------- # Target (and build rules): C++ and CUDA standalone executables -$(cxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cxx_main): $(BUILDDIR)/check_sa.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o - $(CXX) -o $@ $(BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o $(CURANDLIBFLAGS) -ifneq ($(NVCC),) +$(cxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(cxx_main): $(CUDACPP_BUILDDIR)/check_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) + ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(cu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(cu_main): LIBFLAGS 
+= -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(cu_main): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(cu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cu_main): $(BUILDDIR)/gcheck_sa.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o - $(NVCC) -o $@ $(BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o $(CURANDLIBFLAGS) +$(cu_main): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif +$(cu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(cu_main): $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- - -# Generic target and build rules: objects from Fortran compilation -$(BUILDDIR)/%.o : %.f *.inc - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(FC) -I. -c $< -o $@ - -# Generic target and build rules: objects from Fortran compilation -###$(BUILDDIR)/%.o : %.f *.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi -### $(FC) -I. -I$(INCDIR) -c $< -o $@ - -# Target (and build rules): Fortran standalone executables -###$(BUILDDIR)/fcheck_sa.o : $(INCDIR)/fbridge.inc +# Check executables: ifeq ($(UNAME_S),Darwin) -$(fcxx_main): LIBFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 +$(fcxx_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif -$(fcxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcxx_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) - $(CXX) -o $@ $(BUILDDIR)/fcheck_sa.o $(OMPFLAGS) $(BUILDDIR)/fsampler.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) +$(fcxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(fcxx_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(cxx_objects_exe) $(OMPFLAGS) $(CUDACPP_BUILDDIR)/fsampler.o -lgfortran -L$(CUDACPP_LIBDIR) $(MG_LDFLAGS) $(LDFLAGS) -ifneq ($(NVCC),) ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(fcu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(fcu_main): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(fcu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(fcu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') endif ifeq ($(UNAME_S),Darwin) -$(fcu_main): LIBFLAGS += -L$(shell dirname 
$(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 -endif -$(fcu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcu_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) - $(NVCC) -o $@ $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) +$(fcu_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif +$(fcu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(fcu_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(cu_objects_exe) -lgfortran $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- # Target (and build rules): test objects and test executable -$(BUILDDIR)/testxxx.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx_cu.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx_cu.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -endif +$(testmain) $(cutestmain): $(GTESTLIBS) +$(testmain) $(cutestmain): INCFLAGS += $(GTESTINC) +$(testmain) $(cutestmain): MG_LDFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main -$(BUILDDIR)/testmisc.o: $(GTESTLIBS) 
-$(BUILDDIR)/testmisc.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(CUDACPP_BUILDDIR)/testxxx.o $(CUDACPP_BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) testxxx_cc_ref.txt +$(testmain): $(CUDACPP_BUILDDIR)/testxxx.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions +$(cutestmain): $(CUDACPP_BUILDDIR)/testxxx_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testmisc_cu.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests -endif -$(BUILDDIR)/runTest.o: $(GTESTLIBS) -$(BUILDDIR)/runTest.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/runTest.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/runTest.o +$(CUDACPP_BUILDDIR)/testmisc.o $(CUDACPP_BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/testmisc.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(cutestmain): $(CUDACPP_BUILDDIR)/testmisc_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests + + +$(CUDACPP_BUILDDIR)/runTest.o $(CUDACPP_BUILDDIR)/runTest_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/runTest.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/runTest.o +$(cutestmain): $(CUDACPP_BUILDDIR)/runTest_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/runTest_cu.o + -ifneq ($(NVCC),) -$(BUILDDIR)/runTest_cu.o: $(GTESTLIBS) -$(BUILDDIR)/runTest_cu.o: INCFLAGS += $(GTESTINC) ifneq ($(shell $(CXX) --version | grep ^Intel),) 
-$(testmain): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(testmain): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cutestmain): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cutestmain): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(testmain): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(testmain): $(BUILDDIR)/runTest_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/runTest_cu.o +$(cutestmain): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif -$(testmain): $(GTESTLIBS) -$(testmain): INCFLAGS += $(GTESTINC) -$(testmain): LIBFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main ifneq ($(OMPFLAGS),) ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -$(testmain): LIBFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) +$(testmain): MG_LDFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -$(testmain): LIBFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 +$(testmain): MG_LDFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 ###else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) ###$(testmain): LIBFLAGS += ???? 
# OMP is not supported yet by cudacpp for Apple clang (see #578 and #604) else -$(testmain): LIBFLAGS += -lgomp +$(testmain): MG_LDFLAGS += -lgomp endif endif -ifeq ($(NVCC),) # link only runTest.o -$(testmain): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) - $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -ldl -pthread $(LIBFLAGS) -else # link both runTest.o and runTest_cu.o -$(testmain): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) - $(NVCC) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) -ldl $(LIBFLAGS) -lcuda -endif +$(testmain): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(testmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) + $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -pthread $(MG_LDFLAGS) $(LDFLAGS) + +$(cutestmain): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cutestmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) + $(NVCC) -o $@ $(cu_objects_lib) $(cu_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -lcuda $(MG_LDFLAGS) $(LDFLAGS) # Use target gtestlibs to build only googletest ifneq ($(GTESTLIBS),) @@ -731,72 +324,15 @@ endif # Use flock (Linux only, no Mac) to allow 'make -j' if googletest has not yet been downloaded https://stackoverflow.com/a/32666215 $(GTESTLIBS): ifneq ($(shell which flock 2>/dev/null),) - @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - flock $(BUILDDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) + flock $(TESTDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) else if [ -d $(TESTDIR) ]; then $(MAKE) -C $(TESTDIR); fi endif #------------------------------------------------------------------------------- -# Target: build all targets in all AVX modes (each AVX mode in a separate build directory) -# Split the avxall target into five separate targets to allow parallel 'make -j avxall' builds -# (Hack: add a fbridge.inc dependency to avxall, to ensure it is only copied once for all AVX modes) -avxnone: - @echo - $(MAKE) USEBUILDDIR=1 AVX=none -f $(CUDACPP_MAKEFILE) - -avxsse4: - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 -f $(CUDACPP_MAKEFILE) - -avxavx2: - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 -f $(CUDACPP_MAKEFILE) - -avx512y: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y -f $(CUDACPP_MAKEFILE) - -avx512z: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z -f $(CUDACPP_MAKEFILE) - -ifeq ($(UNAME_P),ppc64le) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else ifeq ($(UNAME_P),arm) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 avxavx2 avx512y avx512z -avxall: avxnone avxsse4 avxavx2 avx512y avx512z -endif - -#------------------------------------------------------------------------------- - -# Target: clean the builds -.PHONY: clean - -clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(BUILDDIR) -else - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe - rm -f $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(LIBDIR)/lib$(MG5AMC_CULIB).so -endif - $(MAKE) -C ../../src clean -f $(CUDACPP_SRC_MAKEFILE) -### rm -rf $(INCDIR) - -cleanall: - @echo - $(MAKE) USEBUILDDIR=0 clean -f $(CUDACPP_MAKEFILE) - @echo - $(MAKE) USEBUILDDIR=0 -C ../../src cleanall -f $(CUDACPP_SRC_MAKEFILE) - rm -rf build.* - # Target: clean the builds as well as the gtest installation(s) 
-distclean: cleanall +distclean: clean cleansrc ifneq ($(wildcard $(TESTDIRCOMMON)),) $(MAKE) -C $(TESTDIRCOMMON) clean endif @@ -848,50 +384,55 @@ endif #------------------------------------------------------------------------------- -# Target: check (run the C++ test executable) +# Target: check/gcheck (run the C++ test executable) # [NB THIS IS WHAT IS USED IN THE GITHUB CI!] -ifneq ($(NVCC),) -check: runTest cmpFcheck cmpFGcheck -else check: runTest cmpFcheck -endif +gcheck: + $(MAKE) AVX=cuda runTest cmpFGcheck # Target: runTest (run the C++ test executable runTest.exe) -runTest: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/runTest.exe +ifneq ($(AVX),cuda) +runTest: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest.exe +else +runTest: cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest_cuda.exe +endif + # Target: runCheck (run the C++ standalone executable check.exe, with a small number of events) -runCheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/check.exe -p 2 32 2 +runCheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe -p 2 32 2 # Target: runGcheck (run the CUDA standalone executable gcheck.exe, with a small number of events) -runGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/gcheck.exe -p 2 32 2 +runGcheck: AVX=cuda +runGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe -p 2 32 2 # Target: runFcheck (run the Fortran standalone executable - with C++ MEs - fcheck.exe, with a small number of events) -runFcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 +runFcheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 # Target: runFGcheck (run the Fortran standalone executable - with CUDA MEs - fgcheck.exe, with a small number of events) -runFGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 +runFGcheck: AVX=cuda +runFGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 # Target: cmpFcheck (compare ME results from the C++ and Fortran with C++ MEs standalone executables, with 
a small number of events) -cmpFcheck: all.$(TAG) +cmpFcheck: cppbuild @echo - @echo "$(BUILDDIR)/check.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi # Target: cmpFGcheck (compare ME results from the CUDA and Fortran with CUDA MEs standalone executables, with a small number of events) -cmpFGcheck: all.$(TAG) +cmpFGcheck: AVX=cuda +cmpFGcheck: + $(MAKE) AVX=cuda cudabuild @echo - @echo "$(BUILDDIR)/gcheck.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fgcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi -# Target: memcheck (run the CUDA standalone executable gcheck.exe with a small number of events through cuda-memcheck) -memcheck: all.$(TAG) - $(RUNTIME) $(CUDA_HOME)/bin/cuda-memcheck --check-api-memory-access yes --check-deprecated-instr yes --check-device-heap yes --demangle full --language c --leak-check full --racecheck-report all --report-api-errors all --show-backtrace yes --tool memcheck --track-unused-memory yes $(BUILDDIR)/gcheck.exe -p 2 32 2 - -#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/makefile b/epochX/cudacpp/gq_ttq.mad/SubProcesses/makefile index d572486c2e..b69917ee1f 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/makefile +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/makefile @@ -1,27 +1,30 @@ SHELL := /bin/bash -include ../../Source/make_opts -FFLAGS+= -w +# Include general setup +OPTIONS_MAKEFILE := ../../Source/make_opts +include $(OPTIONS_MAKEFILE) # Enable the C preprocessor https://gcc.gnu.org/onlinedocs/gfortran/Preprocessing-Options.html -FFLAGS+= -cpp +MG_FCFLAGS += -cpp +MG_CXXFLAGS += -I. -# Compile counters with -O3 as in the cudacpp makefile (avoid being "unfair" to Fortran #740) -CXXFLAGS = -O3 -Wall -Wshadow -Wextra +all: help cppnative + +# Target if user does not specify target +help: + $(info No target specified.) 
+ $(info Viable targets are 'cppnative' (default), 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z' and 'cuda') + $(info Or 'cppall' for all C++ targets) + $(info Or 'ALL' for all C++ and cuda targets) -# Add -std=c++17 explicitly to avoid build errors on macOS -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 -endif -# Enable ccache if USECCACHE=1 +# Enable ccache for C++ if USECCACHE=1 (do not enable it for Fortran since it is not supported for Fortran) ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) override CXX:=ccache $(CXX) endif -ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) - override FC:=ccache $(FC) -endif +###ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) +### override FC:=ccache $(FC) +###endif # Load additional dependencies of the bias module, if present ifeq (,$(wildcard ../bias_dependencies)) @@ -46,34 +49,25 @@ else MADLOOP_LIB = endif -LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias - -processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') -CUDACPP_MAKEFILE=cudacpp.mk -# NB1 Using ":=" below instead of "=" is much faster (it only runs the subprocess once instead of many times) -# NB2 Use '|&' in CUDACPP_BUILDDIR to avoid confusing errors about googletest #507 -# NB3 Do not add a comment inlined "CUDACPP_BUILDDIR=$(shell ...) # comment" as otherwise a trailing space is included... -# NB4 The variables relevant to the cudacpp Makefile must be explicitly passed to $(shell...) 
-CUDACPP_MAKEENV:=$(shell echo '$(.VARIABLES)' | tr " " "\n" | egrep "(USEBUILDDIR|AVX|FPTYPE|HELINL|HRDCOD)") -###$(info CUDACPP_MAKEENV=$(CUDACPP_MAKEENV)) -###$(info $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))")) -CUDACPP_BUILDDIR:=$(shell $(MAKE) $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))") -f $(CUDACPP_MAKEFILE) -pn 2>&1 | awk '/Building/{print $$3}' | sed s/BUILDDIR=//) -ifeq ($(CUDACPP_BUILDDIR),) -$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) -else -$(info CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)') -endif -CUDACPP_COMMONLIB=mg5amc_common -CUDACPP_CXXLIB=mg5amc_$(processid_short)_cpp -CUDACPP_CULIB=mg5amc_$(processid_short)_cuda - +LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias LIBS = $(LIBDIR)libbias.$(libext) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(MADLOOP_LIB) $(LOOP_LIBS) ifneq ("$(wildcard ../../Source/RUNNING)","") LINKLIBS += -lrunning - LIBS += $(LIBDIR)librunning.$(libext) + LIBS += $(LIBDIR)librunning.$(libext) endif +SOURCEDIR_GUARD:=../../Source/.timestamp_guard +# We use $(SOURCEDIR_GUARD) to figure out if Source is out of date. The Source makefile doesn't correctly +# update all files, so we need a proxy that is updated every time we run "$(MAKE) -C ../../Source". 
+$(SOURCEDIR_GUARD) ../../Source/discretesampler.mod &: ../../Source/*.f ../../Cards/param_card.dat ../../Cards/run_card.dat +ifneq ($(shell which flock 2>/dev/null),) + flock ../../Source/.lock -c "$(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD)" +else + $(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD) +endif + +$(LIBS): $(SOURCEDIR_GUARD) # Source files @@ -91,82 +85,83 @@ PROCESS= myamp.o genps.o unwgt.o setcuts.o get_color.o \ DSIG=driver.o $(patsubst %.f, %.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) DSIG_cudacpp=driver_cudacpp.o $(patsubst %.f, %_cudacpp.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) -SYMMETRY = symmetry.o idenparts.o +SYMMETRY = symmetry.o idenparts.o -# Binaries +# cudacpp targets: +CUDACPP_MAKEFILE := cudacpp.mk +ifneq (,$(wildcard $(CUDACPP_MAKEFILE))) +include $(CUDACPP_MAKEFILE) +endif -ifeq ($(UNAME),Darwin) -LDFLAGS += -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) -LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" -else -LDFLAGS += -Wl,--no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +ifeq ($(CUDACPP_BUILDDIR),) +$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) 
endif +CUDACPP_COMMONLIB=mg5amc_common +CUDACPP_CXXLIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so +CUDACPP_CULIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so -all: $(PROG)_fortran $(CUDACPP_BUILDDIR)/$(PROG)_cpp # also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (#503) +# Set up OpenMP if supported +OMPFLAGS ?= -fopenmp ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp LINKLIBS += -liomp5 # see #578 LINKLIBS += -lintlc # undefined reference to `_intel_fast_memcpy' else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -override OMPFLAGS = -fopenmp $(CUDACPP_BUILDDIR)/$(PROG)_cpp: LINKLIBS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -override OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang -else -override OMPFLAGS = -fopenmp +OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang endif -$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o - $(FC) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) - -$(LIBS): .libs -.libs: ../../Cards/param_card.dat ../../Cards/run_card.dat - cd ../../Source; make - touch $@ +# Binaries -$(CUDACPP_BUILDDIR)/.cudacpplibs: - $(MAKE) -f $(CUDACPP_MAKEFILE) - touch $@ +$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) # On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables ($ORIGIN on Linux) # On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary -ifeq ($(UNAME_S),Darwin) - override LIBFLAGSRPATH = -else ifeq ($(USEBUILDDIR),1) 
- override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' -else - override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/$(LIBDIR)' +ifneq ($(UNAME_S),Darwin) + LIBFLAGSRPATH := -Wl,-rpath,'$$ORIGIN:$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' endif -.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link +.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link madevent_cppnone_link madevent_cppsse4_link madevent_cppavx2_link madevent_cpp512y_link madevent_cpp512z_link clean cleanall cleansrc madevent_fortran_link: $(PROG)_fortran rm -f $(PROG) ln -s $(PROG)_fortran $(PROG) -madevent_cpp_link: $(CUDACPP_BUILDDIR)/$(PROG)_cpp - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) +madevent_cppnone_link: AVX=none +madevent_cppnone_link: cppnone + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -madevent_cuda_link: $(CUDACPP_BUILDDIR)/$(PROG)_cuda - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) +madevent_cppavx2_link: AVX=avx2 +madevent_cppavx2_link: cppavx2 + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512y_link: AVX=512y +madevent_cpp512y_link: cppavx512y + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512z_link: AVX=512z +madevent_cpp512z_link: cppavx512z + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -# Building $(PROG)_cpp also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (improved patch for cpp-only builds #503) -$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o $(CUDACPP_BUILDDIR)/.cudacpplibs - $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CXXLIB) $(LIBFLAGSRPATH) $(LDFLAGS) - if [ -f $(LIBDIR)/$(CUDACPP_BUILDDIR)/lib$(CUDACPP_CULIB).* ]; then $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROCESS) $(DSIG_cudacpp) 
auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CULIB) $(LIBFLAGSRPATH) $(LDFLAGS); fi +madevent_cuda_link: AVX=cuda +madevent_cuda_link: cuda + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) -$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(CUDACPP_BUILDDIR)/$(PROG)_cpp +$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(LIBS) $(CUDACPP_CXXLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) + +$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(LIBS) $(CUDACPP_CULIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) counters.o: counters.cc timer.h - $(CXX) $(CXXFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ ompnumthreads.o: ompnumthreads.cc ompnumthreads.h - $(CXX) -I. $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) $(OMPFLAGS) @@ -174,27 +169,14 @@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) gensym: $(SYMMETRY) configs.inc $(LIBS) $(FC) -o gensym $(SYMMETRY) -L$(LIBDIR) $(LINKLIBS) $(LDFLAGS) -###ifeq (,$(wildcard fbridge.inc)) # Pointless: fbridge.inc always exists as this is the cudacpp-modified makefile! 
-###$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat -### cd ../../Source/MODEL; make -### -###$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat -### cd ../../Source; make -### -###$(LIBDIR)libpdf.$(libext): -### cd ../../Source/PDF; make -### -###$(LIBDIR)libgammaUPC.$(libext): -### cd ../../Source/PDF/gammaUPC; make -###endif # Add source so that the compiler finds the DiscreteSampler module. $(MATRIX): %.o: %.f - $(FC) $(FFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC -%.o: %.f - $(FC) $(FFLAGS) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC + $(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC +%.o $(CUDACPP_BUILDDIR)/%.o: %.f + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -I../../Source/ -I../../Source/PDF/gammaUPC -c $< -o $@ %_cudacpp.o: %.f - $(FC) $(FFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ # Dependencies @@ -215,60 +197,42 @@ unwgt.o: genps.inc nexternal.inc symswap.inc cluster.inc run.inc message.inc \ initcluster.o: message.inc # Extra dependencies on discretesampler.mod +../../Source/discretesampler.mod: ../../Source/DiscreteSampler.f -auto_dsig.o: .libs -driver.o: .libs -driver_cudacpp.o: .libs -$(MATRIX): .libs -genps.o: .libs +auto_dsig.o: ../../Source/discretesampler.mod +driver.o: ../../Source/discretesampler.mod +driver_cudacpp.o: ../../Source/discretesampler.mod +$(MATRIX): ../../Source/discretesampler.mod +genps.o: ../../Source/discretesampler.mod # Cudacpp avxall targets -UNAME_P := $(shell uname -p) ifeq ($(UNAME_P),ppc64le) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else ifeq ($(UNAME_P),arm) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else -avxall: avxnone avxsse4 avxavx2 avx512y avx512z +cppall: cppnative cppnone cppsse4 cppavx2 cppavx512y cppavx512z endif -avxnone: $(PROG)_fortran $(DSIG_cudacpp) - @echo 
- $(MAKE) USEBUILDDIR=1 AVX=none - -avxsse4: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 - -avxavx2: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 - -avx512y: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y - -avx512z: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z - -###endif - -# Clean (NB: 'make clean' in Source calls 'make clean' in all P*) +ALL: cppall cuda -clean: # Clean builds: fortran in this Pn; cudacpp executables for one AVX in this Pn - $(RM) *.o gensym $(PROG) $(PROG)_fortran $(PROG)_forhel $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(CUDACPP_BUILDDIR)/$(PROG)_cuda +# Clean all architecture-specific builds: +clean: + $(RM) *.o gensym $(PROG) $(PROG)_* + $(RM) -rf build.*/*{.o,.so,.exe,.dylib,madevent_*} + @for dir in build.*; do if [ -z "$$(ls -A $${dir})" ]; then rm -r $${dir}; else echo "Not cleaning $${dir}; not empty"; fi; done -cleanavxs: clean # Clean builds: fortran in this Pn; cudacpp for all AVX in this Pn and in src - $(MAKE) -f $(CUDACPP_MAKEFILE) cleanall - rm -f $(CUDACPP_BUILDDIR)/.cudacpplibs - rm -f .libs +cleanall: cleansrc + for PROCESS in ../P[0-9]*; do $(MAKE) -C $${PROCESS} clean; done -cleanall: # Clean builds: fortran in all P* and in Source; cudacpp for all AVX in all P* and in src - make -C ../../Source cleanall - rm -rf $(LIBDIR)libbias.$(libext) - rm -f ../../Source/*.mod ../../Source/*/*.mod +# Clean one architecture-specific build +clean%: + $(RM) -r build.$*_* -distclean: cleanall # Clean all fortran and cudacpp builds as well as the googletest installation - $(MAKE) -f $(CUDACPP_MAKEFILE) distclean +# Clean common source directories (interferes with other P*) +cleansrc: + make -C ../../Source clean + $(RM) -f $(SOURCEDIR_GUARD) ../../Source/{*.mod,.lock} ../../Source/*/*.mod + $(RM) -r $(LIBDIR)libbias.$(libext) + if [ -d ../../src ]; then $(MAKE) -C ../../src -f cudacpp_src.mk clean; fi diff --git 
a/epochX/cudacpp/gq_ttq.mad/SubProcesses/runTest.cc b/epochX/cudacpp/gq_ttq.mad/SubProcesses/runTest.cc index d4a760a71b..6c77775fb2 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/runTest.cc +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/runTest.cc @@ -243,18 +243,20 @@ struct CUDATest : public CUDA_CPU_TestBase // Use two levels of macros to force stringification at the right level // (see https://gcc.gnu.org/onlinedocs/gcc-3.0.1/cpp_3.html#SEC17 and https://stackoverflow.com/a/3419392) // Google macro is in https://github.com/google/googletest/blob/master/googletest/include/gtest/gtest-param-test.h +/* clang-format off */ #define TESTID_CPU( s ) s##_CPU #define XTESTID_CPU( s ) TESTID_CPU( s ) #define MG_INSTANTIATE_TEST_SUITE_CPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); #define TESTID_GPU( s ) s##_GPU #define XTESTID_GPU( s ) TESTID_GPU( s ) #define MG_INSTANTIATE_TEST_SUITE_GPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); +/* clang-format on */ #ifdef __CUDACC__ MG_INSTANTIATE_TEST_SUITE_GPU( XTESTID_GPU( MG_EPOCH_PROCESS_ID ), MadgraphTest ); diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/testxxx.cc b/epochX/cudacpp/gq_ttq.mad/SubProcesses/testxxx.cc index 3361fe5aa9..1d315f6d75 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/testxxx.cc +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/testxxx.cc @@ -40,7 +40,7 @@ namespace mg5amcCpu { std::string FPEhandlerMessage = "unknown"; int FPEhandlerIevt = -1; - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef 
__CUDACC__ std::cerr << "Floating Point Exception (GPU): '" << FPEhandlerMessage << "' ievt=" << FPEhandlerIevt << std::endl; @@ -71,11 +71,10 @@ TEST( XTESTID( MG_EPOCH_PROCESS_ID ), testxxx ) constexpr bool testEvents = !dumpEvents; // run the test? constexpr fptype toleranceXXXs = std::is_same::value ? 1.E-15 : 1.E-5; // Constant parameters - constexpr int neppM = MemoryAccessMomenta::neppM; // AOSOA layout constexpr int np4 = CPPProcess::np4; - const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') - assert( nevt % neppM == 0 ); // nevt must be a multiple of neppM - assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV + const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') + assert( nevt % MemoryAccessMomenta::neppM == 0 ); // nevt must be a multiple of neppM + assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV // Fill in the input momenta #ifdef __CUDACC__ mg5amcGpu::PinnedHostBufferMomenta hstMomenta( nevt ); // AOSOA[npagM][npar=4][np4=4][neppM] diff --git a/epochX/cudacpp/gq_ttq.mad/bin/internal/banner.py b/epochX/cudacpp/gq_ttq.mad/bin/internal/banner.py index bd1517985f..b408679c2f 100755 --- a/epochX/cudacpp/gq_ttq.mad/bin/internal/banner.py +++ b/epochX/cudacpp/gq_ttq.mad/bin/internal/banner.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. 
# # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,7 +53,7 @@ MADEVENT = False import madgraph.various.misc as misc import madgraph.iolibs.file_writers as file_writers - import madgraph.iolibs.files as files + import madgraph.iolibs.files as files import models.check_param_card as param_card_reader from madgraph import MG5DIR, MadGraph5Error, InvalidCmd @@ -80,36 +80,36 @@ class Banner(dict): 'mgproccard': 'MGProcCard', 'mgruncard': 'MGRunCard', 'ma5card_parton' : 'MA5Card_parton', - 'ma5card_hadron' : 'MA5Card_hadron', + 'ma5card_hadron' : 'MA5Card_hadron', 'mggenerationinfo': 'MGGenerationInfo', 'mgpythiacard': 'MGPythiaCard', 'mgpgscard': 'MGPGSCard', 'mgdelphescard': 'MGDelphesCard', 'mgdelphestrigger': 'MGDelphesTrigger', 'mgshowercard': 'MGShowerCard' } - + forbid_cdata = ['initrwgt'] - + def __init__(self, banner_path=None): """ """ if isinstance(banner_path, Banner): dict.__init__(self, banner_path) self.lhe_version = banner_path.lhe_version - return + return else: dict.__init__(self) - + #Look at the version if MADEVENT: self['mgversion'] = '#%s\n' % open(pjoin(MEDIR, 'MGMEVersion.txt')).read() else: info = misc.get_pkg_info() self['mgversion'] = info['version']+'\n' - + self.lhe_version = None - + if banner_path: self.read_banner(banner_path) @@ -123,7 +123,7 @@ def __init__(self, banner_path=None): 'mgruncard':'run_card.dat', 'mgpythiacard':'pythia_card.dat', 'mgpgscard' : 'pgs_card.dat', - 'mgdelphescard':'delphes_card.dat', + 'mgdelphescard':'delphes_card.dat', 'mgdelphestrigger':'delphes_trigger.dat', 'mg5proccard':'proc_card_mg5.dat', 'mgproccard': 'proc_card.dat', @@ -137,10 +137,10 @@ def __init__(self, banner_path=None): 'mgshowercard':'shower_card.dat', 'pythia8':'pythia8_card.dat', 'ma5card_parton':'madanalysis5_parton_card.dat', - 'ma5card_hadron':'madanalysis5_hadron_card.dat', + 'ma5card_hadron':'madanalysis5_hadron_card.dat', 'run_settings':'' } - + def read_banner(self, input_path): """read a banner""" 
@@ -151,7 +151,7 @@ def read_banner(self, input_path): def split_iter(string): return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL)) input_path = split_iter(input_path) - + text = '' store = False for line in input_path: @@ -170,13 +170,13 @@ def split_iter(string): text += line else: text += '%s%s' % (line, '\n') - - #reaching end of the banner in a event file avoid to read full file + + #reaching end of the banner in a event file avoid to read full file if "
" in line: break elif "" in line: break - + def __getattribute__(self, attr): """allow auto-build for the run_card/param_card/... """ try: @@ -187,23 +187,23 @@ def __getattribute__(self, attr): return self.charge_card(attr) - + def change_lhe_version(self, version): """change the lhe version associate to the banner""" - + version = float(version) if version < 3: version = 1 elif version > 3: raise Exception("Not Supported version") self.lhe_version = version - + def get_cross(self, witherror=False): """return the cross-section of the file""" if "init" not in self: raise Exception - + text = self["init"].split('\n') cross = 0 error = 0 @@ -217,13 +217,13 @@ def get_cross(self, witherror=False): return cross else: return cross, math.sqrt(error) - + def scale_init_cross(self, ratio): """modify the init information with the associate scale""" assert "init" in self - + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -231,29 +231,29 @@ def scale_init_cross(self, ratio): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break pid = int(pid) - + line = " %+13.7e %+13.7e %+13.7e %i" % \ (ratio*float(xsec), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + def get_pdg_beam(self): """return the pdg of each beam""" - + assert "init" in self - + all_lines = self["init"].split('\n') pdg1,pdg2,_ = all_lines[0].split(None, 2) return int(pdg1), int(pdg2) - + def load_basic(self, medir): """ Load the proc_card /param_card and run_card """ - + self.add(pjoin(medir,'Cards', 'param_card.dat')) self.add(pjoin(medir,'Cards', 'run_card.dat')) if os.path.exists(pjoin(medir, 'SubProcesses', 'procdef_mg5.dat')): @@ -261,29 +261,29 @@ def load_basic(self, medir): self.add(pjoin(medir,'Cards', 'proc_card_mg5.dat')) else: self.add(pjoin(medir,'Cards', 'proc_card.dat')) - + def change_seed(self, 
seed): """Change the seed value in the banner""" # 0 = iseed p = re.compile(r'''^\s*\d+\s*=\s*iseed''', re.M) new_seed_str = " %s = iseed" % seed self['mgruncard'] = p.sub(new_seed_str, self['mgruncard']) - + def add_generation_info(self, cross, nb_event): """add info on MGGeneration""" - + text = """ # Number of Events : %s # Integrated weight (pb) : %s """ % (nb_event, cross) self['MGGenerationInfo'] = text - + ############################################################################ # SPLIT BANNER ############################################################################ def split(self, me_dir, proc_card=True): """write the banner in the Cards directory. - proc_card argument is present to avoid the overwrite of proc_card + proc_card argument is present to avoid the overwrite of proc_card information""" for tag, text in self.items(): @@ -305,37 +305,37 @@ def check_pid(self, pid2label): """special routine removing width/mass of particles not present in the model This is usefull in case of loop model card, when we want to use the non loop model.""" - + if not hasattr(self, 'param_card'): self.charge_card('slha') - + for tag in ['mass', 'decay']: block = self.param_card.get(tag) for data in block: pid = data.lhacode[0] - if pid not in list(pid2label.keys()): + if pid not in list(pid2label.keys()): block.remove((pid,)) def get_lha_strategy(self): """get the lha_strategy: how the weight have to be handle by the shower""" - + if not self["init"]: raise Exception("No init block define") - + data = self["init"].split('\n')[0].split() if len(data) != 10: misc.sprint(len(data), self['init']) raise Exception("init block has a wrong format") return int(float(data[-2])) - + def set_lha_strategy(self, value): """set the lha_strategy: how the weight have to be handle by the shower""" - + if not (-4 <= int(value) <= 4): six.reraise(Exception, "wrong value for lha_strategy", value) if not self["init"]: raise Exception("No init block define") - + all_lines = 
self["init"].split('\n') data = all_lines[0].split() if len(data) != 10: @@ -351,13 +351,13 @@ def modify_init_cross(self, cross, allow_zero=False): assert isinstance(cross, dict) # assert "all" in cross assert "init" in self - + cross = dict(cross) for key in cross.keys(): if isinstance(key, str) and key.isdigit() and int(key) not in cross: cross[int(key)] = cross[key] - - + + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -365,7 +365,7 @@ def modify_init_cross(self, cross, allow_zero=False): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break @@ -383,23 +383,23 @@ def modify_init_cross(self, cross, allow_zero=False): (float(cross[pid]), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + ############################################################################ # WRITE BANNER ############################################################################ def write(self, output_path, close_tag=True, exclude=[]): """write the banner""" - + if isinstance(output_path, str): ff = open(output_path, 'w') else: ff = output_path - + if MADEVENT: header = open(pjoin(MEDIR, 'Source', 'banner_header.txt')).read() else: header = open(pjoin(MG5DIR,'Template', 'LO', 'Source', 'banner_header.txt')).read() - + if not self.lhe_version: self.lhe_version = self.get('run_card', 'lhe_version', default=1.0) if float(self.lhe_version) < 3: @@ -412,7 +412,7 @@ def write(self, output_path, close_tag=True, exclude=[]): for tag in [t for t in self.ordered_items if t in list(self.keys())]+ \ [t for t in self.keys() if t not in self.ordered_items]: - if tag in ['init'] or tag in exclude: + if tag in ['init'] or tag in exclude: continue capitalized_tag = self.capitalized_items[tag] if tag in self.capitalized_items else tag start_data, stop_data = '', '' @@ -422,19 +422,19 @@ def write(self, 
output_path, close_tag=True, exclude=[]): stop_data = ']]>\n' out = '<%(tag)s>%(start_data)s\n%(text)s\n%(stop_data)s\n' % \ {'tag':capitalized_tag, 'text':self[tag].strip(), - 'start_data': start_data, 'stop_data':stop_data} + 'start_data': start_data, 'stop_data':stop_data} try: ff.write(out) except: ff.write(out.encode('utf-8')) - - + + if not '/header' in exclude: out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) if 'init' in self and not 'init' in exclude: text = self['init'] @@ -444,22 +444,22 @@ def write(self, output_path, close_tag=True, exclude=[]): ff.write(out) except: ff.write(out.encode('utf-8')) - + if close_tag: - out = '\n' + out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) return ff - - + + ############################################################################ # BANNER ############################################################################ def add(self, path, tag=None): """Add the content of the file to the banner""" - + if not tag: card_name = os.path.basename(path) if 'param_card' in card_name: @@ -505,33 +505,33 @@ def add_text(self, tag, text): if tag == 'param_card': tag = 'slha' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' - + self[tag.lower()] = text - - + + def charge_card(self, tag): """Build the python object associated to the card""" - + if tag in ['param_card', 'param']: tag = 'slha' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag - + if tag == 
'slha': param_card = self[tag].split('\n') self.param_card = param_card_reader.ParamCard(param_card) @@ -544,56 +544,56 @@ def charge_card(self, tag): self.proc_card = ProcCard(proc_card) return self.proc_card elif tag =='mgshowercard': - shower_content = self[tag] + shower_content = self[tag] if MADEVENT: import internal.shower_card as shower_card else: import madgraph.various.shower_card as shower_card self.shower_card = shower_card.ShowerCard(shower_content, True) - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.shower_card.testing = False return self.shower_card elif tag =='foanalyse': - analyse_content = self[tag] + analyse_content = self[tag] if MADEVENT: import internal.FO_analyse_card as FO_analyse_card else: import madgraph.various.FO_analyse_card as FO_analyse_card - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True) self.FOanalyse_card.testing = False return self.FOanalyse_card - + def get_detail(self, tag, *arg, **opt): """return a specific """ - + if tag in ['param_card', 'param']: tag = 'slha' attr_tag = 'param_card' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], '%s not recognized' % tag - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) + 
self.charge_card(attr_tag) card = getattr(self, attr_tag) if len(arg) == 0: @@ -613,7 +613,7 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 2 and tag == 'slha': try: return card[arg[0]].get(arg[1:]) @@ -621,15 +621,15 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 0: return card else: raise Exception("Unknow command") - + #convenient alias get = get_detail - + def set(self, tag, *args): """modify one of the cards""" @@ -637,27 +637,27 @@ def set(self, tag, *args): tag = 'slha' attr_tag = 'param_card' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized' - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) - + self.charge_card(attr_tag) + card = getattr(self, attr_tag) if len(args) ==2: if tag == 'mg5proccard': @@ -666,20 +666,20 @@ def set(self, tag, *args): card[args[0]] = args[1] else: card[args[:-1]] = args[-1] - - + + @misc.multiple_try() def add_to_file(self, path, seed=None, out=None): """Add the banner to a file and change the associate seed in the banner""" if seed is not None: self.set("run_card", "iseed", seed) - + if not out: path_out = "%s.tmp" % path else: path_out = out - + ff = self.write(path_out, close_tag=False, exclude=['MGGenerationInfo', '/header', 'init']) ff.write("## END BANNER##\n") @@ -698,44 +698,44 @@ def add_to_file(self, path, seed=None, out=None): files.mv(path_out, path) - + def 
split_banner(banner_path, me_dir, proc_card=True): """a simple way to split a banner""" - + banner = Banner(banner_path) banner.split(me_dir, proc_card) - + def recover_banner(results_object, level, run=None, tag=None): """as input we receive a gen_crossxhtml.AllResults object. This define the current banner and load it """ - + if not run: - try: - _run = results_object.current['run_name'] - _tag = results_object.current['tag'] + try: + _run = results_object.current['run_name'] + _tag = results_object.current['tag'] except Exception: return Banner() else: _run = run if not tag: - try: - _tag = results_object[run].tags[-1] + try: + _tag = results_object[run].tags[-1] except Exception as error: if os.path.exists( pjoin(results_object.path,'Events','%s_banner.txt' % (run))): tag = None else: - return Banner() + return Banner() else: _tag = tag - - path = results_object.path - if tag: + + path = results_object.path + if tag: banner_path = pjoin(path,'Events',run,'%s_%s_banner.txt' % (run, tag)) else: banner_path = pjoin(results_object.path,'Events','%s_banner.txt' % (run)) - + if not os.path.exists(banner_path): if level != "parton" and tag != _tag: return recover_banner(results_object, level, _run, results_object[_run].tags[0]) @@ -754,12 +754,12 @@ def recover_banner(results_object, level, run=None, tag=None): return Banner(lhe.banner) # security if the banner was remove (or program canceled before created it) - return Banner() - + return Banner() + banner = Banner(banner_path) - - - + + + if level == 'pythia': if 'mgpythiacard' in banner: del banner['mgpythiacard'] @@ -768,13 +768,13 @@ def recover_banner(results_object, level, run=None, tag=None): if tag in banner: del banner[tag] return banner - + class InvalidRunCard(InvalidCmd): pass class ProcCard(list): """Basic Proccard object""" - + history_header = \ '#************************************************************\n' + \ '#* MadGraph5_aMC@NLO *\n' + \ @@ -798,10 +798,10 @@ class ProcCard(list): '#* run as 
./bin/mg5_aMC filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - - - - + + + + def __init__(self, init=None): """ initialize a basic proc_card""" self.info = {'model': 'sm', 'generate':None, @@ -810,13 +810,13 @@ def __init__(self, init=None): if init: self.read(init) - + def read(self, init): """read the proc_card and save the information""" - + if isinstance(init, str): #path to file init = open(init, 'r') - + store_line = '' for line in init: line = line.rstrip() @@ -828,28 +828,28 @@ def read(self, init): store_line = "" if store_line: raise Exception("WRONG CARD FORMAT") - - + + def move_to_last(self, cmd): """move an element to the last history.""" for line in self[:]: if line.startswith(cmd): self.remove(line) list.append(self, line) - + def append(self, line): """"add a line in the proc_card perform automatically cleaning""" - + line = line.strip() cmds = line.split() if len(cmds) == 0: return - + list.append(self, line) - + # command type: cmd = cmds[0] - + if cmd == 'output': # Remove previous outputs from history self.clean(allow_for_removal = ['output'], keep_switch=True, @@ -875,7 +875,7 @@ def append(self, line): elif cmds[1] == 'proc_v4': #full cleaning self[:] = [] - + def clean(self, to_keep=['set','add','load'], remove_bef_last=None, @@ -884,13 +884,13 @@ def clean(self, to_keep=['set','add','load'], keep_switch=False): """Remove command in arguments from history. All command before the last occurrence of 'remove_bef_last' - (including it) will be removed (but if another options tells the opposite). + (including it) will be removed (but if another options tells the opposite). 'to_keep' is a set of line to always keep. - 'to_remove' is a set of line to always remove (don't care about remove_bef_ + 'to_remove' is a set of line to always remove (don't care about remove_bef_ status but keep_switch acts.). 
- if 'allow_for_removal' is define only the command in that list can be + if 'allow_for_removal' is define only the command in that list can be remove of the history for older command that remove_bef_lb1. all parameter - present in to_remove are always remove even if they are not part of this + present in to_remove are always remove even if they are not part of this list. keep_switch force to keep the statement remove_bef_??? which changes starts the removal mode. @@ -900,8 +900,8 @@ def clean(self, to_keep=['set','add','load'], if __debug__ and allow_for_removal: for arg in to_keep: assert arg not in allow_for_removal - - + + nline = -1 removal = False #looping backward @@ -912,7 +912,7 @@ def clean(self, to_keep=['set','add','load'], if not removal and remove_bef_last: if self[nline].startswith(remove_bef_last): removal = True - switch = True + switch = True # if this is the switch and is protected pass to the next element if switch and keep_switch: @@ -923,12 +923,12 @@ def clean(self, to_keep=['set','add','load'], if any([self[nline].startswith(arg) for arg in to_remove]): self.pop(nline) continue - + # Only if removal mode is active! 
if removal: if allow_for_removal: # Only a subset of command can be removed - if any([self[nline].startswith(arg) + if any([self[nline].startswith(arg) for arg in allow_for_removal]): self.pop(nline) continue @@ -936,10 +936,10 @@ def clean(self, to_keep=['set','add','load'], # All command have to be remove but protected self.pop(nline) continue - + # update the counter to pass to the next element nline -= 1 - + def get(self, tag, default=None): if isinstance(tag, int): list.__getattr__(self, tag) @@ -954,32 +954,32 @@ def get(self, tag, default=None): except ValueError: name, content = line[7:].split(None,1) out.append((name, content)) - return out + return out else: return self.info[tag] - + def write(self, path): """write the proc_card to a given path""" - + fsock = open(path, 'w') fsock.write(self.history_header) for line in self: while len(line) > 70: - sub, line = line[:70]+"\\" , line[70:] + sub, line = line[:70]+"\\" , line[70:] fsock.write(sub+"\n") else: fsock.write(line+"\n") - -class InvalidCardEdition(InvalidCmd): pass - + +class InvalidCardEdition(InvalidCmd): pass + class ConfigFile(dict): """ a class for storing/dealing with input file. - """ + """ def __init__(self, finput=None, **opt): """initialize a new instance. input can be an instance of MadLoopParam, - a file, a path to a file, or simply Nothing""" - + a file, a path to a file, or simply Nothing""" + if isinstance(finput, self.__class__): dict.__init__(self) for key in finput.__dict__: @@ -989,7 +989,7 @@ def __init__(self, finput=None, **opt): return else: dict.__init__(self) - + # Initialize it with all the default value self.user_set = set() self.auto_set = set() @@ -1000,15 +1000,15 @@ def __init__(self, finput=None, **opt): self.comments = {} # comment associated to parameters. can be display via help message # store the valid options for a given parameter. 
self.allowed_value = {} - + self.default_setup() self.plugin_input(finput) - + # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, **opt) - + @@ -1028,7 +1028,7 @@ def __add__(self, other): base = self.__class__(self) #base = copy.copy(self) base.update((key.lower(),value) for key, value in other.items()) - + return base def __radd__(self, other): @@ -1036,26 +1036,26 @@ def __radd__(self, other): new = copy.copy(other) new.update((key, value) for key, value in self.items()) return new - + def __contains__(self, key): return dict.__contains__(self, key.lower()) def __iter__(self): - + for name in super(ConfigFile, self).__iter__(): yield self.lower_to_case[name.lower()] - - + + #iter = super(ConfigFile, self).__iter__() #misc.sprint(iter) #return (self.lower_to_case[name] for name in iter) - + def keys(self): return [name for name in self] - + def items(self): return [(name,self[name]) for name in self] - + @staticmethod def warn(text, level, raiseerror=False): """convenient proxy to raiseerror/print warning""" @@ -1071,11 +1071,11 @@ def warn(text, level, raiseerror=False): log = lambda t: logger.log(level, t) elif level: log = level - + return log(text) def post_set(self, name, value, change_userdefine, raiseerror): - + if value is None: value = self[name] @@ -1087,25 +1087,25 @@ def post_set(self, name, value, change_userdefine, raiseerror): return getattr(self, 'post_set_%s' % name)(value, change_userdefine, raiseerror) else: raise - + def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): """set the attribute and set correctly the type if the value is a string. change_userdefine on True if we have to add the parameter in user_set """ - + if not len(self): #Should never happen but when deepcopy/pickle self.__init__() - + name = name.strip() - lower_name = name.lower() - + lower_name = name.lower() + # 0. 
check if this parameter is a system only one if change_userdefine and lower_name in self.system_only: text='%s is a private entry which can not be modify by the user. Keep value at %s' % (name,self[name]) self.warn(text, 'critical', raiseerror) return - + #1. check if the parameter is set to auto -> pass it to special if lower_name in self: targettype = type(dict.__getitem__(self, lower_name)) @@ -1115,22 +1115,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): self.user_set.remove(lower_name) #keep old value. self.post_set(lower_name, 'auto', change_userdefine, raiseerror) - return + return elif lower_name in self.auto_set: self.auto_set.remove(lower_name) - + # 2. Find the type of the attribute that we want if lower_name in self.list_parameter: targettype = self.list_parameter[lower_name] - - - + + + if isinstance(value, str): # split for each comma/space value = value.strip() if value.startswith('[') and value.endswith(']'): value = value[1:-1] - #do not perform split within a " or ' block + #do not perform split within a " or ' block data = re.split(r"((? bad input dropped.append(val) - + if not new_values: text= "value '%s' for entry '%s' is not valid. Preserving previous value: '%s'.\n" \ % (value, name, self[lower_name]) text += "allowed values are any list composed of the following entries: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) - return self.warn(text, 'warning', raiseerror) - elif dropped: + return self.warn(text, 'warning', raiseerror) + elif dropped: text = "some value for entry '%s' are not valid. 
Invalid items are: '%s'.\n" \ % (name, dropped) text += "value will be set to %s" % new_values - text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) + text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) self.warn(text, 'warning') values = new_values # make the assignment - dict.__setitem__(self, lower_name, values) + dict.__setitem__(self, lower_name, values) if change_userdefine: self.user_set.add(lower_name) #check for specific action - return self.post_set(lower_name, None, change_userdefine, raiseerror) + return self.post_set(lower_name, None, change_userdefine, raiseerror) elif lower_name in self.dict_parameter: - targettype = self.dict_parameter[lower_name] + targettype = self.dict_parameter[lower_name] full_reset = True #check if we just update the current dict or not - + if isinstance(value, str): value = value.strip() # allowed entry: @@ -1209,7 +1209,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): # name , value => just add the entry # name value => just add the entry # {name1:value1, name2:value2} => full reset - + # split for each comma/space if value.startswith('{') and value.endswith('}'): new_value = {} @@ -1219,23 +1219,23 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): x, y = pair.split(':') x, y = x.strip(), y.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] new_value[x] = y value = new_value elif ',' in value: x,y = value.split(',') value = {x.strip():y.strip()} full_reset = False - + elif ':' in value: x,y = value.split(':') value = {x.strip():y.strip()} - full_reset = False + full_reset = False else: x,y = value.split() value = {x:y} - full_reset = False - + full_reset = False + if isinstance(value, dict): for key in value: value[key] = self.format_variable(value[key], targettype, name=name) @@ -1248,7 +1248,7 @@ def __setitem__(self, 
name, value, change_userdefine=False,raiseerror=False): if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - elif name in self: + elif name in self: targettype = type(self[name]) else: logger.debug('Trying to add argument %s in %s. ' % (name, self.__class__.__name__) +\ @@ -1256,22 +1256,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): suggestions = [k for k in self.keys() if k.startswith(name[0].lower())] if len(suggestions)>0: logger.debug("Did you mean one of the following: %s"%suggestions) - self.add_param(lower_name, self.format_variable(UnknownType(value), + self.add_param(lower_name, self.format_variable(UnknownType(value), UnknownType, name)) self.lower_to_case[lower_name] = name if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - + value = self.format_variable(value, targettype, name=name) #check that the value is allowed: if lower_name in self.allowed_value and '*' not in self.allowed_value[lower_name]: valid = False allowed = self.allowed_value[lower_name] - + # check if the current value is allowed or not (set valid to True) if value in allowed: - valid=True + valid=True elif isinstance(value, str): value = value.lower().strip() allowed = [str(v).lower() for v in allowed] @@ -1279,7 +1279,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): i = allowed.index(value) value = self.allowed_value[lower_name][i] valid=True - + if not valid: # act if not valid: text = "value '%s' for entry '%s' is not valid. 
Preserving previous value: '%s'.\n" \ @@ -1303,7 +1303,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, if __debug__: if lower_name in self: raise Exception("Duplicate case for %s in %s" % (name,self.__class__)) - + dict.__setitem__(self, lower_name, value) self.lower_to_case[lower_name] = name if isinstance(value, list): @@ -1318,12 +1318,12 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, elif isinstance(value, dict): allvalues = list(value.values()) if any([type(allvalues[0]) != type(v) for v in allvalues]): - raise Exception("All entry should have the same type") - self.dict_parameter[lower_name] = type(allvalues[0]) + raise Exception("All entry should have the same type") + self.dict_parameter[lower_name] = type(allvalues[0]) if '__type__' in value: del value['__type__'] dict.__setitem__(self, lower_name, value) - + if allowed and allowed != ['*']: self.allowed_value[lower_name] = allowed if lower_name in self.list_parameter: @@ -1333,8 +1333,8 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, assert value in allowed or '*' in allowed #elif isinstance(value, bool) and allowed != ['*']: # self.allowed_value[name] = [True, False] - - + + if system: self.system_only.add(lower_name) if comment: @@ -1342,7 +1342,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, def do_help(self, name): """return a minimal help for the parameter""" - + out = "## Information on parameter %s from class %s\n" % (name, self.__class__.__name__) if name.lower() in self: out += "## current value: %s (parameter should be of type %s)\n" % (self[name], type(self[name])) @@ -1351,7 +1351,7 @@ def do_help(self, name): else: out += "## Unknown for this class\n" if name.lower() in self.user_set: - out += "## This value is considered as being set by the user\n" + out += "## This value is considered as being set by the user\n" else: out += "## This value is considered as 
being set by the system\n" if name.lower() in self.allowed_value: @@ -1359,17 +1359,17 @@ def do_help(self, name): out += "Allowed value are: %s\n" % ','.join([str(p) for p in self.allowed_value[name.lower()]]) else: out += "Suggested value are : %s\n " % ','.join([str(p) for p in self.allowed_value[name.lower()] if p!='*']) - + logger.info(out) return out @staticmethod def guess_type_from_value(value): "try to guess the type of the string --do not use eval as it might not be safe" - + if not isinstance(value, str): return str(value.__class__.__name__) - + #use ast.literal_eval to be safe since value is untrusted # add a timeout to mitigate infinite loop, memory stack attack with misc.stdchannel_redirected(sys.stdout, os.devnull): @@ -1388,7 +1388,7 @@ def guess_type_from_value(value): @staticmethod def format_variable(value, targettype, name="unknown"): """assign the value to the attribute for the given format""" - + if isinstance(targettype, str): if targettype in ['str', 'int', 'float', 'bool']: targettype = eval(targettype) @@ -1412,7 +1412,7 @@ def format_variable(value, targettype, name="unknown"): (name, type(value), targettype, value)) else: raise InvalidCmd("Wrong input type for %s found %s and expecting %s for value %s" %\ - (name, type(value), targettype, value)) + (name, type(value), targettype, value)) else: if targettype != UnknownType: value = value.strip() @@ -1441,8 +1441,8 @@ def format_variable(value, targettype, name="unknown"): value = int(value) elif value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} - value =int(value[:-1]) * convert[value[-1]] - elif '/' in value or '*' in value: + value =int(value[:-1]) * convert[value[-1]] + elif '/' in value or '*' in value: try: split = re.split('(\*|/)',value) v = float(split[0]) @@ -1461,7 +1461,7 @@ def format_variable(value, targettype, name="unknown"): try: value = float(value.replace('d','e')) except ValueError: - raise InvalidCmd("%s can not be mapped to an 
integer" % value) + raise InvalidCmd("%s can not be mapped to an integer" % value) try: new_value = int(value) except ValueError: @@ -1471,7 +1471,7 @@ def format_variable(value, targettype, name="unknown"): value = new_value else: raise InvalidCmd("incorect input: %s need an integer for %s" % (value,name)) - + elif targettype == float: if value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} @@ -1496,33 +1496,33 @@ def format_variable(value, targettype, name="unknown"): value = v else: raise InvalidCmd("type %s is not handle by the card" % targettype) - + return value - - + + def __getitem__(self, name): - + lower_name = name.lower() if __debug__: if lower_name not in self: if lower_name in [key.lower() for key in self] : raise Exception("Some key are not lower case %s. Invalid use of the class!"\ % [key for key in self if key.lower() != key]) - + if lower_name in self.auto_set: return 'auto' - + return dict.__getitem__(self, name.lower()) - + get = __getitem__ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): """convenient way to change attribute. changeifuserset=False means that the value is NOT change is the value is not on default. - user=True, means that the value will be marked as modified by the user - (potentially preventing future change to the value) + user=True, means that the value will be marked as modified by the user + (potentially preventing future change to the value) """ # changeifuserset=False -> we need to check if the user force a value. 
@@ -1530,8 +1530,8 @@ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): if name.lower() in self.user_set: #value modified by the user -> do nothing return - self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) - + self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) + class RivetCard(ConfigFile): @@ -1706,7 +1706,7 @@ def setRelevantParamCard(self, f_params, f_relparams): yexec_dict = {} yexec_line = exec_line + "yaxis_relvar = " + self['yaxis_relvar'] exec(yexec_line, locals(), yexec_dict) - if self['yaxis_label'] == "": + if self['yaxis_label'] == "": self['yaxis_label'] = "yaxis_relvar" f_relparams.write("{0} = {1}\n".format(self['yaxis_label'], yexec_dict['yaxis_relvar'])) else: @@ -1715,11 +1715,11 @@ def setRelevantParamCard(self, f_params, f_relparams): class ProcCharacteristic(ConfigFile): """A class to handle information which are passed from MadGraph to the madevent - interface.""" - + interface.""" + def default_setup(self): """initialize the directory to the default value""" - + self.add_param('loop_induced', False) self.add_param('has_isr', False) self.add_param('has_fsr', False) @@ -1735,16 +1735,16 @@ def default_setup(self): self.add_param('pdg_initial1', [0]) self.add_param('pdg_initial2', [0]) self.add_param('splitting_types',[], typelist=str) - self.add_param('perturbation_order', [], typelist=str) - self.add_param('limitations', [], typelist=str) - self.add_param('hel_recycling', False) + self.add_param('perturbation_order', [], typelist=str) + self.add_param('limitations', [], typelist=str) + self.add_param('hel_recycling', False) self.add_param('single_color', True) - self.add_param('nlo_mixed_expansion', True) + self.add_param('nlo_mixed_expansion', True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if 
isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1752,49 +1752,49 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: if '#' in line: line = line.split('#',1)[0] if not line: continue - + if '=' in line: key, value = line.split('=',1) self[key.strip()] = value - + def write(self, outputpath): """write the file""" template ="# Information about the process #\n" template +="#########################################\n" - + fsock = open(outputpath, 'w') fsock.write(template) - + for key, value in self.items(): fsock.write(" %s = %s \n" % (key, value)) - - fsock.close() - + + fsock.close() + class GridpackCard(ConfigFile): """an object for the GridpackCard""" - + def default_setup(self): """default value for the GridpackCard""" - + self.add_param("GridRun", True) self.add_param("gevents", 2500) self.add_param("gseed", 1) - self.add_param("ngran", -1) - + self.add_param("ngran", -1) + def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1802,7 +1802,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -1812,19 +1812,19 @@ def read(self, finput): self[line[1].strip()] = line[0].replace('\'','').strip() def write(self, output_file, template=None): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'grid_card_default.dat') else: template = pjoin(MEDIR, 'Cards', 'grid_card_default.dat') - + text = 
"" - for line in open(template,'r'): + for line in open(template,'r'): nline = line.split('#')[0] nline = nline.split('!')[0] comment = line[len(nline):] @@ -1832,19 +1832,19 @@ def write(self, output_file, template=None): if len(nline) != 2: text += line elif nline[1].strip() in self: - text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) + text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) else: logger.info('Adding missing parameter %s to current run_card (with default value)' % nline[1].strip()) - text += line - + text += line + if isinstance(output_file, str): fsock = open(output_file,'w') else: fsock = output_file - + fsock.write(text) fsock.close() - + class PY8Card(ConfigFile): """ Implements the Pythia8 card.""" @@ -1868,7 +1868,7 @@ def add_default_subruns(self, type): def default_setup(self): """ Sets up the list of available PY8 parameters.""" - + # Visible parameters # ================== self.add_param("Main:numberOfEvents", -1) @@ -1877,11 +1877,11 @@ def default_setup(self): self.add_param("JetMatching:qCut", -1.0, always_write_to_card=False) self.add_param("JetMatching:doShowerKt",False,always_write_to_card=False) # -1 means that it is automatically set. - self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) + self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) # for CKKWL merging self.add_param("Merging:TMS", -1.0, always_write_to_card=False) self.add_param("Merging:Process", '', always_write_to_card=False) - # -1 means that it is automatically set. + # -1 means that it is automatically set. self.add_param("Merging:nJetMax", -1, always_write_to_card=False) # for both merging, chose whether to also consider different merging # scale values for the extra weights related to scale and PDF variations. 
@@ -1918,10 +1918,10 @@ def default_setup(self): comment='This allows to turn on/off hadronization alltogether.') self.add_param("partonlevel:mpi", True, hidden=True, always_write_to_card=False, comment='This allows to turn on/off MPI alltogether.') - self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, + self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, always_write_to_card=False, comment='This parameter is automatically set to True by MG5aMC when doing MLM merging with PY8.') - + # for MLM merging self.add_param("JetMatching:merge", False, hidden=True, always_write_to_card=False, comment='Specifiy if we are merging sample of different multiplicity.') @@ -1931,9 +1931,9 @@ def default_setup(self): comment='Value of the merging scale below which one does not even write the HepMC event.') self.add_param("JetMatching:doVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. in SysCalc).') - self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) + self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) self.add_param("JetMatching:setMad", False, hidden=True, always_write_to_card=False, - comment='Specify one must read inputs from the MadGraph banner.') + comment='Specify one must read inputs from the MadGraph banner.') self.add_param("JetMatching:coneRadius", 1.0, hidden=True, always_write_to_card=False) self.add_param("JetMatching:nQmatch",4,hidden=True, always_write_to_card=False) # for CKKWL merging (common with UMEPS, UNLOPS) @@ -1946,7 +1946,7 @@ def default_setup(self): self.add_param("Merging:applyVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. 
in SysCalc).') self.add_param("Merging:includeWeightInXsection", True, hidden=True, always_write_to_card=False, - comment='If turned off, then the option belows forces PY8 to keep the original weight.') + comment='If turned off, then the option belows forces PY8 to keep the original weight.') self.add_param("Merging:muRen", 91.188, hidden=True, always_write_to_card=False, comment='Set renormalization scales of the 2->2 process.') self.add_param("Merging:muFacInME", 91.188, hidden=True, always_write_to_card=False, @@ -1958,7 +1958,7 @@ def default_setup(self): # To be added in subruns for CKKWL self.add_param("Merging:mayRemoveDecayProducts", False, hidden=True, always_write_to_card=False) self.add_param("Merging:doKTMerging", False, hidden=True, always_write_to_card=False) - self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) + self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) self.add_param("Merging:doPTLundMerging", False, hidden=True, always_write_to_card=False) # Special Pythia8 paremeters useful to simplify the shower. @@ -1975,33 +1975,33 @@ def default_setup(self): # Add parameters controlling the subruns execution flow. # These parameters should not be part of PY8SubRun daughter. self.add_default_subruns('parameters') - + def __init__(self, *args, **opts): - # Parameters which are not printed in the card unless they are - # 'user_set' or 'system_set' or part of the + # Parameters which are not printed in the card unless they are + # 'user_set' or 'system_set' or part of the # self.hidden_params_to_always_print set. self.hidden_param = [] self.hidden_params_to_always_write = set() self.visible_params_to_always_write = set() # List of parameters that should never be written out given the current context. self.params_to_never_write = set() - + # Parameters which have been set by the system (i.e. 
MG5 itself during # the regular course of the shower interface) self.system_set = set() - + # Add attributes controlling the subruns execution flow. # These attributes should not be part of PY8SubRun daughter. self.add_default_subruns('attributes') - - # Parameters which have been set by the + + # Parameters which have been set by the super(PY8Card, self).__init__(*args, **opts) - def add_param(self, name, value, hidden=False, always_write_to_card=True, + def add_param(self, name, value, hidden=False, always_write_to_card=True, comment=None): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. The option 'hidden' decides whether the parameter should be visible to the user. The option 'always_write_to_card' decides whether it should @@ -2017,7 +2017,7 @@ def add_param(self, name, value, hidden=False, always_write_to_card=True, self.hidden_params_to_always_write.add(name) else: if always_write_to_card: - self.visible_params_to_always_write.add(name) + self.visible_params_to_always_write.add(name) if not comment is None: if not isinstance(comment, str): raise MadGraph5Error("Option 'comment' must be a string, not"+\ @@ -2036,7 +2036,7 @@ def add_subrun(self, py8_subrun): self.subruns[py8_subrun['Main:subrun']] = py8_subrun if not 'LHEFInputs:nSubruns' in self.user_set: self['LHEFInputs:nSubruns'] = max(self.subruns.keys()) - + def userSet(self, name, value, **opts): """Set an attribute of this card, following a user_request""" self.__setitem__(name, value, change_userdefine=True, **opts) @@ -2044,10 +2044,10 @@ def userSet(self, name, value, **opts): self.system_set.remove(name.lower()) def vetoParamWriteOut(self, name): - """ Forbid the writeout of a specific parameter of this card when the + """ Forbid the writeout of a specific parameter of this card when the "write" function will be invoked.""" 
self.params_to_never_write.add(name.lower()) - + def systemSet(self, name, value, **opts): """Set an attribute of this card, independently of a specific user request and only if not already user_set.""" @@ -2058,7 +2058,7 @@ def systemSet(self, name, value, **opts): if force or name.lower() not in self.user_set: self.__setitem__(name, value, change_userdefine=False, **opts) self.system_set.add(name.lower()) - + def MadGraphSet(self, name, value, **opts): """ Sets a card attribute, but only if it is absent or not already user_set.""" @@ -2068,18 +2068,18 @@ def MadGraphSet(self, name, value, **opts): force = False if name.lower() not in self or (force or name.lower() not in self.user_set): self.__setitem__(name, value, change_userdefine=False, **opts) - self.system_set.add(name.lower()) - + self.system_set.add(name.lower()) + def defaultSet(self, name, value, **opts): self.__setitem__(name, value, change_userdefine=False, **opts) - + @staticmethod def pythia8_formatting(value, formatv=None): """format the variable into pythia8 card convention. 
The type is detected by default""" if not formatv: if isinstance(value,UnknownType): - formatv = 'unknown' + formatv = 'unknown' elif isinstance(value, bool): formatv = 'bool' elif isinstance(value, int): @@ -2095,7 +2095,7 @@ def pythia8_formatting(value, formatv=None): formatv = 'str' else: assert formatv - + if formatv == 'unknown': # No formatting then return str(value) @@ -2116,7 +2116,7 @@ def pythia8_formatting(value, formatv=None): elif formatv == 'float': return '%.10e' % float(value) elif formatv == 'shortfloat': - return '%.3f' % float(value) + return '%.3f' % float(value) elif formatv == 'str': return "%s" % value elif formatv == 'list': @@ -2124,9 +2124,9 @@ def pythia8_formatting(value, formatv=None): return ','.join([PY8Card.pythia8_formatting(arg, 'shortfloat') for arg in value]) else: return ','.join([PY8Card.pythia8_formatting(arg) for arg in value]) - - def write(self, output_file, template, read_subrun=False, + + def write(self, output_file, template, read_subrun=False, print_only_visible=False, direct_pythia_input=False, add_missing=True): """ Write the card to output_file using a specific template. 
> 'print_only_visible' specifies whether or not the hidden parameters @@ -2143,28 +2143,28 @@ def write(self, output_file, template, read_subrun=False, or p.lower() in self.user_set] # Filter against list of parameters vetoed for write-out visible_param = [p for p in visible_param if p.lower() not in self.params_to_never_write] - + # Now the hidden param which must be written out if print_only_visible: hidden_output_param = [] else: hidden_output_param = [p for p in self if p.lower() in self.hidden_param and not p.lower() in self.user_set and - (p.lower() in self.hidden_params_to_always_write or + (p.lower() in self.hidden_params_to_always_write or p.lower() in self.system_set)] # Filter against list of parameters vetoed for write-out hidden_output_param = [p for p in hidden_output_param if p not in self.params_to_never_write] - + if print_only_visible: subruns = [] else: if not read_subrun: subruns = sorted(self.subruns.keys()) - + # Store the subruns to write in a dictionary, with its ID in key # and the corresponding stringstream in value subruns_to_write = {} - + # Sort these parameters nicely so as to put together parameters # belonging to the same group (i.e. prefix before the ':' in their name). def group_params(params): @@ -2191,7 +2191,7 @@ def group_params(params): # First dump in a temporary_output (might need to have a second pass # at the very end to update 'LHEFInputs:nSubruns') output = StringIO.StringIO() - + # Setup template from which to read if isinstance(template, str): if os.path.isfile(template): @@ -2199,7 +2199,7 @@ def group_params(params): elif '\n' in template: tmpl = StringIO.StringIO(template) else: - raise Exception("File input '%s' not found." % file_input) + raise Exception("File input '%s' not found." 
% file_input) elif template is None: # Then use a dummy empty StringIO, hence skipping the reading tmpl = StringIO.StringIO() @@ -2257,8 +2257,8 @@ def group_params(params): # Remove all of its variables (so that nothing is overwritten) DummySubrun.clear() DummySubrun.write(subruns_to_write[int(value)], - tmpl, read_subrun=True, - print_only_visible=print_only_visible, + tmpl, read_subrun=True, + print_only_visible=print_only_visible, direct_pythia_input=direct_pythia_input) logger.info('Adding new unknown subrun with ID %d.'% @@ -2267,7 +2267,7 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - + # Change parameters which must be output if param in visible_param: new_value = PY8Card.pythia8_formatting(self[param]) @@ -2286,10 +2286,10 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - - # Substitute the value. + + # Substitute the value. # If it is directly the pytia input, then don't write the param if it - # is not in the list of visible_params_to_always_write and was + # is not in the list of visible_params_to_always_write and was # not user_set or system_set if ((not direct_pythia_input) or (param.lower() in self.visible_params_to_always_write) or @@ -2304,16 +2304,16 @@ def group_params(params): output.write(template%(param_entry, value_entry.replace(value,new_value))) - + # Proceed to next line last_pos = tmpl.tell() line = tmpl.readline() - + # If add_missing is False, make sure to empty the list of remaining parameters if not add_missing: visible_param = [] hidden_output_param = [] - + # Now output the missing parameters. Warn about visible ones. if len(visible_param)>0 and not template is None: output.write( @@ -2343,12 +2343,12 @@ def group_params(params): """%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else '')) for param in hidden_output_param: if param.lower() in self.comments: - comment = '\n'.join('! %s'%c for c in + comment = '\n'.join('! 
%s'%c for c in self.comments[param.lower()].split('\n')) output.write(comment+'\n') output.write('%s=%s\n'%(param,PY8Card.pythia8_formatting(self[param]))) - - # Don't close the file if we were reading a subrun, but simply write + + # Don't close the file if we were reading a subrun, but simply write # output and return now if read_subrun: output_file.write(output.getvalue()) @@ -2382,12 +2382,12 @@ def group_params(params): out.close() else: output_file.write(output.getvalue()) - + def read(self, file_input, read_subrun=False, setter='default'): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file. - The setter option choses the authority that sets potential - modified/new parameters. It can be either: + The setter option choses the authority that sets potential + modified/new parameters. It can be either: 'default' or 'user' or 'system'""" if isinstance(file_input, str): if "\n" in file_input: @@ -2423,8 +2423,8 @@ def read(self, file_input, read_subrun=False, setter='default'): raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%\ line) if '!' in value: - value,_ = value.split('!',1) - + value,_ = value.split('!',1) + # Read a subrun if detected: if param=='Main:subrun': if read_subrun: @@ -2451,7 +2451,7 @@ def read(self, file_input, read_subrun=False, setter='default'): last_pos = finput.tell() line = finput.readline() continue - + # Read parameter. The case of a parameter not defined in the card is # handled directly in ConfigFile. @@ -2478,7 +2478,7 @@ def add_default_subruns(self, type): def __init__(self, *args, **opts): """ Initialize a subrun """ - + # Force user to set it manually. 
subrunID = -1 if 'subrun_id' in opts: @@ -2489,7 +2489,7 @@ def __init__(self, *args, **opts): def default_setup(self): """Sets up the list of available PY8SubRun parameters.""" - + # Add all default PY8Card parameters super(PY8SubRun, self).default_setup() # Make sure they are all hidden @@ -2501,33 +2501,33 @@ def default_setup(self): self.add_param("Main:subrun", -1) self.add_param("Beams:LHEF", "events.lhe.gz") - + class RunBlock(object): """ Class for a series of parameter in the run_card that can be either visible or hidden. - name: allow to set in the default run_card $name to set where that + name: allow to set in the default run_card $name to set where that block need to be inserted template_on: information to include is block is active template_off: information to include is block is not active on_fields/off_fields: paramater associated to the block - can be specify but are otherwise automatically but + can be specify but are otherwise automatically but otherwise determined from the template. - + function: status(self,run_card) -> return which template need to be used check_validity(self, runcard) -> sanity check - create_default_for_process(self, run_card, proc_characteristic, - history, proc_def) + create_default_for_process(self, run_card, proc_characteristic, + history, proc_def) post_set_XXXX(card, value, change_userdefine, raiseerror) -> fct called when XXXXX is set post_set(card, value, change_userdefine, raiseerror, **opt) -> fct called when a parameter is changed - -> no access to parameter name + -> no access to parameter name -> not called if post_set_XXXX is defined """ - - + + def __init__(self, name, template_on, template_off, on_fields=False, off_fields=False): self.name = name @@ -2550,7 +2550,7 @@ def fields(self): def find_fields_from_template(template): """ return the list of fields from a template. 
checking line like %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ - + return re.findall(r"^\s*%\((.*)\)s\s*=\s*\1", template, re.M) def get_template(self, card): @@ -2565,7 +2565,7 @@ def get_unused_template(self, card): if self.status(card): return self.template_off else: - return self.template_on + return self.template_on def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -2594,20 +2594,20 @@ def manage_parameters(self, card, written, to_write): written.add(name) if name in to_write: to_write.remove(name) - + def check_validity(self, runcard): """run self consistency check here --avoid to use runcard[''] = xxx here since it can trigger post_set function""" return def create_default_for_process(self, run_card, proc_characteristic, history, proc_def): - return + return # @staticmethod # def post_set(card, value, change_userdefine, raiseerror, **opt): # """default action to run when a parameter of the block is defined. # Here we do not know which parameter is modified. if this is needed. 
# then one need to define post_set_XXXXX(card, value, change_userdefine, raiseerror) -# and then only that function is used +# and then only that function is used # """ # # if 'pdlabel' in card.user_set: @@ -2621,7 +2621,7 @@ class RunCard(ConfigFile): blocks = [] parameter_in_block = {} - allowed_lep_densities = {} + allowed_lep_densities = {} default_include_file = 'run_card.inc' default_autodef_file = 'run.inc' donewarning = [] @@ -2637,7 +2637,7 @@ def plugin_input(self, finput): curr_dir = os.path.dirname(os.path.dirname(finput.name)) elif isinstance(finput, str): curr_dir = os.path.dirname(os.path.dirname(finput)) - + if curr_dir: if os.path.exists(pjoin(curr_dir, 'bin', 'internal', 'plugin_run_card')): # expected format {} passing everything as optional argument @@ -2646,7 +2646,7 @@ def plugin_input(self, finput): continue opts = dict(eval(line)) self.add_param(**opts) - + @classmethod def fill_post_set_from_blocks(cls): """set the post_set function for any parameter defined in a run_block""" @@ -2659,8 +2659,8 @@ def fill_post_set_from_blocks(cls): elif hasattr(block, 'post_set'): setattr(cls, 'post_set_%s' % parameter, block.post_set) cls.parameter_in_block[parameter] = block - - + + def __new__(cls, finput=None, **opt): cls.fill_post_set_from_blocks() @@ -2718,9 +2718,9 @@ def __new__(cls, finput=None, **opt): return super(RunCard, cls).__new__(cls, finput, **opt) def __init__(self, *args, **opts): - + # The following parameter are updated in the defaultsetup stage. - + #parameter for which no warning should be raised if not define self.hidden_param = [] # in which include file the parameer should be written @@ -2739,11 +2739,11 @@ def __init__(self, *args, **opts): self.cuts_parameter = {} # parameter added where legacy requires an older value. 
self.system_default = {} - + self.display_block = [] # set some block to be displayed self.fct_mod = {} # {param: (fct_pointer, *argument, **opts)} - self.cut_class = {} + self.cut_class = {} self.warned=False @@ -2776,11 +2776,11 @@ def get_lepton_densities(cls): else: cls.allowed_lep_densities[identity].append(name) - def add_param(self, name, value, fortran_name=None, include=True, + def add_param(self, name, value, fortran_name=None, include=True, hidden=False, legacy=False, cut=False, system=False, sys_default=None, autodef=False, fct_mod=None, **opts): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. fortran_name: defines what is the associate name in the f77 code include: defines if we have to put the value in the include file @@ -2795,7 +2795,7 @@ def add_param(self, name, value, fortran_name=None, include=True, fct_mod: defines a function to run if the parameter is modify in the include file options of **opts: - allowed: list of valid options. '*' means anything else should be allowed. - empty list means anything possible as well. + empty list means anything possible as well. 
- comment: add comment for writing/help - typelist: type of the list if default is empty """ @@ -2823,9 +2823,9 @@ def add_param(self, name, value, fortran_name=None, include=True, self.fct_mod[name] = fct_mod def read(self, finput, consistency=True, unknown_warning=True, **opt): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -2836,7 +2836,7 @@ def read(self, finput, consistency=True, unknown_warning=True, **opt): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -2864,8 +2864,8 @@ def add_unknown_entry(self, name, value, unknow_warning): This is based on the guess_entry_fromname for the various syntax providing input. This then call add_param accordingly. - This function does not returns anything. - """ + This function does not returns anything. + """ if name == "dsqrt_q2fact1" and not self.LO: raise InvalidRunCard("Looks like you passed a LO run_card for a NLO run. Please correct") @@ -2903,7 +2903,7 @@ def add_unknown_entry(self, name, value, unknow_warning): " The type was assigned to %s. 
\n"+\ " The definition of that variable will %sbe automatically added to fortran file %s\n"+\ " The value of that variable will %sbe passed to the fortran code via fortran file %s",\ - name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, + name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, "" if opts.get('autodef', False) else "not", "" if opts.get('autodef', False) in [True,False] else opts.get('autodef'), "" if opts.get('include', True) else "not", "" if opts.get('include', True) in [True,False] else opts.get('include')) RunCard.donewarning.append(name) @@ -2923,19 +2923,19 @@ def valid_line(self, line, tmp): return False elif line.strip().startswith('%'): parameter = line[line.find('(')+1:line.find(')')] - + try: cond = self.cuts_parameter[parameter] except KeyError: return True - - + + if template_options.get(cond, default) or cond is True: return True else: - return False + return False else: - return True + return True def reset_simd(self, old_value, new_value, name, *args, **opts): @@ -2946,28 +2946,28 @@ def make_clean(self,old_value, new_value, name, dir): raise Exception('pass make clean for ', dir) def make_Ptouch(self,old_value, new_value, name, reset): - raise Exception('pass Ptouch for ', reset) - + raise Exception('pass Ptouch for ', reset) + def write(self, output_file, template=None, python_template=False, write_hidden=False, template_options=None, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" - to_write = set(self.user_set) + to_write = set(self.user_set) written = set() if not template: raise Exception if not template_options: template_options = collections.defaultdict(str) - + if python_template: text = open(template,'r').read() - text = text.split('\n') + text = text.split('\n') # remove if templating - text = [l if not l.startswith('#IF') else 
l[l.find(')# ')+2:] + text = [l if not l.startswith('#IF') else l[l.find(')# ')+2:] for l in text if self.valid_line(l, template_options)] text ='\n'.join(text) - + if python_template and not to_write: import string if self.blocks: @@ -2981,14 +2981,14 @@ def write(self, output_file, template=None, python_template=False, if not self.list_parameter: text = text % self else: - data = dict((key.lower(),value) for key, value in self.items()) + data = dict((key.lower(),value) for key, value in self.items()) for name in self.list_parameter: if self.list_parameter[name] != str: data[name] = ', '.join(str(v) for v in data[name]) else: data[name] = "['%s']" % "', '".join(str(v) for v in data[name]) text = text % data - else: + else: text = "" for line in open(template,'r'): nline = line.split('#')[0] @@ -3005,11 +3005,11 @@ def write(self, output_file, template=None, python_template=False, this_group = this_group[0] text += this_group.get_template(self) % self this_group.manage_parameters(self, written, to_write) - + elif len(nline) != 2: text += line elif nline[1].strip() in self: - + name = nline[1].strip().lower() value = self[name] if name in self.list_parameter: @@ -3026,15 +3026,15 @@ def write(self, output_file, template=None, python_template=False, else: endline = '' text += ' %s\t= %s %s%s' % (value, name, comment, endline) - written.add(name) + written.add(name) if name in to_write: to_write.remove(name) else: logger.info('Adding missing parameter %s to current %s (with default value)', (name, self.filename)) - written.add(name) - text += line + written.add(name) + text += line for b in self.blocks: if b.status(self): @@ -3057,7 +3057,7 @@ def write(self, output_file, template=None, python_template=False, else: #partial writting -> add only what is needed to_add = [] - for line in b.get_template(self).split('\n'): + for line in b.get_template(self).split('\n'): nline = line.split('#')[0] nline = nline.split('!')[0] nline = nline.split('=') @@ -3072,8 +3072,8 @@ 
def write(self, output_file, template=None, python_template=False, continue #already include before else: to_add.append(line % {nline[1].strip():value, name:value}) - written.add(name) - + written.add(name) + if name in to_write: to_write.remove(name) else: @@ -3095,13 +3095,13 @@ def write(self, output_file, template=None, python_template=False, text += '\n'.join(to_add) if to_write or write_hidden: - text+="""#********************************************************************* + text+="""#********************************************************************* # Additional hidden parameters #********************************************************************* -""" +""" if write_hidden: # - # do not write hidden parameter not hidden for this template + # do not write hidden parameter not hidden for this template # if python_template: written = written.union(set(re.findall('\%\((\w*)\)s', open(template,'r').read(), re.M))) @@ -3129,7 +3129,7 @@ def get_last_value_include(self, output_dir): if inc file does not exist we will return the current value (i.e. set has no change) """ - #remember that + #remember that # default_include_file is a class variable # self.includepath is on the form include_path : [list of param ] out = {} @@ -3165,7 +3165,7 @@ def get_value_from_include(self, path, list_of_params, output_dir): with open(pjoin(output_dir,path), 'r') as fsock: text = fsock.read() - + for name in list_of_params: misc.sprint(name, name in self.fortran_name) misc.sprint(self.fortran_name[name] if name in self.fortran_name[name] else name) @@ -3191,11 +3191,11 @@ def get_value_from_include(self, path, list_of_params, output_dir): misc.sprint(self.fortran_name) misc.sprint(text) raise Exception - return out + return out def get_default(self, name, default=None, log_level=None): - """return self[name] if exist otherwise default. log control if we + """return self[name] if exist otherwise default. 
log control if we put a warning or not if we use the default value""" lower_name = name.lower() @@ -3216,13 +3216,13 @@ def get_default(self, name, default=None, log_level=None): log_level = 20 if not default: default = dict.__getitem__(self, name.lower()) - + logger.log(log_level, '%s missed argument %s. Takes default: %s' % (self.filename, name, default)) self[name] = default return default else: - return self[name] + return self[name] def mod_inc_pdlabel(self, value): """flag pdlabel has 'dressed' if one of the special lepton PDF with beamstralung. @@ -3237,16 +3237,16 @@ def edit_dummy_fct_from_file(self, filelist, outdir): filelist is a list of input files (given by the user) containing a series of function to be placed in replacement of standard (typically dummy) functions of the code. - This use LO/NLO class attribute that defines which function name need to - be placed in which file. + This use LO/NLO class attribute that defines which function name need to + be placed in which file. First time this is used, a backup of the original file is done in order to - recover if the user remove some of those files. + recover if the user remove some of those files. The function present in the file are determined automatically via regular expression. and only that function is replaced in the associated file. 
- function in the filelist starting with user_ will also be include within the + function in the filelist starting with user_ will also be include within the dummy_fct.f file """ @@ -3269,7 +3269,7 @@ def edit_dummy_fct_from_file(self, filelist, outdir): fsock = file_writers.FortranWriter(tmp,'w') function_text = fsock.remove_routine(text, fct) fsock.close() - test = open(tmp,'r').read() + test = open(tmp,'r').read() if fct not in self.dummy_fct_file: if fct.startswith('user_'): self.dummy_fct_file[fct] = self.dummy_fct_file['user_'] @@ -3315,22 +3315,22 @@ def guess_entry_fromname(self, name, value): - vartype: type of the variable - name: name of the variable (stripped from metadata) - options: additional options for the add_param - rules: - - if name starts with str_, int_, float_, bool_, list_, dict_ then + rules: + - if name starts with str_, int_, float_, bool_, list_, dict_ then - vartype is set accordingly - name is strip accordingly - otherwise guessed from value (which is string) - if name contains min/max - vartype is set to float - options has an added {'cut':True} - - suffixes like + - suffixes like - will be removed from named - will be added in options (for add_param) as {'cut':True} see add_param documentation for the list of supported options - if include is on False set autodef to False (i.e. 
enforce it False for future change) """ - # local function + # local function def update_typelist(value, name, opts): """convert a string to a list and update opts to keep track of the type """ value = value.strip() @@ -3358,7 +3358,7 @@ def update_typelist(value, name, opts): opts[key] = val name = name.replace("<%s=%s>" %(key,val), '') - # get vartype + # get vartype # first check that name does not force it supported_type = ["str", "float", "int", "bool", "list", "dict"] if "_" in name and name.split("_")[0].lower() in supported_type: @@ -3406,13 +3406,13 @@ def f77_formatting(value, formatv=None): value = str(value).lower() else: assert formatv - + if formatv == 'bool': if str(value) in ['1','T','.true.','True']: return '.true.' else: return '.false.' - + elif formatv == 'int': try: return str(int(value)) @@ -3422,12 +3422,12 @@ def f77_formatting(value, formatv=None): return str(int(fl)) else: raise - + elif formatv == 'float': if isinstance(value, str): value = value.replace('d','e') return ('%.10e' % float(value)).replace('e','d') - + elif formatv == 'str': # Check if it is a list if value.strip().startswith('[') and value.strip().endswith(']'): @@ -3437,20 +3437,20 @@ def f77_formatting(value, formatv=None): enumerate(elements)] else: return "'%s'" % value - - + + def check_validity(self, log_level=30): """check that parameter missing in the card are set to the expected value""" for name, value in self.system_default.items(): self.set(name, value, changeifuserset=False) - + for name in self.includepath[False]: to_bypass = self.hidden_param + list(self.legacy_parameter.keys()) if name not in to_bypass: - self.get_default(name, log_level=log_level) + self.get_default(name, log_level=log_level) for name in self.legacy_parameter: if self[name] != self.legacy_parameter[name]: @@ -3458,28 +3458,28 @@ def check_validity(self, log_level=30): for block in self.blocks: block.check_validity(self) - + def update_system_parameter_for_include(self): - """update hidden 
system only parameter for the correct writtin in the + """update hidden system only parameter for the correct writtin in the include""" return - + def write_include_file(self, output_dir, output_file=None): """Write the various include file in output_dir. The entry True of self.includepath will be written in run_card.inc The entry False will not be written anywhere output_file allows testing by providing stream. - This also call the function to add variable definition for the - variable with autodef=True (handle by write_autodef function) + This also call the function to add variable definition for the + variable with autodef=True (handle by write_autodef function) """ - + # ensure that all parameter are coherent and fix those if needed self.check_validity() - + #ensusre that system only parameter are correctly set self.update_system_parameter_for_include() @@ -3490,10 +3490,10 @@ def write_include_file(self, output_dir, output_file=None): self.write_autodef(output_dir, output_file=None) # check/fix status of customised functions self.edit_dummy_fct_from_file(self["custom_fcts"], os.path.dirname(output_dir)) - + for incname in self.includepath: self.write_one_include_file(output_dir, incname, output_file) - + for name,value in value_in_old_include.items(): if value != self[name]: self.fct_mod[name][0](value, self[name], name, *self.fct_mod[name][1],**self.fct_mod[name][2]) @@ -3515,13 +3515,13 @@ def write_one_include_file(self, output_dir, incname, output_file=None): fsock = file_writers.FortranWriter(pjoin(output_dir,pathinc+'.tmp')) - for key in self.includepath[incname]: + for key in self.includepath[incname]: #define the fortran name if key in self.fortran_name: fortran_name = self.fortran_name[key] else: fortran_name = key - + if incname in self.include_as_parameter: fsock.writelines('INTEGER %s\n' % fortran_name) #get the value with warning if the user didn't set it @@ -3534,7 +3534,7 @@ def write_one_include_file(self, output_dir, incname, 
output_file=None): # in case of a list, add the length of the list as 0th # element in fortran. Only in case of integer or float # list (not for bool nor string) - targettype = self.list_parameter[key] + targettype = self.list_parameter[key] if targettype is bool: pass elif targettype is int: @@ -3550,7 +3550,7 @@ def write_one_include_file(self, output_dir, incname, output_file=None): elif isinstance(value, dict): for fortran_name, onevalue in value.items(): line = '%s = %s \n' % (fortran_name, self.f77_formatting(onevalue)) - fsock.writelines(line) + fsock.writelines(line) elif isinstance(incname,str) and 'compile' in incname: if incname in self.include_as_parameter: line = 'PARAMETER (%s=%s)' %( fortran_name, value) @@ -3585,7 +3585,7 @@ def write_autodef(self, output_dir, output_file=None): filetocheck = dict(self.definition_path) if True not in self.definition_path: filetocheck[True] = [] - + for incname in filetocheck: if incname is True: @@ -3598,7 +3598,7 @@ def write_autodef(self, output_dir, output_file=None): if output_file: fsock = output_file input = fsock.getvalue() - + else: input = open(pjoin(output_dir,pathinc),'r').read() # do not define fsock here since we might not need to overwrite it @@ -3608,7 +3608,7 @@ def write_autodef(self, output_dir, output_file=None): previous = re.findall(re_pat, input, re.M) # now check which one needed to be added (and remove those identicaly defined) to_add = [] - for key in filetocheck[incname]: + for key in filetocheck[incname]: curr_type = self[key].__class__.__name__ length = "" if curr_type in [list, "list"]: @@ -3640,10 +3640,10 @@ def write_autodef(self, output_dir, output_file=None): fsock.truncate(0) fsock.seek(0) - # remove outdated lines + # remove outdated lines lines = input.split('\n') if previous: - out = [line for line in lines if not re.search(re_pat, line, re.M) or + out = [line for line in lines if not re.search(re_pat, line, re.M) or re.search(re_pat, line, re.M).groups() not in previous] else: 
out = lines @@ -3662,7 +3662,7 @@ def write_autodef(self, output_dir, output_file=None): stop = out.index('C STOP USER COMMON BLOCK') out = out[:start]+ out[stop+1:] #add new common-block - if self.definition_path[incname]: + if self.definition_path[incname]: out.append("C START USER COMMON BLOCK") if isinstance(pathinc , str): filename = os.path.basename(pathinc).split('.',1)[0] @@ -3675,10 +3675,10 @@ def write_autodef(self, output_dir, output_file=None): filename = filename.upper() out.append(" COMMON/USER_CUSTOM_%s/%s" %(filename,','.join( self.definition_path[incname]))) out.append('C STOP USER COMMON BLOCK') - + if not output_file: fsock.writelines(out) - fsock.close() + fsock.close() else: # for iotest out = ["%s\n" %l for l in out] @@ -3702,7 +3702,7 @@ def get_idbmup(lpp): def get_banner_init_information(self): """return a dictionary with the information needed to write the first line of the block of the lhe file.""" - + output = {} output["idbmup1"] = self.get_idbmup(self['lpp1']) output["idbmup2"] = self.get_idbmup(self['lpp2']) @@ -3713,7 +3713,7 @@ def get_banner_init_information(self): output["pdfsup1"] = self.get_pdf_id(self["pdlabel"]) output["pdfsup2"] = self.get_pdf_id(self["pdlabel"]) return output - + def get_pdf_id(self, pdf): if pdf == "lhapdf": lhaid = self["lhaid"] @@ -3721,19 +3721,19 @@ def get_pdf_id(self, pdf): return lhaid[0] else: return lhaid - else: + else: try: return {'none': 0, 'iww': 0, 'eva':0, 'edff':0, 'chff':0, 'cteq6_m':10000,'cteq6_l':10041,'cteq6l1':10042, 'nn23lo':246800,'nn23lo1':247000,'nn23nlo':244800 - }[pdf] + }[pdf] except: - return 0 - + return 0 + def get_lhapdf_id(self): return self.get_pdf_id(self['pdlabel']) - def remove_all_cut(self): + def remove_all_cut(self): """remove all the cut""" for name in self.cuts_parameter: @@ -3749,7 +3749,7 @@ def remove_all_cut(self): elif 'eta' in name: self[name] = -1 else: - self[name] = 0 + self[name] = 0 
################################################################################################ ### Define various template subpart for the LO Run_card @@ -3767,11 +3767,11 @@ def remove_all_cut(self): %(nb_proton1)s = nb_proton1 # number of proton for the first beam %(nb_neutron1)s = nb_neutron1 # number of neutron for the first beam %(mass_ion1)s = mass_ion1 # mass of the heavy ion (first beam) -# Note that seting differently the two beams only work if you use +# Note that seting differently the two beams only work if you use # group_subprocess=False when generating your matrix-element %(nb_proton2)s = nb_proton2 # number of proton for the second beam %(nb_neutron2)s = nb_neutron2 # number of neutron for the second beam - %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) + %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ template_off = "# To see heavy ion options: type \"update ion_pdf\"" @@ -3834,11 +3834,11 @@ def remove_all_cut(self): # Frame for polarization ------------------------------------------------------------------------------------ template_on = \ """#********************************************************************* -# Frame where to evaluate the matrix-element (not the cut!) for polarization +# Frame where to evaluate the matrix-element (not the cut!) for polarization #********************************************************************* %(me_frame)s = me_frame ! list of particles to sum-up to define the rest-frame ! in which to evaluate the matrix-element - ! [1,2] means the partonic center of mass + ! 
[1,2] means the partonic center of mass """ template_off = "" frame_block = RunBlock('frame', template_on=template_on, template_off=template_off) @@ -3891,7 +3891,7 @@ def remove_all_cut(self): # CONTROL The extra running scale (not QCD) * # Such running is NOT include in systematics computation * #*********************************************************************** - %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale + %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale %(mue_ref_fixed)s = mue_ref_fixed ! scale to use if fixed scale mode %(mue_over_ref)s = mue_over_ref ! ratio to mur if dynamical scale """ @@ -3908,10 +3908,10 @@ def remove_all_cut(self): %(tmin_for_channel)s = tmin_for_channel ! limit the non-singular reach of --some-- channel of integration related to T-channel diagram (value between -1 and 0), -1 is no impact %(survey_splitting)s = survey_splitting ! for loop-induced control how many core are used at survey for the computation of a single iteration. %(survey_nchannel_per_job)s = survey_nchannel_per_job ! control how many Channel are integrated inside a single job on cluster/multicore - %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) + %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) +#********************************************************************* +# Compilation flag. #********************************************************************* -# Compilation flag. -#********************************************************************* %(global_flag)s = global_flag ! fortran optimization flag use for the all code. %(aloha_flag)s = aloha_flag ! fortran optimization flag for aloha function. Suggestions: '-ffast-math' %(matrix_flag)s = matrix_flag ! fortran optimization flag for matrix.f function. 
Suggestions: '-O3' @@ -3948,7 +3948,7 @@ def check_validity(self, card): if card['pdlabel'] != card['pdlabel1']: dict.__setitem__(card, 'pdlabel', card['pdlabel1']) elif card['pdlabel1'] in sum(card.allowed_lep_densities.values(),[]): - raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") + raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel2'] in sum(card.allowed_lep_densities.values(),[]): raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel1'] == 'none': @@ -3962,7 +3962,7 @@ def check_validity(self, card): dict.__setitem__(card, 'pdlabel2', card['pdlabel']) if abs(card['lpp1']) == 1 == abs(card['lpp2']) and card['pdlabel1'] != card['pdlabel2']: - raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") + raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -4028,7 +4028,7 @@ def post_set(card, value, change_userdefine, raiseerror, name='unknown', **opt): if name == 'fixed_fac_scale2' and 'fixed_fac_scale1' not in card.user_set: dict.__setitem__(card, 'fixed_fac_scale1', card['fixed_fac_scale']) if name == 'fixed_fac_scale1' and 'fixed_fac_scale2' not in card.user_set: - dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) + dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) def status(self, card): @@ -4061,32 +4061,32 @@ def status(self, card): class RunCardLO(RunCard): """an object to handle in a nice way the run_card information""" - + blocks = [heavy_ion_block, beam_pol_block, syscalc_block, ecut_block, frame_block, eva_scale_block, mlm_block, ckkw_block, psoptim_block, pdlabel_block, fixedfacscale, running_block] dummy_fct_file = {"dummy_cuts": 
pjoin("SubProcesses","dummy_fct.f"), "get_dummy_x1": pjoin("SubProcesses","dummy_fct.f"), - "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), + "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), "dummy_boostframe": pjoin("SubProcesses","dummy_fct.f"), "user_dynamical_scale": pjoin("SubProcesses","dummy_fct.f"), "bias_wgt_custom": pjoin("SubProcesses","dummy_fct.f"), "user_": pjoin("SubProcesses","dummy_fct.f") # all function starting by user will be added to that file } - + include_as_parameter = ['vector.inc'] if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_lo.dat") - + def default_setup(self): """default value for the run_card.dat""" - + self.add_param("run_tag", "tag_1", include=False) self.add_param("gridpack", False) self.add_param("time_of_flight", -1.0, include=False) - self.add_param("nevents", 10000) + self.add_param("nevents", 10000) self.add_param("iseed", 0) self.add_param("python_seed", -2, include=False, hidden=True, comment="controlling python seed [handling in particular the final unweighting].\n -1 means use default from random module.\n -2 means set to same value as iseed") self.add_param("lpp1", 1, fortran_name="lpp(1)", allowed=[-1,1,0,2,3,9,-2,-3,4,-4], @@ -4106,7 +4106,7 @@ def default_setup(self): self.add_param('nb_neutron1', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(1)", comment='For heavy ion physics nb of neutron in the ion (for both beam but if group_subprocess was False)') self.add_param('nb_neutron2', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(2)", - comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') + comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') self.add_param('mass_ion1', -1.0, hidden=True, fortran_name="mass_ion(1)", allowed=[-1,0, 0.938, 207.9766521*0.938, 0.000511, 0.105, '*'], comment='For heavy ion physics mass in GeV of the ion (of beam 1)') 
@@ -4133,11 +4133,11 @@ def default_setup(self): self.add_param("mue_over_ref", 1.0, hidden=True, comment='ratio mu_other/mu for dynamical scale') self.add_param("ievo_eva",0,hidden=True, allowed=[0,1],fortran_name="ievo_eva", comment='eva: 0 for EW pdf muf evolution by q^2; 1 for evo by pT^2') - + # Bias module options self.add_param("bias_module", 'None', include=False, hidden=True) self.add_param('bias_parameters', {'__type__':1.0}, include='BIAS/bias.inc', hidden=True) - + #matching self.add_param("scalefact", 1.0) self.add_param("ickkw", 0, allowed=[0,1], hidden=True, comment="\'0\' for standard fixed order computation.\n\'1\' for MLM merging activates alphas and pdf re-weighting according to a kt clustering of the QCD radiation.") @@ -4221,7 +4221,7 @@ def default_setup(self): self.add_param("mmaa", 0.0, cut='aa') self.add_param("mmll", 0.0, cut='ll') self.add_param("mmjjmax", -1.0, cut='jj') - self.add_param("mmbbmax", -1.0, cut='bb') + self.add_param("mmbbmax", -1.0, cut='bb') self.add_param("mmaamax", -1.0, cut='aa') self.add_param("mmllmax", -1.0, cut='ll') self.add_param("mmnl", 0.0, cut='LL') @@ -4231,9 +4231,9 @@ def default_setup(self): self.add_param("ptllmax", -1.0, cut='ll') self.add_param("xptj", 0.0, cut='jj') self.add_param("xptb", 0.0, cut='bb') - self.add_param("xpta", 0.0, cut='aa') + self.add_param("xpta", 0.0, cut='aa') self.add_param("xptl", 0.0, cut='ll') - # ordered pt jet + # ordered pt jet self.add_param("ptj1min", 0.0, cut='jj') self.add_param("ptj1max", -1.0, cut='jj') self.add_param("ptj2min", 0.0, cut='jj') @@ -4241,7 +4241,7 @@ def default_setup(self): self.add_param("ptj3min", 0.0, cut='jjj') self.add_param("ptj3max", -1.0, cut='jjj') self.add_param("ptj4min", 0.0, cut='j'*4) - self.add_param("ptj4max", -1.0, cut='j'*4) + self.add_param("ptj4max", -1.0, cut='j'*4) self.add_param("cutuse", 0, cut='jj') # ordered pt lepton self.add_param("ptl1min", 0.0, cut='l'*2) @@ -4249,7 +4249,7 @@ def default_setup(self): 
self.add_param("ptl2min", 0.0, cut='l'*2) self.add_param("ptl2max", -1.0, cut='l'*2) self.add_param("ptl3min", 0.0, cut='l'*3) - self.add_param("ptl3max", -1.0, cut='l'*3) + self.add_param("ptl3max", -1.0, cut='l'*3) self.add_param("ptl4min", 0.0, cut='l'*4) self.add_param("ptl4max", -1.0, cut='l'*4) # Ht sum of jets @@ -4257,7 +4257,7 @@ def default_setup(self): self.add_param("htjmax", -1.0, cut='j'*2) self.add_param("ihtmin", 0.0, cut='J'*2) self.add_param("ihtmax", -1.0, cut='J'*2) - self.add_param("ht2min", 0.0, cut='J'*3) + self.add_param("ht2min", 0.0, cut='J'*3) self.add_param("ht3min", 0.0, cut='J'*3) self.add_param("ht4min", 0.0, cut='J'*4) self.add_param("ht2max", -1.0, cut='J'*3) @@ -4267,7 +4267,7 @@ def default_setup(self): self.add_param("ptgmin", 0.0, cut='aj') self.add_param("r0gamma", 0.4, hidden=True) self.add_param("xn", 1.0, hidden=True) - self.add_param("epsgamma", 1.0, hidden=True) + self.add_param("epsgamma", 1.0, hidden=True) self.add_param("isoem", True, hidden=True) self.add_param("xetamin", 0.0, cut='jj') self.add_param("deltaeta", 0.0, cut='j'*2) @@ -4280,7 +4280,7 @@ def default_setup(self): self.add_param("use_syst", True) self.add_param('systematics_program', 'systematics', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics, syscalc') self.add_param('systematics_arguments', ['--mur=0.5,1,2', '--muf=0.5,1,2', '--pdf=errorset'], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. 
Look at the help of the systematics function for more details.') - + self.add_param("sys_scalefact", "0.5 1 2", include=False, hidden=True) self.add_param("sys_alpsfact", "None", include=False, hidden=True) self.add_param("sys_matchscale", "auto", include=False, hidden=True) @@ -4315,8 +4315,8 @@ def default_setup(self): self.add_param('aloha_flag', '', include=False, hidden=True, comment='global fortran compilation flag, suggestion: -ffast-math', fct_mod=(self.make_clean, ('Source/DHELAS'),{})) self.add_param('matrix_flag', '', include=False, hidden=True, comment='fortran compilation flag for the matrix-element files, suggestion -O3', - fct_mod=(self.make_Ptouch, ('matrix'),{})) - self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', + fct_mod=(self.make_Ptouch, ('matrix'),{})) + self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', fortran_name='VECSIZE_MEMMAX', fct_mod=(self.reset_simd,(),{})) # parameter allowing to define simple cut via the pdg @@ -4329,24 +4329,24 @@ def default_setup(self): self.add_param('eta_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False) - + self.add_param('pdg_cut',[0], system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], system=True) # store pt min self.add_param('ptmax4pdg',[-1.], system=True) self.add_param('Emin4pdg',[0.], system=True) # store pt min - self.add_param('Emax4pdg',[-1.], system=True) + self.add_param('Emax4pdg',[-1.], system=True) self.add_param('etamin4pdg',[0.], system=True) # store pt min - self.add_param('etamax4pdg',[-1.], system=True) + self.add_param('etamax4pdg',[-1.], system=True) self.add_param('mxxmin4pdg',[-1.], system=True) self.add_param('mxxpart_antipart', [False], system=True) - - - + + + def check_validity(self): """ """ 
- + super(RunCardLO, self).check_validity() - + #Make sure that nhel is only either 0 (i.e. no MC over hel) or #1 (MC over hel with importance sampling). In particular, it can #no longer be > 1. @@ -4357,12 +4357,12 @@ def check_validity(self): "not %s." % self['nhel']) if int(self['maxjetflavor']) > 6: raise InvalidRunCard('maxjetflavor should be lower than 5! (6 is partly supported)') - + if len(self['pdgs_for_merging_cut']) > 1000: raise InvalidRunCard("The number of elements in "+\ "'pdgs_for_merging_cut' should not exceed 1000.") - + # some cut need to be deactivated in presence of isolation if self['ptgmin'] > 0: if self['pta'] > 0: @@ -4370,18 +4370,18 @@ def check_validity(self): self['pta'] = 0.0 if self['draj'] > 0: logger.warning('draj cut discarded since photon isolation is used') - self['draj'] = 0.0 - - # special treatment for gridpack use the gseed instead of the iseed + self['draj'] = 0.0 + + # special treatment for gridpack use the gseed instead of the iseed if self['gridrun']: self['iseed'] = self['gseed'] - + #Some parameter need to be fixed when using syscalc #if self['use_syst']: # if self['scalefact'] != 1.0: # logger.warning('Since use_syst=T, changing the value of \'scalefact\' to 1') # self['scalefact'] = 1.0 - + # CKKW Treatment if self['ickkw'] > 0: if self['ickkw'] != 1: @@ -4399,7 +4399,7 @@ def check_validity(self): raise InvalidRunCard('maxjetflavor at 6 is NOT supported for matching!') if self['ickkw'] == 2: # add warning if ckkw selected but the associate parameter are empty - self.get_default('highestmult', log_level=20) + self.get_default('highestmult', log_level=20) self.get_default('issgridfile', 'issudgrid.dat', log_level=20) if self['xqcut'] > 0: if self['ickkw'] == 0: @@ -4412,13 +4412,13 @@ def check_validity(self): if self['drjl'] != 0: if 'drjl' in self.user_set: logger.warning('Since icckw>0, changing the value of \'drjl\' to 0') - self['drjl'] = 0 - if not self['auto_ptj_mjj']: + self['drjl'] = 0 + if not 
self['auto_ptj_mjj']: if self['mmjj'] > self['xqcut']: logger.warning('mmjj > xqcut (and auto_ptj_mjj = F). MMJJ set to 0') - self['mmjj'] = 0.0 - - # check validity of the pdf set + self['mmjj'] = 0.0 + + # check validity of the pdf set # note that pdlabel is automatically set to lhapdf if pdlabel1 or pdlabel2 is set to lhapdf if self['pdlabel'] == 'lhapdf': #add warning if lhaid not define @@ -4426,7 +4426,7 @@ def check_validity(self): mod = False for i in [1,2]: - lpp = 'lpp%i' %i + lpp = 'lpp%i' %i pdlabelX = 'pdlabel%i' % i if self[lpp] == 0: # nopdf if self[pdlabelX] != 'none': @@ -4459,12 +4459,12 @@ def check_validity(self): raise InvalidRunCard( "Heavy ion mode is only supported for lpp1=1/2") if self['lpp2'] not in [1,2]: if self['nb_proton2'] !=1 or self['nb_neutron2'] !=0: - raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") + raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") # check that fixed_fac_scale(1/2) is setting as expected # if lpp=2/3/4 -> default is that beam in fixed scale - # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are + # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are # check that both fixed_fac_scale1/2 are defined together # ensure that fixed_fac_scale1 and fixed_fac_scale2 are setup as needed if 'fixed_fac_scale1' in self.user_set: @@ -4475,13 +4475,13 @@ def check_validity(self): elif 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale1 are defined but not fixed_fac_scale2. The value of fixed_fac_scale2 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale2'] = self['fixed_fac_scale'] - elif self['lpp2'] !=0: + elif self['lpp2'] !=0: raise Exception('fixed_fac_scale2 not defined while fixed_fac_scale1 is. Please fix your run_card.') elif 'fixed_fac_scale2' in self.user_set: if 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale2 are defined but not fixed_fac_scale1. 
The value of fixed_fac_scale1 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale1'] = self['fixed_fac_scale'] - elif self['lpp1'] !=0: + elif self['lpp1'] !=0: raise Exception('fixed_fac_scale1 not defined while fixed_fac_scale2 is. Please fix your run_card.') else: if 'fixed_fac_scale' in self.user_set: @@ -4500,12 +4500,12 @@ def check_validity(self): logger.warning('fixed_fac_scale1 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale1']) logger.warning('fixed_fac_scale2 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale2']) - # check if lpp = + # check if lpp = if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]): for i in [1,2]: if abs(self['lpp%s' % i ]) in [3,4] and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: logger.warning("Vector boson from lepton PDF is using fixed scale value of muf [dsqrt_q2fact%s]. Looks like you kept the default value (Mz). Is this really the cut-off that you want to use?" % i) - + if abs(self['lpp%s' % i ]) == 2 and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: if self['pdlabel'] in ['edff','chff']: logger.warning("Since 3.5.0 exclusive photon-photon processes in ultraperipheral proton and nuclear collisions from gamma-UPC (arXiv:2207.03012) will ignore the factorisation scale.") @@ -4515,10 +4515,10 @@ def check_validity(self): if six.PY2 and self['hel_recycling']: self['hel_recycling'] = False - logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. + logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. In general this optimization speeds up the computation by a factor of two.""") - + # check that ebeam is bigger than the associated mass. 
for i in [1,2]: if self['lpp%s' % i ] not in [1,2]: @@ -4529,13 +4529,13 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") - elif self['ebeam%i' % i] < self['mass_ion%i' % i]: + raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + elif self['ebeam%i' % i] < self['mass_ion%i' % i]: if self['ebeam%i' %i] == 0: logger.warning("At rest ion mode set: Energy beam set to %s" % self['mass_ion%i' % i]) self.set('ebeam%i' %i, self['mass_ion%i' % i]) - - + + # check the tmin_for_channel is negative if self['tmin_for_channel'] == 0: raise InvalidRunCard('tmin_for_channel can not be set to 0.') @@ -4543,15 +4543,15 @@ def check_validity(self): logger.warning('tmin_for_channel should be negative. Will be using -%f instead' % self['tmin_for_channel']) self.set('tmin_for_channel', -self['tmin_for_channel']) - + def update_system_parameter_for_include(self): """system parameter need to be setupe""" - + # polarization self['frame_id'] = sum(2**(n) for n in self['me_frame']) - + # set the pdg_for_cut fortran parameter - pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + + pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + list(self['e_min_pdg'].keys()) +list(self['e_max_pdg'].keys()) + list(self['eta_min_pdg'].keys()) +list(self['eta_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys()) + list(self['mxx_only_part_antipart'].keys())) @@ -4559,15 +4559,15 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different pdgs are allowed for pdg specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. 
Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative pdg code') - - + + if any(pdg in pdg_to_cut for pdg in [1,2,3,4,5,21,22,11,13,15]): raise Exception("Can not use PDG related cut for light quark/b quark/lepton/gluon/photon") - + if pdg_to_cut: self['pdg_cut'] = list(pdg_to_cut) self['ptmin4pdg'] = [] @@ -4595,7 +4595,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -4605,11 +4605,11 @@ def update_system_parameter_for_include(self): self['ptmax4pdg'] = [-1.] self['Emax4pdg'] = [-1.] self['etamax4pdg'] =[-1.] - self['mxxmin4pdg'] =[0.] + self['mxxmin4pdg'] =[0.] self['mxxpart_antipart'] = [False] - - - + + + def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules process 1->N all cut set on off. 
@@ -4626,7 +4626,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if proc_characteristic['loop_induced']: self['nhel'] = 1 self['pdgs_for_merging_cut'] = proc_characteristic['colored_pdgs'] - + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() @@ -4636,7 +4636,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): # check for beam_id beam_id = set() beam_id_split = [set(), set()] - for proc in proc_def: + for proc in proc_def: for oneproc in proc: for i,leg in enumerate(oneproc['legs']): if not leg['state']: @@ -4654,20 +4654,20 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): maxjetflavor = max([4]+[abs(i) for i in beam_id if -7< i < 7]) self['maxjetflavor'] = maxjetflavor self['asrwgtflavor'] = maxjetflavor - + if any(i in beam_id for i in [1,-1,2,-2,3,-3,4,-4,5,-5,21,22]): # check for e p collision if any(id in beam_id for id in [11,-11,13,-13]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [11,-11,13,-13]): - self['lpp1'] = 0 - self['lpp2'] = 1 - self['ebeam1'] = '1k' - self['ebeam2'] = '6500' + self['lpp1'] = 0 + self['lpp2'] = 1 + self['ebeam1'] = '1k' + self['ebeam2'] = '6500' else: - self['lpp1'] = 1 - self['lpp2'] = 0 - self['ebeam1'] = '6500' + self['lpp1'] = 1 + self['lpp2'] = 0 + self['ebeam1'] = '6500' self['ebeam2'] = '1k' # UPC for p p collision @@ -4677,7 +4677,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam1'] = '6500' self['ebeam2'] = '6500' self['pdlabel'] = 'edff' - + elif any(id in beam_id for id in [11,-11,13,-13]): self['lpp1'] = 0 self['lpp2'] = 0 @@ -4688,7 +4688,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('ecut') self.display_block.append('beam_pol') - + # check for possibility of eva eva_in_b1 = any(i in beam_id_split[0] for i in [23,24,-24]) #,12,-12,14,-14]) @@ -4701,10 
+4701,10 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['nhel'] = 1 self['pdlabel'] = 'eva' self['fixed_fac_scale'] = True - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') elif eva_in_b1: - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') self['pdlabel1'] = 'eva' self['fixed_fac_scale1'] = True self['nhel'] = 1 @@ -4724,7 +4724,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['pdlabel2'] = 'eva' self['fixed_fac_scale2'] = True self['nhel'] = 1 - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') for i in beam_id_split[0]: if abs(i) == 11: self['lpp1'] = math.copysign(3,i) @@ -4740,34 +4740,34 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if any(i in beam_id for i in [22,23,24,-24,12,-12,14,-14]): self.display_block.append('eva_scale') - # automatic polarisation of the beam if neutrino beam + # automatic polarisation of the beam if neutrino beam if any(id in beam_id for id in [12,-12,14,-14,16,-16]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [12,14,16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = -100 if not all(id in [12,14,16] for id in beam_id_split[0]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1]. %s') elif any(id in beam_id_split[0] for id in [-12,-14,-16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[0]): - logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1].') + logger.warning('Issue with default beam setup of neutrino in the run_card. 
Please check it up [polbeam1].') if any(id in beam_id_split[1] for id in [12,14,16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = -100 if not all(id in [12,14,16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') elif any(id in beam_id_split[1] for id in [-12,-14,-16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') - + # Check if need matching min_particle = 99 max_particle = 0 @@ -4798,12 +4798,12 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - + break + if matching: self['ickkw'] = 1 self['xqcut'] = 30 - #self['use_syst'] = False + #self['use_syst'] = False self['drjj'] = 0 self['drjl'] = 0 self['sys_alpsfact'] = "0.5 1 2" @@ -4811,8 +4811,8 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('mlm') self.display_block.append('ckkw') self['dynamical_scale_choice'] = -1 - - + + # For interference module, the systematics are wrong. 
# automatically set use_syst=F and set systematics_program=none no_systematics = False @@ -4826,14 +4826,14 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): continue break - + if interference or no_systematics: self['use_syst'] = False self['systematics_program'] = 'none' if interference: self['dynamical_scale_choice'] = 3 self['sde_strategy'] = 2 - + # set default integration strategy # interference case is already handle above # here pick strategy 2 if only one QCD color flow @@ -4852,7 +4852,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if pure_lepton and proton_initial: self['sde_strategy'] = 1 else: - # check if multi-jet j + # check if multi-jet j is_multijet = True for proc in proc_def: if any(abs(j.get('id')) not in jet_id for j in proc[0]['legs']): @@ -4860,7 +4860,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): break if is_multijet: self['sde_strategy'] = 2 - + # if polarization is used, set the choice of the frame in the run_card # But only if polarization is used for massive particles for plist in proc_def: @@ -4870,7 +4870,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): model = proc.get('model') particle = model.get_particle(l.get('id')) if particle.get('mass').lower() != 'zero': - self.display_block.append('frame') + self.display_block.append('frame') break else: continue @@ -4894,15 +4894,15 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): proc = proc_list[0] if proc['forbidden_onsh_s_channels']: self['sde_strategy'] = 1 - + if 'fix_scale' in proc_characteristic['limitations']: self['fixed_ren_scale'] = 1 self['fixed_fac_scale'] = 1 if self['ickkw'] == 1: logger.critical("MLM matching/merging not compatible with the model! 
You need to use another method to remove the double counting!") self['ickkw'] = 0 - - # define class of particles present to hide all the cuts associated to + + # define class of particles present to hide all the cuts associated to # not present class cut_class = collections.defaultdict(int) for proc in proc_def: @@ -4925,41 +4925,41 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): one_proc_cut['L'] += 1 elif abs(pdg) in [12,14,16]: one_proc_cut['n'] += 1 - one_proc_cut['L'] += 1 + one_proc_cut['L'] += 1 elif str(oneproc.get('model').get_particle(pdg)['mass']) != 'ZERO': one_proc_cut['H'] += 1 - + for key, nb in one_proc_cut.items(): cut_class[key] = max(cut_class[key], nb) self.cut_class = dict(cut_class) self.cut_class[''] = True #avoid empty - + # If model has running functionality add the additional parameter model = proc_def[0][0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Read file input/default_run_card_lo.dat # This has to be LAST !! 
if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'run_card.dat') python_template = True else: template = pjoin(MEDIR, 'Cards', 'run_card_default.dat') python_template = False - + hid_lines = {'default':True}#collections.defaultdict(itertools.repeat(True).next) if isinstance(output_file, str): @@ -4975,9 +4975,9 @@ def write(self, output_file, template=None, python_template=False, hid_lines[k1+k2] = True super(RunCardLO, self).write(output_file, template=template, - python_template=python_template, + python_template=python_template, template_options=hid_lines, - **opt) + **opt) class InvalidMadAnalysis5Card(InvalidCmd): @@ -4986,19 +4986,19 @@ class InvalidMadAnalysis5Card(InvalidCmd): class MadAnalysis5Card(dict): """ A class to store a MadAnalysis5 card. 
Very basic since it is basically free format.""" - + _MG5aMC_escape_tag = '@MG5aMC' - + _default_hadron_inputs = ['*.hepmc', '*.hep', '*.stdhep', '*.lhco','*.root'] _default_parton_inputs = ['*.lhe'] _skip_analysis = False - + @classmethod def events_can_be_reconstructed(cls, file_path): """ Checks from the type of an event file whether it can be reconstructed or not.""" return not (file_path.endswith('.lhco') or file_path.endswith('.lhco.gz') or \ file_path.endswith('.root') or file_path.endswith('.root.gz')) - + @classmethod def empty_analysis(cls): """ A method returning the structure of an empty analysis """ @@ -5012,7 +5012,7 @@ def empty_reconstruction(cls): 'reco_output':'lhe'} def default_setup(self): - """define the default value""" + """define the default value""" self['mode'] = 'parton' self['inputs'] = [] # None is the default stdout level, it will be set automatically by MG5aMC @@ -5025,8 +5025,8 @@ def default_setup(self): # of this class and some other property could be added to this dictionary # in the future. self['analyses'] = {} - # The recasting structure contains on set of commands and one set of - # card lines. + # The recasting structure contains on set of commands and one set of + # card lines. 
self['recasting'] = {'commands':[],'card':[]} # Add the default trivial reconstruction to use an lhco input # This is just for the structure @@ -5035,7 +5035,7 @@ def default_setup(self): 'root_input': MadAnalysis5Card.empty_reconstruction()} self['reconstruction']['lhco_input']['reco_output']='lhco' - self['reconstruction']['root_input']['reco_output']='root' + self['reconstruction']['root_input']['reco_output']='root' # Specify in which order the analysis/recasting were specified self['order'] = [] @@ -5049,7 +5049,7 @@ def __init__(self, finput=None,mode=None): return else: dict.__init__(self) - + # Initialize it with all the default value self.default_setup() if not mode is None: @@ -5058,15 +5058,15 @@ def __init__(self, finput=None,mode=None): # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, mode=mode) - + def read(self, input, mode=None): """ Read an MA5 card""" - + if mode not in [None,'parton','hadron']: raise MadGraph5Error('A MadAnalysis5Card can be read online the modes'+ "'parton' or 'hadron'") card_mode = mode - + if isinstance(input, (file, StringIO.StringIO)): input_stream = input elif isinstance(input, str): @@ -5099,10 +5099,10 @@ def read(self, input, mode=None): except ValueError: option = line[len(self._MG5aMC_escape_tag):] option = option.strip() - + if option=='inputs': self['inputs'].extend([v.strip() for v in value.split(',')]) - + elif option == 'skip_analysis': self._skip_analysis = True @@ -5118,7 +5118,7 @@ def read(self, input, mode=None): except: raise InvalidMadAnalysis5Card( "MA5 output level specification '%s' is incorrect."%str(value)) - + elif option=='analysis_name': current_type = 'analyses' current_name = value @@ -5127,7 +5127,7 @@ def read(self, input, mode=None): "Analysis '%s' already defined in MadAnalysis5 card"%current_name) else: self[current_type][current_name] = MadAnalysis5Card.empty_analysis() - + elif option=='set_reconstructions': try: reconstructions = 
eval(value) @@ -5142,7 +5142,7 @@ def read(self, input, mode=None): "analysis in a MadAnalysis5 card.") self[current_type][current_name]['reconstructions']=reconstructions continue - + elif option=='reconstruction_name': current_type = 'reconstruction' current_name = value @@ -5161,7 +5161,7 @@ def read(self, input, mode=None): raise InvalidMadAnalysis5Card( "Option '%s' can only take the values 'lhe' or 'root'"%option) self['reconstruction'][current_name]['reco_output'] = value.lower() - + elif option.startswith('recasting'): current_type = 'recasting' try: @@ -5171,11 +5171,11 @@ def read(self, input, mode=None): if len(self['recasting'][current_name])>0: raise InvalidMadAnalysis5Card( "Only one recasting can be defined in MadAnalysis5 hadron card") - + else: raise InvalidMadAnalysis5Card( "Unreckognized MG5aMC instruction in MadAnalysis5 card: '%s'"%option) - + if option in ['analysis_name','reconstruction_name'] or \ option.startswith('recasting'): self['order'].append((current_type,current_name)) @@ -5209,7 +5209,7 @@ def read(self, input, mode=None): self['inputs'] = self._default_hadron_inputs else: self['inputs'] = self._default_parton_inputs - + # Make sure at least one reconstruction is specified for each hadron # level analysis and that it exists. if self['mode']=='hadron': @@ -5221,7 +5221,7 @@ def read(self, input, mode=None): analysis['reconstructions']): raise InvalidMadAnalysis5Card('A reconstructions specified in'+\ " analysis '%s' is not defined."%analysis_name) - + def write(self, output): """ Write an MA5 card.""" @@ -5232,7 +5232,7 @@ def write(self, output): else: raise MadGraph5Error('Incorrect input for the write function of'+\ ' the MadAnalysis5Card card. 
Received argument type is: %s'%str(type(output))) - + output_lines = [] if self._skip_analysis: output_lines.append('%s skip_analysis'%self._MG5aMC_escape_tag) @@ -5240,11 +5240,11 @@ def write(self, output): if not self['stdout_lvl'] is None: output_lines.append('%s stdout_lvl=%s'%(self._MG5aMC_escape_tag,self['stdout_lvl'])) for definition_type, name in self['order']: - + if definition_type=='analyses': output_lines.append('%s analysis_name = %s'%(self._MG5aMC_escape_tag,name)) output_lines.append('%s set_reconstructions = %s'%(self._MG5aMC_escape_tag, - str(self['analyses'][name]['reconstructions']))) + str(self['analyses'][name]['reconstructions']))) elif definition_type=='reconstruction': output_lines.append('%s reconstruction_name = %s'%(self._MG5aMC_escape_tag,name)) elif definition_type=='recasting': @@ -5254,23 +5254,23 @@ def write(self, output): output_lines.extend(self[definition_type][name]) elif definition_type in ['reconstruction']: output_lines.append('%s reco_output = %s'%(self._MG5aMC_escape_tag, - self[definition_type][name]['reco_output'])) + self[definition_type][name]['reco_output'])) output_lines.extend(self[definition_type][name]['commands']) elif definition_type in ['analyses']: - output_lines.extend(self[definition_type][name]['commands']) - + output_lines.extend(self[definition_type][name]['commands']) + output_stream.write('\n'.join(output_lines)) - + return - - def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, + + def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, UFO_model_path=None, run_tag=''): - """ Returns a list of tuples ('AnalysisTag',['commands']) specifying - the commands of the MadAnalysis runs required from this card. - At parton-level, the number of such commands is the number of analysis + """ Returns a list of tuples ('AnalysisTag',['commands']) specifying + the commands of the MadAnalysis runs required from this card. 
+ At parton-level, the number of such commands is the number of analysis asked for. In the future, the idea is that the entire card can be processed in one go from MA5 directly.""" - + if isinstance(inputs_arg, list): inputs = inputs_arg elif isinstance(inputs_arg, str): @@ -5278,21 +5278,21 @@ def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, else: raise MadGraph5Error("The function 'get_MA5_cmds' can only take "+\ " a string or a list for the argument 'inputs_arg'") - + if len(inputs)==0: raise MadGraph5Error("The function 'get_MA5_cmds' must have "+\ " at least one input specified'") - + if run_dir_path is None: run_dir_path = os.path.dirname(inputs_arg) - + cmds_list = [] - + UFO_load = [] # first import the UFO if provided if UFO_model_path: UFO_load.append('import %s'%UFO_model_path) - + def get_import(input, type=None): """ Generates the MA5 import commands for that event file. """ dataset_name = os.path.basename(input).split('.')[0] @@ -5304,7 +5304,7 @@ def get_import(input, type=None): if not type is None: res.append('set %s.type = %s'%(dataset_name, type)) return res - + fifo_status = {'warned_fifo':False,'fifo_used_up':False} def warn_fifo(input): if not input.endswith('.fifo'): @@ -5317,7 +5317,7 @@ def warn_fifo(input): logger.warning('Only the first MA5 analysis/reconstructions can be run on a fifo. Subsequent runs will skip fifo inputs.') fifo_status['warned_fifo'] = True return True - + # Then the event file(s) input(s) inputs_load = [] for input in inputs: @@ -5325,16 +5325,16 @@ def warn_fifo(input): if len(inputs) > 1: inputs_load.append('set main.stacking_method = superimpose') - + submit_command = 'submit %s'%submit_folder+'_%s' - + # Keep track of the reconstruction outpus in the MA5 workflow # Keys are reconstruction names and values are .lhe.gz reco file paths. 
# We put by default already the lhco/root ones present reconstruction_outputs = { - 'lhco_input':[f for f in inputs if + 'lhco_input':[f for f in inputs if f.endswith('.lhco') or f.endswith('.lhco.gz')], - 'root_input':[f for f in inputs if + 'root_input':[f for f in inputs if f.endswith('.root') or f.endswith('.root.gz')]} # If a recasting card has to be written out, chose here its path @@ -5343,7 +5343,7 @@ def warn_fifo(input): # Make sure to only run over one analysis over each fifo. for definition_type, name in self['order']: - if definition_type == 'reconstruction': + if definition_type == 'reconstruction': analysis_cmds = list(self['reconstruction'][name]['commands']) reco_outputs = [] for i_input, input in enumerate(inputs): @@ -5365,8 +5365,8 @@ def warn_fifo(input): analysis_cmds.append( submit_command%('reco_%s_%d'%(name,i_input+1))) analysis_cmds.append('remove reco_events') - - reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) + + reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) for rec_out in reco_outputs] if len(reco_outputs)>0: cmds_list.append(('_reco_%s'%name,analysis_cmds)) @@ -5386,7 +5386,7 @@ def warn_fifo(input): analysis_cmds = ['set main.mode = parton'] else: analysis_cmds = [] - analysis_cmds.extend(sum([get_import(rec_out) for + analysis_cmds.extend(sum([get_import(rec_out) for rec_out in reconstruction_outputs[reco]],[])) analysis_cmds.extend(self['analyses'][name]['commands']) analysis_cmds.append(submit_command%('%s_%s'%(name,reco))) @@ -5427,12 +5427,12 @@ def warn_fifo(input): %(mue_ref_fixed)s = mue_ref_fixed ! 
scale to use if fixed scale mode """ running_block_nlo = RunBlock('RUNNING', template_on=template_on, template_off="") - + class RunCardNLO(RunCard): """A class object for the run_card for a (aMC@)NLO pocess""" - + LO = False - + blocks = [running_block_nlo] dummy_fct_file = {"dummy_cuts": pjoin("SubProcesses","dummy_fct.f"), @@ -5443,11 +5443,11 @@ class RunCardNLO(RunCard): if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_nlo.dat") - - + + def default_setup(self): """define the default value""" - + self.add_param('run_tag', 'tag_1', include=False) self.add_param('nevents', 10000) self.add_param('req_acc', -1.0, include=False) @@ -5455,27 +5455,27 @@ def default_setup(self): self.add_param("time_of_flight", -1.0, include=False) self.add_param('event_norm', 'average') #FO parameter - self.add_param('req_acc_fo', 0.01, include=False) + self.add_param('req_acc_fo', 0.01, include=False) self.add_param('npoints_fo_grid', 5000, include=False) self.add_param('niters_fo_grid', 4, include=False) - self.add_param('npoints_fo', 10000, include=False) + self.add_param('npoints_fo', 10000, include=False) self.add_param('niters_fo', 6, include=False) #seed and collider self.add_param('iseed', 0) - self.add_param('lpp1', 1, fortran_name='lpp(1)') - self.add_param('lpp2', 1, fortran_name='lpp(2)') + self.add_param('lpp1', 1, fortran_name='lpp(1)') + self.add_param('lpp2', 1, fortran_name='lpp(2)') self.add_param('ebeam1', 6500.0, fortran_name='ebeam(1)') - self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') + self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') self.add_param('pdlabel', 'nn23nlo', allowed=['lhapdf', 'emela', 'cteq6_m','cteq6_d','cteq6_l','cteq6l1', 'nn23lo','nn23lo1','nn23nlo','ct14q00','ct14q07','ct14q14','ct14q21'] +\ - sum(self.allowed_lep_densities.values(),[]) ) + sum(self.allowed_lep_densities.values(),[]) ) self.add_param('lhaid', [244600],fortran_name='lhaPDFid') self.add_param('pdfscheme', 0) # whether to include or not 
photon-initiated processes in lepton collisions self.add_param('photons_from_lepton', True) self.add_param('lhapdfsetname', ['internal_use_only'], system=True) - # stuff for lepton collisions - # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set - # whether the current PDF set has or not beamstrahlung + # stuff for lepton collisions + # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set + # whether the current PDF set has or not beamstrahlung self.add_param('has_bstrahl', False, system=True) # renormalisation scheme of alpha self.add_param('alphascheme', 0, system=True) @@ -5486,31 +5486,31 @@ def default_setup(self): # w contribution included or not in the running of alpha self.add_param('w_run', 1, system=True) #shower and scale - self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') + self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') self.add_param('shower_scale_factor',1.0) self.add_param('mcatnlo_delta', False) self.add_param('fixed_ren_scale', False) self.add_param('fixed_fac_scale', False) self.add_param('fixed_extra_scale', True, hidden=True, system=True) # set system since running from Ellis-Sexton scale not implemented - self.add_param('mur_ref_fixed', 91.118) + self.add_param('mur_ref_fixed', 91.118) self.add_param('muf1_ref_fixed', -1.0, hidden=True) - self.add_param('muf_ref_fixed', 91.118) + self.add_param('muf_ref_fixed', 91.118) self.add_param('muf2_ref_fixed', -1.0, hidden=True) - self.add_param('mue_ref_fixed', 91.118, hidden=True) - self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', + self.add_param('mue_ref_fixed', 91.118, hidden=True) + self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', allowed = [-2,-1,0,1,2,3,10], comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n 
'3' is HT/2, '0' allows to use the user_hook definition (need to be defined via custom_fct entry) ") self.add_param('fixed_qes_scale', False, hidden=True) self.add_param('qes_ref_fixed', -1.0, hidden=True) self.add_param('mur_over_ref', 1.0) - self.add_param('muf_over_ref', 1.0) - self.add_param('muf1_over_ref', -1.0, hidden=True) + self.add_param('muf_over_ref', 1.0) + self.add_param('muf1_over_ref', -1.0, hidden=True) self.add_param('muf2_over_ref', -1.0, hidden=True) self.add_param('mue_over_ref', 1.0, hidden=True, system=True) # forbid the user to modigy due to incorrect handling of the Ellis-Sexton scale self.add_param('qes_over_ref', -1.0, hidden=True) self.add_param('reweight_scale', [True], fortran_name='lscalevar') - self.add_param('rw_rscale_down', -1.0, hidden=True) + self.add_param('rw_rscale_down', -1.0, hidden=True) self.add_param('rw_rscale_up', -1.0, hidden=True) - self.add_param('rw_fscale_down', -1.0, hidden=True) + self.add_param('rw_fscale_down', -1.0, hidden=True) self.add_param('rw_fscale_up', -1.0, hidden=True) self.add_param('rw_rscale', [1.0,2.0,0.5], fortran_name='scalevarR') self.add_param('rw_fscale', [1.0,2.0,0.5], fortran_name='scalevarF') @@ -5523,60 +5523,60 @@ def default_setup(self): #technical self.add_param('folding', [1,1,1], include=False) - + #merging self.add_param('ickkw', 0, allowed=[-1,0,3,4], comment=" - 0: No merging\n - 3: FxFx Merging : http://amcatnlo.cern.ch/FxFx_merging.htm\n - 4: UNLOPS merging (No interface within MG5aMC)\n - -1: NNLL+NLO jet-veto computation. 
See arxiv:1412.8408 [hep-ph]") self.add_param('bwcutoff', 15.0) - #cuts + #cuts self.add_param('jetalgo', 1.0) - self.add_param('jetradius', 0.7) + self.add_param('jetradius', 0.7) self.add_param('ptj', 10.0 , cut=True) - self.add_param('etaj', -1.0, cut=True) - self.add_param('gamma_is_j', True) + self.add_param('etaj', -1.0, cut=True) + self.add_param('gamma_is_j', True) self.add_param('ptl', 0.0, cut=True) - self.add_param('etal', -1.0, cut=True) + self.add_param('etal', -1.0, cut=True) self.add_param('drll', 0.0, cut=True) - self.add_param('drll_sf', 0.0, cut=True) + self.add_param('drll_sf', 0.0, cut=True) self.add_param('mll', 0.0, cut=True) - self.add_param('mll_sf', 30.0, cut=True) - self.add_param('rphreco', 0.1) - self.add_param('etaphreco', -1.0) - self.add_param('lepphreco', True) - self.add_param('quarkphreco', True) + self.add_param('mll_sf', 30.0, cut=True) + self.add_param('rphreco', 0.1) + self.add_param('etaphreco', -1.0) + self.add_param('lepphreco', True) + self.add_param('quarkphreco', True) self.add_param('ptgmin', 20.0, cut=True) - self.add_param('etagamma', -1.0) + self.add_param('etagamma', -1.0) self.add_param('r0gamma', 0.4) - self.add_param('xn', 1.0) + self.add_param('xn', 1.0) self.add_param('epsgamma', 1.0) - self.add_param('isoem', True) + self.add_param('isoem', True) self.add_param('maxjetflavor', 4, hidden=True) - self.add_param('pineappl', False) + self.add_param('pineappl', False) self.add_param('lhe_version', 3, hidden=True, include=False) - + # customization self.add_param("custom_fcts",[],typelist="str", include=False, comment="list of files containing function that overwritte dummy function of the code (like adding cuts/...)") #internal variable related to FO_analyse_card self.add_param('FO_LHE_weight_ratio',1e-3, hidden=True, system=True) - self.add_param('FO_LHE_postprocessing',['grouping','random'], + self.add_param('FO_LHE_postprocessing',['grouping','random'], hidden=True, system=True, include=False) - + # parameter 
allowing to define simple cut via the pdg self.add_param('pt_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('pt_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True) - + #hidden parameter that are transfer to the fortran code self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min self.add_param('ptmax4pdg',[-1.], hidden=True, system=True) self.add_param('mxxmin4pdg',[0.], hidden=True, system=True) self.add_param('mxxpart_antipart', [False], hidden=True, system=True) - + def check_validity(self): """check the validity of the various input""" - + super(RunCardNLO, self).check_validity() # for lepton-lepton collisions, ignore 'pdlabel' and 'lhaid' @@ -5588,12 +5588,12 @@ def check_validity(self): # for dressed lepton collisions, check that the lhaid is a valid one if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]) + ['emela']: raise InvalidRunCard('pdlabel %s not allowed for dressed-lepton collisions' % self['pdlabel']) - + elif self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' self['reweight_pdf']=[False] logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') - + if self['lpp1'] == 0 == self['lpp2']: if self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' @@ -5601,8 +5601,8 @@ def check_validity(self): logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') # For FxFx merging, make sure that the following parameters are set correctly: - if self['ickkw'] == 3: - # 1. Renormalization and factorization (and ellis-sexton scales) are not fixed + if self['ickkw'] == 3: + # 1. 
Renormalization and factorization (and ellis-sexton scales) are not fixed scales=['fixed_ren_scale','fixed_fac_scale','fixed_QES_scale'] for scale in scales: if self[scale]: @@ -5615,7 +5615,7 @@ def check_validity(self): self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency in FxFx merging, dynamical_scale_choice has been set to -1 (default)''' ,'$MG:BOLD') - + # 2. Use kT algorithm for jets with pseudo-code size R=1.0 jetparams=['jetradius','jetalgo'] for jetparam in jetparams: @@ -5628,8 +5628,8 @@ def check_validity(self): self["dynamical_scale_choice"] = [-1] self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency with the jet veto, the scale which will be used is ptj. dynamical_scale_choice will be set at -1.''' - ,'$MG:BOLD') - + ,'$MG:BOLD') + # For interface to PINEAPPL, need to use LHAPDF and reweighting to get scale uncertainties if self['pineappl'] and self['pdlabel'].lower() != 'lhapdf': raise InvalidRunCard('PineAPPL generation only possible with the use of LHAPDF') @@ -5661,7 +5661,7 @@ def check_validity(self): if (self['rw_fscale_down'] != -1.0 and ['rw_fscale_down'] not in self['rw_fscale']) or\ (self['rw_fscale_up'] != -1.0 and ['rw_fscale_up'] not in self['rw_fscale']): self['rw_fscale']=[1.0,self['rw_fscale_up'],self['rw_fscale_down']] - + # PDF reweighting check if any(self['reweight_pdf']): # check that we use lhapdf if reweighting is ON @@ -5672,7 +5672,7 @@ def check_validity(self): if self['pdlabel'] != "lhapdf": self['reweight_pdf']=[self['reweight_pdf'][0]] self['lhaid']=[self['lhaid'][0]] - + # make sure set have reweight_scale and dyn_scale_choice of length 1 when fixed scales: if self['fixed_ren_scale'] and self['fixed_fac_scale']: self['reweight_scale']=[self['reweight_scale'][0]] @@ -5685,7 +5685,7 @@ def check_validity(self): self['reweight_pdf']=self['reweight_pdf']*len(self['lhaid']) logger.warning("Setting 'reweight_pdf' for all 'lhaid' to %s" % self['reweight_pdf'][0]) 
if len(self['reweight_scale']) == 1 and len(self['dynamical_scale_choice']) != 1: - self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) + self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) logger.warning("Setting 'reweight_scale' for all 'dynamical_scale_choice' to %s" % self['reweight_pdf'][0]) # Check that there are no identical elements in lhaid or dynamical_scale_choice @@ -5693,7 +5693,7 @@ def check_validity(self): raise InvalidRunCard("'lhaid' has two or more identical entries. They have to be all different for the code to work correctly.") if len(self['dynamical_scale_choice']) != len(set(self['dynamical_scale_choice'])): raise InvalidRunCard("'dynamical_scale_choice' has two or more identical entries. They have to be all different for the code to work correctly.") - + # Check that lenght of lists are consistent if len(self['reweight_pdf']) != len(self['lhaid']): raise InvalidRunCard("'reweight_pdf' and 'lhaid' lists should have the same length") @@ -5730,7 +5730,7 @@ def check_validity(self): if len(self['folding']) != 3: raise InvalidRunCard("'folding' should contain exactly three integers") for ifold in self['folding']: - if ifold not in [1,2,4,8]: + if ifold not in [1,2,4,8]: raise InvalidRunCard("The three 'folding' parameters should be equal to 1, 2, 4, or 8.") # Check MC@NLO-Delta if self['mcatnlo_delta'] and not self['parton_shower'].lower() == 'pythia8': @@ -5746,11 +5746,11 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938 GeV") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + raise InvalidRunCard("Energy for beam %i lower than proton mass. 
Please fix this") def update_system_parameter_for_include(self): - + # set the pdg_for_cut fortran parameter pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys())+ list(self['mxx_only_part_antipart'].keys())) @@ -5758,12 +5758,12 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different PDGs are allowed for PDG specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative PDG codes') - - + + if any(pdg in pdg_to_cut for pdg in [21,22,11,13,15]+ list(range(self['maxjetflavor']+1))): # Note that this will double check in the fortran code raise Exception("Can not use PDG related cuts for massless SM particles/leptons") @@ -5790,7 +5790,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -5800,12 +5800,12 @@ def update_system_parameter_for_include(self): self['mxxpart_antipart'] = [False] def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', 'run_card.dat') python_template = True else: @@ -5818,7 +5818,7 @@ def 
write(self, output_file, template=None, python_template=False, **opt): def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules - e+ e- beam -> lpp:0 ebeam:500 + e+ e- beam -> lpp:0 ebeam:500 p p beam -> set maxjetflavor automatically process with tagged photons -> gamma_is_j = false process without QED splittings -> gamma_is_j = false, recombination = false @@ -5844,19 +5844,19 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam2'] = 500 else: self['lpp1'] = 0 - self['lpp2'] = 0 - + self['lpp2'] = 0 + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() # check for tagged photons tagged_particles = set() - + # If model has running functionality add the additional parameter model = proc_def[0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Check if need matching min_particle = 99 @@ -5885,7 +5885,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: idsmin = [l['id'] for l in procmin['legs']] break - + for procmax in proc_def: if len(procmax['legs']) != max_particle: continue @@ -5901,9 +5901,9 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - - if matching: + break + + if matching: self['ickkw'] = 3 self['fixed_ren_scale'] = False self["fixed_fac_scale"] = False @@ -5911,17 +5911,17 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self["jetalgo"] = 1 self["jetradius"] = 1 self["parton_shower"] = "PYTHIA8" - + # Read file input/default_run_card_nlo.dat # This has to be LAST !! if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + class MadLoopParam(ConfigFile): """ a class for storing/dealing with the file MadLoopParam.dat contains a parser to read it, facilities to write a new file,... 
""" - + _ID_reduction_tool_map = {1:'CutTools', 2:'PJFry++', 3:'IREGI', @@ -5929,10 +5929,10 @@ class MadLoopParam(ConfigFile): 5:'Samurai', 6:'Ninja', 7:'COLLIER'} - + def default_setup(self): """initialize the directory to the default value""" - + self.add_param("MLReductionLib", "6|7|1") self.add_param("IREGIMODE", 2) self.add_param("IREGIRECY", True) @@ -5954,7 +5954,7 @@ def default_setup(self): self.add_param("HelicityFilterLevel", 2) self.add_param("LoopInitStartOver", False) self.add_param("HelInitStartOver", False) - self.add_param("UseQPIntegrandForNinja", True) + self.add_param("UseQPIntegrandForNinja", True) self.add_param("UseQPIntegrandForCutTools", True) self.add_param("COLLIERMode", 1) self.add_param("COLLIERComputeUVpoles", True) @@ -5966,9 +5966,9 @@ def default_setup(self): self.add_param("COLLIERUseInternalStabilityTest",True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -5976,7 +5976,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % input) - + previous_line= '' for line in finput: if previous_line.startswith('#'): @@ -5985,20 +5985,20 @@ def read(self, finput): if len(value) and value[0] not in ['#', '!']: self.__setitem__(name, value, change_userdefine=True) previous_line = line - - + + def write(self, outputpath, template=None,commentdefault=False): - + if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', + template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', 'Cards', 'MadLoopParams.dat') else: template = pjoin(MEDIR, 'Cards', 'MadLoopParams_default.dat') fsock = open(template, 'r') template = fsock.readlines() fsock.close() - + if isinstance(outputpath, str): output = open(outputpath, 'w') else: @@ -6019,7 
+6019,7 @@ def f77format(value): return value else: raise Exception("Can not format input %s" % type(value)) - + name = '' done = set() for line in template: @@ -6034,12 +6034,12 @@ def f77format(value): elif line.startswith('#'): name = line[1:].split()[0] output.write(line) - - - - - -class eMELA_info(ConfigFile): + + + + + +class eMELA_info(ConfigFile): """ a class for eMELA (LHAPDF-like) info files """ path = '' @@ -6053,7 +6053,7 @@ def __init__(self, finput, me_dir): def read(self, finput): - if isinstance(finput, file): + if isinstance(finput, file): lines = finput.open().read().split('\n') self.path = finput.name else: @@ -6066,7 +6066,7 @@ def read(self, finput): k, v = l.split(':', 1) # ignore further occurrences of : try: self[k.strip()] = eval(v) - except (NameError, SyntaxError): + except (NameError, SyntaxError): self[k.strip()] = v def default_setup(self): @@ -6091,7 +6091,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): +"powers of alpha should be reweighted a posteriori") - logger.info('Updating variables according to %s' % self.path) + logger.info('Updating variables according to %s' % self.path) # Flavours in the running of alpha nd, nu, nl = self['eMELA_ActiveFlavoursAlpha'] self.log_and_update(banner, 'run_card', 'ndnq_run', nd) @@ -6130,8 +6130,8 @@ def update_epdf_emela_variables(self, banner, uvscheme): logger.warning('Cannot treat the following renormalisation schemes for ME and PDFs: %d, %d' \ % (uvscheme, uvscheme_pdf)) - # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref - # also check that the com energy is equal to qref, otherwise print a + # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref + # also check that the com energy is equal to qref, otherwise print a # warning if uvscheme_pdf == 1: qref = self['eMELA_AlphaQref'] @@ -6144,23 +6144,23 @@ def update_epdf_emela_variables(self, banner, uvscheme): # LL / NLL PDF (0/1) pdforder = self['eMELA_PerturbativeOrder'] - # pdfscheme = 
0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) + # pdfscheme = 0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) # 4->mixed (leptonic); 5-> nobeta (leptonic); 6->delta (leptonic) # if LL, use nobeta scheme unless LEGACYLLPDF > 0 if pdforder == 0: if 'eMELA_LEGACYLLPDF' not in self.keys() or self['eMELA_LEGACYLLPDF'] in [-1, 0]: self.log_and_update(banner, 'run_card', 'pdfscheme', 5) - elif self['eMELA_LEGACYLLPDF'] == 1: + elif self['eMELA_LEGACYLLPDF'] == 1: # mixed self.log_and_update(banner, 'run_card', 'pdfscheme', 4) - elif self['eMELA_LEGACYLLPDF'] == 2: + elif self['eMELA_LEGACYLLPDF'] == 2: # eta self.log_and_update(banner, 'run_card', 'pdfscheme', 2) - elif self['eMELA_LEGACYLLPDF'] == 3: + elif self['eMELA_LEGACYLLPDF'] == 3: # beta self.log_and_update(banner, 'run_card', 'pdfscheme', 3) elif pdforder == 1: - # for NLL, use eMELA_FactorisationSchemeInt = 0/1 + # for NLL, use eMELA_FactorisationSchemeInt = 0/1 # for delta/MSbar if self['eMELA_FactorisationSchemeInt'] == 0: # MSbar @@ -6177,7 +6177,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): - + def log_and_update(self, banner, card, par, v): """update the card parameter par to value v diff --git a/epochX/cudacpp/gq_ttq.mad/bin/internal/gen_ximprove.py b/epochX/cudacpp/gq_ttq.mad/bin/internal/gen_ximprove.py index 5fd170d18d..cc842aa50f 100755 --- a/epochX/cudacpp/gq_ttq.mad/bin/internal/gen_ximprove.py +++ b/epochX/cudacpp/gq_ttq.mad/bin/internal/gen_ximprove.py @@ -2,18 +2,18 @@ # # Copyright (c) 2014 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. 
# -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch # ################################################################################ """ A python file to replace the fortran script gen_ximprove. - This script analyses the result of the survey/ previous refine and + This script analyses the result of the survey/ previous refine and creates the jobs for the following script. """ from __future__ import division @@ -66,77 +66,77 @@ class gensym(object): """a class to call the fortran gensym executable and handle it's output in order to create the various job that are needed for the survey""" - + #convenient shortcut for the formatting of variable @ staticmethod def format_variable(*args): return bannermod.ConfigFile.format_variable(*args) - + combining_job = 2 # number of channel by ajob - splitted_grid = False + splitted_grid = False min_iterations = 3 mode= "survey" - + def __init__(self, cmd, opt=None): - + try: super(gensym, self).__init__(cmd, opt) except TypeError: pass - - # Run statistics, a dictionary of RunStatistics(), with + + # Run statistics, a dictionary of RunStatistics(), with self.run_statistics = {} - + self.cmd = cmd self.run_card = cmd.run_card self.me_dir = cmd.me_dir - - + + # dictionary to keep track of the precision when combining iteration self.cross = collections.defaultdict(int) self.abscross = collections.defaultdict(int) self.sigma = collections.defaultdict(int) self.chi2 = collections.defaultdict(int) - + self.splitted_grid = False if self.cmd.proc_characteristics['loop_induced']: nexternal = self.cmd.proc_characteristics['nexternal'] self.splitted_grid = max(2, (nexternal-2)**2) if hasattr(self.cmd, "opts") and self.cmd.opts['accuracy'] == 0.1: self.cmd.opts['accuracy'] = 0.02 - + if isinstance(cmd.cluster, cluster.MultiCore) and 
self.splitted_grid > 1: self.splitted_grid = int(cmd.cluster.nb_core**0.5) if self.splitted_grid == 1 and cmd.cluster.nb_core >1: self.splitted_grid = 2 - + #if the user defines it in the run_card: if self.run_card['survey_splitting'] != -1: self.splitted_grid = self.run_card['survey_splitting'] if self.run_card['survey_nchannel_per_job'] != 1 and 'survey_nchannel_per_job' in self.run_card.user_set: - self.combining_job = self.run_card['survey_nchannel_per_job'] + self.combining_job = self.run_card['survey_nchannel_per_job'] elif self.run_card['hard_survey'] > 1: self.combining_job = 1 - - + + self.splitted_Pdir = {} self.splitted_for_dir = lambda x,y: self.splitted_grid self.combining_job_for_Pdir = lambda x: self.combining_job self.lastoffset = {} - + done_warning_zero_coupling = False def get_helicity(self, to_submit=True, clean=True): """launch a single call to madevent to get the list of non zero helicity""" - - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc P_zero_result = [] nb_tot_proc = len(subproc) - job_list = {} - - + job_list = {} + + for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.' 
% \ (nb_proc+1,nb_tot_proc), level=None) @@ -154,7 +154,7 @@ def get_helicity(self, to_submit=True, clean=True): p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts - + (stdout, _) = p.communicate(''.encode()) stdout = stdout.decode('ascii',errors='ignore') if stdout: @@ -166,11 +166,11 @@ def get_helicity(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir, 'error')): os.remove(pjoin(self.me_dir, 'error')) continue # bypass bad process - + self.cmd.compile(['madevent_forhel'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'madevent_forhel')): - raise Exception('Error make madevent_forhel not successful') - + raise Exception('Error make madevent_forhel not successful') + if not os.path.exists(pjoin(Pdir, 'Hel')): os.mkdir(pjoin(Pdir, 'Hel')) ff = open(pjoin(Pdir, 'Hel', 'input_app.txt'),'w') @@ -180,15 +180,15 @@ def get_helicity(self, to_submit=True, clean=True): try: os.remove(pjoin(Pdir, 'Hel','results.dat')) except Exception: - pass + pass # Launch gensym - p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, + p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=pjoin(Pdir,'Hel'), shell=True) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(" ".encode()) stdout = stdout.decode('ascii',errors='ignore') if os.path.exists(pjoin(self.me_dir, 'error')): - raise Exception(pjoin(self.me_dir,'error')) + raise Exception(pjoin(self.me_dir,'error')) # note a continue is not enough here, we have in top to link # the matrixX_optim.f to matrixX_orig.f to let the code to work # after this error. 
@@ -203,7 +203,7 @@ def get_helicity(self, to_submit=True, clean=True): zero_gc = list() all_zampperhel = set() all_bad_amps_perhel = set() - + for line in stdout.splitlines(): if "=" not in line and ":" not in line: continue @@ -229,22 +229,22 @@ def get_helicity(self, to_submit=True, clean=True): "%s\n" % (' '.join(zero_gc)) +\ "This will slow down the computation. Please consider using restricted model:\n" +\ "https://answers.launchpad.net/mg5amcnlo/+faq/2312") - - + + all_good_hels = collections.defaultdict(list) for me_index, hel in all_hel: - all_good_hels[me_index].append(int(hel)) - + all_good_hels[me_index].append(int(hel)) + #print(all_hel) if self.run_card['hel_zeroamp']: all_bad_amps = collections.defaultdict(list) for me_index, amp in all_zamp: all_bad_amps[me_index].append(int(amp)) - + all_bad_amps_perhel = collections.defaultdict(list) for me_index, hel, amp in all_zampperhel: - all_bad_amps_perhel[me_index].append((int(hel),int(amp))) - + all_bad_amps_perhel[me_index].append((int(hel),int(amp))) + elif all_zamp: nb_zero = sum(int(a[1]) for a in all_zamp) if zero_gc: @@ -254,7 +254,7 @@ def get_helicity(self, to_submit=True, clean=True): else: logger.warning("The optimization detected that you have %i zero matrix-element for this SubProcess: %s.\n" % nb_zero +\ "This part can optimize if you set the flag hel_zeroamp to True in the run_card.") - + #check if we need to do something and write associate information" data = [all_hel, all_zamp, all_bad_amps_perhel] if not self.run_card['hel_zeroamp']: @@ -266,14 +266,14 @@ def get_helicity(self, to_submit=True, clean=True): old_data = open(pjoin(Pdir,'Hel','selection')).read() if old_data == data: continue - - + + with open(pjoin(Pdir,'Hel','selection'),'w') as fsock: - fsock.write(data) - - + fsock.write(data) + + for matrix_file in misc.glob('matrix*orig.f', Pdir): - + split_file = matrix_file.split('/') me_index = split_file[-1][len('matrix'):-len('_orig.f')] @@ -289,11 +289,11 @@ def 
get_helicity(self, to_submit=True, clean=True): #good_hels = sorted(list(good_hels)) good_hels = [str(x) for x in sorted(all_good_hels[me_index])] if self.run_card['hel_zeroamp']: - + bad_amps = [str(x) for x in sorted(all_bad_amps[me_index])] bad_amps_perhel = [x for x in sorted(all_bad_amps_perhel[me_index])] else: - bad_amps = [] + bad_amps = [] bad_amps_perhel = [] if __debug__: mtext = open(matrix_file).read() @@ -310,7 +310,7 @@ def get_helicity(self, to_submit=True, clean=True): recycler.set_input(matrix_file) recycler.set_output(out_file) - recycler.set_template(templ_file) + recycler.set_template(templ_file) recycler.generate_output_file() del recycler @@ -321,19 +321,19 @@ def get_helicity(self, to_submit=True, clean=True): return {}, P_zero_result - + def launch(self, to_submit=True, clean=True): """ """ if not hasattr(self, 'subproc'): - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc - + P_zero_result = [] # check the number of times where they are no phase-space - + nb_tot_proc = len(subproc) - job_list = {} + job_list = {} for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.
(previous processes already running)' % \ (nb_proc+1,nb_tot_proc), level=None) @@ -341,7 +341,7 @@ def launch(self, to_submit=True, clean=True): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) logger.info(' %s ' % subdir) - + # clean previous run if clean: for match in misc.glob('*ajob*', Pdir): @@ -349,17 +349,17 @@ def launch(self, to_submit=True, clean=True): os.remove(match) for match in misc.glob('G*', Pdir): if os.path.exists(pjoin(match,'results.dat')): - os.remove(pjoin(match, 'results.dat')) + os.remove(pjoin(match, 'results.dat')) if os.path.exists(pjoin(match, 'ftn25')): - os.remove(pjoin(match, 'ftn25')) - + os.remove(pjoin(match, 'ftn25')) + #compile gensym self.cmd.compile(['gensym'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'gensym')): - raise Exception('Error make gensym not successful') - + raise Exception('Error make gensym not successful') + # Launch gensym - p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, + p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(''.encode()) @@ -367,8 +367,8 @@ def launch(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir,'error')): files.mv(pjoin(self.me_dir,'error'), pjoin(Pdir,'ajob.no_ps.log')) P_zero_result.append(subdir) - continue - + continue + jobs = stdout.split() job_list[Pdir] = jobs try: @@ -386,8 +386,8 @@ def launch(self, to_submit=True, clean=True): continue else: if done: - raise Exception('Parsing error in gensym: %s' % stdout) - job_list[Pdir] = l.split() + raise Exception('Parsing error in gensym: %s' % stdout) + job_list[Pdir] = l.split() done = True if not done: raise Exception('Parsing error in gensym: %s' % stdout) @@ -408,16 +408,16 @@ def launch(self, to_submit=True, clean=True): if to_submit: self.submit_to_cluster(job_list) job_list = {} - + return job_list, P_zero_result - + def resubmit(self, 
min_precision=1.0, resubmit_zero=False): """collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - + job_list, P_zero_result = self.launch(to_submit=False, clean=False) - + for P , jobs in dict(job_list).items(): misc.sprint(jobs) to_resub = [] @@ -434,7 +434,7 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): elif max(one_result.xerru, one_result.xerrc)/one_result.xsec > min_precision: to_resub.append(job) else: - to_resub.append(job) + to_resub.append(job) if to_resub: for G in to_resub: try: @@ -442,19 +442,19 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): except Exception as error: misc.sprint(error) pass - misc.sprint(to_resub) + misc.sprint(to_resub) self.submit_to_cluster({P: to_resub}) - - - - - - - - - - - + + + + + + + + + + + def submit_to_cluster(self, job_list): """ """ @@ -467,7 +467,7 @@ def submit_to_cluster(self, job_list): nexternal = self.cmd.proc_characteristics['nexternal'] current = open(pjoin(path, "nexternal.inc")).read() ext = re.search(r"PARAMETER \(NEXTERNAL=(\d+)\)", current).group(1) - + if self.run_card['job_strategy'] == 2: self.splitted_grid = 2 if nexternal == int(ext): @@ -498,18 +498,18 @@ def submit_to_cluster(self, job_list): return self.submit_to_cluster_no_splitting(job_list) else: return self.submit_to_cluster_splitted(job_list) - - + + def submit_to_cluster_no_splitting(self, job_list): """submit the survey without the parralelization. 
This is the old mode which is still usefull in single core""" - - # write the template file for the parameter file + + # write the template file for the parameter file self.write_parameter(parralelization=False, Pdirs=list(job_list.keys())) - - + + # launch the job with the appropriate grouping - for Pdir, jobs in job_list.items(): + for Pdir, jobs in job_list.items(): jobs = list(jobs) i=0 while jobs: @@ -518,16 +518,16 @@ def submit_to_cluster_no_splitting(self, job_list): for _ in range(self.combining_job_for_Pdir(Pdir)): if jobs: to_submit.append(jobs.pop(0)) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=to_submit, cwd=pjoin(self.me_dir,'SubProcesses' , Pdir)) - + def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): """prepare the input_file for submitting the channel""" - + if 'SubProcesses' not in Pdir: Pdir = pjoin(self.me_dir, 'SubProcesses', Pdir) @@ -535,8 +535,8 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): self.splitted_Pdir[(Pdir, G)] = int(nb_job) - # 1. write the new input_app.txt - run_card = self.cmd.run_card + # 1. write the new input_app.txt + run_card = self.cmd.run_card options = {'event' : submit_ps, 'maxiter': 1, 'miniter': 1, @@ -545,29 +545,29 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): else run_card['nhel'], 'gridmode': -2, 'channel' : G - } - + } + Gdir = pjoin(Pdir, 'G%s' % G) - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + # 2. check that ftn25 exists. - assert os.path.exists(pjoin(Gdir, "ftn25")) - - + assert os.path.exists(pjoin(Gdir, "ftn25")) + + # 3. 
Submit the new jobs #call back function - packet = cluster.Packet((Pdir, G, step+1), + packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, (Pdir, G, step+1)) - + if step ==0: - self.lastoffset[(Pdir, G)] = 0 - - # resubmit the new jobs + self.lastoffset[(Pdir, G)] = 0 + + # resubmit the new jobs for i in range(int(nb_job)): name = "G%s_%s" % (G,i+1) self.lastoffset[(Pdir, G)] += 1 - offset = self.lastoffset[(Pdir, G)] + offset = self.lastoffset[(Pdir, G)] self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'refine_splitted.sh'), argument=[name, 'G%s'%G, offset], cwd= Pdir, @@ -575,9 +575,9 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): def submit_to_cluster_splitted(self, job_list): - """ submit the version of the survey with splitted grid creation - """ - + """ submit the version of the survey with splitted grid creation + """ + #if self.splitted_grid <= 1: # return self.submit_to_cluster_no_splitting(job_list) @@ -592,7 +592,7 @@ def submit_to_cluster_splitted(self, job_list): for job in jobs: packet = cluster.Packet((Pdir, job, 1), self.combine_iteration, (Pdir, job, 1)) - for i in range(self.splitted_for_dir(Pdir, job)): + for i in range(self.splitted_for_dir(Pdir, job)): self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[i+1, job], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), @@ -601,15 +601,15 @@ def submit_to_cluster_splitted(self, job_list): def combine_iteration(self, Pdir, G, step): grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - - # Compute the number of events used for this run. + + # Compute the number of events used for this run. nb_events = grid_calculator.target_evt Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) - + # 4. 
make the submission of the next iteration # Three cases - less than 3 iteration -> continue # - more than 3 and less than 5 -> check error @@ -627,15 +627,15 @@ def combine_iteration(self, Pdir, G, step): need_submit = False else: need_submit = True - + elif step >= self.cmd.opts['iterations']: need_submit = False elif self.cmd.opts['accuracy'] < 0: #check for luminosity raise Exception("Not Implemented") elif self.abscross[(Pdir,G)] == 0: - need_submit = False - else: + need_submit = False + else: across = self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) tot_across = self.get_current_axsec() if across == 0: @@ -646,20 +646,20 @@ def combine_iteration(self, Pdir, G, step): need_submit = True else: need_submit = False - - + + if cross: grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_events,mode=self.mode, conservative_factor=5.0) - - xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) - if float(cross)!=0.0 and float(error)!=0.0 else 8) + + xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) + if float(cross)!=0.0 and float(error)!=0.0 else 8) if need_submit: message = "%%s/G%%s is at %%%s +- %%.3g pb. 
Now submitting iteration #%s."%(xsec_format, step+1) logger.info(message%\ - (os.path.basename(Pdir), G, float(cross), + (os.path.basename(Pdir), G, float(cross), float(error)*float(cross))) self.resubmit_survey(Pdir,G, Gdirs, step) elif cross: @@ -670,26 +670,26 @@ def combine_iteration(self, Pdir, G, step): newGpath = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(newGpath): os.mkdir(newGpath) - + # copy the new grid: - files.cp(pjoin(Gdirs[0], 'ftn25'), + files.cp(pjoin(Gdirs[0], 'ftn25'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, 'ftn26')) - + # copy the events fsock = open(pjoin(newGpath, 'events.lhe'), 'w') for Gdir in Gdirs: - fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) - + fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) + # copy one log - files.cp(pjoin(Gdirs[0], 'log.txt'), + files.cp(pjoin(Gdirs[0], 'log.txt'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G)) - - + + # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) else: logger.info("Survey finished for %s/G%s [0 cross]", os.path.basename(Pdir),G) - + Gdir = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(Gdir): os.mkdir(Gdir) @@ -697,21 +697,21 @@ def combine_iteration(self, Pdir, G, step): files.cp(pjoin(Gdirs[0], 'log.txt'), Gdir) # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) - + return 0 def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): """ exclude_sub_jobs is to remove some of the subjobs if a numerical issue is detected in one of them. Warning is issue when this occurs. """ - + # 1. 
create an object to combine the grid information and fill it grid_calculator = combine_grid.grid_information(self.run_card['nhel']) - + for i in range(self.splitted_for_dir(Pdir, G)): if i in exclude_sub_jobs: continue - path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) fsock = misc.mult_try_open(pjoin(path, 'results.dat')) one_result = grid_calculator.add_results_information(fsock) fsock.close() @@ -723,9 +723,9 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): fsock.close() os.remove(pjoin(path, 'results.dat')) #os.remove(pjoin(path, 'grid_information')) - - - + + + #2. combine the information about the total crossection / error # start by keep the interation in memory cross, across, sigma = grid_calculator.get_cross_section() @@ -736,12 +736,12 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): if maxwgt: nunwgt = grid_calculator.get_nunwgt(maxwgt) # Make sure not to apply the security below during the first step of the - # survey. Also, disregard channels with a contribution relative to the + # survey. Also, disregard channels with a contribution relative to the # total cross-section smaller than 1e-8 since in this case it is unlikely # that this channel will need more than 1 event anyway. 
apply_instability_security = False rel_contrib = 0.0 - if (self.__class__ != gensym or step > 1): + if (self.__class__ != gensym or step > 1): Pdir_across = 0.0 Gdir_across = 0.0 for (mPdir,mG) in self.abscross.keys(): @@ -750,7 +750,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): (self.sigma[(mPdir,mG)]+1e-99)) if mG == G: Gdir_across += (self.abscross[(mPdir,mG)]/ - (self.sigma[(mPdir,mG)]+1e-99)) + (self.sigma[(mPdir,mG)]+1e-99)) rel_contrib = abs(Gdir_across/(Pdir_across+1e-99)) if rel_contrib > (1.0e-8) and \ nunwgt < 2 and len(grid_calculator.results) > 1: @@ -770,14 +770,14 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): exclude_sub_jobs = list(exclude_sub_jobs) exclude_sub_jobs.append(th_maxwgt[-1][1]) grid_calculator.results.run_statistics['skipped_subchannel'] += 1 - + # Add some monitoring of the problematic events - gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) + gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) if os.path.isfile(pjoin(gPath,'events.lhe')): lhe_file = lhe_parser.EventFile(pjoin(gPath,'events.lhe')) discardedPath = pjoin(Pdir,'DiscardedUnstableEvents') if not os.path.exists(discardedPath): - os.mkdir(discardedPath) + os.mkdir(discardedPath) if os.path.isdir(discardedPath): # Keep only the event with a maximum weight, as it surely # is the problematic one. 
@@ -790,10 +790,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): lhe_file.close() evtRecord.write(pjoin(gPath,'events.lhe').read()) evtRecord.close() - + return self.combine_grid(Pdir, G, step, exclude_sub_jobs) - + if across !=0: if sigma != 0: self.cross[(Pdir,G)] += cross**3/sigma**2 @@ -814,10 +814,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): self.chi2[(Pdir,G)] = 0 cross = self.cross[(Pdir,G)] error = 0 - + else: error = 0 - + grid_calculator.results.compute_values(update_statistics=True) if (str(os.path.basename(Pdir)), G) in self.run_statistics: self.run_statistics[(str(os.path.basename(Pdir)), G)]\ @@ -825,8 +825,8 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): else: self.run_statistics[(str(os.path.basename(Pdir)), G)] = \ grid_calculator.results.run_statistics - - self.warnings_from_statistics(G, grid_calculator.results.run_statistics) + + self.warnings_from_statistics(G, grid_calculator.results.run_statistics) stats_msg = grid_calculator.results.run_statistics.nice_output( '/'.join([os.path.basename(Pdir),'G%s'%G])) @@ -836,7 +836,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): # Clean up grid_information to avoid border effects in case of a crash for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) - try: + try: os.remove(pjoin(path, 'grid_information')) except OSError as oneerror: if oneerror.errno != 2: @@ -850,7 +850,7 @@ def warnings_from_statistics(self,G,stats): return EPS_fraction = float(stats['exceptional_points'])/stats['n_madloop_calls'] - + msg = "Channel %s has encountered a fraction of %.3g\n"+ \ "of numerically unstable loop matrix element computations\n"+\ "(which could not be rescued using quadruple precision).\n"+\ @@ -861,16 +861,16 @@ def warnings_from_statistics(self,G,stats): elif EPS_fraction > 0.01: logger.critical((msg%(G,EPS_fraction)).replace('might', 'can')) raise Exception((msg%(G,EPS_fraction)).replace('might', 'can')) 
- + def get_current_axsec(self): - + across = 0 for (Pdir,G) in self.abscross: across += self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) return across - + def write_results(self, grid_calculator, cross, error, Pdir, G, step): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -888,7 +888,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step): maxwgt = grid_calculator.get_max_wgt() nunwgt = grid_calculator.get_nunwgt() luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -897,20 +897,20 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s %s 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross), fstr(maxwgt)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - + def resubmit_survey(self, Pdir, G, Gdirs, step): """submit the next iteration of the survey""" # 1. write the new input_app.txt to double the number of points - run_card = self.cmd.run_card + run_card = self.cmd.run_card options = {'event' : 2**(step) * self.cmd.opts['points'] / self.splitted_grid, 'maxiter': 1, 'miniter': 1, @@ -919,18 +919,18 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): else run_card['nhel'], 'gridmode': -2, 'channel' : '' - } - + } + if int(options['helicity']) == 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + for Gdir in Gdirs: - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + + #2. 
resubmit the new jobs packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, \ - (Pdir, G, step+1)) + (Pdir, G, step+1)) nb_step = len(Gdirs) * (step+1) for i,subdir in enumerate(Gdirs): subdir = subdir.rsplit('_',1)[1] @@ -938,34 +938,34 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): offset = nb_step+i+1 offset=str(offset) tag = "%s.%s" % (subdir, offset) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[tag, G], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), packet_member=packet) - + def write_parameter_file(self, path, options): """ """ - + template =""" %(event)s %(maxiter)s %(miniter)s !Number of events and max and min iterations %(accuracy)s !Accuracy %(gridmode)s !Grid Adjustment 0=none, 2=adjust 1 !Suppress Amplitude 1=yes %(helicity)s !Helicity Sum/event 0=exact - %(channel)s """ + %(channel)s """ options['event'] = int(options['event']) open(path, 'w').write(template % options) - - + + def write_parameter(self, parralelization, Pdirs=None): """Write the parameter of the survey run""" run_card = self.cmd.run_card - + options = {'event' : self.cmd.opts['points'], 'maxiter': self.cmd.opts['iterations'], 'miniter': self.min_iterations, @@ -975,36 +975,36 @@ def write_parameter(self, parralelization, Pdirs=None): 'gridmode': 2, 'channel': '' } - + if int(options['helicity'])== 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + if parralelization: options['gridmode'] = -2 options['maxiter'] = 1 #this is automatic in dsample anyway options['miniter'] = 1 #this is automatic in dsample anyway options['event'] /= self.splitted_grid - + if not Pdirs: Pdirs = self.subproc - + for Pdir in Pdirs: - path =pjoin(Pdir, 'input_app.txt') + path =pjoin(Pdir, 'input_app.txt') self.write_parameter_file(path, options) - - -class gen_ximprove(object): - - + + +class gen_ximprove(object): + + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the 
number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job @@ -1022,7 +1022,7 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_gridpack) elif cls.force_class == 'loop_induced': return super(gen_ximprove, cls).__new__(gen_ximprove_share) - + if cmd.proc_characteristics['loop_induced']: return super(gen_ximprove, cls).__new__(gen_ximprove_share) elif gen_ximprove.format_variable(cmd.run_card['gridpack'], bool): @@ -1031,31 +1031,31 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_share) else: return super(gen_ximprove, cls).__new__(gen_ximprove_v4) - - + + def __init__(self, cmd, opt=None): - + try: super(gen_ximprove, self).__init__(cmd, opt) except TypeError: pass - + self.run_statistics = {} self.cmd = cmd self.run_card = cmd.run_card run_card = self.run_card self.me_dir = cmd.me_dir - + #extract from the run_card the information that we need. 
self.gridpack = run_card['gridpack'] self.nhel = run_card['nhel'] if "nhel_refine" in run_card: self.nhel = run_card["nhel_refine"] - + if self.run_card['refine_evt_by_job'] != -1: self.max_request_event = run_card['refine_evt_by_job'] - - + + # Default option for the run self.gen_events = True self.parralel = False @@ -1066,7 +1066,7 @@ def __init__(self, cmd, opt=None): # parameter for the gridpack run self.nreq = 2000 self.iseed = 4321 - + # placeholder for information self.results = 0 #updated in launch/update_html @@ -1074,16 +1074,16 @@ def __init__(self, cmd, opt=None): self.configure(opt) elif isinstance(opt, bannermod.GridpackCard): self.configure_gridpack(opt) - + def __call__(self): return self.launch() - + def launch(self): - """running """ - + """running """ + #start the run self.handle_seed() - self.results = sum_html.collect_result(self.cmd, + self.results = sum_html.collect_result(self.cmd, main_dir=pjoin(self.cmd.me_dir,'SubProcesses')) #main_dir is for gridpack readonly mode if self.gen_events: # We run to provide a given number of events @@ -1095,15 +1095,15 @@ def launch(self): def configure(self, opt): """Defines some parameter of the run""" - + for key, value in opt.items(): if key in self.__dict__: targettype = type(getattr(self, key)) setattr(self, key, self.format_variable(value, targettype, key)) else: raise Exception('%s not define' % key) - - + + # special treatment always do outside the loop to avoid side effect if 'err_goal' in opt: if self.err_goal < 1: @@ -1113,24 +1113,24 @@ def configure(self, opt): logger.info("Generating %s unweighted events." 
% self.err_goal) self.gen_events = True self.err_goal = self.err_goal * self.gen_events_security # security - + def handle_seed(self): """not needed but for gridpack --which is not handle here for the moment""" return - - + + def find_job_for_event(self): """return the list of channel that need to be improved""" - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) - - goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 + + goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) - all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) - + all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) + to_refine = [] for C in all_channels: if C.get('axsec') == 0: @@ -1141,61 +1141,61 @@ def find_job_for_event(self): elif C.get('xerr') > max(C.get('axsec'), (1/(100*math.sqrt(self.err_goal)))*all_channels[-1].get('axsec')): to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 
'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru + - class gen_ximprove_v4(gen_ximprove): - + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + super(gen_ximprove_v4, self).__init__(cmd, opt) - + if cmd.opts['accuracy'] < cmd._survey_options['accuracy'][1]: self.increase_precision(cmd._survey_options['accuracy'][1]/cmd.opts['accuracy']) @@ -1203,7 +1203,7 @@ def reset_multijob(self): for path in misc.glob(pjoin('*', '*','multijob.dat'), pjoin(self.me_dir, 'SubProcesses')): open(path,'w').write('0\n') - + def write_multijob(self, Channel, nb_split): """ """ if nb_split <=1: @@ -1211,7 +1211,7 @@ def write_multijob(self, Channel, nb_split): f = open(pjoin(self.me_dir, 'SubProcesses', Channel.get('name'), 'multijob.dat'), 'w') f.write('%i\n' % nb_split) f.close() - + def increase_precision(self, rate=3): #misc.sprint(rate) if rate < 3: @@ -1222,25 +1222,25 @@ def increase_precision(self, rate=3): rate = rate -2 self.max_event_in_iter = int((rate+1) * 10000) self.min_events = int(rate+2) * 2500 - self.gen_events_security = 1 + 0.1 * (rate+2) - + 
self.gen_events_security = 1 + 0.1 * (rate+2) + if int(self.nhel) == 1: self.min_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//3) self.max_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//2) - - + + alphabet = "abcdefghijklmnopqrstuvwxyz" def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() #reset the potential multijob of previous run self.reset_multijob() - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. @@ -1257,17 +1257,17 @@ def get_job_for_event(self): else: for i in range(len(to_refine) //3): new_order.append(to_refine[i]) - new_order.append(to_refine[-2*i-1]) + new_order.append(to_refine[-2*i-1]) new_order.append(to_refine[-2*i-2]) if len(to_refine) % 3 == 1: - new_order.append(to_refine[i+1]) + new_order.append(to_refine[i+1]) elif len(to_refine) % 3 == 2: - new_order.append(to_refine[i+2]) + new_order.append(to_refine[i+2]) #ensure that the reordering is done nicely assert set([id(C) for C in to_refine]) == set([id(C) for C in new_order]) - to_refine = new_order - - + to_refine = new_order + + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target @@ -1279,7 +1279,7 @@ def get_job_for_event(self): nb_split = self.max_splitting nb_split=max(1, nb_split) - + #2. estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1296,21 +1296,21 @@ def get_job_for_event(self): nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. 
Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + # write the multi-job information self.write_multijob(C, nb_split) - + packet = cluster.Packet((C.parent_name, C.name), combine_runs.CombineRuns, (pjoin(self.me_dir, 'SubProcesses', C.parent_name)), {"subproc": C.name, "nb_split":nb_split}) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1321,7 +1321,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': packet, + 'packet': packet, } if nb_split == 1: @@ -1334,19 +1334,19 @@ def get_job_for_event(self): if self.keep_grid_for_refine: new_info['base_directory'] = info['directory'] jobs.append(new_info) - - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def create_ajob(self, template, jobs, write_dir=None): """create the ajob""" - + if not jobs: return if not write_dir: write_dir = pjoin(self.me_dir, 'SubProcesses') - + #filter the job according to their SubProcess directory # no mix submition P2job= collections.defaultdict(list) for j in jobs: @@ -1355,11 +1355,11 @@ def create_ajob(self, template, jobs, write_dir=None): for P in P2job.values(): self.create_ajob(template, P, write_dir) return - - + + #Here we can assume that all job are for the same directory. 
path = pjoin(write_dir, jobs[0]['P_dir']) - + template_text = open(template, 'r').read() # special treatment if needed to combine the script # computes how many submition miss one job @@ -1384,8 +1384,8 @@ def create_ajob(self, template, jobs, write_dir=None): skip1=0 combining_job =1 nb_sub = len(jobs) - - + + nb_use = 0 for i in range(nb_sub): script_number = i+1 @@ -1404,14 +1404,14 @@ def create_ajob(self, template, jobs, write_dir=None): info["base_directory"] = "./" fsock.write(template_text % info) nb_use += nb_job - + fsock.close() return script_number def get_job_for_precision(self): """create the ajob to achieve a give precision on the total cross-section""" - + assert self.err_goal <=1 xtot = abs(self.results.xsec) logger.info("Working on precision: %s %%" %(100*self.err_goal)) @@ -1428,46 +1428,46 @@ def get_job_for_precision(self): rerr *=rerr if not len(to_refine): return - - # change limit since most don't contribute + + # change limit since most don't contribute limit = math.sqrt((self.err_goal * xtot)**2 - rerr/math.sqrt(len(to_refine))) for C in to_refine[:]: cerr = C.mfactor*(C.xerru + len(to_refine)*C.xerrc) if cerr < limit: to_refine.remove(C) - + # all the channel are now selected. create the channel information logger.info('need to improve %s channels' % len(to_refine)) - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. # loop over the channel to refine for C in to_refine: - + #1. 
Determine how many events we need in each iteration yerr = C.mfactor*(C.xerru+len(to_refine)*C.xerrc) nevents = 0.2*C.nevents*(yerr/limit)**2 - + nb_split = int((nevents*(C.nunwgt/C.nevents)/self.max_request_event/ (2**self.min_iter-1))**(2/3)) nb_split = max(nb_split, 1) - # **(2/3) to slow down the increase in number of jobs + # **(2/3) to slow down the increase in number of jobs if nb_split > self.max_splitting: nb_split = self.max_splitting - + if nb_split >1: nevents = nevents / nb_split self.write_multijob(C, nb_split) # forbid too low/too large value nevents = min(self.min_event_in_iter, max(self.max_event_in_iter, nevents)) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1487,38 +1487,38 @@ def get_job_for_precision(self): new_info['offset'] = i+1 new_info['directory'] += self.alphabet[i % 26] + str((i+1)//26) jobs.append(new_info) - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 
'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru @@ -1528,27 +1528,27 @@ class gen_ximprove_v4_nogridupdate(gen_ximprove_v4): # some hardcoded value which impact the generation gen_events_security = 1.1 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 400 # split jobs if a channel if it needs more than that + max_request_event = 400 # split jobs if a channel if it needs more than that max_event_in_iter = 500 min_event_in_iter = 250 - max_splitting = 260 # maximum duplication of a given channel - min_iter = 2 + max_splitting = 260 # maximum duplication of a given channel + min_iter = 2 max_iter = 6 keep_grid_for_refine = True - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + gen_ximprove.__init__(cmd, opt) - + if cmd.proc_characteristics['loopinduced'] and \ cmd.proc_characteristics['nexternal'] > 2: self.increase_parralelization(cmd.proc_characteristics['nexternal']) - + def increase_parralelization(self, nexternal): - self.max_splitting = 1000 - + self.max_splitting = 1000 + if self.run_card['refine_evt_by_job'] != -1: pass elif nexternal == 3: @@ -1563,27 +1563,27 @@ def increase_parralelization(self, nexternal): class gen_ximprove_share(gen_ximprove, gensym): """Doing the refine in multicore. Each core handle a couple of PS point.""" - nb_ps_by_job = 2000 + nb_ps_by_job = 2000 mode = "refine" gen_events_security = 1.15 # Note the real security is lower since we stop the jobs if they are at 96% # of this target. 
def __init__(self, *args, **opts): - + super(gen_ximprove_share, self).__init__(*args, **opts) self.generated_events = {} self.splitted_for_dir = lambda x,y : self.splitted_Pdir[(x,y)] - + def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - + goal_lum, to_refine = self.find_job_for_event() self.goal_lum = goal_lum - + # loop over the channel to refine to find the number of PS point to launch total_ps_points = 0 channel_to_ps_point = [] @@ -1593,7 +1593,7 @@ def get_job_for_event(self): os.remove(pjoin(self.me_dir, "SubProcesses",C.parent_name, C.name, "events.lhe")) except: pass - + #1. Compute the number of points are needed to reach target needed_event = goal_lum*C.get('axsec') if needed_event == 0: @@ -1609,18 +1609,18 @@ def get_job_for_event(self): nb_split = 1 if nb_split > self.max_splitting: nb_split = self.max_splitting - nevents = self.max_event_in_iter * self.max_splitting + nevents = self.max_event_in_iter * self.max_splitting else: nevents = self.max_event_in_iter * nb_split if nevents > self.max_splitting*self.max_event_in_iter: logger.warning("Channel %s/%s has a very low efficiency of unweighting. 
Might not be possible to reach target" % \ (C.name, C.parent_name)) - nevents = self.max_event_in_iter * self.max_splitting - - total_ps_points += nevents - channel_to_ps_point.append((C, nevents)) - + nevents = self.max_event_in_iter * self.max_splitting + + total_ps_points += nevents + channel_to_ps_point.append((C, nevents)) + if self.cmd.options["run_mode"] == 1: if self.cmd.options["cluster_size"]: nb_ps_by_job = total_ps_points /int(self.cmd.options["cluster_size"]) @@ -1634,7 +1634,7 @@ def get_job_for_event(self): nb_ps_by_job = total_ps_points / self.cmd.options["nb_core"] else: nb_ps_by_job = self.nb_ps_by_job - + nb_ps_by_job = int(max(nb_ps_by_job, 500)) for C, nevents in channel_to_ps_point: @@ -1648,20 +1648,20 @@ def get_job_for_event(self): self.create_resubmit_one_iter(C.parent_name, C.name[1:], submit_ps, nb_job, step=0) needed_event = goal_lum*C.get('xsec') logger.debug("%s/%s : need %s event. Need %s split job of %s points", C.parent_name, C.name, needed_event, nb_job, submit_ps) - - + + def combine_iteration(self, Pdir, G, step): - + grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - + # collect all the generated_event Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) assert len(grid_calculator.results) == len(Gdirs) == self.splitted_for_dir(Pdir, G) - - + + # Check how many events are going to be kept after un-weighting. needed_event = cross * self.goal_lum if needed_event == 0: @@ -1671,19 +1671,19 @@ def combine_iteration(self, Pdir, G, step): if self.err_goal >=1: if needed_event > self.gen_events_security * self.err_goal: needed_event = int(self.gen_events_security * self.err_goal) - + if (Pdir, G) in self.generated_events: old_nunwgt, old_maxwgt = self.generated_events[(Pdir, G)] else: old_nunwgt, old_maxwgt = 0, 0 - + if old_nunwgt == 0 and os.path.exists(pjoin(Pdir,"G%s" % G, "events.lhe")): # possible for second refine. 
lhe = lhe_parser.EventFile(pjoin(Pdir,"G%s" % G, "events.lhe")) old_nunwgt = lhe.unweight(None, trunc_error=0.005, log_level=0) old_maxwgt = lhe.max_wgt - - + + maxwgt = max(grid_calculator.get_max_wgt(), old_maxwgt) new_evt = grid_calculator.get_nunwgt(maxwgt) @@ -1695,35 +1695,35 @@ def combine_iteration(self, Pdir, G, step): one_iter_nb_event = max(grid_calculator.get_nunwgt(),1) drop_previous_iteration = False # compare the number of events to generate if we discard the previous iteration - n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) n_target_combined = (needed_event-nunwgt) / efficiency if n_target_one_iter < n_target_combined: # the last iteration alone has more event that the combine iteration. - # it is therefore interesting to drop previous iteration. + # it is therefore interesting to drop previous iteration. drop_previous_iteration = True nunwgt = one_iter_nb_event maxwgt = grid_calculator.get_max_wgt() new_evt = nunwgt - efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) - + efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + try: if drop_previous_iteration: raise IOError output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'a') except IOError: output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'w') - + misc.call(["cat"] + [pjoin(d, "events.lhe") for d in Gdirs], stdout=output_file) output_file.close() # For large number of iteration. check the number of event by doing the # real unweighting. 
- if nunwgt < 0.6 * needed_event and step > self.min_iter: + if nunwgt < 0.6 * needed_event and step > self.min_iter: lhe = lhe_parser.EventFile(output_file.name) old_nunwgt =nunwgt nunwgt = lhe.unweight(None, trunc_error=0.01, log_level=0) - - + + self.generated_events[(Pdir, G)] = (nunwgt, maxwgt) # misc.sprint("Adding %s event to %s. Currently at %s" % (new_evt, G, nunwgt)) @@ -1742,21 +1742,21 @@ def combine_iteration(self, Pdir, G, step): nevents = grid_calculator.results[0].nevents if nevents == 0: # possible if some integral returns 0 nevents = max(g.nevents for g in grid_calculator.results) - + need_ps_point = (needed_event - nunwgt)/(efficiency+1e-99) - need_job = need_ps_point // nevents + 1 - + need_job = need_ps_point // nevents + 1 + if step < self.min_iter: # This is normal but check if we are on the good track - job_at_first_iter = nb_split_before/2**(step-1) + job_at_first_iter = nb_split_before/2**(step-1) expected_total_job = job_at_first_iter * (2**self.min_iter-1) done_job = job_at_first_iter * (2**step-1) expected_remaining_job = expected_total_job - done_job - logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) + logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) # increase if needed but not too much need_job = min(need_job, expected_remaining_job*1.25) - + nb_job = (need_job-0.5)//(2**(self.min_iter-step)-1) + 1 nb_job = max(1, nb_job) grid_calculator.write_grid_for_submission(Pdir,G, @@ -1768,7 +1768,7 @@ def combine_iteration(self, Pdir, G, step): nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) #self.create_job(Pdir, G, nb_job, nevents, step) - + elif step < self.max_iter: if step + 1 == self.max_iter: need_job = 1.20 * need_job # avoid to have just too few event. 
@@ -1777,21 +1777,21 @@ def combine_iteration(self, Pdir, G, step): grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_job*nevents ,mode=self.mode, conservative_factor=self.max_iter) - - + + logger.info("%s/G%s is at %i/%i ('%.2g%%') event. Resubmit %i job at iteration %i." \ % (os.path.basename(Pdir), G, int(nunwgt),int(needed_event)+1, (float(nunwgt)/needed_event)*100.0 if needed_event>0.0 else 0.0, nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) - - + + return 0 - - + + def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -1807,7 +1807,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency nevents = nunwgt # make the unweighting to compute the number of events: luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -1816,23 +1816,23 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s 0.0 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - - - + + + class gen_ximprove_gridpack(gen_ximprove_v4): - - min_iter = 1 + + min_iter = 1 max_iter = 13 - max_request_event = 1e12 # split jobs if a channel if it needs more than that + max_request_event = 1e12 # split jobs if a channel if it needs more than that max_event_in_iter = 4000 min_event_in_iter = 500 combining_job = sys.maxsize @@ -1844,7 +1844,7 @@ def __new__(cls, *args, **opts): return super(gen_ximprove_gridpack, cls).__new__(cls, *args, **opts) def __init__(self, *args, **opts): - + self.ngran = -1 self.gscalefact = {} self.readonly = False 
@@ -1855,23 +1855,23 @@ def __init__(self, *args, **opts): self.readonly = opts['readonly'] super(gen_ximprove_gridpack,self).__init__(*args, **opts) if self.ngran == -1: - self.ngran = 1 - + self.ngran = 1 + def find_job_for_event(self): """return the list of channel that need to be improved""" import random - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) self.gscalefact = {} - + xtot = self.results.axsec - goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 + goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 # logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) all_channels.sort(key=lambda x : x.get('luminosity'), reverse=True) - + to_refine = [] for C in all_channels: tag = C.get('name') @@ -1885,27 +1885,27 @@ def find_job_for_event(self): #need to generate events logger.debug('request events for ', C.get('name'), 'cross=', C.get('axsec'), 'needed events = ', goal_lum * C.get('axsec')) - to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + to_refine.append(C) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. - + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target needed_event = max(goal_lum*C.get('axsec'), self.ngran) nb_split = 1 - + #2. 
estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1920,13 +1920,13 @@ def get_job_for_event(self): # forbid too low/too large value nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': os.path.basename(C.parent_name), + 'P_dir': os.path.basename(C.parent_name), 'offset': 1, # need to be change for splitted job 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'nevents': nevents, #int(nevents*self.gen_events_security)+1, @@ -1938,7 +1938,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': None, + 'packet': None, } if self.readonly: @@ -1946,11 +1946,11 @@ def get_job_for_event(self): info['base_directory'] = basedir jobs.append(info) - - write_dir = '.' if self.readonly else None - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) - + + write_dir = '.' if self.readonly else None + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) + done = [] for j in jobs: if j['P_dir'] in done: @@ -1967,22 +1967,22 @@ def get_job_for_event(self): write_dir = '.' 
if self.readonly else pjoin(self.me_dir, 'SubProcesses') self.check_events(goal_lum, to_refine, jobs, write_dir) - + def check_events(self, goal_lum, to_refine, jobs, Sdir): """check that we get the number of requested events if not resubmit.""" - + new_jobs = [] - + for C, job_info in zip(to_refine, jobs): - P = job_info['P_dir'] + P = job_info['P_dir'] G = job_info['channel'] axsec = C.get('axsec') - requested_events= job_info['requested_event'] - + requested_events= job_info['requested_event'] + new_results = sum_html.OneResult((P,G)) new_results.read_results(pjoin(Sdir,P, 'G%s'%G, 'results.dat')) - + # need to resubmit? if new_results.get('nunwgt') < requested_events: pwd = pjoin(os.getcwd(),job_info['P_dir'],'G%s'%G) if self.readonly else \ @@ -1992,10 +1992,10 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): job_info['offset'] += 1 new_jobs.append(job_info) files.mv(pjoin(pwd, 'events.lhe'), pjoin(pwd, 'events.lhe.previous')) - + if new_jobs: - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) + done = [] for j in new_jobs: if j['P_dir'] in done: @@ -2015,9 +2015,9 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): files.put_at_end(pjoin(pwd, 'events.lhe'),pjoin(pwd, 'events.lhe.previous')) return self.check_events(goal_lum, to_refine, new_jobs, Sdir) - - - - + + + + diff --git a/epochX/cudacpp/gq_ttq.mad/bin/internal/madevent_interface.py b/epochX/cudacpp/gq_ttq.mad/bin/internal/madevent_interface.py index cb6bf4ca57..8abba3f33f 100755 --- a/epochX/cudacpp/gq_ttq.mad/bin/internal/madevent_interface.py +++ b/epochX/cudacpp/gq_ttq.mad/bin/internal/madevent_interface.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # 
automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,10 +53,10 @@ # Special logger for the Cmd Interface logger = logging.getLogger('madevent.stdout') # -> stdout logger_stderr = logging.getLogger('madevent.stderr') # ->stderr - + try: import madgraph -except ImportError as error: +except ImportError as error: # import from madevent directory MADEVENT = True import internal.extended_cmd as cmd @@ -92,7 +92,7 @@ import madgraph.various.lhe_parser as lhe_parser # import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code import models.check_param_card as check_param_card - from madgraph.iolibs.files import ln + from madgraph.iolibs.files import ln from madgraph import InvalidCmd, MadGraph5Error, MG5DIR, ReadWrite @@ -113,10 +113,10 @@ class CmdExtended(common_run.CommonRunCmd): next_possibility = { 'start': [], } - + debug_output = 'ME5_debug' error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n' - error_debug += 'More information is found in \'%(debug)s\'.\n' + error_debug += 'More information is found in \'%(debug)s\'.\n' error_debug += 'Please attach this file to your report.' 
config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n' @@ -124,18 +124,18 @@ class CmdExtended(common_run.CommonRunCmd): keyboard_stop_msg = """stopping all operation in order to quit MadGraph5_aMC@NLO please enter exit""" - + # Define the Error InvalidCmd = InvalidCmd ConfigurationError = MadGraph5Error def __init__(self, me_dir, options, *arg, **opt): """Init history and line continuation""" - + # Tag allowing/forbiding question self.force = False - - # If possible, build an info line with current version number + + # If possible, build an info line with current version number # and date, from the VERSION text file info = misc.get_pkg_info() info_line = "" @@ -150,7 +150,7 @@ def __init__(self, me_dir, options, *arg, **opt): else: version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip() info_line = "#* VERSION %s %s *\n" % \ - (version, (24 - len(version)) * ' ') + (version, (24 - len(version)) * ' ') # Create a header for the history file. # Remember to fill in time at writeout time! 
@@ -177,7 +177,7 @@ def __init__(self, me_dir, options, *arg, **opt): '#* run as ./bin/madevent.py filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - + if info_line: info_line = info_line[1:] @@ -203,11 +203,11 @@ def __init__(self, me_dir, options, *arg, **opt): "* *\n" + \ "************************************************************") super(CmdExtended, self).__init__(me_dir, options, *arg, **opt) - + def get_history_header(self): - """return the history header""" + """return the history header""" return self.history_header % misc.get_time_info() - + def stop_on_keyboard_stop(self): """action to perform to close nicely on a keyboard interupt""" try: @@ -219,20 +219,20 @@ def stop_on_keyboard_stop(self): self.add_error_log_in_html(KeyboardInterrupt) except: pass - + def postcmd(self, stop, line): """ Update the status of the run for finishing interactive command """ - - stop = super(CmdExtended, self).postcmd(stop, line) + + stop = super(CmdExtended, self).postcmd(stop, line) # relaxing the tag forbidding question self.force = False - + if not self.use_rawinput: return stop - + if self.results and not self.results.current: return stop - + arg = line.split() if len(arg) == 0: return stop @@ -240,41 +240,41 @@ def postcmd(self, stop, line): return stop if isinstance(self.results.status, str) and self.results.status == 'Stop by the user': self.update_status('%s Stop by the user' % arg[0], level=None, error=True) - return stop + return stop elif not self.results.status: return stop elif str(arg[0]) in ['exit','quit','EOF']: return stop - + try: - self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], + self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], level=None, error=True) except Exception: misc.sprint('update_status fails') pass - - + + def nice_user_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() - return cmd.Cmd.nice_user_error(self, error, line) - + return cmd.Cmd.nice_user_error(self, error, line) + def nice_config_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() stop = cmd.Cmd.nice_config_error(self, error, line) - - + + try: debug_file = open(self.debug_output, 'a') debug_file.write(open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat'))) debug_file.close() except: - pass + pass return stop - + def nice_error_handling(self, error, line): """If a ME run is currently running add a link in the html output""" @@ -294,7 +294,7 @@ def nice_error_handling(self, error, line): proc_card = pjoin(self.me_dir,'Cards','proc_card_mg5.dat') if os.path.exists(proc_card): self.banner.add(proc_card) - + out_dir = pjoin(self.me_dir, 'Events', self.run_name) if not os.path.isdir(out_dir): os.mkdir(out_dir) @@ -307,7 +307,7 @@ def nice_error_handling(self, error, line): else: pass else: - self.add_error_log_in_html() + self.add_error_log_in_html() stop = cmd.Cmd.nice_error_handling(self, error, line) try: debug_file = open(self.debug_output, 'a') @@ -316,14 +316,14 @@ def nice_error_handling(self, error, line): except: pass return stop - - + + #=============================================================================== # HelpToCmd #=============================================================================== class HelpToCmd(object): """ The Series of help routine for the MadEventCmd""" - + def help_pythia(self): logger.info("syntax: pythia [RUN] [--run_options]") logger.info("-- run pythia on RUN (current one by default)") @@ -352,29 +352,29 @@ def help_banner_run(self): logger.info(" Path should be the path of a valid banner.") 
logger.info(" RUN should be the name of a run of the current directory") self.run_options_help([('-f','answer all question by default'), - ('--name=X', 'Define the name associated with the new run')]) - + ('--name=X', 'Define the name associated with the new run')]) + def help_open(self): logger.info("syntax: open FILE ") logger.info("-- open a file with the appropriate editor.") logger.info(' If FILE belongs to index.html, param_card.dat, run_card.dat') logger.info(' the path to the last created/used directory is used') logger.info(' The program used to open those files can be chosen in the') - logger.info(' configuration file ./input/mg5_configuration.txt') - - + logger.info(' configuration file ./input/mg5_configuration.txt') + + def run_options_help(self, data): if data: logger.info('-- local options:') for name, info in data: logger.info(' %s : %s' % (name, info)) - + logger.info("-- session options:") - logger.info(" Note that those options will be kept for the current session") + logger.info(" Note that those options will be kept for the current session") logger.info(" --cluster : Submit to the cluster. 
Current cluster: %s" % self.options['cluster_type']) logger.info(" --multicore : Run in multi-core configuration") logger.info(" --nb_core=X : limit the number of core to use to X.") - + def help_generate_events(self): logger.info("syntax: generate_events [run_name] [options]",) @@ -398,16 +398,16 @@ def help_initMadLoop(self): logger.info(" -f : Bypass the edition of MadLoopParams.dat.",'$MG:color:BLUE') logger.info(" -r : Refresh of the existing filters (erasing them if already present).",'$MG:color:BLUE') logger.info(" --nPS= : Specify how many phase-space points should be tried to set up the filters.",'$MG:color:BLUE') - + def help_calculate_decay_widths(self): - + if self.ninitial != 1: logger.warning("This command is only valid for processes of type A > B C.") logger.warning("This command can not be run in current context.") logger.warning("") - + logger.info("syntax: calculate_decay_widths [run_name] [options])") logger.info("-- Calculate decay widths and enter widths and BRs in param_card") logger.info(" for a series of processes of type A > B C ...") @@ -428,8 +428,8 @@ def help_survey(self): logger.info("-- evaluate the different channel associate to the process") self.run_options_help([("--" + key,value[-1]) for (key,value) in \ self._survey_options.items()]) - - + + def help_restart_gridpack(self): logger.info("syntax: restart_gridpack --precision= --restart_zero") @@ -439,14 +439,14 @@ def help_launch(self): logger.info("syntax: launch [run_name] [options])") logger.info(" --alias for either generate_events/calculate_decay_widths") logger.info(" depending of the number of particles in the initial state.") - + if self.ninitial == 1: logger.info("For this directory this is equivalent to calculate_decay_widths") self.help_calculate_decay_widths() else: logger.info("For this directory this is equivalent to $generate_events") self.help_generate_events() - + def help_refine(self): logger.info("syntax: refine require_precision [max_channel] [--run_options]") 
logger.info("-- refine the LAST run to achieve a given precision.") @@ -454,14 +454,14 @@ def help_refine(self): logger.info(' or the required relative error') logger.info(' max_channel:[5] maximal number of channel per job') self.run_options_help([]) - + def help_combine_events(self): """ """ logger.info("syntax: combine_events [run_name] [--tag=tag_name] [--run_options]") logger.info("-- Combine the last run in order to write the number of events") logger.info(" asked in the run_card.") self.run_options_help([]) - + def help_store_events(self): """ """ logger.info("syntax: store_events [--run_options]") @@ -481,7 +481,7 @@ def help_import(self): logger.info("syntax: import command PATH") logger.info("-- Execute the command present in the file") self.run_options_help([]) - + def help_syscalc(self): logger.info("syntax: syscalc [RUN] [%s] [-f | --tag=]" % '|'.join(self._plot_mode)) logger.info("-- calculate systematics information for the RUN (current run by default)") @@ -506,18 +506,18 @@ class AskRun(cmd.ControlSwitch): ('madspin', 'Decay onshell particles'), ('reweight', 'Add weights to events for new hypp.') ] - + def __init__(self, question, line_args=[], mode=None, force=False, *args, **opt): - + self.check_available_module(opt['mother_interface'].options) self.me_dir = opt['mother_interface'].me_dir super(AskRun,self).__init__(self.to_control, opt['mother_interface'], *args, **opt) - - + + def check_available_module(self, options): - + self.available_module = set() if options['pythia-pgs_path']: self.available_module.add('PY6') @@ -540,32 +540,32 @@ def check_available_module(self, options): self.available_module.add('Rivet') else: logger.warning("Rivet program installed but no parton shower with hepmc output detected.\n Please install pythia8") - + if not MADEVENT or ('mg5_path' in options and options['mg5_path']): self.available_module.add('MadSpin') if misc.has_f2py() or options['f2py_compiler']: self.available_module.add('reweight') -# old mode to 
activate the shower +# old mode to activate the shower def ans_parton(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if value is None: self.set_all_off() else: logger.warning('Invalid command: parton=%s' % value) - - + + # -# HANDLING SHOWER +# HANDLING SHOWER # def get_allowed_shower(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_shower'): return self.allowed_shower - + self.allowed_shower = [] if 'PY6' in self.available_module: self.allowed_shower.append('Pythia6') @@ -574,9 +574,9 @@ def get_allowed_shower(self): if self.allowed_shower: self.allowed_shower.append('OFF') return self.allowed_shower - + def set_default_shower(self): - + if 'PY6' in self.available_module and\ os.path.exists(pjoin(self.me_dir,'Cards','pythia_card.dat')): self.switch['shower'] = 'Pythia6' @@ -590,10 +590,10 @@ def set_default_shower(self): def check_value_shower(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_shower(): return True - + value =value.lower() if value in ['py6','p6','pythia_6'] and 'PY6' in self.available_module: return 'Pythia6' @@ -601,13 +601,13 @@ def check_value_shower(self, value): return 'Pythia8' else: return False - - -# old mode to activate the shower + + +# old mode to activate the shower def ans_pythia(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if 'PY6' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return @@ -621,13 +621,13 @@ def ans_pythia(self, value=None): self.set_switch('shower', 'OFF') else: logger.warning('Invalid command: pythia=%s' % value) - - + + def consistency_shower_detector(self, vshower, vdetector): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower == 'OFF': @@ -635,35 +635,35 @@ def consistency_shower_detector(self, vshower, vdetector): return 'OFF' if vshower == 'Pythia8' and vdetector == 'PGS': return 'OFF' - + return None - + # # HANDLING DETECTOR # def get_allowed_detector(self): """return valid entry for the switch""" - + if hasattr(self, 'allowed_detector'): - return self.allowed_detector - + return self.allowed_detector + self.allowed_detector = [] if 'PGS' in self.available_module: self.allowed_detector.append('PGS') if 'Delphes' in self.available_module: self.allowed_detector.append('Delphes') - + if self.allowed_detector: self.allowed_detector.append('OFF') - return self.allowed_detector + return self.allowed_detector def set_default_detector(self): - + self.set_default_shower() #ensure that this one is called first! - + if 'PGS' in self.available_module and self.switch['shower'] == 'Pythia6'\ and os.path.exists(pjoin(self.me_dir,'Cards','pgs_card.dat')): self.switch['detector'] = 'PGS' @@ -674,16 +674,16 @@ def set_default_detector(self): self.switch['detector'] = 'OFF' else: self.switch['detector'] = 'Not Avail.' - -# old mode to activate pgs + +# old mode to activate pgs def ans_pgs(self, value=None): """None: means that the user type 'pgs' - value: means that the user type pgs=value""" - + value: means that the user type pgs=value""" + if 'PGS' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return - + if value is None: self.set_all_off() self.switch['shower'] = 'Pythia6' @@ -696,16 +696,16 @@ def ans_pgs(self, value=None): else: logger.warning('Invalid command: pgs=%s' % value) - + # old mode to activate Delphes def ans_delphes(self, value=None): """None: means that the user type 'delphes' - value: means that the user type delphes=value""" - + value: means that the user type delphes=value""" + if 'Delphes' not in self.available_module: logger.warning('Delphes not available. Ignore commmand') return - + if value is None: self.set_all_off() if 'PY6' in self.available_module: @@ -718,15 +718,15 @@ def ans_delphes(self, value=None): elif value == 'off': self.set_switch('detector', 'OFF') else: - logger.warning('Invalid command: pgs=%s' % value) + logger.warning('Invalid command: pgs=%s' % value) def consistency_detector_shower(self,vdetector, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ - + if vdetector == 'PGS' and vshower != 'Pythia6': return 'Pythia6' if vdetector == 'Delphes' and vshower not in ['Pythia6', 'Pythia8']: @@ -744,28 +744,28 @@ def consistency_detector_shower(self,vdetector, vshower): # def get_allowed_analysis(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_analysis'): return self.allowed_analysis - + self.allowed_analysis = [] if 'ExRoot' in self.available_module: self.allowed_analysis.append('ExRoot') if 'MA4' in self.available_module: self.allowed_analysis.append('MadAnalysis4') if 'MA5' in self.available_module: - self.allowed_analysis.append('MadAnalysis5') + self.allowed_analysis.append('MadAnalysis5') if 'Rivet' in self.available_module: - self.allowed_analysis.append('Rivet') - + self.allowed_analysis.append('Rivet') + if self.allowed_analysis: self.allowed_analysis.append('OFF') - + return 
self.allowed_analysis - + def check_analysis(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_analysis(): return True if value.lower() in ['ma4', 'madanalysis4', 'madanalysis_4','4']: @@ -786,30 +786,30 @@ def consistency_shower_analysis(self, vshower, vanalysis): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'OFF' #new value for analysis - + return None - + def consistency_analysis_shower(self, vanalysis, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'Pythia8' #new value for analysis - + return None def set_default_analysis(self): """initialise the switch for analysis""" - + if 'MA4' in self.available_module and \ os.path.exists(pjoin(self.me_dir,'Cards','plot_card.dat')): self.switch['analysis'] = 'MadAnalysis4' @@ -818,46 +818,46 @@ def set_default_analysis(self): or os.path.exists(pjoin(self.me_dir,'Cards', 'madanalysis5_hadron_card.dat'))): self.switch['analysis'] = 'MadAnalysis5' elif 'ExRoot' in self.available_module: - self.switch['analysis'] = 'ExRoot' - elif self.get_allowed_analysis(): + self.switch['analysis'] = 'ExRoot' + elif self.get_allowed_analysis(): self.switch['analysis'] = 'OFF' else: self.switch['analysis'] = 'Not Avail.' 
- + # # MADSPIN handling # def get_allowed_madspin(self): """ ON|OFF|onshell """ - + if hasattr(self, 'allowed_madspin'): return self.allowed_madspin - + self.allowed_madspin = [] if 'MadSpin' in self.available_module: self.allowed_madspin = ['OFF',"ON",'onshell',"full"] return self.allowed_madspin - + def check_value_madspin(self, value): """handle alias and valid option not present in get_allowed_madspin""" - + if value.upper() in self.get_allowed_madspin(): return True elif value.lower() in self.get_allowed_madspin(): return True - + if 'MadSpin' not in self.available_module: return False - + if value.lower() in ['madspin', 'full']: return 'full' elif value.lower() in ['none']: return 'none' - - + + def set_default_madspin(self): """initialise the switch for madspin""" - + if 'MadSpin' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): self.switch['madspin'] = 'ON' @@ -865,10 +865,10 @@ def set_default_madspin(self): self.switch['madspin'] = 'OFF' else: self.switch['madspin'] = 'Not Avail.' 
- + def get_cardcmd_for_madspin(self, value): """set some command to run before allowing the user to modify the cards.""" - + if value == 'onshell': return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"] elif value in ['full', 'madspin']: @@ -877,36 +877,36 @@ def get_cardcmd_for_madspin(self, value): return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"] else: return [] - + # # ReWeight handling # def get_allowed_reweight(self): """ return the list of valid option for reweight=XXX """ - + if hasattr(self, 'allowed_reweight'): return getattr(self, 'allowed_reweight') - + if 'reweight' not in self.available_module: self.allowed_reweight = [] return self.allowed_reweight = ['OFF', 'ON'] - + # check for plugin mode plugin_path = self.mother_interface.plugin_path opts = misc.from_plugin_import(plugin_path, 'new_reweight', warning=False) self.allowed_reweight += opts - + def set_default_reweight(self): """initialise the switch for reweight""" - + if 'reweight' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')): self.switch['reweight'] = 'ON' else: self.switch['reweight'] = 'OFF' else: - self.switch['reweight'] = 'Not Avail.' + self.switch['reweight'] = 'Not Avail.' 
#=============================================================================== # CheckValidForCmd @@ -916,14 +916,14 @@ class CheckValidForCmd(object): def check_banner_run(self, args): """check the validity of line""" - + if len(args) == 0: self.help_banner_run() raise self.InvalidCmd('banner_run requires at least one argument.') - + tag = [a[6:] for a in args if a.startswith('--tag=')] - - + + if os.path.exists(args[0]): type ='banner' format = self.detect_card_type(args[0]) @@ -931,7 +931,7 @@ def check_banner_run(self, args): raise self.InvalidCmd('The file is not a valid banner.') elif tag: args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) + (args[0], tag)) if not os.path.exists(args[0]): raise self.InvalidCmd('No banner associates to this name and tag.') else: @@ -939,7 +939,7 @@ def check_banner_run(self, args): type = 'run' banners = misc.glob('*_banner.txt', pjoin(self.me_dir,'Events', args[0])) if not banners: - raise self.InvalidCmd('No banner associates to this name.') + raise self.InvalidCmd('No banner associates to this name.') elif len(banners) == 1: args[0] = banners[0] else: @@ -947,8 +947,8 @@ def check_banner_run(self, args): tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners] tag = self.ask('which tag do you want to use?', tags[0], tags) args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) - + (args[0], tag)) + run_name = [arg[7:] for arg in args if arg.startswith('--name=')] if run_name: try: @@ -970,14 +970,14 @@ def check_banner_run(self, args): except Exception: pass self.set_run_name(name) - + def check_history(self, args): """check the validity of line""" - + if len(args) > 1: self.help_history() raise self.InvalidCmd('\"history\" command takes at most one argument') - + if not len(args): return elif args[0] != 'clean': @@ -985,16 +985,16 @@ def check_history(self, args): if dirpath and not os.path.exists(dirpath) or \ os.path.isdir(args[0]): raise 
self.InvalidCmd("invalid path %s " % dirpath) - + def check_save(self, args): """ check the validity of the line""" - + if len(args) == 0: args.append('options') if args[0] not in self._save_opts: raise self.InvalidCmd('wrong \"save\" format') - + if args[0] != 'options' and len(args) != 2: self.help_save() raise self.InvalidCmd('wrong \"save\" format') @@ -1003,7 +1003,7 @@ def check_save(self, args): if not os.path.exists(basename): raise self.InvalidCmd('%s is not a valid path, please retry' % \ args[1]) - + if args[0] == 'options': has_path = None for arg in args[1:]: @@ -1024,9 +1024,9 @@ def check_save(self, args): has_path = True if not has_path: if '--auto' in arg and self.options['mg5_path']: - args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) + args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) else: - args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) + args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) def check_set(self, args): """ check the validity of the line""" @@ -1039,20 +1039,20 @@ def check_set(self, args): self.help_set() raise self.InvalidCmd('Possible options for set are %s' % \ self._set_options) - + if args[0] in ['stdout_level']: if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \ and not args[1].isdigit(): raise self.InvalidCmd('output_level needs ' + \ - 'a valid level') - + 'a valid level') + if args[0] in ['timeout']: if not args[1].isdigit(): - raise self.InvalidCmd('timeout values should be a integer') - + raise self.InvalidCmd('timeout values should be a integer') + def check_open(self, args): """ check the validity of the line """ - + if len(args) != 1: self.help_open() raise self.InvalidCmd('OPEN command requires exactly one argument') @@ -1069,7 +1069,7 @@ def check_open(self, args): raise self.InvalidCmd('No MadEvent path defined. 
Unable to associate this name to a file') else: return True - + path = self.me_dir if os.path.isfile(os.path.join(path,args[0])): args[0] = os.path.join(path,args[0]) @@ -1078,7 +1078,7 @@ def check_open(self, args): elif os.path.isfile(os.path.join(path,'HTML',args[0])): args[0] = os.path.join(path,'HTML',args[0]) # special for card with _default define: copy the default and open it - elif '_card.dat' in args[0]: + elif '_card.dat' in args[0]: name = args[0].replace('_card.dat','_card_default.dat') if os.path.isfile(os.path.join(path,'Cards', name)): files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0])) @@ -1086,13 +1086,13 @@ def check_open(self, args): else: raise self.InvalidCmd('No default path for this file') elif not os.path.isfile(args[0]): - raise self.InvalidCmd('No default path for this file') - + raise self.InvalidCmd('No default path for this file') + def check_initMadLoop(self, args): """ check initMadLoop command arguments are valid.""" - + opt = {'refresh': False, 'nPS': None, 'force': False} - + for arg in args: if arg in ['-r','--refresh']: opt['refresh'] = True @@ -1105,14 +1105,14 @@ def check_initMadLoop(self, args): except ValueError: raise InvalidCmd("The number of attempts specified "+ "'%s' is not a valid integer."%n_attempts) - + return opt - + def check_treatcards(self, args): """check that treatcards arguments are valid [param|run|all] [--output_dir=] [--param_card=] [--run_card=] """ - + opt = {'output_dir':pjoin(self.me_dir,'Source'), 'param_card':pjoin(self.me_dir,'Cards','param_card.dat'), 'run_card':pjoin(self.me_dir,'Cards','run_card.dat'), @@ -1129,14 +1129,14 @@ def check_treatcards(self, args): if os.path.isfile(value): card_name = self.detect_card_type(value) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' % (card_name, key)) opt[key] = value elif 
os.path.isfile(pjoin(self.me_dir,value)): card_name = self.detect_card_type(pjoin(self.me_dir,value)) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' - % (card_name, key)) + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + % (card_name, key)) opt[key] = value else: raise self.InvalidCmd('No such file: %s ' % value) @@ -1154,14 +1154,14 @@ def check_treatcards(self, args): else: self.help_treatcards() raise self.InvalidCmd('Unvalid argument %s' % arg) - - return mode, opt - - + + return mode, opt + + def check_survey(self, args, cmd='survey'): """check that the argument for survey are valid""" - - + + self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) @@ -1183,41 +1183,41 @@ def check_survey(self, args, cmd='survey'): self.help_survey() raise self.InvalidCmd('Too many argument for %s command' % cmd) elif not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], None,'parton', True) args.pop(0) - + return True def check_generate_events(self, args): """check that the argument for generate_events are valid""" - + run = None if args and args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['auto','parton', 'pythia', 'pgs', 'delphes']: self.help_generate_events() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. 
Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + #if len(args) > 1: # self.help_generate_events() # raise self.InvalidCmd('Too many argument for generate_events command: %s' % cmd) - + return run def check_calculate_decay_widths(self, args): """check that the argument for calculate_decay_widths are valid""" - + if self.ninitial != 1: raise self.InvalidCmd('Can only calculate decay widths for decay processes A > B C ...') @@ -1232,7 +1232,7 @@ def check_calculate_decay_widths(self, args): if len(args) > 1: self.help_calculate_decay_widths() raise self.InvalidCmd('Too many argument for calculate_decay_widths command: %s' % cmd) - + return accuracy @@ -1241,25 +1241,25 @@ def check_multi_run(self, args): """check that the argument for survey are valid""" run = None - + if not len(args): self.help_multi_run() raise self.InvalidCmd("""multi_run command requires at least one argument for the number of times that it call generate_events command""") - + if args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['parton', 'pythia', 'pgs', 'delphes']: self.help_multi_run() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. 
To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + elif not args[0].isdigit(): self.help_multi_run() @@ -1267,7 +1267,7 @@ def check_multi_run(self, args): #pass nb run to an integer nb_run = args.pop(0) args.insert(0, int(nb_run)) - + return run @@ -1284,7 +1284,7 @@ def check_refine(self, args): self.help_refine() raise self.InvalidCmd('require_precision argument is require for refine cmd') - + if not self.run_name: if self.results.lastrun: self.set_run_name(self.results.lastrun) @@ -1296,17 +1296,17 @@ def check_refine(self, args): else: try: [float(arg) for arg in args] - except ValueError: - self.help_refine() + except ValueError: + self.help_refine() raise self.InvalidCmd('refine arguments are suppose to be number') - + return True - + def check_combine_events(self, arg): """ Check the argument for the combine events command """ - + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] elif not self.run_tag: @@ -1314,53 +1314,53 @@ def check_combine_events(self, arg): else: tag = self.run_tag self.run_tag = tag - + if len(arg) > 1: self.help_combine_events() raise self.InvalidCmd('Too many argument for combine_events command') - + if len(arg) == 1: self.set_run_name(arg[0], self.run_tag, 'parton', True) - + if not self.run_name: if not self.results.lastrun: raise self.InvalidCmd('No run_name currently define. 
Unable to run combine') else: self.set_run_name(self.results.lastrun) - + return True - + def check_pythia(self, args): """Check the argument for pythia command - syntax: pythia [NAME] + syntax: pythia [NAME] Note that other option are already removed at this point """ - + mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia', 'pgs', 'delphes']: self.help_pythia() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') - + if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' raise self.InvalidCmd(error_msg) - - - + + + tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1368,8 +1368,8 @@ def check_pythia(self, args): if self.results.lastrun: args.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): @@ -1388,21 +1388,21 @@ def check_pythia(self, args): files.ln(input_file, os.path.dirname(output_file)) else: misc.gunzip(input_file, keep=True, stdout=output_file) - + args.append(mode) - + def check_pythia8(self, args): """Check the argument for pythia command - syntax: pythia8 [NAME] + syntax: pythia8 [NAME] Note that other option are already removed at this point - """ + """ mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia','pythia8','delphes']: self.help_pythia8() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') @@ -1410,7 +1410,7 @@ def check_pythia8(self, args): if not self.options['pythia8_path']: logger.info('Retry reading configuration file to find pythia8 path') self.set_configuration() - + if not self.options['pythia8_path'] or not \ os.path.exists(pjoin(self.options['pythia8_path'],'bin','pythia8-config')): error_msg = 'No valid pythia8 path set.\n' @@ -1421,7 +1421,7 @@ def check_pythia8(self, args): raise self.InvalidCmd(error_msg) tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1430,11 +1430,11 @@ def check_pythia8(self, args): args.insert(0, self.results.lastrun) else: raise self.InvalidCmd('No run name currently define. '+ - 'Please add this information.') - + 'Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ - not os.path.exists(pjoin(self.me_dir,'Events',args[0], + not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): raise self.InvalidCmd('No events file corresponding to %s run. 
' % args[0]) @@ -1451,9 +1451,9 @@ def check_pythia8(self, args): else: raise self.InvalidCmd('No event file corresponding to %s run. ' % self.run_name) - + args.append(mode) - + def check_remove(self, args): """Check that the remove command is valid""" @@ -1484,33 +1484,33 @@ def check_plot(self, args): madir = self.options['madanalysis_path'] td = self.options['td_path'] - + if not madir or not td: logger.info('Retry to read configuration file to find madanalysis/td') self.set_configuration() madir = self.options['madanalysis_path'] - td = self.options['td_path'] - + td = self.options['td_path'] + if not madir: error_msg = 'No valid MadAnalysis path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) + raise self.InvalidCmd(error_msg) if not td: error_msg = 'No valid td path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') + raise self.InvalidCmd('No run name currently define. Please add this information.') args.append('all') return - + if args[0] not in self._plot_mode: self.set_run_name(args[0], level='plot') del args[0] @@ -1518,45 +1518,45 @@ def check_plot(self, args): args.append('all') elif not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + for arg in args: if arg not in self._plot_mode and arg != self.run_name: self.help_plot() - raise self.InvalidCmd('unknown options %s' % arg) - + raise self.InvalidCmd('unknown options %s' % arg) + def check_syscalc(self, args): """Check the argument for the syscalc command syscalc run_name modes""" scdir = self.options['syscalc_path'] - + if not scdir: logger.info('Retry to read configuration file to find SysCalc') self.set_configuration() scdir = self.options['syscalc_path'] - + if not scdir: error_msg = 'No valid SysCalc path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' error_msg += 'Please note that you need to compile SysCalc first.' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. Please add this information.') args.append('all') return #deal options tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] - + if args[0] not in self._syscalc_mode: self.set_run_name(args[0], tag=tag, level='syscalc') del args[0] @@ -1564,61 +1564,61 @@ def check_syscalc(self, args): args.append('all') elif not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. 
Please add this information.') elif tag and tag != self.run_tag: self.set_run_name(self.run_name, tag=tag, level='syscalc') - + for arg in args: if arg not in self._syscalc_mode and arg != self.run_name: self.help_syscalc() - raise self.InvalidCmd('unknown options %s' % arg) + raise self.InvalidCmd('unknown options %s' % arg) if self.run_card['use_syst'] not in self.true: raise self.InvalidCmd('Run %s does not include ' % self.run_name + \ 'systematics information needed for syscalc.') - - + + def check_pgs(self, arg, no_default=False): """Check the argument for pythia command - syntax is "pgs [NAME]" + syntax is "pgs [NAME]" Note that other option are already remove at this point """ - + # If not pythia-pgs path if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] - - + + if len(arg) == 0 and not self.run_name: if self.results.lastrun: arg.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(arg) == 1 and self.run_name == arg[0]: arg.pop(0) - + if not len(arg) and \ not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): if not no_default: self.help_pgs() raise self.InvalidCmd('''No file file pythia_events.hep currently available Please specify a valid run_name''') - - lock = None + + lock = None if len(arg) == 1: prev_tag = self.set_run_name(arg[0], tag, 'pgs') if not os.path.exists(pjoin(self.me_dir,'Events',self.run_name,'%s_pythia_events.hep.gz' % prev_tag)): @@ -1626,25 +1626,25 @@ def check_pgs(self, arg, no_default=False): else: input_file = pjoin(self.me_dir,'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag) output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep') - lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), + lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), argument=['-c', input_file]) else: - if tag: + if tag: self.run_card['run_tag'] = tag self.set_run_name(self.run_name, tag, 'pgs') - - return lock + + return lock def check_display(self, args): """check the validity of line syntax is "display XXXXX" """ - + if len(args) < 1 or args[0] not in self._display_opts: self.help_display() raise self.InvalidCmd - + if args[0] == 'variable' and len(args) !=2: raise self.InvalidCmd('variable need a variable name') @@ -1654,39 +1654,39 @@ def check_display(self, args): def check_import(self, args): """check the validity of line""" - + if not args: self.help_import() raise self.InvalidCmd('wrong \"import\" format') - + if args[0] != 'command': args.insert(0,'command') - - + + if not len(args) == 2 or not os.path.exists(args[1]): raise self.InvalidCmd('PATH is mandatory for import command\n') - + #=============================================================================== # CompleteForCmd #=============================================================================== class CompleteForCmd(CheckValidForCmd): """ The Series of help 
routine for the MadGraphCmd""" - - + + def complete_banner_run(self, text, line, begidx, endidx, formatting=True): "Complete the banner run command" try: - - + + args = self.split_arg(line[0:begidx], error=False) - + if args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args \ - if a.endswith(os.path.sep)])) - - + if a.endswith(os.path.sep)])) + + if len(args) > 1: # only options are possible tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events' , args[1])) @@ -1697,9 +1697,9 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): else: return self.list_completion(text, tags) return self.list_completion(text, tags +['--name=','-f'], line) - + # First argument - possibilites = {} + possibilites = {} comp = self.path_completion(text, os.path.join('.',*[a for a in args \ if a.endswith(os.path.sep)])) @@ -1711,10 +1711,10 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): run_list = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) run_list = [n.rsplit('/',2)[1] for n in run_list] possibilites['RUN Name'] = self.list_completion(text, run_list) - + return self.deal_multiple_categories(possibilites, formatting) - - + + except Exception as error: print(error) @@ -1732,12 +1732,12 @@ def complete_history(self, text, line, begidx, endidx): if len(args) == 1: return self.path_completion(text) - - def complete_open(self, text, line, begidx, endidx): + + def complete_open(self, text, line, begidx, endidx): """ complete the open command """ args = self.split_arg(line[0:begidx]) - + # Directory continuation if os.path.sep in args[-1] + text: return self.path_completion(text, @@ -1751,10 +1751,10 @@ def complete_open(self, text, line, begidx, endidx): if os.path.isfile(os.path.join(path,'README')): possibility.append('README') if os.path.isdir(os.path.join(path,'Cards')): - possibility += [f for f in os.listdir(os.path.join(path,'Cards')) + 
possibility += [f for f in os.listdir(os.path.join(path,'Cards')) if f.endswith('.dat')] if os.path.isdir(os.path.join(path,'HTML')): - possibility += [f for f in os.listdir(os.path.join(path,'HTML')) + possibility += [f for f in os.listdir(os.path.join(path,'HTML')) if f.endswith('.html') and 'default' not in f] else: possibility.extend(['./','../']) @@ -1763,7 +1763,7 @@ def complete_open(self, text, line, begidx, endidx): if os.path.exists('MG5_debug'): possibility.append('MG5_debug') return self.list_completion(text, possibility) - + def complete_set(self, text, line, begidx, endidx): "Complete the set command" @@ -1784,27 +1784,27 @@ def complete_set(self, text, line, begidx, endidx): elif len(args) >2 and args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args if a.endswith(os.path.sep)]), - only_dirs = True) - + only_dirs = True) + def complete_survey(self, text, line, begidx, endidx): """ Complete the survey command """ - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + return self.list_completion(text, self._run_options, line) - + complete_refine = complete_survey complete_combine_events = complete_survey complite_store = complete_survey complete_generate_events = complete_survey complete_create_gridpack = complete_survey - + def complete_generate_events(self, text, line, begidx, endidx): """ Complete the generate events""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() @@ -1813,17 +1813,17 @@ def complete_generate_events(self, text, line, begidx, endidx): return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) def 
complete_initMadLoop(self, text, line, begidx, endidx): "Complete the initMadLoop command" - + numbers = [str(i) for i in range(10)] opts = ['-f','-r','--nPS='] - + args = self.split_arg(line[0:begidx], error=False) if len(line) >=6 and line[begidx-6:begidx]=='--nPS=': return self.list_completion(text, numbers, line) @@ -1840,18 +1840,18 @@ def complete_launch(self, *args, **opts): def complete_calculate_decay_widths(self, text, line, begidx, endidx): """ Complete the calculate_decay_widths command""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + opts = self._run_options + self._calculate_decay_options return self.list_completion(text, opts, line) - + def complete_display(self, text, line, begidx, endidx): - """ Complete the display command""" - + """ Complete the display command""" + args = self.split_arg(line[0:begidx], error=False) if len(args) >= 2 and args[1] =='results': start = line.find('results') @@ -1860,44 +1860,44 @@ def complete_display(self, text, line, begidx, endidx): def complete_multi_run(self, text, line, begidx, endidx): """complete multi run command""" - + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: data = [str(i) for i in range(0,20)] return self.list_completion(text, data, line) - + if line.endswith('run=') and not text: return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - - - + + + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - + def complete_plot(self, text, line, begidx, endidx): """ Complete 
the plot command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1: return self.list_completion(text, self._plot_mode) else: return self.list_completion(text, self._plot_mode + list(self.results.keys())) - + def complete_syscalc(self, text, line, begidx, endidx, formatting=True): """ Complete the syscalc command """ - + output = {} args = self.split_arg(line[0:begidx], error=False) - + if len(args) <=1: output['RUN_NAME'] = self.list_completion(list(self.results.keys())) output['MODE'] = self.list_completion(text, self._syscalc_mode) @@ -1907,12 +1907,12 @@ def complete_syscalc(self, text, line, begidx, endidx, formatting=True): if run in self.results: tags = ['--tag=%s' % tag['tag'] for tag in self.results[run]] output['options'] += tags - + return self.deal_multiple_categories(output, formatting) - + def complete_remove(self, text, line, begidx, endidx): """Complete the remove command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1 and (text.startswith('--t')): run = args[1] @@ -1932,8 +1932,8 @@ def complete_remove(self, text, line, begidx, endidx): data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1] for n in data] return self.list_completion(text, ['all'] + data) - - + + def complete_shower(self,text, line, begidx, endidx): "Complete the shower command" args = self.split_arg(line[0:begidx], error=False) @@ -1941,7 +1941,7 @@ def complete_shower(self,text, line, begidx, endidx): return self.list_completion(text, self._interfaced_showers) elif len(args)>1 and args[1] in self._interfaced_showers: return getattr(self, 'complete_%s' % text)\ - (text, args[1],line.replace(args[0]+' ',''), + (text, args[1],line.replace(args[0]+' ',''), begidx-len(args[0])-1, endidx-len(args[0])-1) def complete_pythia8(self,text, line, begidx, endidx): @@ -1955,11 +1955,11 @@ def complete_pythia8(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = 
self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_madanalysis5_parton(self,text, line, begidx, endidx): @@ -1978,19 +1978,19 @@ def complete_madanalysis5_parton(self,text, line, begidx, endidx): else: tmp2 = self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) - return tmp1 + tmp2 + return tmp1 + tmp2 elif '--MA5_stdout_lvl=' in line and not any(arg.startswith( '--MA5_stdout_lvl=') for arg in args): - return self.list_completion(text, - ['--MA5_stdout_lvl=%s'%opt for opt in + return self.list_completion(text, + ['--MA5_stdout_lvl=%s'%opt for opt in ['logging.INFO','logging.DEBUG','logging.WARNING', 'logging.CRITICAL','90']], line) else: - return self.list_completion(text, ['-f', + return self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) def complete_pythia(self,text, line, begidx, endidx): - "Complete the pythia command" + "Complete the pythia command" args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: @@ -2001,16 +2001,16 @@ def complete_pythia(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_pgs(self,text, line, begidx, endidx): "Complete the pythia command" - args = self.split_arg(line[0:begidx], error=False) + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: 
#return valid run_name data = misc.glob(pjoin('*', '*_pythia_events.hep.gz'), pjoin(self.me_dir, 'Events')) @@ -2019,23 +2019,23 @@ def complete_pgs(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--tag=' ,'--no_default'], line) - return tmp1 + tmp2 + return tmp1 + tmp2 else: - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--tag=','--no_default'], line) - complete_delphes = complete_pgs - complete_rivet = complete_pgs + complete_delphes = complete_pgs + complete_rivet = complete_pgs #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCmd): - """The command line processor of Mad Graph""" - + """The command line processor of Mad Graph""" + LO = True # Truth values @@ -2063,7 +2063,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm cluster_mode = 0 queue = 'madgraph' nb_core = None - + next_possibility = { 'start': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]', 'calculate_decay_widths [OPTIONS]', @@ -2080,9 +2080,9 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm 'pgs': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'], 'delphes' : ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'] } - + asking_for_run = AskRun - + ############################################################################ def __init__(self, me_dir = None, options={}, *completekey, **stdin): """ add information to the cmd """ @@ -2095,16 +2095,16 @@ def __init__(self, me_dir = None, options={}, *completekey, **stdin): if self.web: os.system('touch %s' % pjoin(self.me_dir,'Online')) - 
self.load_results_db() + self.load_results_db() self.results.def_web_mode(self.web) self.Gdirs = None - + self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) self.configured = 0 # time for reading the card self._options = {} # for compatibility with extended_cmd - - + + def pass_in_web_mode(self): """configure web data""" self.web = True @@ -2113,22 +2113,22 @@ def pass_in_web_mode(self): if os.environ['MADGRAPH_BASE']: self.options['mg5_path'] = pjoin(os.environ['MADGRAPH_BASE'],'MG5') - ############################################################################ + ############################################################################ def check_output_type(self, path): """ Check that the output path is a valid madevent directory """ - + bin_path = os.path.join(path,'bin') if os.path.isfile(os.path.join(bin_path,'generate_events')): return True - else: + else: return False ############################################################################ def set_configuration(self, amcatnlo=False, final=True, **opt): - """assign all configuration variable from file + """assign all configuration variable from file loop over the different config file if config_file not define """ - - super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, + + super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, final=final, **opt) if not final: @@ -2171,24 +2171,24 @@ def set_configuration(self, amcatnlo=False, final=True, **opt): if not os.path.exists(pjoin(path, 'sys_calc')): logger.info("No valid SysCalc path found") continue - # No else since the next line reinitialize the option to the + # No else since the next line reinitialize the option to the #previous value anyway self.options[key] = os.path.realpath(path) continue else: self.options[key] = None - - + + return self.options ############################################################################ - def do_banner_run(self, line): + def do_banner_run(self, line): """Make a run from the banner file""" - + 
args = self.split_arg(line) #check the validity of the arguments - self.check_banner_run(args) - + self.check_banner_run(args) + # Remove previous cards for name in ['delphes_trigger.dat', 'delphes_card.dat', 'pgs_card.dat', 'pythia_card.dat', 'madspin_card.dat', @@ -2197,20 +2197,20 @@ def do_banner_run(self, line): os.remove(pjoin(self.me_dir, 'Cards', name)) except Exception: pass - + banner_mod.split_banner(args[0], self.me_dir, proc_card=False) - + # Check if we want to modify the run if not self.force: ans = self.ask('Do you want to modify the Cards?', 'n', ['y','n']) if ans == 'n': self.force = True - + # Call Generate events self.exec_cmd('generate_events %s %s' % (self.run_name, self.force and '-f' or '')) - - - + + + ############################################################################ def do_display(self, line, output=sys.stdout): """Display current internal status""" @@ -2223,7 +2223,7 @@ def do_display(self, line, output=sys.stdout): #return valid run_name data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1:] for n in data] - + if data: out = {} for name, tag in data: @@ -2235,11 +2235,11 @@ def do_display(self, line, output=sys.stdout): print('the runs available are:') for run_name, tags in out.items(): print(' run: %s' % run_name) - print(' tags: ', end=' ') + print(' tags: ', end=' ') print(', '.join(tags)) else: print('No run detected.') - + elif args[0] == 'options': outstr = " Run Options \n" outstr += " ----------- \n" @@ -2260,8 +2260,8 @@ def do_display(self, line, output=sys.stdout): if value == default: outstr += " %25s \t:\t%s\n" % (key,value) else: - outstr += " %25s \t:\t%s (user set)\n" % (key,value) - outstr += "\n" + outstr += " %25s \t:\t%s (user set)\n" % (key,value) + outstr += "\n" outstr += " Configuration Options \n" outstr += " --------------------- \n" for key, default in self.options_configuration.items(): @@ -2275,15 +2275,15 @@ def do_display(self, line, 
output=sys.stdout): self.do_print_results(' '.join(args[1:])) else: super(MadEventCmd, self).do_display(line, output) - + def do_save(self, line, check=True, to_keep={}): - """Not in help: Save information to file""" + """Not in help: Save information to file""" args = self.split_arg(line) # Check argument validity if check: self.check_save(args) - + if args[0] == 'options': # First look at options which should be put in MG5DIR/input to_define = {} @@ -2295,7 +2295,7 @@ def do_save(self, line, check=True, to_keep={}): for key, default in self.options_madevent.items(): if self.options[key] != self.options_madevent[key]: to_define[key] = self.options[key] - + if '--all' in args: for key, default in self.options_madgraph.items(): if self.options[key] != self.options_madgraph[key]: @@ -2312,12 +2312,12 @@ def do_save(self, line, check=True, to_keep={}): filepath = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basefile = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basedir = self.me_dir - + if to_keep: to_define = to_keep self.write_configuration(filepath, basefile, basedir, to_define) - - + + def do_edit_cards(self, line): @@ -2326,80 +2326,80 @@ def do_edit_cards(self, line): # Check argument's validity mode = self.check_generate_events(args) self.ask_run_configuration(mode) - + return ############################################################################ - + ############################################################################ def do_restart_gridpack(self, line): """ syntax restart_gridpack --precision=1.0 --restart_zero collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) - + # initialize / remove lhapdf mode #self.run_card = 
banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) #self.configure_directory() - + gensym = gen_ximprove.gensym(self) - + min_precision = 1.0 resubmit_zero=False if '--precision=' in line: s = line.index('--precision=') + len('--precision=') arg=line[s:].split(1)[0] min_precision = float(arg) - + if '--restart_zero' in line: resubmit_zero = True - - + + gensym.resubmit(min_precision, resubmit_zero) self.monitor(run_type='All jobs submitted for gridpack', html=True) #will be done during the refine (more precisely in gen_ximprove) cross, error = sum_html.make_all_html_results(self) self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(gensym.run_statistics)) - + #self.exec_cmd('combine_events', postcmd=False) #self.exec_cmd('store_events', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) self.exec_cmd('create_gridpack', postcmd=False) - - - ############################################################################ + + + ############################################################################ ############################################################################ def do_generate_events(self, line): """Main Commands: launch the full chain """ - + self.banner = None self.Gdirs = None - + args = self.split_arg(line) # Check argument's validity mode = self.check_generate_events(args) switch_mode = self.ask_run_configuration(mode, args) if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir), None, 'parton') else: self.set_run_name(args[0], None, 'parton', True) args.pop(0) - + 
self.run_generate_events(switch_mode, args) self.postprocessing() @@ -2420,8 +2420,8 @@ def postprocessing(self): def rivet_postprocessing(self, rivet_config, postprocess_RIVET, postprocess_CONTUR): - # Check number of Rivet jobs to run - run_dirs = [pjoin(self.me_dir, 'Events',run_name) + # Check number of Rivet jobs to run + run_dirs = [pjoin(self.me_dir, 'Events',run_name) for run_name in self.postprocessing_dirs] nb_rivet = len(run_dirs) @@ -2550,10 +2550,10 @@ def wait_monitoring(Idle, Running, Done): wrapper = open(pjoin(self.me_dir, "Analysis", "contur", "run_contur.sh"), "w") wrapper.write(set_env) - + wrapper.write('{0}\n'.format(contur_cmd)) wrapper.close() - + misc.call(["run_contur.sh"], cwd=(pjoin(self.me_dir, "Analysis", "contur"))) logger.info("Contur outputs are stored in {0}".format(pjoin(self.me_dir, "Analysis", "contur","conturPlot"))) @@ -2572,7 +2572,7 @@ def run_generate_events(self, switch_mode, args): self.do_set('run_mode 2') self.do_set('nb_core 1') - if self.run_card['gridpack'] in self.true: + if self.run_card['gridpack'] in self.true: # Running gridpack warmup gridpack_opts=[('accuracy', 0.01), ('points', 2000), @@ -2593,7 +2593,7 @@ def run_generate_events(self, switch_mode, args): # Regular run mode logger.info('Generating %s events with run name %s' % (self.run_card['nevents'], self.run_name)) - + self.exec_cmd('survey %s %s' % (self.run_name,' '.join(args)), postcmd=False) nb_event = self.run_card['nevents'] @@ -2601,7 +2601,7 @@ def run_generate_events(self, switch_mode, args): self.exec_cmd('refine %s' % nb_event, postcmd=False) if not float(self.results.current['cross']): # Zero cross-section. Try to guess why - text = '''Survey return zero cross section. + text = '''Survey return zero cross section. Typical reasons are the following: 1) A massive s-channel particle has a width set to zero. 
2) The pdf are zero for at least one of the initial state particles @@ -2613,17 +2613,17 @@ def run_generate_events(self, switch_mode, args): raise ZeroResult('See https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/FAQ-General-14') else: bypass_run = True - + #we can bypass the following if scan and first result is zero if not bypass_run: self.exec_cmd('refine %s --treshold=%s' % (nb_event,self.run_card['second_refine_treshold']) , postcmd=False) - + self.exec_cmd('combine_events', postcmd=False,printcmd=False) self.print_results_in_shell(self.results.current) if self.run_card['use_syst']: - if self.run_card['systematics_program'] == 'auto': + if self.run_card['systematics_program'] == 'auto': scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): to_use = 'systematics' @@ -2634,26 +2634,26 @@ def run_generate_events(self, switch_mode, args): else: logger.critical('Unvalid options for systematics_program: bypass computation of systematics variations.') to_use = 'none' - + if to_use == 'systematics': if self.run_card['systematics_arguments'] != ['']: self.exec_cmd('systematics %s %s ' % (self.run_name, - ' '.join(self.run_card['systematics_arguments'])), + ' '.join(self.run_card['systematics_arguments'])), postcmd=False, printcmd=False) else: self.exec_cmd('systematics %s --from_card' % self.run_name, - postcmd=False,printcmd=False) + postcmd=False,printcmd=False) elif to_use == 'syscalc': self.run_syscalc('parton') - - - self.create_plot('parton') - self.exec_cmd('store_events', postcmd=False) + + + self.create_plot('parton') + self.exec_cmd('store_events', postcmd=False) if self.run_card['boost_event'].strip() and self.run_card['boost_event'] != 'False': self.boost_events() - - - self.exec_cmd('reweight -from_cards', postcmd=False) + + + self.exec_cmd('reweight -from_cards', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) if self.run_card['time_of_flight']>=0: self.exec_cmd("add_time_of_flight --threshold=%s" % 
self.run_card['time_of_flight'] ,postcmd=False) @@ -2664,43 +2664,43 @@ def run_generate_events(self, switch_mode, args): self.create_root_file(input , output) self.exec_cmd('madanalysis5_parton --no_default', postcmd=False, printcmd=False) - # shower launches pgs/delphes if needed + # shower launches pgs/delphes if needed self.exec_cmd('shower --no_default', postcmd=False, printcmd=False) self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) self.exec_cmd('rivet --no_default', postcmd=False, printcmd=False) self.store_result() - - if self.allow_notification_center: - misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), - '%s: %s +- %s ' % (self.results.current['run_name'], + + if self.allow_notification_center: + misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), + '%s: %s +- %s ' % (self.results.current['run_name'], self.results.current['cross'], self.results.current['error'])) - + def boost_events(self): - + if not self.run_card['boost_event']: return - + if self.run_card['boost_event'].startswith('lambda'): if not isinstance(self, cmd.CmdShell): raise Exception("boost not allowed online") filter = eval(self.run_card['boost_event']) else: raise Exception - + path = [pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')] - + for p in path: if os.path.exists(p): event_path = p break else: raise Exception("fail to find event file for the boost") - - + + lhe = lhe_parser.EventFile(event_path) with misc.TMP_directory() as tmp_dir: output = lhe_parser.EventFile(pjoin(tmp_dir, os.path.basename(event_path)), 'w') @@ -2711,28 +2711,28 @@ def boost_events(self): event.boost(filter) #write this modify event output.write(str(event)) - output.write('\n') + output.write('\n') lhe.close() - 
files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) - - - - - + files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) + + + + + def do_initMadLoop(self,line): - """Compile and run MadLoop for a certain number of PS point so as to + """Compile and run MadLoop for a certain number of PS point so as to initialize MadLoop (setup the zero helicity and loop filter.)""" - + args = line.split() # Check argument's validity options = self.check_initMadLoop(args) - + if not options['force']: self.ask_edit_cards(['MadLoopParams.dat'], mode='fixed', plot=False) self.exec_cmd('treatcards loop --no_MadLoopInit') if options['refresh']: - for filter in misc.glob('*Filter*', + for filter in misc.glob('*Filter*', pjoin(self.me_dir,'SubProcesses','MadLoop5_resources')): logger.debug("Resetting filter '%s'."%os.path.basename(filter)) os.remove(filter) @@ -2753,14 +2753,14 @@ def do_initMadLoop(self,line): def do_launch(self, line, *args, **opt): """Main Commands: exec generate_events for 2>N and calculate_width for 1>N""" - + if self.ninitial == 1: logger.info("Note that since 2.3. 
The launch for 1>N pass in event generation\n"+ " To have the previous behavior use the calculate_decay_widths function") # self.do_calculate_decay_widths(line, *args, **opt) #else: self.do_generate_events(line, *args, **opt) - + def print_results_in_shell(self, data): """Have a nice results prints in the shell, data should be of type: gen_crossxhtml.OneTagResults""" @@ -2770,7 +2770,7 @@ def print_results_in_shell(self, data): if data['run_statistics']: globalstat = sum_html.RunStatistics() - + logger.info(" " ) logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2786,13 +2786,13 @@ def print_results_in_shell(self, data): logger.warning(globalstat.get_warning_text()) logger.info(" ") - + logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) - + total_time = int(sum(_['cumulative_timing'] for _ in data['run_statistics'].values())) if total_time > 0: logger.info(" Cumulative sequential time for this run: %s"%misc.format_time(total_time)) - + if self.ninitial == 1: logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) else: @@ -2810,18 +2810,18 @@ def print_results_in_shell(self, data): if len(split)!=3: continue scale, cross, error = split - cross_sections[float(scale)] = (float(cross), float(error)) + cross_sections[float(scale)] = (float(cross), float(error)) if len(cross_sections)>0: logger.info(' Pythia8 merged cross-sections are:') for scale in sorted(cross_sections.keys()): logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ (scale,cross_sections[scale][0],cross_sections[scale][1])) - + else: if self.ninitial == 1: logger.info(" Matched width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) else: - logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) + logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) logger.info(" 
Nb of events after matching/merging : %d" % int(data['nb_event_pythia'])) if self.run_card['use_syst'] in self.true and \ (int(self.run_card['ickkw'])==1 or self.run_card['ktdurham']>0.0 @@ -2838,9 +2838,9 @@ def print_results_in_file(self, data, path, mode='w', format='full'): data should be of type: gen_crossxhtml.OneTagResults""" if not data: return - + fsock = open(path, mode) - + if data['run_statistics']: logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2851,7 +2851,7 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if format == "full": fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ (data['run_name'],data['tag'], os.path.basename(self.me_dir))) - + if self.ninitial == 1: fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) else: @@ -2861,20 +2861,20 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if self.ninitial == 1: fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) else: - fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) + fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) fsock.write(" \n" ) elif format == "short": if mode == "w": fsock.write("# run_name tag cross error Nb_event cross_after_matching nb_event_after matching\n") - + if data['cross_pythia'] and data['nb_event_pythia']: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s %(cross_pythia)s %(nb_event_pythia)s\n" else: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s\n" fsock.write(text % data) - - ############################################################################ + + ############################################################################ def do_calculate_decay_widths(self, 
line): """Main Commands: launch decay width calculation and automatic inclusion of calculated widths and BRs in the param_card.""" @@ -2887,21 +2887,21 @@ def do_calculate_decay_widths(self, line): self.Gdirs = None if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], reload_card=True) args.pop(0) self.configure_directory() - + # Running gridpack warmup opts=[('accuracy', accuracy), # default 0.01 ('points', 1000), ('iterations',9)] logger.info('Calculating decay widths with run name %s' % self.run_name) - + self.exec_cmd('survey %s %s' % \ (self.run_name, " ".join(['--' + opt + '=' + str(val) for (opt,val) \ @@ -2910,26 +2910,26 @@ def do_calculate_decay_widths(self, line): self.refine_mode = "old" # specify how to combine event self.exec_cmd('combine_events', postcmd=False) self.exec_cmd('store_events', postcmd=False) - + self.collect_decay_widths() self.print_results_in_shell(self.results.current) - self.update_status('calculate_decay_widths done', - level='parton', makehtml=False) + self.update_status('calculate_decay_widths done', + level='parton', makehtml=False) + - ############################################################################ def collect_decay_widths(self): - """ Collect the decay widths and calculate BRs for all particles, and put - in param_card form. + """ Collect the decay widths and calculate BRs for all particles, and put + in param_card form. """ - + particle_dict = {} # store the results run_name = self.run_name # Looping over the Subprocesses for P_path in SubProcesses.get_subP(self.me_dir): ids = SubProcesses.get_subP_ids(P_path) - # due to grouping we need to compute the ratio factor for the + # due to grouping we need to compute the ratio factor for the # ungroup resutls (that we need here). 
Note that initial particles # grouping are not at the same stage as final particle grouping nb_output = len(ids) / (len(set([p[0] for p in ids]))) @@ -2940,30 +2940,30 @@ def collect_decay_widths(self): particle_dict[particles[0]].append([particles[1:], result/nb_output]) except KeyError: particle_dict[particles[0]] = [[particles[1:], result/nb_output]] - + self.update_width_in_param_card(particle_dict, initial = pjoin(self.me_dir, 'Cards', 'param_card.dat'), output=pjoin(self.me_dir, 'Events', run_name, "param_card.dat")) - + @staticmethod def update_width_in_param_card(decay_info, initial=None, output=None): # Open the param_card.dat and insert the calculated decays and BRs - + if not output: output = initial - + param_card_file = open(initial) param_card = param_card_file.read().split('\n') param_card_file.close() decay_lines = [] line_number = 0 - # Read and remove all decays from the param_card + # Read and remove all decays from the param_card while line_number < len(param_card): line = param_card[line_number] if line.lower().startswith('decay'): - # Read decay if particle in decay_info - # DECAY 6 1.455100e+00 + # Read decay if particle in decay_info + # DECAY 6 1.455100e+00 line = param_card.pop(line_number) line = line.split() particle = 0 @@ -2996,7 +2996,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): break line=param_card[line_number] if particle and particle not in decay_info: - # No decays given, only total width + # No decays given, only total width decay_info[particle] = [[[], width]] else: # Not decay line_number += 1 @@ -3004,7 +3004,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): while not param_card[-1] or param_card[-1].startswith('#'): param_card.pop(-1) - # Append calculated and read decays to the param_card + # Append calculated and read decays to the param_card param_card.append("#\n#*************************") param_card.append("# Decay widths *") 
param_card.append("#*************************") @@ -3018,7 +3018,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): param_card.append("# BR NDA ID1 ID2 ...") brs = [[(val[1]/width).real, val[0]] for val in decay_info[key] if val[1]] for val in sorted(brs, reverse=True): - param_card.append(" %e %i %s # %s" % + param_card.append(" %e %i %s # %s" % (val[0].real, len(val[1]), " ".join([str(v) for v in val[1]]), val[0] * width @@ -3031,7 +3031,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): ############################################################################ def do_multi_run(self, line): - + args = self.split_arg(line) # Check argument's validity mode = self.check_multi_run(args) @@ -3047,7 +3047,7 @@ def do_multi_run(self, line): self.check_param_card(path, run=False) #store it locally to avoid relaunch param_card_iterator, self.param_card_iterator = self.param_card_iterator, [] - + crossoversig = 0 inv_sq_err = 0 nb_event = 0 @@ -3055,8 +3055,8 @@ def do_multi_run(self, line): self.nb_refine = 0 self.exec_cmd('generate_events %s_%s -f' % (main_name, i), postcmd=False) # Update collected value - nb_event += int(self.results[self.run_name][-1]['nb_event']) - self.results.add_detail('nb_event', nb_event , run=main_name) + nb_event += int(self.results[self.run_name][-1]['nb_event']) + self.results.add_detail('nb_event', nb_event , run=main_name) cross = self.results[self.run_name][-1]['cross'] error = self.results[self.run_name][-1]['error'] + 1e-99 crossoversig+=cross/error**2 @@ -3070,7 +3070,7 @@ def do_multi_run(self, line): os.mkdir(pjoin(self.me_dir,'Events', self.run_name)) except Exception: pass - os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' + os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' % {'bin': 
self.dirbin, 'event': pjoin(self.me_dir,'Events'), 'name': self.run_name}) @@ -3084,19 +3084,19 @@ def do_multi_run(self, line): self.create_root_file('%s/unweighted_events.lhe' % self.run_name, '%s/unweighted_events.root' % self.run_name) - - path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") + + path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") self.create_plot('parton', path, pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') ) - - if not os.path.exists('%s.gz' % path): + + if not os.path.exists('%s.gz' % path): misc.gzip(path) self.update_status('', level='parton') - self.print_results_in_shell(self.results.current) - + self.print_results_in_shell(self.results.current) + cpath = pjoin(self.me_dir,'Cards','param_card.dat') if param_card_iterator: @@ -3112,21 +3112,21 @@ def do_multi_run(self, line): path = pjoin(self.me_dir, 'Events','scan_%s.txt' % scan_name) logger.info("write all cross-section results in %s" % path, '$MG:BOLD') param_card_iterator.write_summary(path) - - ############################################################################ + + ############################################################################ def do_treatcards(self, line, mode=None, opt=None): """Advanced commands: create .inc files from param_card.dat/run_card.dat""" if not mode and not opt: args = self.split_arg(line) mode, opt = self.check_treatcards(args) - + # To decide whether to refresh MadLoop's helicity filters, it is necessary # to check if the model parameters where modified or not, before doing - # anything else. + # anything else. need_MadLoopFilterUpdate = False - # Just to record what triggered the reinitialization of MadLoop for a + # Just to record what triggered the reinitialization of MadLoop for a # nice debug message. 
type_of_change = '' if not opt['forbid_MadLoopInit'] and self.proc_characteristics['loop_induced'] \ @@ -3137,10 +3137,10 @@ def do_treatcards(self, line, mode=None, opt=None): (os.path.getmtime(paramDat)-os.path.getmtime(paramInc)) > 0.0: need_MadLoopFilterUpdate = True type_of_change = 'model' - + ML_in = pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat') ML_out = pjoin(self.me_dir,"SubProcesses", - "MadLoop5_resources", "MadLoopParams.dat") + "MadLoop5_resources", "MadLoopParams.dat") if (not os.path.isfile(ML_in)) or (not os.path.isfile(ML_out)) or \ (os.path.getmtime(ML_in)-os.path.getmtime(ML_out)) > 0.0: need_MadLoopFilterUpdate = True @@ -3148,7 +3148,7 @@ def do_treatcards(self, line, mode=None, opt=None): #check if no 'Auto' are present in the file self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) - + if mode in ['param', 'all']: model = self.find_model_name() tmp_model = os.path.basename(model) @@ -3160,9 +3160,9 @@ def do_treatcards(self, line, mode=None, opt=None): check_param_card.check_valid_param_card(mg5_param) opt['param_card'] = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') else: - check_param_card.check_valid_param_card(opt['param_card']) - - logger.debug('write compile file for card: %s' % opt['param_card']) + check_param_card.check_valid_param_card(opt['param_card']) + + logger.debug('write compile file for card: %s' % opt['param_card']) param_card = check_param_card.ParamCard(opt['param_card']) outfile = pjoin(opt['output_dir'], 'param_card.inc') ident_card = pjoin(self.me_dir,'Cards','ident_card.dat') @@ -3185,10 +3185,10 @@ def do_treatcards(self, line, mode=None, opt=None): devnull.close() default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat') - need_mp = self.proc_characteristics['loop_induced'] + need_mp = self.proc_characteristics['loop_induced'] param_card.write_inc_file(outfile, ident_card, default, need_mp=need_mp) - - + + if mode in ['run', 'all']: if not hasattr(self, 'run_card'): 
run_card = banner_mod.RunCard(opt['run_card'], path=pjoin(self.me_dir, 'Cards', 'run_card.dat')) @@ -3202,7 +3202,7 @@ def do_treatcards(self, line, mode=None, opt=None): run_card['lpp2'] = 0 run_card['ebeam1'] = 0 run_card['ebeam2'] = 0 - + # Ensure that the bias parameters has all the required input from the # run_card if run_card['bias_module'].lower() not in ['dummy','none']: @@ -3219,7 +3219,7 @@ def do_treatcards(self, line, mode=None, opt=None): mandatory_file,run_card['bias_module'])) misc.copytree(run_card['bias_module'], pjoin(self.me_dir,'Source','BIAS', os.path.basename(run_card['bias_module']))) - + #check expected parameters for the module. default_bias_parameters = {} start, last = False,False @@ -3244,50 +3244,50 @@ def do_treatcards(self, line, mode=None, opt=None): for pair in line.split(','): if not pair.strip(): continue - x,y =pair.split(':') + x,y =pair.split(':') x=x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y elif ':' in line: x,y = line.split(':') x = x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y for key,value in run_card['bias_parameters'].items(): if key not in default_bias_parameters: logger.warning('%s not supported by the bias module. We discard this entry.', key) else: default_bias_parameters[key] = value - run_card['bias_parameters'] = default_bias_parameters - - - # Finally write the include file + run_card['bias_parameters'] = default_bias_parameters + + + # Finally write the include file run_card.write_include_file(opt['output_dir']) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: - self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, + self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat')) # The writing out of MadLoop filter is potentially dangerous # when running in multi-core with a central disk. 
So it is turned - # off here. If these filters were not initialized then they will + # off here. If these filters were not initialized then they will # have to be re-computed at the beginning of each run. if 'WriteOutFilters' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('WriteOutFilters'): logger.info( -"""You chose to have MadLoop writing out filters. +"""You chose to have MadLoop writing out filters. Beware that this can be dangerous for local multicore runs.""") self.MadLoopparam.set('WriteOutFilters',False, changeifuserset=False) - + # The conservative settings below for 'CTModeInit' and 'ZeroThres' # help adress issues for processes like g g > h z, and g g > h g - # where there are some helicity configuration heavily suppressed - # (by several orders of magnitude) so that the helicity filter + # where there are some helicity configuration heavily suppressed + # (by several orders of magnitude) so that the helicity filter # needs high numerical accuracy to correctly handle this spread in # magnitude. Also, because one cannot use the Born as a reference - # scale, it is better to force quadruple precision *for the + # scale, it is better to force quadruple precision *for the # initialization points only*. This avoids numerical accuracy issues # when setting up the helicity filters and does not significantly # slow down the run. @@ -3298,21 +3298,21 @@ def do_treatcards(self, line, mode=None, opt=None): # It is a bit superficial to use the level 2 which tries to numerically # map matching helicities (because of CP symmetry typically) together. -# It is useless in the context of MC over helicities and it can +# It is useless in the context of MC over helicities and it can # potentially make the helicity double checking fail. self.MadLoopparam.set('HelicityFilterLevel',1, changeifuserset=False) # To be on the safe side however, we ask for 4 consecutive matching # helicity filters. 
self.MadLoopparam.set('CheckCycle',4, changeifuserset=False) - + # For now it is tricky to have each channel performing the helicity # double check. What we will end up doing is probably some kind # of new initialization round at the beginning of each launch - # command, to reset the filters. + # command, to reset the filters. self.MadLoopparam.set('DoubleCheckHelicityFilter',False, changeifuserset=False) - + # Thanks to TIR recycling, TIR is typically much faster for Loop-induced # processes when not doing MC over helicities, so that we place OPP last. if not hasattr(self, 'run_card'): @@ -3349,7 +3349,7 @@ def do_treatcards(self, line, mode=None, opt=None): logger.warning( """You chose to also use a lorentz rotation for stability tests (see parameter NRotations_[DP|QP]). Beware that, for optimization purposes, MadEvent uses manual TIR cache clearing which is not compatible - with the lorentz rotation stability test. The number of these rotations to be used will be reset to + with the lorentz rotation stability test. The number of these rotations to be used will be reset to zero by MadLoop. You can avoid this by changing the parameter 'FORCE_ML_HELICITY_SUM' int he matrix.f files to be .TRUE. so that the sum over helicity configurations is performed within MadLoop (in which case the helicity of final state particles cannot be speicfied in the LHE file.""") @@ -3363,15 +3363,15 @@ def do_treatcards(self, line, mode=None, opt=None): # self.MadLoopparam.set('NRotations_DP',0,changeifuserset=False) # Revert to the above to be slightly less robust but twice faster. 
self.MadLoopparam.set('NRotations_DP',1,changeifuserset=False) - self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) - + self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) + # Finally, the stability tests are slightly less reliable for process - # with less or equal than 4 final state particles because the + # with less or equal than 4 final state particles because the # accessible kinematic is very limited (i.e. lorentz rotations don't # shuffle invariants numerics much). In these cases, we therefore # increase the required accuracy to 10^-7. # This is important for getting g g > z z [QCD] working with a - # ptheavy cut as low as 1 GeV. + # ptheavy cut as low as 1 GeV. if self.proc_characteristics['nexternal']<=4: if ('MLStabThres' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('MLStabThres')>1.0e-7): @@ -3381,12 +3381,12 @@ def do_treatcards(self, line, mode=None, opt=None): than four external legs, so this is not recommended (especially not for g g > z z).""") self.MadLoopparam.set('MLStabThres',1.0e-7,changeifuserset=False) else: - self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) + self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) #write the output file self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses","MadLoop5_resources", "MadLoopParams.dat")) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: # Now Update MadLoop filters if necessary (if modifications were made to # the model parameters). 
@@ -3403,12 +3403,12 @@ def do_treatcards(self, line, mode=None, opt=None): elif not opt['forbid_MadLoopInit'] and \ MadLoopInitializer.need_MadLoopInit(self.me_dir): self.exec_cmd('initMadLoop -f') - - ############################################################################ + + ############################################################################ def do_survey(self, line): """Advanced commands: launch survey for the current process """ - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) @@ -3416,7 +3416,7 @@ def do_survey(self, line): if os.path.exists(pjoin(self.me_dir,'error')): os.remove(pjoin(self.me_dir,'error')) - + self.configure_directory() # Save original random number self.random_orig = self.random @@ -3435,9 +3435,9 @@ def do_survey(self, line): P_zero_result = [] # check the number of times where they are no phase-space # File for the loop (for loop induced) - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources')) and cluster.need_transfer(self.options): - tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', + tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', 'MadLoop5_resources.tar.gz'), 'w:gz', dereference=True) tf.add(pjoin(self.me_dir,'SubProcesses','MadLoop5_resources'), arcname='MadLoop5_resources') @@ -3467,7 +3467,7 @@ def do_survey(self, line): except Exception as error: logger.debug(error) pass - + jobs, P_zero_result = ajobcreator.launch() # Check if all or only some fails if P_zero_result: @@ -3481,60 +3481,60 @@ def do_survey(self, line): self.get_Gdir() for P in P_zero_result: self.Gdirs[0][pjoin(self.me_dir,'SubProcesses',P)] = [] - + self.monitor(run_type='All jobs submitted for survey', html=True) if not self.history or 'survey' in self.history[-1] or self.ninitial ==1 or \ self.run_card['gridpack']: #will be done during the refine (more precisely in gen_ximprove) cross, error = self.make_make_all_html_results() 
self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(ajobcreator.run_statistics)) self.update_status('End survey', 'parton', makehtml=False) ############################################################################ def pass_in_difficult_integration_mode(self, rate=1): """be more secure for the integration to not miss it due to strong cut""" - + # improve survey options if default if self.opts['points'] == self._survey_options['points'][1]: self.opts['points'] = (rate+2) * self._survey_options['points'][1] if self.opts['iterations'] == self._survey_options['iterations'][1]: self.opts['iterations'] = 1 + rate + self._survey_options['iterations'][1] if self.opts['accuracy'] == self._survey_options['accuracy'][1]: - self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) - + self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) + # Modify run_config.inc in order to improve the refine conf_path = pjoin(self.me_dir, 'Source','run_config.inc') files.cp(conf_path, conf_path + '.bk') # text = open(conf_path).read() - min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) - + min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) + text = re.sub('''\(min_events = \d+\)''', '(min_events = %i )' % min_evt, text) text = re.sub('''\(max_events = \d+\)''', '(max_events = %i )' % max_evt, text) fsock = open(conf_path, 'w') fsock.write(text) fsock.close() - + # Compile for name in ['../bin/internal/gen_ximprove', 'all']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) - - - ############################################################################ + + + ############################################################################ def 
do_refine(self, line): """Advanced commands: launch survey for the current process """ - devnull = open(os.devnull, 'w') + devnull = open(os.devnull, 'w') self.nb_refine += 1 args = self.split_arg(line) treshold=None - - + + for a in args: if a.startswith('--treshold='): treshold = float(a.split('=',1)[1]) @@ -3548,8 +3548,8 @@ def do_refine(self, line): break # Check argument's validity self.check_refine(args) - - refine_opt = {'err_goal': args[0], 'split_channels': True} + + refine_opt = {'err_goal': args[0], 'split_channels': True} precision = args[0] if len(args) == 2: refine_opt['max_process']= args[1] @@ -3560,15 +3560,15 @@ def do_refine(self, line): # Update random number self.update_random() self.save_random() - + if self.cluster_mode: logger.info('Creating Jobs') self.update_status('Refine results to %s' % precision, level=None) - + self.total_jobs = 0 - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + # cleanning the previous job for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() @@ -3589,14 +3589,14 @@ def do_refine(self, line): level = 5 if value.has_warning(): level = 10 - logger.log(level, + logger.log(level, value.nice_output(str('/'.join([key[0],'G%s'%key[1]]))). replace(' statistics','')) logger.debug(globalstat.nice_output('combined', no_warning=True)) - + if survey_statistics: x_improve.run_statistics = survey_statistics - + x_improve.launch() # create the ajob for the refinment. 
if not self.history or 'refine' not in self.history[-1]: cross, error = x_improve.update_html() #update html results for survey @@ -3610,9 +3610,9 @@ def do_refine(self, line): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) - + if os.path.exists(pjoin(Pdir, 'ajob1')): cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py @@ -3629,7 +3629,7 @@ def do_refine(self, line): ###self.compile(['all'], cwd=Pdir) alljobs = misc.glob('ajob*', Pdir) - + #remove associated results.dat (ensure to not mix with all data) Gre = re.compile("\s*j=(G[\d\.\w]+)") for job in alljobs: @@ -3637,49 +3637,49 @@ def do_refine(self, line): for Gdir in Gdirs: if os.path.exists(pjoin(Pdir, Gdir, 'results.dat')): os.remove(pjoin(Pdir, Gdir,'results.dat')) - - nb_tot = len(alljobs) + + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), - run_type='Refine number %s on %s (%s/%s)' % + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) - self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, html=True) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except Exception: pass - + if isinstance(x_improve, gen_ximprove.gen_ximprove_v4): # the merge of the events.lhe is handle in the x_improve class - # for splitted runs. (and partly in store_events). + # for splitted runs. (and partly in store_events). 
combine_runs.CombineRuns(self.me_dir) self.refine_mode = "old" else: self.refine_mode = "new" - + cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - self.results.add_detail('run_statistics', + self.results.add_detail('run_statistics', dict(self.results.get_detail('run_statistics'))) self.update_status('finish refine', 'parton', makehtml=False) devnull.close() - - ############################################################################ + + ############################################################################ def do_comine_iteration(self, line): """Not in help: Combine a given iteration combine_iteration Pdir Gdir S|R step - S is for survey + S is for survey R is for refine - step is the iteration number (not very critical)""" + step is the iteration number (not very critical)""" self.set_run_name("tmp") self.configure_directory(html_opening=False) @@ -3695,12 +3695,12 @@ def do_comine_iteration(self, line): gensym.combine_iteration(Pdir, Gdir, int(step)) elif mode == "R": refine = gen_ximprove.gen_ximprove_share(self) - refine.combine_iteration(Pdir, Gdir, int(step)) - - + refine.combine_iteration(Pdir, Gdir, int(step)) - - ############################################################################ + + + + ############################################################################ def do_combine_events(self, line): """Advanced commands: Launch combine events""" start=time.time() @@ -3710,11 +3710,11 @@ def do_combine_events(self, line): self.check_combine_events(args) self.update_status('Combining Events', level='parton') - + if self.run_card['gridpack'] and isinstance(self, GridPackCmd): return GridPackCmd.do_combine_events(self, line) - + # Define The Banner tag = self.run_card['run_tag'] # Update the banner with the pythia card @@ -3727,14 +3727,14 @@ def do_combine_events(self, line): self.banner.change_seed(self.random_orig) if not os.path.exists(pjoin(self.me_dir, 
'Events', self.run_name)): os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) - self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, + self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -3751,12 +3751,12 @@ def do_combine_events(self, line): os.remove(pjoin(Gdir, 'events.lhe')) continue - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec'), result.get('xerru'), result.get('axsec') ) - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.run_card['nevents']) @@ -3765,13 +3765,13 @@ def do_combine_events(self, line): AllEvent.add(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() if len(AllEvent) == 0: - nb_event = 0 + nb_event = 0 else: nb_event = AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.run_card['nevents'], @@ -3791,22 +3791,22 @@ def do_combine_events(self, line): os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) - + if self.run_card['bias_module'].lower() not in ['dummy', 'none'] and nb_event: self.correct_bias() elif 
self.run_card['custom_fcts']: self.correct_bias() logger.info("combination of events done in %s s ", time.time()-start) - + self.to_store.append('event') - - ############################################################################ + + ############################################################################ def correct_bias(self): - """check the first event and correct the weight by the bias + """check the first event and correct the weight by the bias and correct the cross-section. - If the event do not have the bias tag it means that the bias is + If the event do not have the bias tag it means that the bias is one modifying the cross-section/shape so we have nothing to do """ @@ -3834,7 +3834,7 @@ def correct_bias(self): output.write('') output.close() lhe.close() - + # MODIFY THE BANNER i.e. INIT BLOCK # ensure information compatible with normalisation choice total_cross = sum(cross[key] for key in cross) @@ -3846,8 +3846,8 @@ def correct_bias(self): elif self.run_card['event_norm'] == 'unity': total_cross = self.results.current['cross'] * total_cross / nb_event for key in cross: - cross[key] *= total_cross / nb_event - + cross[key] *= total_cross / nb_event + bannerfile = lhe_parser.EventFile(pjoin(self.me_dir, 'Events', self.run_name, '.banner.tmp.gz'),'w') banner = banner_mod.Banner(lhe.banner) banner.modify_init_cross(cross) @@ -3862,12 +3862,12 @@ def correct_bias(self): os.remove(lhe.name) os.remove(bannerfile.name) os.remove(output.name) - - + + self.results.current['cross'] = total_cross self.results.current['error'] = 0 - - ############################################################################ + + ############################################################################ def do_store_events(self, line): """Advanced commands: Launch store events""" @@ -3883,16 +3883,16 @@ def do_store_events(self, line): if not os.path.exists(pjoin(self.me_dir, 'Events', run)): os.mkdir(pjoin(self.me_dir, 'Events', run)) if not 
os.path.exists(pjoin(self.me_dir, 'HTML', run)): - os.mkdir(pjoin(self.me_dir, 'HTML', run)) - + os.mkdir(pjoin(self.me_dir, 'HTML', run)) + # 1) Store overall process information #input = pjoin(self.me_dir, 'SubProcesses', 'results.dat') #output = pjoin(self.me_dir, 'SubProcesses', '%s_results.dat' % run) - #files.cp(input, output) + #files.cp(input, output) # 2) Treat the files present in the P directory - # Ensure that the number of events is different of 0 + # Ensure that the number of events is different of 0 if self.results.current['nb_event'] == 0 and not self.run_card['gridpack']: logger.warning("No event detected. No cleaning performed! This should allow to run:\n" + " cd Subprocesses; ../bin/internal/combine_events\n"+ @@ -3910,18 +3910,18 @@ def do_store_events(self, line): # if os.path.exists(pjoin(G_path, 'results.dat')): # input = pjoin(G_path, 'results.dat') # output = pjoin(G_path, '%s_results.dat' % run) - # files.cp(input, output) + # files.cp(input, output) #except Exception: - # continue + # continue # Store log try: if os.path.exists(pjoin(G_path, 'log.txt')): input = pjoin(G_path, 'log.txt') output = pjoin(G_path, '%s_log.txt' % run) - files.mv(input, output) + files.mv(input, output) except Exception: continue - #try: + #try: # # Grid # for name in ['ftn26']: # if os.path.exists(pjoin(G_path, name)): @@ -3930,7 +3930,7 @@ def do_store_events(self, line): # input = pjoin(G_path, name) # output = pjoin(G_path, '%s_%s' % (run,name)) # files.mv(input, output) - # misc.gzip(pjoin(G_path, output), error=None) + # misc.gzip(pjoin(G_path, output), error=None) #except Exception: # continue # Delete ftn25 to ensure reproducible runs @@ -3940,11 +3940,11 @@ def do_store_events(self, line): # 3) Update the index.html self.gen_card_html() - + # 4) Move the Files present in Events directory E_path = pjoin(self.me_dir, 'Events') O_path = pjoin(self.me_dir, 'Events', run) - + # The events file for name in ['events.lhe', 'unweighted_events.lhe']: finput = 
pjoin(E_path, name) @@ -3960,30 +3960,30 @@ def do_store_events(self, line): # os.remove(pjoin(O_path, '%s.gz' % name)) # input = pjoin(E_path, name) ## output = pjoin(O_path, name) - + self.update_status('End Parton', level='parton', makehtml=False) devnull.close() - - - ############################################################################ + + + ############################################################################ def do_create_gridpack(self, line): """Advanced commands: Create gridpack from present run""" self.update_status('Creating gridpack', level='parton') # compile gen_ximprove misc.compile(['../bin/internal/gen_ximprove'], cwd=pjoin(self.me_dir, "Source")) - + Gdir = self.get_Gdir() Pdir = set([os.path.dirname(G) for G in Gdir]) - for P in Pdir: + for P in Pdir: allG = misc.glob('G*', path=P) for G in allG: if pjoin(P, G) not in Gdir: logger.debug('removing %s', pjoin(P,G)) shutil.rmtree(pjoin(P,G)) - - + + args = self.split_arg(line) self.check_combine_events(args) if not self.run_tag: self.run_tag = 'tag_1' @@ -3996,13 +3996,13 @@ def do_create_gridpack(self, line): cwd=self.me_dir) misc.call(['./bin/internal/clean'], cwd=self.me_dir) misc.call(['./bin/internal/make_gridpack'], cwd=self.me_dir) - files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), + files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), pjoin(self.me_dir, '%s_gridpack.tar.gz' % self.run_name)) os.system("sed -i.bak \"s/\s*.true.*=.*GridRun/ .false. 
= GridRun/g\" %s/Cards/grid_card.dat" \ % self.me_dir) self.update_status('gridpack created', level='gridpack') - - ############################################################################ + + ############################################################################ def do_shower(self, line): """launch the shower""" @@ -4010,7 +4010,7 @@ def do_shower(self, line): if len(args)>1 and args[0] in self._interfaced_showers: chosen_showers = [args.pop(0)] elif '--no_default' in line: - # If '--no_default' was specified in the arguments, then only one + # If '--no_default' was specified in the arguments, then only one # shower will be run, depending on which card is present. # but we each of them are called. (each of them check if the file exists) chosen_showers = list(self._interfaced_showers) @@ -4021,9 +4021,9 @@ def do_shower(self, line): shower_priority = ['pythia8','pythia'] chosen_showers = [sorted(chosen_showers,key=lambda sh: shower_priority.index(sh) if sh in shower_priority else len(shower_priority)+1)[0]] - + for shower in chosen_showers: - self.exec_cmd('%s %s'%(shower,' '.join(args)), + self.exec_cmd('%s %s'%(shower,' '.join(args)), postcmd=False, printcmd=False) def do_madanalysis5_parton(self, line): @@ -4039,11 +4039,11 @@ def do_madanalysis5_parton(self, line): def mg5amc_py8_interface_consistency_warning(options): """ Check the consistency of the mg5amc_py8_interface installed with the current MG5 and Pythia8 versions. 
""" - + # All this is only relevant is Pythia8 is interfaced to MG5 if not options['pythia8_path']: return None - + if not options['mg5amc_py8_interface_path']: return \ """ @@ -4053,7 +4053,7 @@ def mg5amc_py8_interface_consistency_warning(options): Consider installing the MG5_aMC-PY8 interface with the following command: MG5_aMC>install mg5amc_py8_interface """ - + mg5amc_py8_interface_path = options['mg5amc_py8_interface_path'] py8_path = options['pythia8_path'] # If the specified interface path is relative, make it absolut w.r.t MGDIR if @@ -4062,7 +4062,7 @@ def mg5amc_py8_interface_consistency_warning(options): mg5amc_py8_interface_path = pjoin(MG5DIR,mg5amc_py8_interface_path) py8_path = pjoin(MG5DIR,py8_path) - # Retrieve all the on-install and current versions + # Retrieve all the on-install and current versions fsock = open(pjoin(mg5amc_py8_interface_path, 'MG5AMC_VERSION_ON_INSTALL')) MG5_version_on_install = fsock.read().replace('\n','') fsock.close() @@ -4074,7 +4074,7 @@ def mg5amc_py8_interface_consistency_warning(options): MG5_curr_version =misc.get_pkg_info()['version'] try: p = subprocess.Popen(['./get_pythia8_version.py',py8_path], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mg5amc_py8_interface_path) (out, err) = p.communicate() out = out.decode(errors='ignore').replace('\n','') @@ -4084,37 +4084,37 @@ def mg5amc_py8_interface_consistency_warning(options): float(out) except: PY8_curr_version = None - + if not MG5_version_on_install is None and not MG5_curr_version is None: if MG5_version_on_install != MG5_curr_version: return \ """ The current version of MG5_aMC (v%s) is different than the one active when - installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). + installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). 
Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(MG5_curr_version, MG5_version_on_install) - + if not PY8_version_on_install is None and not PY8_curr_version is None: if PY8_version_on_install != PY8_curr_version: return \ """ The current version of Pythia8 (v%s) is different than the one active when - installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). + installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(PY8_curr_version,PY8_version_on_install) - + return None def setup_Pythia8RunAndCard(self, PY8_Card, run_type): """ Setup the Pythia8 Run environment and card. In particular all the process and run specific parameters of the card are automatically set here. This function returns the path where HEPMC events will be output, if any.""" - + HepMC_event_output = None tag = self.run_tag - + PY8_Card.subruns[0].systemSet('Beams:LHEF',"unweighted_events.lhe.gz") hepmc_format = PY8_Card['HEPMCoutput:file'].lower() @@ -4185,7 +4185,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): misc.mkfifo(fifo_path) # Use defaultSet not to overwrite the current userSet status PY8_Card.defaultSet('HEPMCoutput:file',fifo_path) - HepMC_event_output=fifo_path + HepMC_event_output=fifo_path elif hepmc_format in ['','/dev/null','None']: logger.warning('User disabled the HepMC output of Pythia8.') HepMC_event_output = None @@ -4206,7 +4206,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): # only if it is not already user_set. 
if PY8_Card['JetMatching:qCut']==-1.0: PY8_Card.MadGraphSet('JetMatching:qCut',1.5*self.run_card['xqcut'], force=True) - + if PY8_Card['JetMatching:qCut']<(1.5*self.run_card['xqcut']): logger.error( 'The MLM merging qCut parameter you chose (%f) is less than '%PY8_Card['JetMatching:qCut']+ @@ -4233,7 +4233,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): if PY8_Card['JetMatching:qCut'] not in qCutList: qCutList.append(PY8_Card['JetMatching:qCut']) PY8_Card.MadGraphSet('SysCalc:qCutList', qCutList, force=True) - + if PY8_Card['SysCalc:qCutList']!='auto': for scale in PY8_Card['SysCalc:qCutList']: @@ -4244,7 +4244,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): "'sys_matchscale' in the run_card) is less than 1.5*xqcut, where xqcut is"+ ' the run_card parameter (=%f)\n'%self.run_card['xqcut']+ 'It would be better/safer to use a larger qCut or a smaller xqcut.') - + # Specific MLM settings # PY8 should not implement the MLM veto since the driver should do it # if merging scale variation is turned on @@ -4294,18 +4294,18 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): CKKW_cut = 'ktdurham' elif self.run_card['ptlund']>0.0 and self.run_card['ktdurham']<=0.0: PY8_Card.subruns[0].MadGraphSet('Merging:doPTLundMerging',True) - CKKW_cut = 'ptlund' + CKKW_cut = 'ptlund' else: raise InvalidCmd("*Either* the 'ptlund' or 'ktdurham' cut in "+\ " the run_card must be turned on to activate CKKW(L) merging"+ " with Pythia8, but *both* cuts cannot be turned on at the same time."+ "\n ptlund=%f, ktdurham=%f."%(self.run_card['ptlund'],self.run_card['ktdurham'])) - + # Automatically set qWeed to the CKKWL cut if not defined by the user. if PY8_Card['SysCalc:qWeed']==-1.0: PY8_Card.MadGraphSet('SysCalc:qWeed',self.run_card[CKKW_cut], force=True) - + # MadGraphSet sets the corresponding value (in system mode) # only if it is not already user_set. 
if PY8_Card['Merging:TMS']==-1.0: @@ -4319,7 +4319,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): 'The CKKWl merging scale you chose (%f) is less than '%PY8_Card['Merging:TMS']+ 'the %s cut specified in the run_card parameter (=%f).\n'%(CKKW_cut,self.run_card[CKKW_cut])+ 'It is incorrect to use a smaller CKKWl scale than the generation-level %s cut!'%CKKW_cut) - + PY8_Card.MadGraphSet('TimeShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:rapidityOrder',False) @@ -4381,7 +4381,7 @@ def do_pythia8(self, line): try: import madgraph - except ImportError: + except ImportError: import internal.histograms as histograms else: import madgraph.various.histograms as histograms @@ -4400,16 +4400,16 @@ def do_pythia8(self, line): self.check_pythia8(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) - self.check_pythia8(args) + self.check_pythia8(args) # Update the banner with the pythia card if not self.banner or len(self.banner) <=1: # Here the level keyword 'pythia' must not be changed to 'pythia8'. self.banner = banner_mod.recover_banner(self.results, 'pythia') - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1], pythia_version=8, banner=self.banner) @@ -4425,7 +4425,7 @@ def do_pythia8(self, line): #"Please use 'event_norm = average' in the run_card to avoid this problem.") - + if not self.options['mg5amc_py8_interface_path'] or not \ os.path.exists(pjoin(self.options['mg5amc_py8_interface_path'], 'MG5aMC_PY8_interface')): @@ -4444,16 +4444,16 @@ def do_pythia8(self, line): # Again here 'pythia' is just a keyword for the simulation level. 
self.update_status('\033[92mRunning Pythia8 [arXiv:1410.3012]\033[0m', 'pythia8') - - tag = self.run_tag + + tag = self.run_tag # Now write Pythia8 card # Start by reading, starting from the default one so that the 'user_set' # tag are correctly set. - PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', + PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', 'pythia8_card_default.dat')) PY8_Card.read(pjoin(self.me_dir, 'Cards', 'pythia8_card.dat'), setter='user') - + run_type = 'default' merged_run_types = ['MLM','CKKW'] if int(self.run_card['ickkw'])==1: @@ -4471,7 +4471,7 @@ def do_pythia8(self, line): cmd_card = StringIO.StringIO() PY8_Card.write(cmd_card,pjoin(self.me_dir,'Cards','pythia8_card_default.dat'), direct_pythia_input=True) - + # Now setup the preamble to make sure that everything will use the locally # installed tools (if present) even if the user did not add it to its # environment variables. @@ -4486,13 +4486,13 @@ def do_pythia8(self, line): preamble = misc.get_HEPTools_location_setter( pjoin(MG5DIR,'HEPTools'),'lib') preamble += "\n unset PYTHIA8DATA\n" - + open(pythia_cmd_card,'w').write("""! ! It is possible to run this card manually with: ! %s %s ! 
"""%(preamble+pythia_main,os.path.basename(pythia_cmd_card))+cmd_card.getvalue()) - + # launch pythia8 pythia_log = pjoin(self.me_dir , 'Events', self.run_name , '%s_pythia8.log' % tag) @@ -4504,13 +4504,13 @@ def do_pythia8(self, line): shell_exe = None if os.path.exists('/usr/bin/env'): shell_exe = '/usr/bin/env %s'%shell - else: + else: shell_exe = misc.which(shell) if not shell_exe: raise self.InvalidCmd('No s hell could be found in your environment.\n'+ "Make sure that either '%s' is in your path or that the"%shell+\ " command '/usr/bin/env %s' exists and returns a valid path."%shell) - + exe_cmd = "#!%s\n%s"%(shell_exe,' '.join( [preamble+pythia_main, os.path.basename(pythia_cmd_card)])) @@ -4528,7 +4528,7 @@ def do_pythia8(self, line): ( os.path.exists(HepMC_event_output) and \ stat.S_ISFIFO(os.stat(HepMC_event_output).st_mode)) startPY8timer = time.time() - + # Information that will be extracted from this PY8 run PY8_extracted_information={ 'sigma_m':None, 'Nacc':None, 'Ntry':None, 'cross_sections':{} } @@ -4556,7 +4556,7 @@ def do_pythia8(self, line): n_cores = max(int(self.options['cluster_size']),1) elif self.options['run_mode']==2: n_cores = max(int(self.cluster.nb_core),1) - + lhe_file_name = os.path.basename(PY8_Card.subruns[0]['Beams:LHEF']) lhe_file = lhe_parser.EventFile(pjoin(self.me_dir,'Events', self.run_name,PY8_Card.subruns[0]['Beams:LHEF'])) @@ -4574,7 +4574,7 @@ def do_pythia8(self, line): if self.options['run_mode']==2: min_n_events_per_job = 100 elif self.options['run_mode']==1: - min_n_events_per_job = 1000 + min_n_events_per_job = 1000 min_n_core = n_events//min_n_events_per_job n_cores = max(min(min_n_core,n_cores),1) @@ -4584,8 +4584,8 @@ def do_pythia8(self, line): logger.info('Follow Pythia8 shower by running the '+ 'following command (in a separate terminal):\n tail -f %s'%pythia_log) - if self.options['run_mode']==2 and self.options['nb_core']>1: - ret_code = self.cluster.launch_and_wait(wrapper_path, + if 
self.options['run_mode']==2 and self.options['nb_core']>1: + ret_code = self.cluster.launch_and_wait(wrapper_path, argument= [], stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events',self.run_name)) else: @@ -4630,10 +4630,10 @@ def do_pythia8(self, line): wrapper = open(wrapper_path,'w') if self.options['cluster_temp_path'] is None: exe_cmd = \ -"""#!%s +"""#!%s ./%s PY8Card.dat >& PY8_log.txt """ - else: + else: exe_cmd = \ """#!%s ln -s ./events_$1.lhe.gz ./events.lhe.gz @@ -4663,21 +4663,21 @@ def do_pythia8(self, line): # Set it as executable st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) - + # Split the .lhe event file, create event partition partition=[n_available_events//n_cores]*n_cores for i in range(n_available_events%n_cores): partition[i] += 1 - + # Splitting according to the total number of events requested by the user # Will be used to determine the number of events to indicate in the PY8 split cards. partition_for_PY8=[n_events//n_cores]*n_cores for i in range(n_events%n_cores): partition_for_PY8[i] += 1 - - logger.info('Splitting .lhe event file for PY8 parallelization...') - n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) - + + logger.info('Splitting .lhe event file for PY8 parallelization...') + n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) + if n_splits!=len(partition): raise MadGraph5Error('Error during lhe file splitting. Expected %d files but obtained %d.' 
%(len(partition),n_splits)) @@ -4690,7 +4690,7 @@ def do_pythia8(self, line): # Add the necessary run content shutil.move(pjoin(parallelization_dir,lhe_file.name+'_%d.lhe.gz'%split_id), pjoin(parallelization_dir,split_files[-1])) - + logger.info('Submitting Pythia8 jobs...') for i, split_file in enumerate(split_files): # We must write a PY8Card tailored for each split so as to correct the normalization @@ -4706,7 +4706,7 @@ def do_pythia8(self, line): split_PY8_Card.write(pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,'PY8Card.dat'), add_missing=False) in_files = [pjoin(parallelization_dir,os.path.basename(pythia_main)), - pjoin(parallelization_dir,'PY8Card_%d.dat'%i), + pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,split_file)] if self.options['cluster_temp_path'] is None: out_files = [] @@ -4718,35 +4718,35 @@ def do_pythia8(self, line): if os.path.basename(in_file)==split_file: ln(in_file,selected_cwd,name='events.lhe.gz') elif os.path.basename(in_file).startswith('PY8Card'): - ln(in_file,selected_cwd,name='PY8Card.dat') + ln(in_file,selected_cwd,name='PY8Card.dat') else: - ln(in_file,selected_cwd) + ln(in_file,selected_cwd) in_files = [] wrapper_path = os.path.basename(wrapper_path) else: out_files = ['split_%d.tar.gz'%i] selected_cwd = parallelization_dir - self.cluster.submit2(wrapper_path, - argument=[str(i)], cwd=selected_cwd, + self.cluster.submit2(wrapper_path, + argument=[str(i)], cwd=selected_cwd, input_files=in_files, output_files=out_files, required_output=out_files) - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return logger.info('Pythia8 shower jobs: %d Idle, %d Running, %d Done [%s]'\ %(Idle, Running, Done, misc.format_time(time.time() - startPY8timer))) self.cluster.wait(parallelization_dir,wait_monitoring) - + logger.info('Merging results from the split PY8 runs...') if self.options['cluster_temp_path']: # Decompressing the output for i, split_file in 
enumerate(split_files): misc.call(['tar','-xzf','split_%d.tar.gz'%i],cwd=parallelization_dir) os.remove(pjoin(parallelization_dir,'split_%d.tar.gz'%i)) - + # Now merge logs pythia_log_file = open(pythia_log,'w') n_added = 0 @@ -4778,7 +4778,7 @@ def wait_monitoring(Idle, Running, Done): if n_added>0: PY8_extracted_information['sigma_m'] /= float(n_added) pythia_log_file.close() - + # djr plots djr_HwU = None n_added = 0 @@ -4845,7 +4845,7 @@ def wait_monitoring(Idle, Running, Done): if not os.path.isfile(hepmc_file): continue all_hepmc_files.append(hepmc_file) - + if len(all_hepmc_files)>0: hepmc_output = pjoin(self.me_dir,'Events',self.run_name,HepMC_event_output) with misc.TMP_directory() as tmp_dir: @@ -4860,8 +4860,8 @@ def wait_monitoring(Idle, Running, Done): break header.close() tail = open(pjoin(tmp_dir,'tail.hepmc'),'w') - n_tail = 0 - + n_tail = 0 + for line in misc.reverse_readline(all_hepmc_files[-1]): if line.startswith('HepMC::'): n_tail += 1 @@ -4871,7 +4871,7 @@ def wait_monitoring(Idle, Running, Done): tail.close() if n_tail>1: raise MadGraph5Error('HEPMC files should only have one trailing command.') - ###################################################################### + ###################################################################### # This is the most efficient way of putting together HEPMC's, *BUT* # # WARNING: NEED TO RENDER THE CODE BELOW SAFE TOWARDS INJECTION # ###################################################################### @@ -4888,12 +4888,12 @@ def wait_monitoring(Idle, Running, Done): elif sys.platform == 'darwin': # sed on MAC has slightly different synthax than on os.system(' '.join(['sed','-i',"''","'%s;$d'"% - (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) - else: - # other UNIX systems + (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) + else: + # other UNIX systems os.system(' '.join(['sed','-i']+["-e '%id'"%(i+1) for i in range(n_head)]+ ["-e '$d'",hepmc_file])) - + os.system(' 
'.join(['cat',pjoin(tmp_dir,'header.hepmc')]+all_hepmc_files+ [pjoin(tmp_dir,'tail.hepmc'),'>',hepmc_output])) @@ -4915,12 +4915,12 @@ def wait_monitoring(Idle, Running, Done): 'Inclusive cross section:' not in '\n'.join(open(pythia_log,'r').readlines()[-20:]): logger.warning('Fail to produce a pythia8 output. More info in \n %s'%pythia_log) return - + # Plot for Pythia8 successful = self.create_plot('Pythia8') if not successful: logger.warning('Failed to produce Pythia8 merging plots.') - + self.to_store.append('pythia8') # Study matched cross-sections @@ -4931,7 +4931,7 @@ def wait_monitoring(Idle, Running, Done): if self.options['run_mode']==0 or (self.options['run_mode']==2 and self.options['nb_core']==1): PY8_extracted_information['sigma_m'],PY8_extracted_information['Nacc'],\ PY8_extracted_information['Ntry'] = self.parse_PY8_log_file( - pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) + pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) else: logger.warning('Pythia8 cross-section could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1. 
YYYYY') @@ -4944,8 +4944,8 @@ def wait_monitoring(Idle, Running, Done): Ntry = PY8_extracted_information['Ntry'] sigma_m = PY8_extracted_information['sigma_m'] # Compute pythia error - error = self.results[self.run_name].return_tag(self.run_tag)['error'] - try: + error = self.results[self.run_name].return_tag(self.run_tag)['error'] + try: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) except ZeroDivisionError: # Cannot compute error @@ -4966,31 +4966,31 @@ def wait_monitoring(Idle, Running, Done): else: logger.warning('Pythia8 merged cross-sections could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1.XXXXX') - PY8_extracted_information['cross_sections'] = {} - + PY8_extracted_information['cross_sections'] = {} + cross_sections = PY8_extracted_information['cross_sections'] if cross_sections: - # Filter the cross_sections specified an keep only the ones + # Filter the cross_sections specified an keep only the ones # with central parameters and a different merging scale a_float_re = '[\+|-]?\d+(\.\d*)?([EeDd][\+|-]?\d+)?' 
central_merging_re = re.compile( '^\s*Weight_MERGING\s*=\s*(?P%s)\s*$'%a_float_re, - re.IGNORECASE) + re.IGNORECASE) cross_sections = dict( (float(central_merging_re.match(xsec).group('merging')),value) - for xsec, value in cross_sections.items() if not + for xsec, value in cross_sections.items() if not central_merging_re.match(xsec) is None) central_scale = PY8_Card['JetMatching:qCut'] if \ int(self.run_card['ickkw'])==1 else PY8_Card['Merging:TMS'] if central_scale in cross_sections: self.results.add_detail('cross_pythia8', cross_sections[central_scale][0]) self.results.add_detail('error_pythia8', cross_sections[central_scale][1]) - + #logger.info('Pythia8 merged cross-sections are:') #for scale in sorted(cross_sections.keys()): # logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ - # (scale,cross_sections[scale][0],cross_sections[scale][1])) - + # (scale,cross_sections[scale][0],cross_sections[scale][1])) + xsecs_file = open(pjoin(self.me_dir,'Events',self.run_name, '%s_merged_xsecs.txt'%tag),'w') if cross_sections: @@ -5003,9 +5003,9 @@ def wait_monitoring(Idle, Running, Done): xsecs_file.write('Cross-sections could not be read from the'+\ "XML node 'xsection' of the .dat file produced by Pythia8.") xsecs_file.close() - + #Update the banner - # We add directly the pythia command card because it has the full + # We add directly the pythia command card because it has the full # information self.banner.add(pythia_cmd_card) @@ -5022,13 +5022,13 @@ def wait_monitoring(Idle, Running, Done): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + def parse_PY8_log_file(self, log_file_path): """ Parse a log file to extract number of event and cross-section. 
""" pythiare = re.compile("Les Houches User Process\(es\)\s*\d+\s*\|\s*(?P\d+)\s*(?P\d+)\s*(?P\d+)\s*\|\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") pythia_xsec_re = re.compile("Inclusive cross section\s*:\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") sigma_m, Nacc, Ntry = None, None, None - for line in misc.BackRead(log_file_path): + for line in misc.BackRead(log_file_path): info = pythiare.search(line) if not info: # Also try to obtain the cross-section and error from the final xsec line of pythia8 log @@ -5058,7 +5058,7 @@ def parse_PY8_log_file(self, log_file_path): raise self.InvalidCmd("Could not find cross-section and event number information "+\ "in Pythia8 log\n '%s'."%log_file_path) - + def extract_cross_sections_from_DJR(self,djr_output): """Extract cross-sections from a djr XML output.""" import xml.dom.minidom as minidom @@ -5075,11 +5075,11 @@ def extract_cross_sections_from_DJR(self,djr_output): [float(xsec.childNodes[0].data.split()[0]), float(xsec.childNodes[0].data.split()[1])]) for xsec in xsections) - + def do_pythia(self, line): """launch pythia""" - - + + # Check argument's validity args = self.split_arg(line) if '--no_default' in args: @@ -5089,12 +5089,12 @@ def do_pythia(self, line): args.remove('--no_default') else: no_default = False - + if not self.run_name: self.check_pythia(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) self.check_pythia(args) @@ -5102,7 +5102,7 @@ def do_pythia(self, line): logger.error('pythia-pgs require event_norm to be on sum. 
Do not run pythia6') return - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1]) if self.options['automatic_html_opening']: @@ -5114,35 +5114,35 @@ def do_pythia(self, line): self.banner = banner_mod.recover_banner(self.results, 'pythia') pythia_src = pjoin(self.options['pythia-pgs_path'],'src') - + self.results.add_detail('run_mode', 'madevent') self.update_status('Running Pythia', 'pythia') try: os.remove(pjoin(self.me_dir,'Events','pythia.done')) except Exception: - pass - + pass + ## LAUNCHING PYTHIA # check that LHAPATH is define. if not re.search(r'^\s*LHAPATH=%s/PDFsets' % pythia_src, - open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), + open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), re.M): f = open(pjoin(self.me_dir,'Cards','pythia_card.dat'),'a') f.write('\n LHAPATH=%s/PDFsets' % pythia_src) f.close() tag = self.run_tag pythia_log = pjoin(self.me_dir, 'Events', self.run_name , '%s_pythia.log' % tag) - #self.cluster.launch_and_wait('../bin/internal/run_pythia', + #self.cluster.launch_and_wait('../bin/internal/run_pythia', # argument= [pythia_src], stdout= pythia_log, # stderr=subprocess.STDOUT, # cwd=pjoin(self.me_dir,'Events')) output_files = ['pythia_events.hep'] if self.run_card['use_syst']: output_files.append('syst.dat') - if self.run_card['ickkw'] == 1: + if self.run_card['ickkw'] == 1: output_files += ['beforeveto.tree', 'xsecs.tree', 'events.tree'] - + os.environ['PDG_MASS_TBL'] = pjoin(pythia_src,'mass_width_2004.mc') self.cluster.launch_and_wait(pjoin(pythia_src, 'pythia'), input_files=[pjoin(self.me_dir, "Events", "unweighted_events.lhe"), @@ -5152,23 +5152,23 @@ def do_pythia(self, line): stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events')) - + os.remove(pjoin(self.me_dir, "Events", "unweighted_events.lhe")) if not 
os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): logger.warning('Fail to produce pythia output. More info in \n %s' % pythia_log) return - + self.to_store.append('pythia') - + # Find the matched cross-section if int(self.run_card['ickkw']): # read the line from the bottom of the file - #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, + #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, # '%s_pythia.log' % tag)) - pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") - for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, + pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") + for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, '%s_pythia.log' % tag)): info = pythiare.search(line) if not info: @@ -5188,16 +5188,16 @@ def do_pythia(self, line): self.results.add_detail('nb_event_pythia', Nacc) #compute pythia error error = self.results[self.run_name].return_tag(self.run_tag)['error'] - if Nacc: + if Nacc: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) else: error_m = 10000 * sigma_m # works both for fixed number of generated events and fixed accepted events self.results.add_detail('error_pythia', error_m) - break + break #pythia_log.close() - + pydir = pjoin(self.options['pythia-pgs_path'], 'src') eradir = self.options['exrootanalysis_path'] madir = self.options['madanalysis_path'] @@ -5216,12 +5216,12 @@ def do_pythia(self, line): # Creating LHE file self.run_hep2lhe(banner_path) - + if int(self.run_card['ickkw']): misc.gzip(pjoin(self.me_dir,'Events','beforeveto.tree'), - stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + - if self.run_card['use_syst'] in self.true: # Calculate syscalc info based on 
syst.dat try: @@ -5233,7 +5233,7 @@ def do_pythia(self, line): # Store syst.dat misc.gzip(pjoin(self.me_dir,'Events', 'syst.dat'), stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_syst.dat.gz')) - + # Store syscalc.dat if os.path.exists(pjoin(self.me_dir, 'Events', 'syscalc.dat')): filename = pjoin(self.me_dir, 'Events' ,self.run_name, @@ -5253,7 +5253,7 @@ def do_pythia(self, line): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + ################################################################################ def do_remove(self, line): @@ -5263,8 +5263,8 @@ def do_remove(self, line): run, tag, mode = self.check_remove(args) if 'banner' in mode: mode.append('all') - - + + if run == 'all': # Check first if they are not a run with a name run. if os.path.exists(pjoin(self.me_dir, 'Events', 'all')): @@ -5280,7 +5280,7 @@ def do_remove(self, line): logger.info(error) pass # run already clear return - + # Check that run exists if not os.path.exists(pjoin(self.me_dir, 'Events', run)): raise self.InvalidCmd('No run \'%s\' detected' % run) @@ -5294,7 +5294,7 @@ def do_remove(self, line): # Found the file to delete - + to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) to_delete += misc.glob('*', pjoin(self.me_dir, 'HTML', run)) # forbid the banner to be removed @@ -5314,7 +5314,7 @@ def do_remove(self, line): if os.path.exists(pjoin(self.me_dir, 'Events', run, 'unweighted_events.lhe.gz')): to_delete.append('unweighted_events.lhe.gz') if os.path.exists(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')): - to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) + to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) if nb_rm != len(to_delete): logger.warning('Be carefull that partonic information are on the point to be removed.') if 'all' in mode: @@ -5327,8 +5327,8 @@ def do_remove(self, line): if 'delphes' not 
in mode: to_delete = [f for f in to_delete if 'delphes' not in f] if 'parton' not in mode: - to_delete = [f for f in to_delete if 'delphes' in f - or 'pgs' in f + to_delete = [f for f in to_delete if 'delphes' in f + or 'pgs' in f or 'pythia' in f] if not self.force and len(to_delete): question = 'Do you want to delete the following files?\n %s' % \ @@ -5336,7 +5336,7 @@ def do_remove(self, line): ans = self.ask(question, 'y', choices=['y','n']) else: ans = 'y' - + if ans == 'y': for file2rm in to_delete: if os.path.exists(pjoin(self.me_dir, 'Events', run, file2rm)): @@ -5374,7 +5374,7 @@ def do_remove(self, line): if ans == 'y': for file2rm in to_delete: os.remove(file2rm) - + if 'banner' in mode: to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) if tag: @@ -5389,8 +5389,8 @@ def do_remove(self, line): return elif any(['banner' not in os.path.basename(p) for p in to_delete]): if to_delete: - raise MadGraph5Error('''Some output still exists for this run. - Please remove those output first. Do for example: + raise MadGraph5Error('''Some output still exists for this run. + Please remove those output first. Do for example: remove %s all banner ''' % run) else: @@ -5400,7 +5400,7 @@ def do_remove(self, line): return else: logger.info('''The banner is not removed. In order to remove it run: - remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) + remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) # update database. 
self.results.clean(mode, run, tag) @@ -5420,7 +5420,7 @@ def do_plot(self, line): logger.info('plot for run %s' % self.run_name) if not self.force: self.ask_edit_cards(['plot_card.dat'], args, plot=True) - + if any([arg in ['all','parton'] for arg in args]): filename = pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe') if os.path.exists(filename+'.gz'): @@ -5438,8 +5438,8 @@ def do_plot(self, line): except Exception: pass else: - logger.info('No valid files for partonic plot') - + logger.info('No valid files for partonic plot') + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_events.lhe' % self.run_tag) @@ -5452,10 +5452,10 @@ def do_plot(self, line): stdout= "%s.gz" % filename) else: logger.info('No valid files for pythia plot') - - + + if any([arg in ['all','pgs'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) @@ -5464,15 +5464,15 @@ def do_plot(self, line): misc.gzip(filename) else: logger.info('No valid files for pgs plot') - + if any([arg in ['all','delphes'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) if os.path.exists(filename): self.create_plot('Delphes') - misc.gzip(filename) + misc.gzip(filename) else: logger.info('No valid files for delphes plot') @@ -5488,9 +5488,9 @@ def do_syscalc(self, line): if self.ninitial == 1: logger.error('SysCalc can\'t be run for decay processes') return - + logger.info('Calculating systematics for run %s' % self.run_name) - + self.ask_edit_cards(['run_card.dat'], args, plot=False) self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) 
if any([arg in ['all','parton'] for arg in args]): @@ -5504,7 +5504,7 @@ def do_syscalc(self, line): stdout="%s.gz" % filename) else: logger.info('No valid files for parton level systematics run.') - + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_syst.dat' % self.run_tag) @@ -5525,17 +5525,17 @@ def do_syscalc(self, line): else: logger.info('No valid files for pythia level') - + def store_result(self): - """ tar the pythia results. This is done when we are quite sure that + """ tar the pythia results. This is done when we are quite sure that the pythia output will not be use anymore """ if not self.run_name: return - + if not self.to_store: - return - + return + tag = self.run_card['run_tag'] self.update_status('storing files of previous run', level=None,\ error=True) @@ -5546,14 +5546,14 @@ def store_result(self): misc.gzip(pjoin(self.me_dir,'Events',self.run_name,"unweighted_events.lhe")) if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) - + if 'pythia' in self.to_store: self.update_status('Storing Pythia files of previous run', level='pythia', error=True) p = pjoin(self.me_dir,'Events') n = self.run_name t = tag self.to_store.remove('pythia') - misc.gzip(pjoin(p,'pythia_events.hep'), + misc.gzip(pjoin(p,'pythia_events.hep'), stdout=pjoin(p, str(n),'%s_pythia_events.hep' % t),forceexternal=True) if 'pythia8' in self.to_store: @@ -5581,26 +5581,26 @@ def store_result(self): os.system("mv " + file_path + hepmc_fileformat + " " + move_hepmc_path) self.update_status('Done', level='pythia',makehtml=False,error=True) - self.results.save() - + self.results.save() + self.to_store = [] - def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, + def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, run_type='', mode=None, **opt): """ """ argument = [str(arg) for arg in argument] if mode is 
None: mode = self.cluster_mode - + # ensure that exe is executable if os.path.exists(exe) and not os.access(exe, os.X_OK): os.system('chmod +x %s ' % exe) elif (cwd and os.path.exists(pjoin(cwd, exe))) and not \ os.access(pjoin(cwd, exe), os.X_OK): os.system('chmod +x %s ' % pjoin(cwd, exe)) - + if mode == 0: - self.update_status((remaining, 1, + self.update_status((remaining, 1, self.total_jobs - remaining -1, run_type), level=None, force=False) start = time.time() #os.system('cd %s; ./%s' % (cwd,exe)) @@ -5613,24 +5613,24 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, elif mode in [1,2]: exename = os.path.basename(exe) # For condor cluster, create the input/output files - if 'ajob' in exename: + if 'ajob' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat','dname.mg', pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) - + output_files = [] required_output = [] - + #Find the correct PDF input file input_files.append(self.get_pdf_input_filename()) - + #Find the correct ajob Gre = re.compile("\s*j=(G[\d\.\w]+)") origre = re.compile("grid_directory=(G[\d\.\w]+)") - try : + try : fsock = open(exe) except Exception: fsock = open(pjoin(cwd,exe)) @@ -5648,21 +5648,21 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if os.path.isdir(pjoin(cwd,G)): input_files.append(G) required_output.append('%s/results.dat' % G) - + if origre.search(text): G_grid = origre.search(text).groups()[0] input_files.append(pjoin(G_grid, 'ftn26')) - + #submitting - self.cluster.submit2(exe, stdout=stdout, cwd=cwd, + self.cluster.submit2(exe, stdout=stdout, cwd=cwd, input_files=input_files, output_files=output_files, required_output=required_output) elif 
'survey' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5671,7 +5671,7 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [] required_output = [] - + #Find the correct ajob suffix = "_%s" % int(float(argument[0])) if suffix == '_0': @@ -5685,12 +5685,12 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if '.' in argument[0]: offset = int(str(argument[0]).split('.')[1]) else: - offset = 0 - + offset = 0 + if offset ==0 or offset == int(float(argument[0])): if os.path.exists(pjoin(cwd, G, 'input_app.txt')): os.remove(pjoin(cwd, G, 'input_app.txt')) - + if os.path.exists(os.path.realpath(pjoin(cwd, G, 'ftn25'))): if offset == 0 or offset == int(float(argument[0])): os.remove(pjoin(cwd, G, 'ftn25')) @@ -5706,16 +5706,16 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, pass #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, required_output=required_output, **opt) elif "refine_splitted.sh" in exename: input_files = ['madevent','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - 
input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5725,25 +5725,25 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [argument[0]] required_output = [] for G in output_files: - required_output.append('%s/results.dat' % G) + required_output.append('%s/results.dat' % G) input_files.append(pjoin(argument[1], "input_app.txt")) input_files.append(pjoin(argument[1], "ftn26")) - + #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, - required_output=required_output, **opt) + required_output=required_output, **opt) + + - - else: self.cluster.submit(exe, argument=argument, stdout=stdout, cwd=cwd, **opt) - + ############################################################################ def find_madevent_mode(self): """Find if Madevent is in Group mode or not""" - + # The strategy is too look in the files Source/run_configs.inc # if we found: ChanPerJob=3 then it's a group mode. 
file_path = pjoin(self.me_dir, 'Source', 'run_config.inc') @@ -5752,11 +5752,11 @@ def find_madevent_mode(self): return 'group' else: return 'v4' - + ############################################################################ def monitor(self, run_type='monitor', mode=None, html=False): """ monitor the progress of running job """ - + starttime = time.time() if mode is None: @@ -5772,8 +5772,8 @@ def monitor(self, run_type='monitor', mode=None, html=False): else: update_status = lambda idle, run, finish: None update_first = None - try: - self.cluster.wait(self.me_dir, update_status, update_first=update_first) + try: + self.cluster.wait(self.me_dir, update_status, update_first=update_first) except Exception as error: logger.info(error) if not self.force: @@ -5788,24 +5788,24 @@ def monitor(self, run_type='monitor', mode=None, html=False): raise except KeyboardInterrupt as error: self.cluster.remove() - raise - - + raise + - ############################################################################ + + ############################################################################ def configure_directory(self, html_opening=True): - """ All action require before any type of run """ + """ All action require before any type of run """ # Basic check assert os.path.exists(pjoin(self.me_dir,'SubProcesses')) # environmental variables to be included in make_opts self.make_opts_var = {} - + #see when the last file was modified time_mod = max([os.path.getmtime(pjoin(self.me_dir,'Cards','run_card.dat')), os.path.getmtime(pjoin(self.me_dir,'Cards','param_card.dat'))]) - + if self.configured >= time_mod and hasattr(self, 'random') and hasattr(self, 'run_card'): #just ensure that cluster specific are correctly handled if self.cluster: @@ -5820,7 +5820,7 @@ def configure_directory(self, html_opening=True): #open only once the web page # Change current working directory self.launching_dir = os.getcwd() - + # Check if we need the MSSM special treatment model = self.find_model_name() 
if model == 'mssm' or model.startswith('mssm-'): @@ -5828,14 +5828,14 @@ def configure_directory(self, html_opening=True): mg5_param = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') check_param_card.convert_to_mg5card(param_card, mg5_param) check_param_card.check_valid_param_card(mg5_param) - + # limit the number of event to 100k self.check_nb_events() # this is in order to avoid conflicts between runs with and without # lhapdf. not needed anymore the makefile handles it automaticallu #misc.compile(['clean4pdf'], cwd = pjoin(self.me_dir, 'Source')) - + self.make_opts_var['pdlabel1'] = '' self.make_opts_var['pdlabel2'] = '' if self.run_card['pdlabel1'] in ['eva', 'iww']: @@ -5866,7 +5866,7 @@ def configure_directory(self, html_opening=True): self.copy_lep_densities(self.run_card['pdlabel'], pjoin(self.me_dir, 'Source')) self.make_opts_var['pdlabel1'] = 'ee' self.make_opts_var['pdlabel2'] = 'ee' - + # set random number if self.run_card['iseed'] != 0: self.random = int(self.run_card['iseed']) @@ -5885,18 +5885,18 @@ def configure_directory(self, html_opening=True): break else: self.random = random.randint(1, 30107) - + #set random seed for python part of the code if self.run_card['python_seed'] == -2: #-2 means same as run_card import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] elif self.run_card['python_seed'] >= 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] if self.run_card['ickkw'] == 2: logger.info('Running with CKKW matching') self.treat_ckkw_matching() @@ -5905,12 +5905,12 @@ def configure_directory(self, html_opening=True): self.update_make_opts(self.run_card) # reset 
list of Gdirectory self.Gdirs = None - + # create param_card.inc and run_card.inc self.do_treatcards('') - + logger.info("compile Source Directory") - + # Compile for name in [ 'all']:#, '../bin/internal/combine_events']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) @@ -5933,7 +5933,7 @@ def configure_directory(self, html_opening=True): os.remove(pjoin(self.me_dir, 'lib','libbias.a')) force_subproc_clean = True - + # Finally compile the bias module as well if self.run_card['bias_module'] not in ['dummy',None]: logger.debug("Compiling the bias module '%s'"%bias_name) @@ -5945,7 +5945,7 @@ def configure_directory(self, html_opening=True): 'INVALID' in str(bias_module_valid).upper(): raise InvalidCmd("The bias module '%s' cannot be used because of:\n%s"% (bias_name,bias_module_valid)) - + self.compile(arg=[], cwd=os.path.join(self.me_dir, 'Source','BIAS',bias_name)) self.proc_characteristics['bias_module']=bias_name # Update the proc_characterstics file @@ -5954,7 +5954,7 @@ def configure_directory(self, html_opening=True): if force_subproc_clean: # Make sure that madevent will be recompiled - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] for nb_proc,subdir in enumerate(subproc): Pdir = pjoin(self.me_dir, 'SubProcesses',subdir.strip()) @@ -5971,20 +5971,20 @@ def configure_directory(self, html_opening=True): ############################################################################ @staticmethod def check_dir(path, default=''): - """check if the directory exists. if so return the path otherwise the + """check if the directory exists. 
if so return the path otherwise the default""" - + if os.path.isdir(path): return path else: return default - + ############################################################################ def get_Gdir(self, Pdir=None, symfact=None): """get the list of Gdirectory if not yet saved.""" - + if hasattr(self, "Gdirs") and self.Gdirs: if self.me_dir in self.Gdirs[0]: if Pdir is None: @@ -6000,8 +6000,8 @@ def get_Gdir(self, Pdir=None, symfact=None): Pdirs = self.get_Pdir() - Gdirs = {self.me_dir:[]} - mfactors = {} + Gdirs = {self.me_dir:[]} + mfactors = {} for P in Pdirs: Gdirs[P] = [] #for the next line do not use P, since in readonly mode it might not have symfact @@ -6012,7 +6012,7 @@ def get_Gdir(self, Pdir=None, symfact=None): mfactors[pjoin(P, "G%s" % tag)] = mfactor self.Gdirs = (Gdirs, mfactors) return self.get_Gdir(Pdir, symfact=symfact) - + ############################################################################ def set_run_name(self, name, tag=None, level='parton', reload_card=False, allow_new_tag=True): @@ -6030,8 +6030,8 @@ def get_last_tag(self, level): tagRun = self.results[self.run_name][i] if tagRun.pythia or tagRun.shower or tagRun.pythia8 : return tagRun['tag'] - - + + # when are we force to change the tag new_run:previous run requiring changes upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','madanalysis5_hadron','madanalysis5_parton', 'rivet'], 'pythia': ['pythia','pgs','delphes','madanalysis5_hadron'], @@ -6044,7 +6044,7 @@ def get_last_tag(self, level): 'syscalc':[], 'rivet':['rivet']} - if name == self.run_name: + if name == self.run_name: if reload_card: run_card = pjoin(self.me_dir, 'Cards','run_card.dat') self.run_card = banner_mod.RunCard(run_card) @@ -6064,13 +6064,13 @@ def get_last_tag(self, level): break return get_last_tag(self, level) - + # save/clean previous run if self.run_name: self.store_result() # store new name self.run_name = name - + new_tag = False # First call for this run -> set the banner self.banner = 
banner_mod.recover_banner(self.results, level, name) @@ -6079,8 +6079,8 @@ def get_last_tag(self, level): else: # Read run_card run_card = pjoin(self.me_dir, 'Cards','run_card.dat') - self.run_card = banner_mod.RunCard(run_card) - + self.run_card = banner_mod.RunCard(run_card) + if tag: self.run_card['run_tag'] = tag new_tag = True @@ -6093,7 +6093,7 @@ def get_last_tag(self, level): self.results.update('add run %s' % name, 'all', makehtml=False) else: for tag in upgrade_tag[level]: - + if getattr(self.results[self.run_name][-1], tag): # LEVEL is already define in the last tag -> need to switch tag tag = self.get_available_tag() @@ -6103,8 +6103,8 @@ def get_last_tag(self, level): if not new_tag: # We can add the results to the current run tag = self.results[self.run_name][-1]['tag'] - self.run_card['run_tag'] = tag # ensure that run_tag is correct - + self.run_card['run_tag'] = tag # ensure that run_tag is correct + if allow_new_tag and (name in self.results and not new_tag): self.results.def_current(self.run_name) else: @@ -6113,15 +6113,15 @@ def get_last_tag(self, level): self.run_tag = self.run_card['run_tag'] return get_last_tag(self, level) - - - + + + ############################################################################ def check_nb_events(self): - """Find the number of event in the run_card, and check that this is not + """Find the number of event in the run_card, and check that this is not too large""" - + nb_event = int(self.run_card['nevents']) if nb_event > 1000000: logger.warning("Attempting to generate more than 1M events") @@ -6133,20 +6133,20 @@ def check_nb_events(self): return - - ############################################################################ + + ############################################################################ def update_random(self): """ change random number""" - + self.random += 3 if self.random > 30081*30081: # can't use too big random number raise MadGraph5Error('Random seed too large ' + str(self.random) + ' 
> 30081*30081') - if self.run_card['python_seed'] == -2: + if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.random) + random.seed(self.random) random.mg_seedset = self.random - + ############################################################################ def save_random(self): """save random number in appropirate file""" @@ -6155,14 +6155,14 @@ def save_random(self): fsock.writelines('r=%s\n' % self.random) def do_quit(self, *args, **opts): - + return common_run.CommonRunCmd.do_quit(self, *args, **opts) #return CmdExtended.do_quit(self, *args, **opts) - + ############################################################################ def treat_CKKW_matching(self): """check for ckkw""" - + lpp1 = self.run_card['lpp1'] lpp2 = self.run_card['lpp2'] e1 = self.run_card['ebeam1'] @@ -6170,19 +6170,19 @@ def treat_CKKW_matching(self): pd = self.run_card['pdlabel'] lha = self.run_card['lhaid'] xq = self.run_card['xqcut'] - translation = {'e1': e1, 'e2':e2, 'pd':pd, + translation = {'e1': e1, 'e2':e2, 'pd':pd, 'lha':lha, 'xq':xq} if lpp1 or lpp2: - # Remove ':s from pd + # Remove ':s from pd if pd.startswith("'"): pd = pd[1:] if pd.endswith("'"): - pd = pd[:-1] + pd = pd[:-1] if xq >2 or xq ==2: xq = 2 - + # find data file if pd == "lhapdf": issudfile = 'lib/issudgrid-%(e1)s-%(e2)s-%(pd)s-%(lha)s-%(xq)s.dat.gz' @@ -6192,9 +6192,9 @@ def treat_CKKW_matching(self): issudfile = pjoin(self.webbin, issudfile % translation) else: issudfile = pjoin(self.me_dir, issudfile % translation) - + logger.info('Sudakov grid file: %s' % issudfile) - + # check that filepath exists if os.path.exists(issudfile): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') @@ -6203,20 +6203,20 @@ def treat_CKKW_matching(self): msg = 'No sudakov grid file for parameter choice. Start to generate it. 
This might take a while' logger.info(msg) self.update_status('GENERATE SUDAKOV GRID', level='parton') - + for i in range(-2,6): - self.cluster.submit('%s/gensudgrid ' % self.dirbin, + self.cluster.submit('%s/gensudgrid ' % self.dirbin, argument = ['%d'%i], - cwd=self.me_dir, + cwd=self.me_dir, stdout=open(pjoin(self.me_dir, 'gensudgrid%s.log' % i),'w')) self.monitor() for i in range(-2,6): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') os.system('cat %s/gensudgrid%s.log >> %s' % (self.me_dir, path)) misc.gzip(path, stdout=issudfile) - + ############################################################################ - def create_root_file(self, input='unweighted_events.lhe', + def create_root_file(self, input='unweighted_events.lhe', output='unweighted_events.root' ): """create the LHE root file """ self.update_status('Creating root files', level='parton') @@ -6233,14 +6233,14 @@ def create_root_file(self, input='unweighted_events.lhe', totar = False torm = True input = input[:-3] - + try: - misc.call(['%s/ExRootLHEFConverter' % eradir, + misc.call(['%s/ExRootLHEFConverter' % eradir, input, output], cwd=pjoin(self.me_dir, 'Events')) except Exception: logger.warning('fail to produce Root output [problem with ExRootAnalysis]') - + if totar: if os.path.exists('%s.gz' % input): try: @@ -6251,13 +6251,13 @@ def create_root_file(self, input='unweighted_events.lhe', misc.gzip(input) if torm: os.remove(input) - + def run_syscalc(self, mode='parton', event_path=None, output=None): - """create the syscalc output""" + """create the syscalc output""" if self.run_card['use_syst'] not in self.true: return - + scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): return @@ -6265,12 +6265,12 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): if self.run_card['event_norm'] != 'sum': logger.critical('SysCalc works only when event_norm is on \'sum\'.') return - logger.info('running SysCalc on mode %s' % mode) - + logger.info('running 
SysCalc on mode %s' % mode) + # Restore the old default for SysCalc+PY6 if self.run_card['sys_matchscale']=='auto': self.run_card['sys_matchscale'] = "30 50" - + # Check that all pdfset are correctly installed lhaid = [self.run_card.get_lhapdf_id()] if '&&' in self.run_card['sys_pdf']: @@ -6285,20 +6285,20 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.debug(str(error)) logger.warning('Systematic computation requires lhapdf to run. Bypass SysCalc') return - + # Copy all the relevant PDF sets [self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid] - + to_syscalc={'sys_scalefact': self.run_card['sys_scalefact'], 'sys_alpsfact': self.run_card['sys_alpsfact'], 'sys_matchscale': self.run_card['sys_matchscale'], 'sys_scalecorrelation': self.run_card['sys_scalecorrelation'], 'sys_pdf': self.run_card['sys_pdf']} - - tag = self.run_card['run_tag'] + + tag = self.run_card['run_tag'] card = pjoin(self.me_dir, 'bin','internal', 'syscalc_card.dat') template = open(pjoin(self.me_dir, 'bin','internal', 'syscalc_template.dat')).read() - + if '&&' in to_syscalc['sys_pdf']: to_syscalc['sys_pdf'] = to_syscalc['sys_pdf'].split('#',1)[0].replace('&&',' \n ') else: @@ -6311,8 +6311,8 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): new.append(d) else: new[-1] += ' %s' % d - to_syscalc['sys_pdf'] = '\n'.join(new) - + to_syscalc['sys_pdf'] = '\n'.join(new) + if to_syscalc['sys_pdf'].lower() in ['', 'f', 'false', 'none', '.false.']: to_syscalc['sys_pdf'] = '' if to_syscalc['sys_alpsfact'].lower() in ['', 'f', 'false', 'none','.false.']: @@ -6320,17 +6320,17 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): - + # check if the scalecorrelation parameter is define: if not 'sys_scalecorrelation' in self.run_card: self.run_card['sys_scalecorrelation'] = -1 open(card,'w').write(template % self.run_card) - + if not os.path.exists(card): return False - - + + event_dir = pjoin(self.me_dir, 'Events') if not 
event_path: @@ -6353,19 +6353,19 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): raise SysCalcError('qcut value for sys_matchscale lower than qcut in pythia_card. Bypass syscalc') if float(value) < xqcut: raise SysCalcError('qcut value for sys_matchscale lower than xqcut in run_card. Bypass syscalc') - - + + event_path = pjoin(event_dir,'syst.dat') output = pjoin(event_dir, 'syscalc.dat') else: raise self.InvalidCmd('Invalid mode %s' % mode) - + if not os.path.exists(event_path): if os.path.exists(event_path+'.gz'): misc.gunzip(event_path+'.gz') else: raise SysCalcError('Events file %s does not exits' % event_path) - + self.update_status('Calculating systematics for %s level' % mode, level = mode.lower()) try: proc = misc.call([os.path.join(scdir, 'sys_calc'), @@ -6374,7 +6374,7 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): stderr = subprocess.STDOUT, cwd=event_dir) # Wait 5 s to make sure file is finished writing - time.sleep(5) + time.sleep(5) except OSError as error: logger.error('fail to run syscalc: %s. Please check that SysCalc is correctly installed.' % error) else: @@ -6382,11 +6382,11 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.warning('SysCalc Failed. Please read the associate log to see the reason. 
Did you install the associate PDF set?') elif mode == 'parton': files.mv(output, event_path) - + self.update_status('End syscalc for %s level' % mode, level = mode.lower(), makehtml=False) - - return True + + return True action_switcher = AskRun @@ -6399,23 +6399,23 @@ def ask_run_configuration(self, mode=None, args=[]): passing_cmd.append('reweight=ON') if '-M' in args or '--madspin' in args: passing_cmd.append('madspin=ON') - + switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, mode=mode, line_args=args, force=self.force, first_cmd=passing_cmd, return_instance=True) # - self.switch = switch # store the value of the switch for plugin purpose + self.switch = switch # store the value of the switch for plugin purpose if 'dynamical' in switch: mode = 'auto' - + # Now that we know in which mode we are check that all the card #exists (copy default if needed) - + cards = ['param_card.dat', 'run_card.dat'] if switch['shower'] == 'Pythia6': cards.append('pythia_card.dat') if switch['shower'] == 'Pythia8': - cards.append('pythia8_card.dat') + cards.append('pythia8_card.dat') if switch['detector'] in ['PGS','DELPHES+PGS']: cards.append('pgs_card.dat') if switch['detector'] in ['Delphes', 'DELPHES+PGS']: @@ -6438,29 +6438,29 @@ def ask_run_configuration(self, mode=None, args=[]): cards.append('rivet_card.dat') self.keep_cards(cards) - + first_cmd = cmd_switch.get_cardcmd() - + if os.path.isfile(pjoin(self.me_dir,'Cards','MadLoopParams.dat')): cards.append('MadLoopParams.dat') - + if self.force: self.check_param_card(pjoin(self.me_dir,'Cards','param_card.dat' )) return switch - + if 'dynamical' in switch and switch['dynamical']: self.ask_edit_cards(cards, plot=False, mode='auto', first_cmd=first_cmd) else: self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) return switch - + ############################################################################ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None): """Ask the 
question when launching pythia""" - + pythia_suffix = '' if pythia_version==6 else '%d'%pythia_version - + available_mode = ['0', '1'] if pythia_version==6: available_mode.append('2') @@ -6485,10 +6485,10 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = self.ask(question, '0', options) elif not mode: mode = 'auto' - + if mode.isdigit(): mode = name[mode] - + auto = False if mode == 'auto': auto = True @@ -6497,7 +6497,7 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = 'pgs' elif os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')): mode = 'delphes' - else: + else: mode = 'pythia%s'%pythia_suffix logger.info('Will run in mode %s' % mode) # Now that we know in which mode we are check that all the card @@ -6513,15 +6513,15 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) cards.append('delphes_trigger.dat') self.keep_cards(cards, ignore=['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat', 'plot_card.dat']) - + if self.force: return mode - + if not banner: banner = self.banner - + if auto: - self.ask_edit_cards(cards, from_banner=['param', 'run'], + self.ask_edit_cards(cards, from_banner=['param', 'run'], mode='auto', plot=(pythia_version==6), banner=banner ) else: @@ -6529,12 +6529,12 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) plot=(pythia_version==6), banner=banner) return mode - + #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmdShell(MadEventCmd, cmd.CmdShell): - """The command line processor of MadGraph""" + """The command line processor of MadGraph""" @@ -6548,11 +6548,11 @@ class SubProcesses(object): @classmethod def clean(cls): cls.name_to_pdg = {} - + @staticmethod def get_subP(me_dir): """return the list of Subprocesses""" - + out = [] for 
line in open(pjoin(me_dir,'SubProcesses', 'subproc.mg')): if not line: @@ -6560,9 +6560,9 @@ def get_subP(me_dir): name = line.strip() if os.path.exists(pjoin(me_dir, 'SubProcesses', name)): out.append(pjoin(me_dir, 'SubProcesses', name)) - + return out - + @staticmethod @@ -6623,9 +6623,9 @@ def get_subP_ids(path): particles = re.search("/([\d,-]+)/", line) all_ids.append([int(p) for p in particles.group(1).split(',')]) return all_ids - - -#=============================================================================== + + +#=============================================================================== class GridPackCmd(MadEventCmd): """The command for the gridpack --Those are not suppose to be use interactively--""" @@ -6639,7 +6639,7 @@ def __init__(self, me_dir = None, nb_event=0, seed=0, gran=-1, *completekey, **s self.random = seed self.random_orig = self.random self.granularity = gran - + self.options['automatic_html_opening'] = False #write the grid_card.dat on disk self.nb_event = int(nb_event) @@ -6680,7 +6680,7 @@ def write_RunWeb(self, me_dir): def write_gridcard(self, nb_event, seed, gran): """write the grid_card.dat file at appropriate location""" - + # first try to write grid_card within the gridpack. 
print("WRITE GRIDCARD", self.me_dir) if self.readonly: @@ -6689,35 +6689,35 @@ def write_gridcard(self, nb_event, seed, gran): fsock = open('grid_card.dat','w') else: fsock = open(pjoin(self.me_dir, 'Cards', 'grid_card.dat'),'w') - + gridpackcard = banner_mod.GridpackCard() gridpackcard['GridRun'] = True gridpackcard['gevents'] = nb_event gridpackcard['gseed'] = seed gridpackcard['ngran'] = gran - + gridpackcard.write(fsock) ############################################################################ def get_Pdir(self): """get the list of Pdirectory if not yet saved.""" - + if hasattr(self, "Pdirs"): if self.me_dir in self.Pdirs[0]: return self.Pdirs - + if not self.readonly: - self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) + self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] else: - self.Pdirs = [l.strip() - for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + self.Pdirs = [l.strip() + for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] + return self.Pdirs - + def prepare_local_dir(self): """create the P directory structure in the local directory""" - + if not self.readonly: os.chdir(self.me_dir) else: @@ -6726,7 +6726,7 @@ def prepare_local_dir(self): os.mkdir(p) files.cp(pjoin(self.me_dir,'SubProcesses',p,'symfact.dat'), pjoin(p, 'symfact.dat')) - + def launch(self, nb_event, seed): """ launch the generation for the grid """ @@ -6742,13 +6742,13 @@ def launch(self, nb_event, seed): if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(seed) + random.seed(seed) random.mg_seedset = seed elif self.run_card['python_seed'] > 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] # 2) Run the refine for the grid 
self.update_status('Generating Events', level=None) #misc.call([pjoin(self.me_dir,'bin','refine4grid'), @@ -6767,70 +6767,70 @@ def launch(self, nb_event, seed): self.exec_cmd('decay_events -from_cards', postcmd=False) elif self.run_card['use_syst'] and self.run_card['systematics_program'] == 'systematics': self.options['nb_core'] = 1 - self.exec_cmd('systematics %s --from_card' % + self.exec_cmd('systematics %s --from_card' % pjoin('Events', self.run_name, 'unweighted_events.lhe.gz'), postcmd=False,printcmd=False) - + def refine4grid(self, nb_event): """Special refine for gridpack run.""" self.nb_refine += 1 - + precision = nb_event self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) - + # initialize / remove lhapdf mode # self.configure_directory() # All this has been done before self.cluster_mode = 0 # force single machine # Store seed in randinit file, to be read by ranmar.f self.save_random() - + self.update_status('Refine results to %s' % precision, level=None) logger.info("Using random number seed offset = %s" % self.random) refine_opt = {'err_goal': nb_event, 'split_channels': False, - 'ngran':self.granularity, 'readonly': self.readonly} + 'ngran':self.granularity, 'readonly': self.readonly} x_improve = gen_ximprove.gen_ximprove_gridpack(self, refine_opt) x_improve.launch() # create the ajob for the refinment and run those! - self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack - - + self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack + + #bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) #print 'run combine!!!' 
#combine_runs.CombineRuns(self.me_dir) - + return #update html output Presults = sum_html.collect_result(self) cross, error = Presults.xsec, Presults.xerru self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + #self.update_status('finish refine', 'parton', makehtml=False) #devnull.close() - - - + + + return self.total_jobs = 0 - subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if + subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if P.startswith('P') and os.path.isdir(pjoin(self.me_dir,'SubProcesses', P))] devnull = open(os.devnull, 'w') for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) # clean previous run for match in misc.glob('*ajob*', Pdir): if os.path.basename(match)[:4] in ['ajob', 'wait', 'run.', 'done']: os.remove(pjoin(Pdir, match)) - + logfile = pjoin(Pdir, 'gen_ximprove.log') misc.call([pjoin(bindir, 'gen_ximprove')], @@ -6840,40 +6840,40 @@ def refine4grid(self, nb_event): if os.path.exists(pjoin(Pdir, 'ajob1')): alljobs = misc.glob('ajob*', Pdir) - nb_tot = len(alljobs) + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) if os.path.exists(pjoin(self.me_dir,'error')): self.monitor(html=True) raise MadEventError('Error detected in dir %s: %s' % \ (Pdir, open(pjoin(self.me_dir,'error')).read())) - self.monitor(run_type='All job submitted for refine number %s' % + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except 
Exception: pass - + bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) combine_runs.CombineRuns(self.me_dir) - + #update html output cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + self.update_status('finish refine', 'parton', makehtml=False) devnull.close() def do_combine_events(self, line): - """Advanced commands: Launch combine events""" + """Advanced commands: Launch combine events""" if self.readonly: outdir = 'Events' @@ -6895,17 +6895,17 @@ def do_combine_events(self, line): self.banner.add_generation_info(self.results.current['cross'], self.run_card['nevents']) if not hasattr(self, 'random_orig'): self.random_orig = 0 self.banner.change_seed(self.random_orig) - - + + if not os.path.exists(pjoin(outdir, self.run_name)): os.mkdir(pjoin(outdir, self.run_name)) - self.banner.write(pjoin(outdir, self.run_name, + self.banner.write(pjoin(outdir, self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -6915,7 +6915,7 @@ def do_combine_events(self, line): if os.path.exists(pjoin(Gdir, 'events.lhe')): result = sum_html.OneResult('') result.read_results(pjoin(Gdir, 'results.dat')) - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec')*gscalefact[Gdir], result.get('xerru')*gscalefact[Gdir], result.get('axsec')*gscalefact[Gdir] @@ -6924,7 +6924,7 @@ def do_combine_events(self, line): sum_xsec += result.get('xsec')*gscalefact[Gdir] sum_xerru.append(result.get('xerru')*gscalefact[Gdir]) sum_axsec += result.get('axsec')*gscalefact[Gdir] - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(outdir, 
self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.nb_event) @@ -6933,26 +6933,26 @@ def do_combine_events(self, line): AllEvent.add(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() - + self.banner.add_generation_info(sum_xsec, self.nb_event) nb_event = AllEvent.unweight(pjoin(outdir, self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.nb_event, log_level=logging.DEBUG, normalization=self.run_card['event_norm'], proc_charac=self.proc_characteristic) - - + + if partials: for i in range(partials): try: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) self.banner.add_generation_info(sum_xsec, nb_event) if self.run_card['bias_module'].lower() not in ['dummy', 'none']: @@ -6961,7 +6961,7 @@ def do_combine_events(self, line): class MadLoopInitializer(object): """ A container class for the various methods for initializing MadLoop. It is - placed in MadEventInterface because it is used by Madevent for loop-induced + placed in MadEventInterface because it is used by Madevent for loop-induced simulations. 
""" @staticmethod @@ -6974,7 +6974,7 @@ def make_and_run(dir_name,checkRam=False): if os.path.isfile(pjoin(dir_name,'check')): os.remove(pjoin(dir_name,'check')) os.remove(pjoin(dir_name,'check_sa.o')) - os.remove(pjoin(dir_name,'loop_matrix.o')) + os.remove(pjoin(dir_name,'loop_matrix.o')) # Now run make devnull = open(os.devnull, 'w') start=time.time() @@ -6996,7 +6996,7 @@ def make_and_run(dir_name,checkRam=False): stdout=devnull, stderr=devnull, close_fds=True) try: ptimer.execute() - #poll as often as possible; otherwise the subprocess might + #poll as often as possible; otherwise the subprocess might # "sneak" in some extra memory usage while you aren't looking # Accuracy of .2 seconds is enough for the timing. while ptimer.poll(): @@ -7028,7 +7028,7 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, If mu_r > 0.0, then the renormalization constant value will be hardcoded directly in check_sa.f, if is is 0 it will be set to Sqrt(s) and if it is < 0.0 the value in the param_card.dat is used. - If the split_orders target (i.e. the target squared coupling orders for + If the split_orders target (i.e. the target squared coupling orders for the computation) is != -1, it will be changed in check_sa.f via the subroutine CALL SET_COUPLINGORDERS_TARGET(split_orders).""" @@ -7043,12 +7043,12 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, file_path = pjoin(directories[0],'check_sa.f') if not os.path.isfile(file_path): raise MadGraph5Error('Could not find the location of check_sa.f'+\ - ' from the specified path %s.'%str(file_path)) + ' from the specified path %s.'%str(file_path)) file = open(file_path, 'r') check_sa = file.read() file.close() - + file = open(file_path, 'w') check_sa = re.sub(r"READPS = \S+\)","READPS = %s)"%('.TRUE.' 
if read_ps \ else '.FALSE.'), check_sa) @@ -7064,42 +7064,42 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, (("%.17e"%mu_r).replace('e','d')),check_sa) elif mu_r < 0.0: check_sa = re.sub(r"MU_R=SQRTS","",check_sa) - + if split_orders > 0: check_sa = re.sub(r"SET_COUPLINGORDERS_TARGET\(-?\d+\)", - "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) - + "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) + file.write(check_sa) file.close() - @staticmethod + @staticmethod def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ req_files = ['HelFilter.dat','LoopFilter.dat'], attempts = [4,15]): - """ Run the initialization of the process in 'run_dir' with success + """ Run the initialization of the process in 'run_dir' with success characterized by the creation of the files req_files in this directory. The directory containing the driving source code 'check_sa.f'. - The list attempt gives the successive number of PS points the + The list attempt gives the successive number of PS points the initialization should be tried with before calling it failed. Returns the number of PS points which were necessary for the init. Notice at least run_dir or SubProc_dir must be provided. A negative attempt number given in input means that quadprec will be forced for initialization.""" - + # If the user does not want detailed info, then set the dictionary # to a dummy one. 
if infos is None: infos={} - + if SubProc_dir is None and run_dir is None: raise MadGraph5Error('At least one of [SubProc_dir,run_dir] must'+\ ' be provided in run_initialization.') - + # If the user does not specify where is check_sa.f, then it is assumed # to be one levels above run_dir if SubProc_dir is None: SubProc_dir = os.path.abspath(pjoin(run_dir,os.pardir)) - + if run_dir is None: directories =[ dir for dir in misc.glob('P[0-9]*', SubProc_dir) if os.path.isdir(dir) ] @@ -7109,7 +7109,7 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find a valid running directory'+\ ' in %s.'%str(SubProc_dir)) - # Use the presence of the file born_matrix.f to decide if it is a + # Use the presence of the file born_matrix.f to decide if it is a # loop-induced process or not. It's not crucial, but just that because # of the dynamic adjustment of the ref scale used for deciding what are # the zero contributions, more points are neeeded for loop-induced. @@ -7128,9 +7128,9 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ %MLCardPath) else: - MLCard = banner_mod.MadLoopParam(MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) MLCard_orig = banner_mod.MadLoopParam(MLCard) - + # Make sure that LoopFilter really is needed. 
if not MLCard['UseLoopFilter']: try: @@ -7153,11 +7153,11 @@ def need_init(): proc_prefix+fname)) for fname in my_req_files]) or \ not os.path.isfile(pjoin(run_dir,'check')) or \ not os.access(pjoin(run_dir,'check'), os.X_OK) - + # Check if this is a process without born by checking the presence of the # file born_matrix.f is_loop_induced = os.path.exists(pjoin(run_dir,'born_matrix.f')) - + # For loop induced processes, always attempt quadruple precision if # double precision attempts fail and the user didn't specify himself # quadruple precision initializations attempts @@ -7166,11 +7166,11 @@ def need_init(): use_quad_prec = 1 curr_attempt = 1 - MLCard.set('WriteOutFilters',True) - + MLCard.set('WriteOutFilters',True) + while to_attempt!=[] and need_init(): curr_attempt = to_attempt.pop() - # if the attempt is a negative number it means we must force + # if the attempt is a negative number it means we must force # quadruple precision at initialization time if curr_attempt < 0: use_quad_prec = -1 @@ -7183,11 +7183,11 @@ def need_init(): MLCard.set('ZeroThres',1e-9) # Plus one because the filter are written on the next PS point after curr_attempt = abs(curr_attempt+1) - MLCard.set('MaxAttempts',curr_attempt) + MLCard.set('MaxAttempts',curr_attempt) MLCard.write(pjoin(SubProc_dir,'MadLoopParams.dat')) # initialization is performed. 
- MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, + MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, npoints = curr_attempt) compile_time, run_time, ram_usage = \ MadLoopInitializer.make_and_run(run_dir) @@ -7200,7 +7200,7 @@ def need_init(): infos['Process_compilation']==None: infos['Process_compilation'] = compile_time infos['Initialization'] = run_time - + MLCard_orig.write(pjoin(SubProc_dir,'MadLoopParams.dat')) if need_init(): return None @@ -7219,8 +7219,8 @@ def need_init(ML_resources_path, proc_prefix, r_files): MLCardPath = pjoin(proc_dir,'SubProcesses','MadLoopParams.dat') if not os.path.isfile(MLCardPath): raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ - %MLCardPath) - MLCard = banner_mod.MadLoopParam(MLCardPath) + %MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) req_files = ['HelFilter.dat','LoopFilter.dat'] # Make sure that LoopFilter really is needed. @@ -7234,9 +7234,9 @@ def need_init(ML_resources_path, proc_prefix, r_files): req_files.remove('HelFilter.dat') except ValueError: pass - + for v_folder in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)): + '%s*'%subproc_prefix)): # Make sure it is a valid MadLoop directory if not os.path.isdir(v_folder) or not os.path.isfile(\ pjoin(v_folder,'loop_matrix.f')): @@ -7247,7 +7247,7 @@ def need_init(ML_resources_path, proc_prefix, r_files): if need_init(pjoin(proc_dir,'SubProcesses','MadLoop5_resources'), proc_prefix, req_files): return True - + return False @staticmethod @@ -7265,7 +7265,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['treatCardsLoopNoInit'], cwd=pjoin(proc_dir,'Source')) else: interface.do_treatcards('all --no_MadLoopInit') - + # First make sure that IREGI and CUTTOOLS are compiled if needed if os.path.exists(pjoin(proc_dir,'Source','CutTools')): misc.compile(arg=['libcuttools'],cwd=pjoin(proc_dir,'Source')) @@ -7273,8 +7273,8 @@ def init_MadLoop(proc_dir, 
n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['libiregi'],cwd=pjoin(proc_dir,'Source')) # Then make sure DHELAS and MODEL are compiled misc.compile(arg=['libmodel'],cwd=pjoin(proc_dir,'Source')) - misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) - + misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) + # Now initialize the MadLoop outputs logger.info('Initializing MadLoop loop-induced matrix elements '+\ '(this can take some time)...') @@ -7283,7 +7283,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, if MG_options: if interface and hasattr(interface, 'cluster') and isinstance(interface.cluster, cluster.MultiCore): mcore = interface.cluster - else: + else: mcore = cluster.MultiCore(**MG_options) else: mcore = cluster.onecore @@ -7294,10 +7294,10 @@ def run_initialization_wrapper(run_dir, infos, attempts): run_dir=run_dir, infos=infos) else: n_PS = MadLoopInitializer.run_initialization( - run_dir=run_dir, infos=infos, attempts=attempts) + run_dir=run_dir, infos=infos, attempts=attempts) infos['nPS'] = n_PS return 0 - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return @@ -7307,21 +7307,21 @@ def wait_monitoring(Idle, Running, Done): init_info = {} # List all virtual folders while making sure they are valid MadLoop folders VirtualFolders = [f for f in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)) if (os.path.isdir(f) or + '%s*'%subproc_prefix)) if (os.path.isdir(f) or os.path.isfile(pjoin(f,'loop_matrix.f')))] logger.debug("Now Initializing MadLoop matrix element in %d folder%s:"%\ (len(VirtualFolders),'s' if len(VirtualFolders)>1 else '')) - logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in + logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in VirtualFolders)) for v_folder in VirtualFolders: init_info[v_folder] = {} - + # We try all multiples of n_PS from 1 to max_mult, first in DP and then # in QP before 
giving up, or use default values if n_PS is None. max_mult = 3 if n_PS is None: # Then use the default list of number of PS points to try - mcore.submit(run_initialization_wrapper, + mcore.submit(run_initialization_wrapper, [pjoin(v_folder), init_info[v_folder], None]) else: # Use specific set of PS points @@ -7348,8 +7348,8 @@ def wait_monitoring(Idle, Running, Done): '%d PS points (%s), in %.3g(compil.) + %.3g(init.) secs.'%( abs(init['nPS']),'DP' if init['nPS']>0 else 'QP', init['Process_compilation'],init['Initialization'])) - - logger.info('MadLoop initialization finished.') + + logger.info('MadLoop initialization finished.') AskforEditCard = common_run.AskforEditCard @@ -7364,16 +7364,16 @@ def wait_monitoring(Idle, Running, Done): import os import optparse - # Get the directory of the script real path (bin) - # and add it to the current PYTHONPATH + # Get the directory of the script real path (bin) + # and add it to the current PYTHONPATH #root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))) sys.path.insert(0, root_path) - class MyOptParser(optparse.OptionParser): + class MyOptParser(optparse.OptionParser): class InvalidOption(Exception): pass def error(self, msg=''): raise MyOptParser.InvalidOption(msg) - # Write out nice usage message if called with -h or --help + # Write out nice usage message if called with -h or --help usage = "usage: %prog [options] [FILE] " parser = MyOptParser(usage=usage) parser.add_option("-l", "--logging", default='INFO', @@ -7384,7 +7384,7 @@ def error(self, msg=''): help='force to launch debug mode') parser_error = '' done = False - + for i in range(len(sys.argv)-1): try: (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i]) @@ -7394,7 +7394,7 @@ def error(self, msg=''): else: args += sys.argv[len(sys.argv)-i:] if not done: - # raise correct error: + # raise correct error: try: (options, args) = parser.parse_args() except MyOptParser.InvalidOption as error: @@ -7407,8 +7407,8 @@ 
def error(self, msg=''): import subprocess import logging import logging.config - # Set logging level according to the logging level given by options - #logging.basicConfig(level=vars(logging)[options.logging]) + # Set logging level according to the logging level given by options + #logging.basicConfig(level=vars(logging)[options.logging]) import internal import internal.coloring_logging # internal.file = XXX/bin/internal/__init__.py @@ -7431,13 +7431,13 @@ def error(self, msg=''): raise pass - # Call the cmd interface main loop + # Call the cmd interface main loop try: if args: # a single command is provided if '--web' in args: - i = args.index('--web') - args.pop(i) + i = args.index('--web') + args.pop(i) cmd_line = MadEventCmd(me_dir, force_run=True) else: cmd_line = MadEventCmdShell(me_dir, force_run=True) @@ -7457,13 +7457,13 @@ def error(self, msg=''): pass - - - - - - - - + + + + + + + + diff --git a/epochX/cudacpp/gq_ttq.mad/src/cudacpp_src.mk b/epochX/cudacpp/gq_ttq.mad/src/cudacpp_src.mk index d4cc628aec..b4e446bc45 100644 --- a/epochX/cudacpp/gq_ttq.mad/src/cudacpp_src.mk +++ b/epochX/cudacpp/gq_ttq.mad/src/cudacpp_src.mk @@ -1,12 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) is used in the Subprocess and src directories - -THISMK = $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
#------------------------------------------------------------------------------- @@ -16,165 +11,24 @@ SHELL := /bin/bash #------------------------------------------------------------------------------- -#=== Configure common compiler flags for CUDA and C++ - -INCFLAGS = -I. -OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here - -#------------------------------------------------------------------------------- - #=== Configure the C++ compiler -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) $(USE_NVTX) -fPIC -Wall -Wshadow -Wextra +include ../Source/make_opts + +MG_CXXFLAGS += -fPIC -I. $(USE_NVTX) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS+= -ffast-math # see issue #117 +MG_CXXFLAGS += -ffast-math # see issue #117 endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY # Note: AR, CXX and FC are implicitly defined if not set externally # See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html ###RANLIB = ranlib -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -LDFLAGS = -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -LDFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler (note: NVCC is already exported including ccache) - -###$(info NVCC=$(NVCC)) - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ builds (note: NVCC is already exported including ccache) - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif - 
-#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for CUDA and C++ - -# Assuming uname is available, detect if architecture is PowerPC -UNAME_P := $(shell uname -p) - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # BUILD ERROR IF THIS ADDED IN SRC?! -else - ###AR=gcc-ar # needed by -flto - ###RANLIB=gcc-ranlib # needed by -flto - ###CXXFLAGS+= -flto # NB: build error from src/Makefile unless gcc-ar and gcc-ranlib are used - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -#------------------------------------------------------------------------------- - #=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the build flags appropriate to OMPFLAGS ###$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword 
registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -###$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -###$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -###$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -###$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - CXXFLAGS += -DMGONGPU_HAS_NO_CURAND -else ifneq ($(RNDGEN),hasCurand) - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- @@ -182,28 +36,18 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -###$(info Current directory is $(shell pwd)) -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIRREL = ../lib/$(BUILDDIR) - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR=1 is set)) -else - override BUILDDIR = . - override LIBDIRREL = ../lib - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) -endif -######$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) +# Build directory: +BUILDDIR := build.$(DIRTAG) +LIBDIRREL := ../lib/$(BUILDDIR) # Workaround for Mac #375 (I did not manage to fix rpath with @executable_path): use absolute paths for LIBDIR # (NB: this is quite ugly because it creates the directory if it does not exist - to avoid removing src by mistake) -UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Darwin) override LIBDIR = $(shell mkdir -p $(LIBDIRREL); cd $(LIBDIRREL); pwd) ifeq ($(wildcard $(LIBDIR)),) @@ -223,55 +67,35 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -# Target (and build options): debug -debug: OPTFLAGS = -g -O0 -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! 
-name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ #------------------------------------------------------------------------------- cxx_objects=$(addprefix $(BUILDDIR)/, Parameters_sm.o read_slha.o) -ifneq ($(NVCC),) +ifeq ($(AVX),cuda) +COMPILER=$(NVCC) cu_objects=$(addprefix $(BUILDDIR)/, Parameters_sm_cu.o) +else +COMPILER=$(CXX) +cu_objects= endif # Target (and build rules): common (src) library -ifneq ($(NVCC),) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) $(cu_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(NVCC) -shared -o $@ $(cxx_objects) $(cu_objects) $(LDFLAGS) -else -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(CXX) -shared -o $@ $(cxx_objects) $(LDFLAGS) -endif + mkdir -p $(LIBDIR) + $(COMPILER) -shared -o $@ $(cxx_objects) $(cu_objects) $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- @@ -279,19 +103,7 @@ endif .PHONY: clean clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) -else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe -endif - -cleanall: - @echo - $(MAKE) clean -f $(THISMK) - @echo - rm -rf $(LIBDIR)/build.* - rm -rf build.* + $(RM) -f ../lib/build.*/*.so + $(RM) -rf build.* #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/gq_ttq.mad/src/mgOnGpuCxtypes.h b/epochX/cudacpp/gq_ttq.mad/src/mgOnGpuCxtypes.h index ca9a9f00c0..3290d314d6 100644 --- a/epochX/cudacpp/gq_ttq.mad/src/mgOnGpuCxtypes.h +++ b/epochX/cudacpp/gq_ttq.mad/src/mgOnGpuCxtypes.h @@ -21,10 +21,14 @@ // Complex type in cuda: thrust or cucomplex or cxsmpl #ifdef __CUDACC__ #if 
defined MGONGPU_CUCXTYPE_THRUST +#ifdef __CLANG__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" // for icpx2021/clang13 (https://stackoverflow.com/a/15864661) +#endif #include +#ifdef __CLANG__ #pragma clang diagnostic pop +#endif #elif defined MGONGPU_CUCXTYPE_CUCOMPLEX #include #elif not defined MGONGPU_CUCXTYPE_CXSMPL diff --git a/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt b/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt index e73dd42300..e906078545 100644 --- a/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt +++ b/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt @@ -52,7 +52,7 @@ Note that you can still compile and run aMC@NLO with the built-in PDFs Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt import /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j.mg The import format was not given, so we guess it as command set stdout_level DEBUG @@ -61,7 +61,7 @@ set zerowidth_tchannel F define j = p INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.0053882598876953125  +DEBUG: model prefixing takes 0.005285501480102539  INFO: Restrict model sm with file models/sm/restrict_default.dat . 
DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -172,7 +172,7 @@ INFO: Process u~ u > t t~ added to mirror process u u~ > t t~ INFO: Process c~ c > t t~ added to mirror process c c~ > t t~ INFO: Process d~ d > t t~ added to mirror process d d~ > t t~ INFO: Process s~ s > t t~ added to mirror process s s~ > t t~ -5 processes with 7 diagrams generated in 0.031 s +5 processes with 7 diagrams generated in 0.030 s Total: 5 processes with 7 diagrams add process p p > t t~ j @1 INFO: Checking for minimal orders which gives processes. @@ -212,7 +212,7 @@ INFO: Process d~ g > t t~ d~ added to mirror process g d~ > t t~ d~ INFO: Process d~ d > t t~ g added to mirror process d d~ > t t~ g INFO: Process s~ g > t t~ s~ added to mirror process g s~ > t t~ s~ INFO: Process s~ s > t t~ g added to mirror process s s~ > t t~ g -13 processes with 76 diagrams generated in 0.141 s +13 processes with 76 diagrams generated in 0.135 s Total: 18 processes with 83 diagrams add process p p > t t~ j j @2 INFO: Checking for minimal orders which gives processes. @@ -378,7 +378,7 @@ INFO: Process s~ u~ > t t~ u~ s~ added to mirror process u~ s~ > t t~ u~ s~ INFO: Process s~ c~ > t t~ c~ s~ added to mirror process c~ s~ > t t~ c~ s~ INFO: Process s~ d~ > t t~ d~ s~ added to mirror process d~ s~ > t t~ d~ s~ INFO: Crossed process found for s~ s~ > t t~ s~ s~, reuse diagrams. 
-65 processes with 1119 diagrams generated in 1.922 s +65 processes with 1119 diagrams generated in 1.802 s Total: 83 processes with 1202 diagrams output madevent ../TMPOUT/CODEGEN_mad_pp_tt012j --hel_recycling=False --vector_size=32 --me_exporter=standalone_cudacpp Load PLUGIN.CUDACPP_OUTPUT @@ -497,7 +497,7 @@ INFO: Combined process d d~ > t t~ WEIGHTED<=2 with process u u~ > t t~ WEIGHTED INFO: Combined process s s~ > t t~ WEIGHTED<=2 with process u u~ > t t~ WEIGHTED<=2 INFO: Creating files in directory P2_gg_ttxgg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -514,7 +514,7 @@ INFO: Generating Feynman diagrams for Process: g g > t t~ g g WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group gg_ttxgg INFO: Creating files in directory P2_gg_ttxuux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -531,7 +531,7 @@ INFO: Generating Feynman diagrams for Process: g g > t t~ u u~ WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group gg_ttxuux INFO: Creating files in directory P2_gu_ttxgu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -548,7 +548,7 @@ INFO: Generating Feynman diagrams for Process: g u > t t~ g u WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group gu_ttxgu INFO: Creating files in directory P2_gux_ttxgux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -565,7 +565,7 @@ INFO: Generating Feynman diagrams for Process: g u~ > t t~ g u~ WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group gux_ttxgux INFO: Creating files in directory P2_uux_ttxgg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -582,7 +582,7 @@ INFO: Generating Feynman diagrams for Process: u u~ > t t~ g g WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group uux_ttxgg INFO: Creating files in directory P1_gg_ttxg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -599,7 +599,7 @@ INFO: Generating Feynman diagrams for Process: g g > t t~ g WEIGHTED<=3 @1 INFO: Finding symmetric diagrams for subprocess group gg_ttxg INFO: Creating files in directory P2_uu_ttxuu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -616,7 +616,7 @@ INFO: Generating Feynman diagrams for Process: u u > t t~ u u WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group uu_ttxuu INFO: Creating files in directory P2_uux_ttxuux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -633,7 +633,7 @@ INFO: Generating Feynman diagrams for Process: u u~ > t t~ u u~ WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group uux_ttxuux INFO: Creating files in directory P2_uxux_ttxuxux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -650,7 +650,7 @@ INFO: Generating Feynman diagrams for Process: u~ u~ > t t~ u~ u~ WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group uxux_ttxuxux INFO: Creating files in directory P2_uc_ttxuc DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -667,7 +667,7 @@ INFO: Generating Feynman diagrams for Process: u c > t t~ u c WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group uc_ttxuc INFO: Creating files in directory P2_uux_ttxccx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -684,7 +684,7 @@ INFO: Generating Feynman diagrams for Process: u u~ > t t~ c c~ WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group uux_ttxccx INFO: Creating files in directory P2_ucx_ttxucx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -701,7 +701,7 @@ INFO: Generating Feynman diagrams for Process: u c~ > t t~ u c~ WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group ucx_ttxucx INFO: Creating files in directory P2_uxcx_ttxuxcx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -718,7 +718,7 @@ INFO: Generating Feynman diagrams for Process: u~ c~ > t t~ u~ c~ WEIGHTED<=4 @2 INFO: Finding symmetric diagrams for subprocess group uxcx_ttxuxcx INFO: Creating files in directory P1_gu_ttxu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -735,7 +735,7 @@ INFO: Generating Feynman diagrams for Process: g u > t t~ u WEIGHTED<=3 @1 INFO: Finding symmetric diagrams for subprocess group gu_ttxu INFO: Creating files in directory P1_gux_ttxux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -752,7 +752,7 @@ INFO: Generating Feynman diagrams for Process: g u~ > t t~ u~ WEIGHTED<=3 @1 INFO: Finding symmetric diagrams for subprocess group gux_ttxux INFO: Creating files in directory P1_uux_ttxg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -769,7 +769,7 @@ INFO: Generating Feynman diagrams for Process: u u~ > t t~ g WEIGHTED<=3 @1 INFO: Finding symmetric diagrams for subprocess group uux_ttxg INFO: Creating files in directory P0_gg_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -786,7 +786,7 @@ INFO: Generating Feynman diagrams for Process: g g > t t~ WEIGHTED<=2 INFO: Finding symmetric diagrams for subprocess group gg_ttx INFO: Creating files in directory P0_uux_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1058]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6262]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -801,15 +801,15 @@ INFO: Created files CPPProcess.h and CPPProcess.cc in directory ./. 
DEBUG: vector, subproc_group,self.opt['vector_size'] =  32 True 32 [export_v4.py at line 1872]  INFO: Generating Feynman diagrams for Process: u u~ > t t~ WEIGHTED<=2 INFO: Finding symmetric diagrams for subprocess group uux_ttx -Generated helas calls for 18 subprocesses (372 diagrams) in 1.279 s -Wrote files for 810 helas calls in 3.193 s +Generated helas calls for 18 subprocesses (372 diagrams) in 1.274 s +Wrote files for 810 helas calls in 3.219 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 5 routines in 0.331 s +ALOHA: aloha creates 5 routines in 0.343 s DEBUG: Entering PLUGIN_ProcessExporter.convert_model (create the model) [output.py at line 202]  ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines @@ -817,7 +817,7 @@ ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 10 routines in 0.310 s +ALOHA: aloha creates 10 routines in 0.308 s VVV1 VVV1 FFV1 @@ -844,12 +844,14 @@ save configuration file to /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CO INFO: Use Fortran compiler gfortran INFO: Use c++ compiler g++ INFO: Generate web pages +DEBUG: standardise /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/Source/make_opts (fix f2py3 and sort make_opts_variables) before applying patch.common DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j; patch -p4 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.common patching file Source/genps.inc +patching file Source/make_opts patching file Source/makefile patching file SubProcesses/makefile +patching file bin/internal/banner.py patching file 
bin/internal/gen_ximprove.py -Hunk #1 succeeded at 391 (offset 6 lines). patching file bin/internal/madevent_interface.py DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P0_gg_ttx; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f @@ -1028,9 +1030,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m8.865s -user 0m8.358s -sys 0m0.477s +real 0m8.774s +user 0m8.281s +sys 0m0.464s ************************************************************ * * * W E L C O M E to * @@ -1056,7 +1058,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". Set another one in ./input/mg5_configuration.txt treatcards run quit INFO: @@ -1086,7 +1088,7 @@ INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/mg5amc INFO: load configuration from /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/Cards/me5_configuration.txt Using default text editor "vi". Set another one in ./input/mg5_configuration.txt Using default eps viewer "evince". Set another one in ./input/mg5_configuration.txt -No valid web browser found. Please set in ./input/mg5_configuration.txt +Using default web browser "firefox". 
Set another one in ./input/mg5_configuration.txt treatcards param quit INFO: diff --git a/epochX/cudacpp/pp_tt012j.mad/Source/make_opts b/epochX/cudacpp/pp_tt012j.mad/Source/make_opts index e4b87ee6ad..435bed0dc7 100644 --- a/epochX/cudacpp/pp_tt012j.mad/Source/make_opts +++ b/epochX/cudacpp/pp_tt012j.mad/Source/make_opts @@ -1,7 +1,7 @@ DEFAULT_CPP_COMPILER=g++ DEFAULT_F2PY_COMPILER=f2py3 DEFAULT_F_COMPILER=gfortran -GLOBAL_FLAG=-O3 -ffast-math -fbounds-check +GLOBAL_FLAG=-O3 -ffast-math MACFLAG= MG5AMC_VERSION=SpecifiedByMG5aMCAtRunTime PYTHIA8_PATH=NotInstalled @@ -13,31 +13,53 @@ BIASLIBDIR=../../../lib/ BIASLIBRARY=libbias.$(libext) # Rest of the makefile -ifeq ($(origin FFLAGS),undefined) -FFLAGS= -w -fPIC -#FFLAGS+= -g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none -endif -FFLAGS += $(GLOBAL_FLAG) +#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) + +# Detect O/S kernel (Linux, Darwin...) +UNAME_S := $(shell uname -s) + +# Detect architecture (x86_64, ppc64le...) +UNAME_P := $(shell uname -p) + +#------------------------------------------------------------------------------- # REMOVE MACFLAG IF NOT ON MAC OR FOR F2PY -UNAME := $(shell uname -s) ifdef f2pymode MACFLAG= else -ifneq ($(UNAME), Darwin) +ifneq ($(UNAME_S), Darwin) MACFLAG= endif endif +############################################################ +# Default compiler flags +# To change optimisation level, override these as follows: +# make CXXFLAGS="-O0 -g" +# or export them as environment variables +# For debugging Fortran, one could e.g. 
use: +# FCFLAGS="-g -fbounds-check -ffpe-trap=invalid,zero,overflow,underflow,denormal -Wall -fimplicit-none" +############################################################ +FCFLAGS ?= $(GLOBAL_FLAG) -fbounds-check +CXXFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG +NVCCFLAGS ?= $(GLOBAL_FLAG) -DNDEBUG -use_fast_math -lineinfo +LDFLAGS ?= $(STDLIB) -ifeq ($(origin CXXFLAGS),undefined) -CXXFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +ifneq ($(FFLAGS),) +# Madgraph used to use FFLAGS, so the user probably tries to change the flags specifically for madgraph: +FCFLAGS = $(FFLAGS) endif -ifeq ($(origin CFLAGS),undefined) -CFLAGS= -O $(STDLIB_FLAG) $(MACFLAG) +# Madgraph-specific flags: +WARNFLAGS = -Wall -Wshadow -Wextra +ifeq (,$(findstring -std=,$(CXXFLAGS))) +CXXSTANDARD= -std=c++17 endif +MG_FCFLAGS += -fPIC -w +MG_CXXFLAGS += -fPIC $(CXXSTANDARD) $(WARNFLAGS) $(MACFLAG) +MG_NVCCFLAGS += -fPIC $(CXXSTANDARD) --forward-unknown-to-host-compiler $(WARNFLAGS) +MG_LDFLAGS += $(MACFLAG) # Set FC unless it's defined by an environment variable ifeq ($(origin FC),default) @@ -49,45 +71,40 @@ endif # Increase the number of allowed charcters in a Fortran line ifeq ($(FC), ftn) -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler else VERS="$(shell $(FC) --version | grep ifort -i)" ifeq ($(VERS), "") -FFLAGS+= -ffixed-line-length-132 +MG_FCFLAGS += -ffixed-line-length-132 else -FFLAGS+= -extend-source # for ifort type of compiler +MG_FCFLAGS += -extend-source # for ifort type of compiler endif endif -UNAME := $(shell uname -s) -ifeq ($(origin LDFLAGS), undefined) -LDFLAGS=$(STDLIB) $(MACFLAG) -endif - # Options: dynamic, lhapdf # Option dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) dylibext=dylib else dylibext=so endif ifdef dynamic -ifeq ($(UNAME), Darwin) +ifeq ($(UNAME_S), Darwin) libext=dylib -FFLAGS+= -fno-common -LDFLAGS += -bundle +MG_FCFLAGS += -fno-common +MG_LDFLAGS += -bundle define CREATELIB $(FC) -dynamiclib 
-undefined dynamic_lookup -o $(1) $(2) endef else libext=so -FFLAGS+= -fPIC -LDFLAGS += -shared +MG_FCFLAGS += -fPIC +MG_LDFLAGS += -shared define CREATELIB -$(FC) $(FFLAGS) $(LDFLAGS) -o $(1) $(2) +$(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MG_LDFLAGS) $(LDFLAGS) -o $(1) $(2) endef endif else @@ -101,17 +118,9 @@ endif # Option lhapdf ifneq ($(lhapdf),) -CXXFLAGS += $(shell $(lhapdf) --cppflags) +MG_CXXFLAGS += $(shell $(lhapdf) --cppflags) alfas_functions=alfas_functions_lhapdf llhapdf+= $(shell $(lhapdf) --cflags --libs) -lLHAPDF -# check if we need to activate c++11 (for lhapdf6.2) -ifeq ($(origin CXX),default) -ifeq ($lhapdfversion$lhapdfsubversion,62) -CXX=$(DEFAULT_CPP_COMPILER) -std=c++11 -else -CXX=$(DEFAULT_CPP_COMPILER) -endif -endif else alfas_functions=alfas_functions llhapdf= @@ -120,4 +129,207 @@ endif # Helper function to check MG5 version define CHECK_MG5AMC_VERSION python -c 'import re; from distutils.version import StrictVersion; print StrictVersion("$(MG5AMC_VERSION)") >= StrictVersion("$(1)") if re.match("^[\d\.]+$$","$(MG5AMC_VERSION)") else True;' -endef \ No newline at end of file +endef + +#------------------------------------------------------------------------------- + +# Set special cases for non-gcc/clang builds +# AVX below gets overridden from outside in architecture-specific builds +AVX ?= none +# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] +# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] +$(info AVX=$(AVX)) +ifeq ($(UNAME_P),arm) + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) + endif +else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 + ifeq ($(AVX),none) + override AVXFLAGS = -mno-sse3 # no SIMD + else ifeq ($(AVX),sse4) + override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) + else ifeq ($(AVX),avx2) + override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for 
clang] + else ifeq ($(AVX),512y) + override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] + else ifeq ($(AVX),512z) + override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) + else + $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) + endif +endif + +# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? +MG_CXXFLAGS+= $(AVXFLAGS) + +#------------------------------------------------------------------------------- + +#=== Configure the CUDA compiler if available + +# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) +# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below +ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside + $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") + override CUDA_HOME=disabled +endif + +# If CUDA_HOME is not set, try to set it from the location of nvcc +ifndef CUDA_HOME + CUDA_HOME = $(patsubst %bin/nvcc,%,$(shell which nvcc 2>/dev/null)) + $(info CUDA_HOME="$(CUDA_HOME)") +endif + +# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists +ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) + NVCC = $(CUDA_HOME)/bin/nvcc + USE_NVTX ?=-DUSE_NVTX + # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html + # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ + # Default: use compute capability 70 (Volta architecture), and embed PTX to support later architectures, too. + # Set MADGRAPH_CUDA_ARCHITECTURE to the desired value to change the default. + # Build for multiple architectures using a space-separated list, e.g. 
MADGRAPH_CUDA_ARCHITECTURE="70 80" + MADGRAPH_CUDA_ARCHITECTURE ?= 70 + # Generate PTX for the first architecture: + CUARCHFLAGS := --generate-code arch=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)),code=compute_$(firstword $(MADGRAPH_CUDA_ARCHITECTURE)) + # Generate device code for all architectures: + CUARCHFLAGS += $(foreach arch,$(MADGRAPH_CUDA_ARCHITECTURE), --generate-code arch=compute_$(arch),code=sm_$(arch)) + + CUINC = -I$(CUDA_HOME)/include/ + CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! + MG_LDFLAGS += $(CURANDLIBFLAGS) + MG_NVCCFLAGS += $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) + +else ifeq ($(AVX),cuda) + $(error nvcc is not visible in PATH. Either add it to PATH or export CUDA_HOME to compile with cuda) + ifeq ($(AVX),cuda) + $(error Cannot compile for cuda without NVCC) + endif +endif + +# Set the host C++ compiler for nvcc via "-ccbin " +# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." is not supported) +MG_NVCCFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) + +# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) +ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) +MG_NVCCFLAGS += -allow-unsupported-compiler +endif + +#------------------------------------------------------------------------------- + +#=== Configure ccache for C++ and CUDA builds + +# Enable ccache if USECCACHE=1 +ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) + override CXX:=ccache $(CXX) +endif + +ifneq ($(NVCC),) + ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) + override NVCC:=ccache $(NVCC) + endif +endif + +#------------------------------------------------------------------------------- + +#=== Configure PowerPC-specific compiler flags for C++ and CUDA + +# PowerPC-specific CXX / CUDA compiler flags (being reviewed) +ifeq ($(UNAME_P),ppc64le) + MG_CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none 
and sse4 + MG_NVCCFLAGS+= -Xcompiler -mno-float128 + + ifeq ($(AVX),sse4) + override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) + endif +endif + +#------------------------------------------------------------------------------- +#=== Apple-specific compiler/linker options + +# Add -std=c++17 explicitly to avoid build errors on macOS +# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" +ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) +MG_CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 +endif + +ifeq ($(UNAME_S),Darwin) +STDLIB = -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) +MG_LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" +else +MG_LDFLAGS += -Xlinker --no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +endif + +#------------------------------------------------------------------------------- + +#=== C++/CUDA-specific flags for floating-point types and random generators to use + +# Set the default FPTYPE (floating point type) choice +FPTYPE ?= m + +# Set the default HELINL (inline helicities?) choice +HELINL ?= 0 + +# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice +HRDCOD ?= 0 + +# Set the default RNDGEN (random number generator) choice +ifeq ($(NVCC),) + RNDGEN ?= hasNoCurand +else + RNDGEN ?= hasCurand +endif + +# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so sub-makes don't go back to the defaults +export AVX +export AVXFLAGS +export FPTYPE +export HELINL +export HRDCOD +export RNDGEN + +#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN + +# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") +# $(info FPTYPE=$(FPTYPE)) +ifeq ($(FPTYPE),d) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE +else ifeq ($(FPTYPE),f) + COMMONFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT +else ifeq ($(FPTYPE),m) + COMMONFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT +else + $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) +endif + +# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") +# $(info HELINL=$(HELINL)) +ifeq ($(HELINL),1) + COMMONFLAGS += -DMGONGPU_INLINE_HELAMPS +else ifneq ($(HELINL),0) + $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") +# $(info HRDCOD=$(HRDCOD)) +ifeq ($(HRDCOD),1) + COMMONFLAGS += -DMGONGPU_HARDCODE_PARAM +else ifneq ($(HRDCOD),0) + $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) +endif + +# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") +$(info RNDGEN=$(RNDGEN)) +ifeq ($(RNDGEN),hasNoCurand) + override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND + override CURANDLIBFLAGS = +else ifeq ($(RNDGEN),hasCurand) + CXXFLAGSCURAND = $(CUINC) +else + $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) +endif + +MG_CXXFLAGS += $(COMMONFLAGS) +MG_NVCCFLAGS += $(COMMONFLAGS) + 
+#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/pp_tt012j.mad/Source/makefile b/epochX/cudacpp/pp_tt012j.mad/Source/makefile index 00c73099a0..407b1b753e 100644 --- a/epochX/cudacpp/pp_tt012j.mad/Source/makefile +++ b/epochX/cudacpp/pp_tt012j.mad/Source/makefile @@ -10,8 +10,8 @@ include make_opts # Source files -PROCESS= hfill.o matrix.o myamp.o -DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o +PROCESS = hfill.o matrix.o myamp.o +DSAMPLE = dsample.o ranmar.o DiscreteSampler.o StringCast.o HBOOK = hfill.o hcurve.o hbook1.o hbook2.o GENERIC = $(alfas_functions).o transpole.o invarients.o hfill.o pawgraphs.o ran1.o \ rw_events.o rw_routines.o kin_functions.o open_file.o basecode.o setrun.o \ @@ -22,7 +22,7 @@ GENSUDGRID = gensudgrid.o is-sud.o setrun_gen.o rw_routines.o open_file.o # Locally compiled libraries -LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) +LIBRARIES=$(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) # Binaries @@ -32,6 +32,9 @@ BINARIES = $(BINDIR)gen_ximprove $(BINDIR)gensudgrid $(BINDIR)combine_runs all: $(LIBRARIES) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(LIBDIR)libbias.$(libext) +%.o: %.f *.inc + $(FC) -I. $(MG_FCFLAGS) $(FCFLAGS) -c $< -o $@ + # Libraries $(LIBDIR)libdsample.$(libext): $(DSAMPLE) @@ -39,36 +42,35 @@ $(LIBDIR)libdsample.$(libext): $(DSAMPLE) $(LIBDIR)libgeneric.$(libext): $(GENERIC) $(call CREATELIB, $@, $^) $(LIBDIR)libdhelas.$(libext): DHELAS - cd DHELAS; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libpdf.$(libext): PDF make_opts - cd PDF; make; cd .. 
+ $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" ifneq (,$(filter edff chff, $(pdlabel1) $(pdlabel2))) $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make ; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" else $(LIBDIR)libgammaUPC.$(libext): PDF/gammaUPC - cd PDF/gammaUPC; make -f makefile_dummy; cd ../../ -endif + $(MAKE) -C $< -f makefile_dummy FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" +endif $(LIBDIR)libcernlib.$(libext): CERNLIB - cd CERNLIB; make; cd .. + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" # The bias library is here the dummy by default; compilation of other ones specified in the run_card will be done by MG5aMC directly. $(LIBDIR)libbias.$(libext): BIAS/dummy - cd BIAS/dummy; make; cd ../../ + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" $(LIBDIR)libmodel.$(libext): MODEL param_card.inc - cd MODEL; make + $(MAKE) -C $< FFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" FCFLAGS="$(MG_FCFLAGS) $(FCFLAGS)" param_card.inc: ../Cards/param_card.dat ../bin/madevent treatcards param + touch $@ # madevent doesn't update the time stamp if there's nothing to do -$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o - $(FC) $(LDFLAGS) -o $@ $^ -#$(BINDIR)combine_events: $(COMBINE) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) run_card.inc $(LIBDIR)libbias.$(libext) -# $(FC) -o $@ $(COMBINE) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC $(llhapdf) $(LDFLAGS) -lbias +$(BINDIR)gen_ximprove: gen_ximprove.o ranmar.o rw_routines.o open_file.o + $(FC) $(MG_LDFLAGS) $(LDFLAGS) -o $@ $^ $(BINDIR)gensudgrid: $(GENSUDGRID) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libcernlib.$(libext) - $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) -lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(LDFLAGS) + $(FC) -o $@ $(GENSUDGRID) -L$(LIBDIR) 
-lmodel -lpdf -lgammaUPC -lcernlib $(llhapdf) $(MG_LDFLAGS) $(LDFLAGS) # Dependencies @@ -85,6 +87,7 @@ rw_events.o: rw_events.f run_config.inc run_card.inc: ../Cards/run_card.dat ../bin/madevent treatcards run + touch $@ # madevent doesn't update the time stamp if there's nothing to do clean4pdf: rm -f ../lib/libpdf.$(libext) @@ -120,7 +123,7 @@ $(LIBDIR)libiregi.a: $(IREGIDIR) cd $(IREGIDIR); make ln -sf ../Source/$(IREGIDIR)libiregi.a $(LIBDIR)libiregi.a -cleanSource: +clean: $(RM) *.o $(LIBRARIES) $(BINARIES) cd PDF; make clean; cd .. cd PDF/gammaUPC; make clean; cd ../../ @@ -132,11 +135,3 @@ cleanSource: cd BIAS/ptj_bias; make clean; cd ../.. if [ -d $(CUTTOOLSDIR) ]; then cd $(CUTTOOLSDIR); make clean; cd ..; fi if [ -d $(IREGIDIR) ]; then cd $(IREGIDIR); make clean; cd ..; fi - -clean: cleanSource - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make clean; cd -; done; - -cleanavx: - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; -cleanall: cleanSource # THIS IS THE ONE - for i in `ls -d ../SubProcesses/P*`; do cd $$i; make cleanavxs; cd -; done; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/Bridge.h b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/Bridge.h index bf8b5e024d..c263f39a62 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/Bridge.h +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/Bridge.h @@ -236,7 +236,7 @@ namespace mg5amcCpu #ifdef __CUDACC__ if( ( m_nevt < s_gputhreadsmin ) || ( m_nevt % s_gputhreadsmin != 0 ) ) throw std::runtime_error( "Bridge constructor: nevt should be a multiple of " + std::to_string( s_gputhreadsmin ) ); - while( m_nevt != m_gpublocks * m_gputhreads ) + while( m_nevt != static_cast( m_gpublocks * m_gputhreads ) ) { m_gputhreads /= 2; if( m_gputhreads < s_gputhreadsmin ) @@ -266,7 +266,7 @@ namespace mg5amcCpu template void Bridge::set_gpugrid( const int gpublocks, const int gputhreads ) { - if( m_nevt != gpublocks * gputhreads ) + if( m_nevt != static_cast( gpublocks * gputhreads ) 
) throw std::runtime_error( "Bridge: gpublocks*gputhreads must equal m_nevt in set_gpugrid" ); m_gpublocks = gpublocks; m_gputhreads = gputhreads; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/MadgraphTest.h b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/MadgraphTest.h index ef40624c88..b0f2250c25 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/MadgraphTest.h +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/MadgraphTest.h @@ -199,10 +199,6 @@ class MadgraphTest : public testing::TestWithParam } }; -// Since we link both the CPU-only and GPU tests into the same executable, we prevent -// a multiply defined symbol by only compiling this in the non-CUDA phase: -#ifndef __CUDACC__ - /// Compare momenta and matrix elements. /// This uses an implementation of TestDriverBase to run a madgraph workflow, /// and compares momenta and matrix elements with a reference file. @@ -307,6 +303,4 @@ TEST_P( MadgraphTest, CompareMomentaAndME ) } } -#endif // __CUDACC__ - #endif /* MADGRAPHTEST_H_ */ diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/MatrixElementKernels.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/MatrixElementKernels.cc index 74b5239ebf..2d6f27cd5d 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/MatrixElementKernels.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/MatrixElementKernels.cc @@ -196,6 +196,9 @@ namespace mg5amcGpu void MatrixElementKernelDevice::setGrid( const int gpublocks, const int gputhreads ) { + m_gpublocks = gpublocks; + m_gputhreads = gputhreads; + if( m_gpublocks == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gpublocks must be > 0 in setGrid" ); if( m_gputhreads == 0 ) throw std::runtime_error( "MatrixElementKernelDevice: gputhreads must be > 0 in setGrid" ); if( this->nevt() != m_gpublocks * m_gputhreads ) throw std::runtime_error( "MatrixElementKernelDevice: nevt mismatch in setGrid" ); diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/check_sa.cc 
b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git 
a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer 
program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false 
); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: 
return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/counters.cc +++ 
b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/counters.cc index 3bbdec9387..cf875e8be4 100644 --- 
a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/counters.cc 
b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git 
a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/check_sa.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/check_sa.cc index 3fbf0ffbee..07b7304b17 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/check_sa.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/check_sa.cc @@ -81,7 +81,7 @@ namespace mg5amcGpu namespace mg5amcCpu #endif { - inline void FPEhandler( int sig ) + inline void FPEhandler( 
int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU)" << std::endl; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/counters.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/counters.cc index 3bbdec9387..cf875e8be4 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/counters.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/counters.cc @@ -32,6 +32,8 @@ extern "C" case +0: return "CudaCpp"; break; default: assert( false ); break; } + + return 0; } static mgOnGpu::Timer program_timer; diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk index 509307506b..a522ddb335 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk @@ -1,56 +1,41 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: use ':=' to ensure that the value of CUDACPP_MAKEFILE is not modified further down after including make_opts -#=== NB: use 'override' to ensure that the value can not be modified from the outside -override CUDACPP_MAKEFILE := $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) -###$(info CUDACPP_MAKEFILE='$(CUDACPP_MAKEFILE)') - -#=== NB: different names (e.g. 
cudacpp.mk and cudacpp_src.mk) are used in the Subprocess and src directories -override CUDACPP_SRC_MAKEFILE = cudacpp_src.mk - -#------------------------------------------------------------------------------- - -#=== Use bash in the Makefile (https://www.gnu.org/software/make/manual/html_node/Choosing-the-Shell.html) - -SHELL := /bin/bash - -#------------------------------------------------------------------------------- - -#=== Detect O/S and architecture (assuming uname is available, https://en.wikipedia.org/wiki/Uname) - -# Detect O/S kernel (Linux, Darwin...) -UNAME_S := $(shell uname -s) -###$(info UNAME_S='$(UNAME_S)') - -# Detect architecture (x86_64, ppc64le...) -UNAME_P := $(shell uname -p) -###$(info UNAME_P='$(UNAME_P)') - -#------------------------------------------------------------------------------- - -#=== Include the common MG5aMC Makefile options - -# OM: this is crucial for MG5aMC flag consistency/documentation -# AV: temporarely comment this out because it breaks cudacpp builds -ifneq ($(wildcard ../../Source/make_opts),) -include ../../Source/make_opts -endif +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
+ +# This makefile extends the Fortran makefile called "makefile" + +CUDACPP_SRC_MAKEFILE = cudacpp_src.mk + +# Self-invocation with adapted flags: +cppnative: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=native AVXFLAGS="-march=native" cppbuild +cppnone: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=none AVXFLAGS= cppbuild +cppsse4: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=sse4 AVXFLAGS=-march=nehalem cppbuild +cppavx2: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=avx2 AVXFLAGS=-march=haswell cppbuild +cppavx512y: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512y AVXFLAGS="-march=skylake-avx512 -mprefer-vector-width=256" cppbuild +cppavx512z: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=512z AVXFLAGS="-march=skylake-avx512 -DMGONGPU_PVW512" cppbuild +cuda: $(SOURCEDIR_GUARD) $(PROCESS) + $(MAKE) AVX=cuda cudabuild #------------------------------------------------------------------------------- #=== Configure common compiler flags for C++ and CUDA +# NB: The base flags are defined in the fortran "makefile" + +# Include directories +INCFLAGS = -I. -I../../src -INCFLAGS = -I. 
-OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here +MG_CXXFLAGS += $(INCFLAGS) +MG_NVCCFLAGS += $(INCFLAGS) # Dependency on src directory -MG5AMC_COMMONLIB = mg5amc_common -LIBFLAGS = -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -INCFLAGS += -I../../src +MG5AMC_COMMONLIB = mg5amc_common # Compiler-specific googletest build directory (#125 and #738) ifneq ($(shell $(CXX) --version | grep '^Intel(R) oneAPI DPC++/C++ Compiler'),) @@ -99,356 +84,42 @@ endif #------------------------------------------------------------------------------- -#=== Configure the C++ compiler - -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) -Wall -Wshadow -Wextra -ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS += -ffast-math # see issue #117 -endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY - -# Optionally add debug flags to display the full list of flags (eg on Darwin) -###CXXFLAGS+= -v - -# Note: AR, CXX and FC are implicitly defined if not set externally -# See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html - -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler - -# If CXX is not a single word (example "clang++ --gcc-toolchain...") then disable CUDA builds (issue #505) -# This is because it is impossible to pass this to "CUFLAGS += -ccbin " below -ifneq ($(words $(subst ccache ,,$(CXX))),1) # allow at most "CXX=ccache " from outside - $(warning CUDA builds are not supported for multi-word CXX "$(CXX)") - override CUDA_HOME=disabled -endif - -# If CUDA_HOME is not set, try to set it from the location of nvcc -ifndef CUDA_HOME - CUDA_HOME = $(patsubst 
%bin/nvcc,%,$(shell which nvcc 2>/dev/null)) - $(warning CUDA_HOME was not set: using "$(CUDA_HOME)") -endif - -# Set NVCC as $(CUDA_HOME)/bin/nvcc if it exists -ifneq ($(wildcard $(CUDA_HOME)/bin/nvcc),) - NVCC = $(CUDA_HOME)/bin/nvcc - USE_NVTX ?=-DUSE_NVTX - # See https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html - # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ - # Default: use compute capability 70 for V100 (CERN lxbatch, CERN itscrd, Juwels Cluster). - # Embed device code for 70, and PTX for 70+. - # Export MADGRAPH_CUDA_ARCHITECTURE (comma-separated list) to use another value or list of values (see #533). - # Examples: use 60 for P100 (Piz Daint), 80 for A100 (Juwels Booster, NVidia raplab/Curiosity). - MADGRAPH_CUDA_ARCHITECTURE ?= 70 - ###CUARCHFLAGS = -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=compute_$(MADGRAPH_CUDA_ARCHITECTURE) -gencode arch=compute_$(MADGRAPH_CUDA_ARCHITECTURE),code=sm_$(MADGRAPH_CUDA_ARCHITECTURE) # Older implementation (AV): go back to this one for multi-GPU support #533 - ###CUARCHFLAGS = --gpu-architecture=compute_$(MADGRAPH_CUDA_ARCHITECTURE) --gpu-code=sm_$(MADGRAPH_CUDA_ARCHITECTURE),compute_$(MADGRAPH_CUDA_ARCHITECTURE) # Newer implementation (SH): cannot use this as-is for multi-GPU support #533 - comma:=, - CUARCHFLAGS = $(foreach arch,$(subst $(comma), ,$(MADGRAPH_CUDA_ARCHITECTURE)),-gencode arch=compute_$(arch),code=compute_$(arch) -gencode arch=compute_$(arch),code=sm_$(arch)) - CUINC = -I$(CUDA_HOME)/include/ - ifeq ($(RNDGEN),hasNoCurand) - CURANDLIBFLAGS= - else - CURANDLIBFLAGS = -L$(CUDA_HOME)/lib64/ -lcurand # NB: -lcuda is not needed here! 
- endif - CUOPTFLAGS = -lineinfo - CUFLAGS = $(foreach opt, $(OPTFLAGS), -Xcompiler $(opt)) $(CUOPTFLAGS) $(INCFLAGS) $(CUINC) $(USE_NVTX) $(CUARCHFLAGS) -use_fast_math - ###CUFLAGS += -Xcompiler -Wall -Xcompiler -Wextra -Xcompiler -Wshadow - ###NVCC_VERSION = $(shell $(NVCC) --version | grep 'Cuda compilation tools' | cut -d' ' -f5 | cut -d, -f1) - CUFLAGS += -std=c++17 # need CUDA >= 11.2 (see #333): this is enforced in mgOnGpuConfig.h - # Without -maxrregcount: baseline throughput: 6.5E8 (16384 32 12) up to 7.3E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 160 # improves throughput: 6.9E8 (16384 32 12) up to 7.7E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 128 # improves throughput: 7.3E8 (16384 32 12) up to 7.6E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 96 # degrades throughput: 4.1E8 (16384 32 12) up to 4.5E8 (65536 128 12) - ###CUFLAGS+= --maxrregcount 64 # degrades throughput: 1.7E8 (16384 32 12) flat at 1.7E8 (65536 128 12) -else ifneq ($(origin REQUIRE_CUDA),undefined) - # If REQUIRE_CUDA is set but no cuda is found, stop here (e.g. for CI tests on GPU #443) - $(error No cuda installation found (set CUDA_HOME or make nvcc visible in PATH)) -else - # No cuda. Switch cuda compilation off and go to common random numbers in C++ - $(warning CUDA_HOME is not set or is invalid: export CUDA_HOME to compile with cuda) - override NVCC= - override USE_NVTX= - override CUINC= - override CURANDLIBFLAGS= -endif -export NVCC -export CUFLAGS - -# Set the host C++ compiler for nvcc via "-ccbin " -# (NB issue #505: this must be a single word, "clang++ --gcc-toolchain..." 
is not supported) -CUFLAGS += -ccbin $(shell which $(subst ccache ,,$(CXX))) - -# Allow newer (unsupported) C++ compilers with older versions of CUDA if ALLOW_UNSUPPORTED_COMPILER_IN_CUDA is set (#504) -ifneq ($(origin ALLOW_UNSUPPORTED_COMPILER_IN_CUDA),undefined) -CUFLAGS += -allow-unsupported-compiler -endif - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ and CUDA builds - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif -ifneq ($(NVCC),) - ifeq ($(USECCACHE)$(shell echo $(NVCC) | grep ccache),1) - override NVCC:=ccache $(NVCC) - endif -endif - -#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for C++ and CUDA - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # would increase to none=4.08-4.12E6, sse4=4.99-5.03E6! -else - ###CXXFLAGS+= -flto # also on Intel this would increase throughputs by a factor 2 to 4... - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -# PowerPC-specific CUDA compiler flags (to be reviewed!) 
-ifeq ($(UNAME_P),ppc64le) - CUFLAGS+= -Xcompiler -mno-float128 -endif - -#------------------------------------------------------------------------------- - #=== Configure defaults and check if user-defined choices exist for OMPFLAGS, AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the default OMPFLAGS choice -ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on Intel (was ok without nvcc but not ok with nvcc before #578) -else ifneq ($(shell $(CXX) --version | egrep '^(clang)'),) -override OMPFLAGS = -fopenmp -###override OMPFLAGS = # disable OpenMP MT on clang (was not ok without or with nvcc before #578) -###else ifneq ($(shell $(CXX) --version | egrep '^(Apple clang)'),) # AV for Mac (Apple clang compiler) -else ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) +OMPFLAGS ?= -fopenmp +ifeq ($(UNAME_S),Darwin) # OM for Mac (any compiler) override OMPFLAGS = # AV disable OpenMP MT on Apple clang (builds fail in the CI #578) -###override OMPFLAGS = -fopenmp # OM reenable OpenMP MT on Apple clang? 
(AV Oct 2023: this still fails in the CI) -else -override OMPFLAGS = -fopenmp # enable OpenMP MT by default on all other platforms -###override OMPFLAGS = # disable OpenMP MT on all other platforms (default before #575) -endif - -# Set the default AVX (vectorization) choice -ifeq ($(AVX),) - ifeq ($(UNAME_P),ppc64le) - ###override AVX = none - override AVX = sse4 - else ifeq ($(UNAME_P),arm) - ###override AVX = none - override AVX = sse4 - else ifeq ($(wildcard /proc/cpuinfo),) - override AVX = none - $(warning Using AVX='$(AVX)' because host SIMD features cannot be read from /proc/cpuinfo) - else ifeq ($(shell grep -m1 -c avx512vl /proc/cpuinfo)$(shell $(CXX) --version | grep ^clang),1) - override AVX = 512y - ###$(info Using AVX='$(AVX)' as no user input exists) - else - override AVX = avx2 - ifneq ($(shell grep -m1 -c avx512vl /proc/cpuinfo),1) - $(warning Using AVX='$(AVX)' because host does not support avx512vl) - else - $(warning Using AVX='$(AVX)' because this is faster than avx512vl for clang) - endif - endif -else - ###$(info Using AVX='$(AVX)' according to user input) -endif - -# Set the default FPTYPE (floating point type) choice -ifeq ($(FPTYPE),) - override FPTYPE = d -endif - -# Set the default HELINL (inline helicities?) choice -ifeq ($(HELINL),) - override HELINL = 0 -endif - -# Set the default HRDCOD (hardcode cIPD physics parameters?) 
choice -ifeq ($(HRDCOD),) - override HRDCOD = 0 -endif - -# Set the default RNDGEN (random number generator) choice -ifeq ($(RNDGEN),) - ifeq ($(NVCC),) - override RNDGEN = hasNoCurand - else ifeq ($(RNDGEN),) - override RNDGEN = hasCurand - endif endif -# Export AVX, FPTYPE, HELINL, HRDCOD, RNDGEN, OMPFLAGS so that it is not necessary to pass them to the src Makefile too -export AVX -export FPTYPE -export HELINL -export HRDCOD -export RNDGEN +# Export here, so sub makes don't fall back to the defaults: export OMPFLAGS -#------------------------------------------------------------------------------- - -#=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN - -# Set the build flags appropriate to OMPFLAGS -$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override 
AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT - CUFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS - CUFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM - CUFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - override CXXFLAGSCURAND = -DMGONGPU_HAS_NO_CURAND -else ifeq ($(RNDGEN),hasCurand) - override CXXFLAGSCURAND = -else - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- #=== Configure build directories and build lockfiles === -# Build directory "short" tag (defines target and path to the optional build directory) -# (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) - -# Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) -# (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) - -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIR = ../../lib/$(BUILDDIR) - override LIBDIRRPATH = '$$ORIGIN/../$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is set = 1)) -else - override BUILDDIR = . - override LIBDIR = ../../lib - override LIBDIRRPATH = '$$ORIGIN/$(LIBDIR)' - $(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) +# Build directory "short" tag (defines target and path to the build directory) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +CUDACPP_BUILDDIR = build.$(DIRTAG) +CUDACPP_LIBDIR := ../../lib/$(CUDACPP_BUILDDIR) +LIBDIRRPATH := '$$ORIGIN:$$ORIGIN/../$(CUDACPP_LIBDIR)' +ifneq ($(AVX),) + $(info Building CUDACPP in CUDACPP_BUILDDIR=$(CUDACPP_BUILDDIR). 
Libs in $(CUDACPP_LIBDIR)) endif -###override INCDIR = ../../include -###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) -# On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH +# On Linux, set rpath to CUDACPP_LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables or shared libraries ($ORIGIN on Linux) -# On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary +# On Darwin, building libraries with absolute paths in CUDACPP_LIBDIR makes this unnecessary ifeq ($(UNAME_S),Darwin) override CXXLIBFLAGSRPATH = override CULIBFLAGSRPATH = - override CXXLIBFLAGSRPATH2 = - override CULIBFLAGSRPATH2 = else # RPATH to cuda/cpp libs when linking executables override CXXLIBFLAGSRPATH = -Wl,-rpath,$(LIBDIRRPATH) override CULIBFLAGSRPATH = -Xlinker -rpath,$(LIBDIRRPATH) - # RPATH to common lib when linking cuda/cpp libs - override CXXLIBFLAGSRPATH2 = -Wl,-rpath,'$$ORIGIN' - override CULIBFLAGSRPATH2 = -Xlinker -rpath,'$$ORIGIN' endif # Setting LD_LIBRARY_PATH or DYLD_LIBRARY_PATH in the RUNTIME is no longer necessary (neither on Linux nor on Mac) @@ -458,107 +129,68 @@ override RUNTIME = #=== Makefile TARGETS and build rules below #=============================================================================== -cxx_main=$(BUILDDIR)/check.exe -fcxx_main=$(BUILDDIR)/fcheck.exe +cxx_main=$(CUDACPP_BUILDDIR)/check.exe +fcxx_main=$(CUDACPP_BUILDDIR)/fcheck.exe -ifneq ($(NVCC),) -cu_main=$(BUILDDIR)/gcheck.exe -fcu_main=$(BUILDDIR)/fgcheck.exe -else -cu_main= -fcu_main= -endif - -testmain=$(BUILDDIR)/runTest.exe +cu_main=$(CUDACPP_BUILDDIR)/gcheck.exe +fcu_main=$(CUDACPP_BUILDDIR)/fgcheck.exe ifneq ($(GTESTLIBS),) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) $(testmain) -else -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_main) $(cxx_main) $(fcu_main) $(fcxx_main) 
+testmain=$(CUDACPP_BUILDDIR)/runTest.exe +cutestmain=$(CUDACPP_BUILDDIR)/runTest_cuda.exe endif -# Target (and build options): debug -MAKEDEBUG= -debug: OPTFLAGS = -g -O0 -debug: CUOPTFLAGS = -G -debug: MAKEDEBUG := debug -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -$(BUILDDIR)/.build.$(TAG): - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @if [ "$(oldtagsb)" != "" ]; then echo "Cannot build for tag=$(TAG) as old builds exist for other tags:"; echo " $(oldtagsb)"; echo "Please run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @touch $(BUILDDIR)/.build.$(TAG) +cppbuild: $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(cxx_main) $(fcxx_main) $(testmain) +cudabuild: $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(cu_main) $(fcu_main) $(cutestmain) # Generic target and build rules: objects from CUDA compilation -ifneq ($(NVCC),) -$(BUILDDIR)/%.o : %.cu *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cu *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c $< -o $@ -$(BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ -endif +$(CUDACPP_BUILDDIR)/%_cu.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ # Generic target and build rules: objects from C++ compilation # (NB do not include CUINC here! 
add it only for NVTX or curand #679) -$(BUILDDIR)/%.o : %.cc *.h ../../src/*.h $(BUILDDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ +$(CUDACPP_BUILDDIR)/%.o : %.cc *.h ../../src/*.h + @mkdir -p $(CUDACPP_BUILDDIR) + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Apply special build flags only to CrossSectionKernel.cc and gCrossSectionKernel.cu (no fast math, see #117 and #516) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS := $(filter-out -ffast-math,$(CXXFLAGS)) -$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math +$(CUDACPP_BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -fno-fast-math ifneq ($(NVCC),) -$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -fno-fast-math +$(CUDACPP_BUILDDIR)/gCrossSectionKernels.o: NVCCFLAGS += -Xcompiler -fno-fast-math endif endif # Apply special build flags only to check_sa.o and gcheck_sa.o (NVTX in timermap.h, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) -$(BUILDDIR)/gcheck_sa.o: CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_CXXFLAGS += $(USE_NVTX) $(CUINC) # Apply special build flags only to check_sa and CurandRandomNumberKernel (curand headers, #679) -$(BUILDDIR)/check_sa.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gcheck_sa.o: CUFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CXXFLAGSCURAND) -$(BUILDDIR)/gCurandRandomNumberKernel.o: CUFLAGS += $(CXXFLAGSCURAND) -ifeq ($(RNDGEN),hasCurand) -$(BUILDDIR)/CurandRandomNumberKernel.o: CXXFLAGS += $(CUINC) -endif +$(CUDACPP_BUILDDIR)/check_sa.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/gcheck_sa.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) +$(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o: MG_CXXFLAGS += $(CXXFLAGSCURAND) 
+$(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o: MG_NVCCFLAGS += $(CXXFLAGSCURAND) + # Avoid "warning: builtin __has_trivial_... is deprecated; use __is_trivially_... instead" in nvcc with icx2023 (#592) ifneq ($(shell $(CXX) --version | egrep '^(Intel)'),) ifneq ($(NVCC),) -CUFLAGS += -Xcompiler -Wno-deprecated-builtins +MG_NVCCFLAGS += -Xcompiler -Wno-deprecated-builtins endif endif -# Avoid clang warning "overriding '-ffp-contract=fast' option with '-ffp-contract=on'" (#516) -# This patch does remove the warning, but I prefer to keep it disabled for the moment... -###ifneq ($(shell $(CXX) --version | egrep '^(clang|Apple clang|Intel)'),) -###$(BUILDDIR)/CrossSectionKernels.o: CXXFLAGS += -Wno-overriding-t-option -###ifneq ($(NVCC),) -###$(BUILDDIR)/gCrossSectionKernels.o: CUFLAGS += -Xcompiler -Wno-overriding-t-option -###endif -###endif - #### Apply special build flags only to CPPProcess.cc (-flto) ###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += -flto -#### Apply special build flags only to CPPProcess.cc (AVXFLAGS) -###$(BUILDDIR)/CPPProcess.o: CXXFLAGS += $(AVXFLAGS) - #------------------------------------------------------------------------------- -# Target (and build rules): common (src) library -commonlib : $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc $(BUILDDIR)/.build.$(TAG) - $(MAKE) -C ../../src $(MAKEDEBUG) -f $(CUDACPP_SRC_MAKEFILE) +$(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so: ../../src/*.h ../../src/*.cc + $(MAKE) AVX=$(AVX) AVXFLAGS="$(AVXFLAGS)" -C ../../src -f $(CUDACPP_SRC_MAKEFILE) #------------------------------------------------------------------------------- @@ -566,162 +198,123 @@ processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') ###$(info processid_short=$(processid_short)) MG5AMC_CXXLIB = mg5amc_$(processid_short)_cpp -cxx_objects_lib=$(BUILDDIR)/CPPProcess.o $(BUILDDIR)/MatrixElementKernels.o $(BUILDDIR)/BridgeKernels.o $(BUILDDIR)/CrossSectionKernels.o 
-cxx_objects_exe=$(BUILDDIR)/CommonRandomNumberKernel.o $(BUILDDIR)/RamboSamplingKernels.o +cxx_objects_lib=$(CUDACPP_BUILDDIR)/CPPProcess.o $(CUDACPP_BUILDDIR)/MatrixElementKernels.o $(CUDACPP_BUILDDIR)/BridgeKernels.o $(CUDACPP_BUILDDIR)/CrossSectionKernels.o +cxx_objects_exe=$(CUDACPP_BUILDDIR)/CommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/RamboSamplingKernels.o -ifneq ($(NVCC),) MG5AMC_CULIB = mg5amc_$(processid_short)_cuda -cu_objects_lib=$(BUILDDIR)/gCPPProcess.o $(BUILDDIR)/gMatrixElementKernels.o $(BUILDDIR)/gBridgeKernels.o $(BUILDDIR)/gCrossSectionKernels.o -cu_objects_exe=$(BUILDDIR)/gCommonRandomNumberKernel.o $(BUILDDIR)/gRamboSamplingKernels.o -endif +cu_objects_lib=$(CUDACPP_BUILDDIR)/gCPPProcess.o $(CUDACPP_BUILDDIR)/gMatrixElementKernels.o $(CUDACPP_BUILDDIR)/gBridgeKernels.o $(CUDACPP_BUILDDIR)/gCrossSectionKernels.o +cu_objects_exe=$(CUDACPP_BUILDDIR)/gCommonRandomNumberKernel.o $(CUDACPP_BUILDDIR)/gRamboSamplingKernels.o # Target (and build rules): C++ and CUDA shared libraries -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(BUILDDIR)/fbridge.o -$(LIBDIR)/lib$(MG5AMC_CXXLIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) - $(CXX) -shared -o $@ $(cxx_objects_lib) $(CXXLIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) - -ifneq ($(NVCC),) -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(BUILDDIR)/fbridge_cu.o -$(LIBDIR)/lib$(MG5AMC_CULIB).so: $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) - $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH2) -L$(LIBDIR) -l$(MG5AMC_COMMONLIB) -endif +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: cxx_objects_lib += $(CUDACPP_BUILDDIR)/fbridge.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) + $(CXX) -shared -o $@ 
$(cxx_objects_lib) $(CXXLIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) $(MG_LDFLAGS) $(LDFLAGS) -#------------------------------------------------------------------------------- - -# Target (and build rules): Fortran include files -###$(INCDIR)/%.inc : ../%.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### \cp $< $@ +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: cu_objects_lib += $(CUDACPP_BUILDDIR)/fbridge_cu.o +$(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so: $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) + $(NVCC) --shared -o $@ $(cu_objects_lib) $(CULIBFLAGSRPATH) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) #------------------------------------------------------------------------------- # Target (and build rules): C++ and CUDA standalone executables -$(cxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cxx_main): $(BUILDDIR)/check_sa.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o - $(CXX) -o $@ $(BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) $(BUILDDIR)/CurandRandomNumberKernel.o $(CURANDLIBFLAGS) -ifneq ($(NVCC),) +$(cxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(cxx_main): $(CUDACPP_BUILDDIR)/check_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/check_sa.o $(OMPFLAGS) -ldl -pthread $(cxx_objects_exe) $(CUDACPP_BUILDDIR)/CurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) + ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(cu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(cu_main): LIBFLAGS 
+= -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(cu_main): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(cu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(cu_main): $(BUILDDIR)/gcheck_sa.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o - $(NVCC) -o $@ $(BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(LIBFLAGS) -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) $(BUILDDIR)/gCurandRandomNumberKernel.o $(CURANDLIBFLAGS) +$(cu_main): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif +$(cu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(cu_main): $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/gcheck_sa.o $(CUARCHFLAGS) $(cu_objects_exe) $(CUDACPP_BUILDDIR)/gCurandRandomNumberKernel.o $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- - -# Generic target and build rules: objects from Fortran compilation -$(BUILDDIR)/%.o : %.f *.inc - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(FC) -I. -c $< -o $@ - -# Generic target and build rules: objects from Fortran compilation -###$(BUILDDIR)/%.o : %.f *.inc -### @if [ ! -d $(INCDIR) ]; then echo "mkdir -p $(INCDIR)"; mkdir -p $(INCDIR); fi -### @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi -### $(FC) -I. -I$(INCDIR) -c $< -o $@ - -# Target (and build rules): Fortran standalone executables -###$(BUILDDIR)/fcheck_sa.o : $(INCDIR)/fbridge.inc +# Check executables: ifeq ($(UNAME_S),Darwin) -$(fcxx_main): LIBFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 +$(fcxx_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif -$(fcxx_main): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcxx_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler.o $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) - $(CXX) -o $@ $(BUILDDIR)/fcheck_sa.o $(OMPFLAGS) $(BUILDDIR)/fsampler.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CXXLIB) $(cxx_objects_exe) +$(fcxx_main): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcxx_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) # Process-specific library +$(fcxx_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so $(cxx_objects_exe) + $(CXX) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(cxx_objects_exe) $(OMPFLAGS) $(CUDACPP_BUILDDIR)/fsampler.o -lgfortran -L$(CUDACPP_LIBDIR) $(MG_LDFLAGS) $(LDFLAGS) -ifneq ($(NVCC),) ifneq ($(shell $(CXX) --version | grep ^Intel),) -$(fcu_main): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(fcu_main): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(fcu_main): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(fcu_main): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') endif ifeq ($(UNAME_S),Darwin) -$(fcu_main): LIBFLAGS += -L$(shell dirname 
$(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 -endif -$(fcu_main): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(fcu_main): $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) - $(NVCC) -o $@ $(BUILDDIR)/fcheck_sa.o $(BUILDDIR)/fsampler_cu.o $(LIBFLAGS) -lgfortran -L$(LIBDIR) -l$(MG5AMC_CULIB) $(cu_objects_exe) +$(fcu_main): MG_LDFLAGS += -L$(shell dirname $(shell $(FC) --print-file-name libgfortran.dylib)) # add path to libgfortran on Mac #375 endif +$(fcu_main): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(fcu_main): MG_LDFLAGS += -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) # Process-specific library +$(fcu_main): $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so $(cu_objects_exe) + $(NVCC) -o $@ $(CUDACPP_BUILDDIR)/fcheck_sa.o $(CUDACPP_BUILDDIR)/fsampler_cu.o $(cu_objects_exe) -lgfortran $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- # Target (and build rules): test objects and test executable -$(BUILDDIR)/testxxx.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testxxx_cu.o: INCFLAGS += $(GTESTINC) -$(BUILDDIR)/testxxx_cu.o: testxxx_cc_ref.txt -$(testmain): $(BUILDDIR)/testxxx_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -endif +$(testmain) $(cutestmain): $(GTESTLIBS) +$(testmain) $(cutestmain): INCFLAGS += $(GTESTINC) +$(testmain) $(cutestmain): MG_LDFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main -$(BUILDDIR)/testmisc.o: $(GTESTLIBS) 
-$(BUILDDIR)/testmisc.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(CUDACPP_BUILDDIR)/testxxx.o $(CUDACPP_BUILDDIR)/testxxx_cu.o: $(GTESTLIBS) testxxx_cc_ref.txt +$(testmain): $(CUDACPP_BUILDDIR)/testxxx.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testxxx.o # Comment out this line to skip the C++ test of xxx functions +$(cutestmain): $(CUDACPP_BUILDDIR)/testxxx_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testxxx_cu.o # Comment out this line to skip the CUDA test of xxx functions -ifneq ($(NVCC),) -$(BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) -$(BUILDDIR)/testmisc_cu.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/testmisc_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests -endif -$(BUILDDIR)/runTest.o: $(GTESTLIBS) -$(BUILDDIR)/runTest.o: INCFLAGS += $(GTESTINC) -$(testmain): $(BUILDDIR)/runTest.o -$(testmain): cxx_objects_exe += $(BUILDDIR)/runTest.o +$(CUDACPP_BUILDDIR)/testmisc.o $(CUDACPP_BUILDDIR)/testmisc_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/testmisc.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/testmisc.o # Comment out this line to skip the C++ miscellaneous tests +$(cutestmain): $(CUDACPP_BUILDDIR)/testmisc_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/testmisc_cu.o # Comment out this line to skip the CUDA miscellaneous tests + + +$(CUDACPP_BUILDDIR)/runTest.o $(CUDACPP_BUILDDIR)/runTest_cu.o: $(GTESTLIBS) +$(testmain): $(CUDACPP_BUILDDIR)/runTest.o +$(testmain): cxx_objects_exe += $(CUDACPP_BUILDDIR)/runTest.o +$(cutestmain): $(CUDACPP_BUILDDIR)/runTest_cu.o +$(cutestmain): cu_objects_exe += $(CUDACPP_BUILDDIR)/runTest_cu.o + -ifneq ($(NVCC),) -$(BUILDDIR)/runTest_cu.o: $(GTESTLIBS) -$(BUILDDIR)/runTest_cu.o: INCFLAGS += $(GTESTINC) ifneq ($(shell $(CXX) --version | grep ^Intel),) 
-$(testmain): LIBFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') -$(testmain): LIBFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') +$(cutestmain): MG_LDFLAGS += -lintlc # compile with icpx and link with nvcc (undefined reference to `_intel_fast_memcpy') +$(cutestmain): MG_LDFLAGS += -lsvml # compile with icpx and link with nvcc (undefined reference to `__svml_cos4_l9') else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 -$(testmain): LIBFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc -endif -$(testmain): $(BUILDDIR)/runTest_cu.o -$(testmain): cu_objects_exe += $(BUILDDIR)/runTest_cu.o +$(cutestmain): MG_LDFLAGS += -L$(patsubst %bin/nvc++,%lib,$(subst ccache ,,$(CXX))) -lnvhpcatm -lnvcpumath -lnvc endif -$(testmain): $(GTESTLIBS) -$(testmain): INCFLAGS += $(GTESTINC) -$(testmain): LIBFLAGS += -L$(GTESTLIBDIR) -lgtest -lgtest_main ifneq ($(OMPFLAGS),) ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -$(testmain): LIBFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) +$(testmain): MG_LDFLAGS += -liomp5 # see #578 (not '-qopenmp -static-intel' as in https://stackoverflow.com/questions/45909648) else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -$(testmain): LIBFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 +$(testmain): MG_LDFLAGS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 ###else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) ###$(testmain): LIBFLAGS += ???? 
# OMP is not supported yet by cudacpp for Apple clang (see #578 and #604) else -$(testmain): LIBFLAGS += -lgomp +$(testmain): MG_LDFLAGS += -lgomp endif endif -ifeq ($(NVCC),) # link only runTest.o -$(testmain): LIBFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) - $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -ldl -pthread $(LIBFLAGS) -else # link both runTest.o and runTest_cu.o -$(testmain): LIBFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH -$(testmain): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) - $(NVCC) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) $(cu_objects_lib) $(cu_objects_exe) -ldl $(LIBFLAGS) -lcuda -endif +$(testmain): MG_LDFLAGS += $(CXXLIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(testmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cxx_objects_lib) $(cxx_objects_exe) $(GTESTLIBS) + $(CXX) -o $@ $(cxx_objects_lib) $(cxx_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -pthread $(MG_LDFLAGS) $(LDFLAGS) + +$(cutestmain): MG_LDFLAGS += $(CULIBFLAGSRPATH) # avoid the need for LD_LIBRARY_PATH +$(cutestmain): $(CUDACPP_LIBDIR)/lib$(MG5AMC_COMMONLIB).so $(cu_objects_lib) $(cu_objects_exe) $(GTESTLIBS) + $(NVCC) -o $@ $(cu_objects_lib) $(cu_objects_exe) -L$(CUDACPP_LIBDIR) -l$(MG5AMC_COMMONLIB) -ldl -lcuda $(MG_LDFLAGS) $(LDFLAGS) # Use target gtestlibs to build only googletest ifneq ($(GTESTLIBS),) @@ -731,72 +324,15 @@ endif # Use flock (Linux only, no Mac) to allow 'make -j' if googletest has not yet been downloaded https://stackoverflow.com/a/32666215 $(GTESTLIBS): ifneq ($(shell which flock 2>/dev/null),) - @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - flock $(BUILDDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) + flock $(TESTDIR)/.make_test.lock $(MAKE) -C $(TESTDIR) else if [ -d $(TESTDIR) ]; then $(MAKE) -C $(TESTDIR); fi endif #------------------------------------------------------------------------------- -# Target: build all targets in all AVX modes (each AVX mode in a separate build directory) -# Split the avxall target into five separate targets to allow parallel 'make -j avxall' builds -# (Hack: add a fbridge.inc dependency to avxall, to ensure it is only copied once for all AVX modes) -avxnone: - @echo - $(MAKE) USEBUILDDIR=1 AVX=none -f $(CUDACPP_MAKEFILE) - -avxsse4: - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 -f $(CUDACPP_MAKEFILE) - -avxavx2: - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 -f $(CUDACPP_MAKEFILE) - -avx512y: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y -f $(CUDACPP_MAKEFILE) - -avx512z: - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z -f $(CUDACPP_MAKEFILE) - -ifeq ($(UNAME_P),ppc64le) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else ifeq ($(UNAME_P),arm) -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 -avxall: avxnone avxsse4 -else -###avxall: $(INCDIR)/fbridge.inc avxnone avxsse4 avxavx2 avx512y avx512z -avxall: avxnone avxsse4 avxavx2 avx512y avx512z -endif - -#------------------------------------------------------------------------------- - -# Target: clean the builds -.PHONY: clean - -clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(BUILDDIR) -else - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe - rm -f $(LIBDIR)/lib$(MG5AMC_CXXLIB).so $(LIBDIR)/lib$(MG5AMC_CULIB).so -endif - $(MAKE) -C ../../src clean -f $(CUDACPP_SRC_MAKEFILE) -### rm -rf $(INCDIR) - -cleanall: - @echo - $(MAKE) USEBUILDDIR=0 clean -f $(CUDACPP_MAKEFILE) - @echo - $(MAKE) USEBUILDDIR=0 -C ../../src cleanall -f $(CUDACPP_SRC_MAKEFILE) - rm -rf build.* - # Target: clean the builds as well as the gtest installation(s) 
-distclean: cleanall +distclean: clean cleansrc ifneq ($(wildcard $(TESTDIRCOMMON)),) $(MAKE) -C $(TESTDIRCOMMON) clean endif @@ -848,50 +384,55 @@ endif #------------------------------------------------------------------------------- -# Target: check (run the C++ test executable) +# Target: check/gcheck (run the C++ test executable) # [NB THIS IS WHAT IS USED IN THE GITHUB CI!] -ifneq ($(NVCC),) -check: runTest cmpFcheck cmpFGcheck -else check: runTest cmpFcheck -endif +gcheck: + $(MAKE) AVX=cuda runTest cmpFGcheck # Target: runTest (run the C++ test executable runTest.exe) -runTest: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/runTest.exe +ifneq ($(AVX),cuda) +runTest: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest.exe +else +runTest: cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/runTest_cuda.exe +endif + # Target: runCheck (run the C++ standalone executable check.exe, with a small number of events) -runCheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/check.exe -p 2 32 2 +runCheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe -p 2 32 2 # Target: runGcheck (run the CUDA standalone executable gcheck.exe, with a small number of events) -runGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/gcheck.exe -p 2 32 2 +runGcheck: AVX=cuda +runGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe -p 2 32 2 # Target: runFcheck (run the Fortran standalone executable - with C++ MEs - fcheck.exe, with a small number of events) -runFcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 +runFcheck: cppbuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 # Target: runFGcheck (run the Fortran standalone executable - with CUDA MEs - fgcheck.exe, with a small number of events) -runFGcheck: all.$(TAG) - $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 +runFGcheck: AVX=cuda +runFGcheck: + $(MAKE) AVX=cuda cudabuild + $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 # Target: cmpFcheck (compare ME results from the C++ and Fortran with C++ MEs standalone executables, with 
a small number of events) -cmpFcheck: all.$(TAG) +cmpFcheck: cppbuild @echo - @echo "$(BUILDDIR)/check.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/check.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/C++) = $${me1}"; echo "Avg ME (F77/C++) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/C++) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/C++) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi # Target: cmpFGcheck (compare ME results from the CUDA and Fortran with CUDA MEs standalone executables, with a small number of events) -cmpFGcheck: all.$(TAG) +cmpFGcheck: AVX=cuda +cmpFGcheck: + $(MAKE) AVX=cuda cudabuild @echo - @echo "$(BUILDDIR)/gcheck.exe --common -p 2 32 2" - @echo "$(BUILDDIR)/fgcheck.exe 2 32 2" - @me1=$(shell $(RUNTIME) $(BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; elif [ "$${me2}" == "" ]; then echo "ERROR! Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi + @echo "$(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2" + @echo "$(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2" + @me1=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/gcheck.exe --common -p 2 32 2 | grep MeanMatrix | awk '{print $$4}'); me2=$(shell $(RUNTIME) $(CUDACPP_BUILDDIR)/fgcheck.exe 2 32 2 | grep Average | awk '{print $$4}'); echo "Avg ME (C++/CUDA) = $${me1}"; echo "Avg ME (F77/CUDA) = $${me2}"; if [ "$${me2}" == "NaN" ]; then echo "ERROR! Fortran calculation (F77/CUDA) returned NaN"; elif [ "$${me2}" == "" ]; then echo "ERROR! 
Fortran calculation (F77/CUDA) crashed"; else python3 -c "me1=$${me1}; me2=$${me2}; reldif=abs((me2-me1)/me1); print('Relative difference =', reldif); ok = reldif <= 2E-4; print ( '%s (relative difference %s 2E-4)' % ( ('OK','<=') if ok else ('ERROR','>') ) ); import sys; sys.exit(0 if ok else 1)"; fi -# Target: memcheck (run the CUDA standalone executable gcheck.exe with a small number of events through cuda-memcheck) -memcheck: all.$(TAG) - $(RUNTIME) $(CUDA_HOME)/bin/cuda-memcheck --check-api-memory-access yes --check-deprecated-instr yes --check-device-heap yes --demangle full --language c --leak-check full --racecheck-report all --report-api-errors all --show-backtrace yes --tool memcheck --track-unused-memory yes $(BUILDDIR)/gcheck.exe -p 2 32 2 - -#------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/makefile b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/makefile index d572486c2e..b69917ee1f 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/makefile +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/makefile @@ -1,27 +1,30 @@ SHELL := /bin/bash -include ../../Source/make_opts -FFLAGS+= -w +# Include general setup +OPTIONS_MAKEFILE := ../../Source/make_opts +include $(OPTIONS_MAKEFILE) # Enable the C preprocessor https://gcc.gnu.org/onlinedocs/gfortran/Preprocessing-Options.html -FFLAGS+= -cpp +MG_FCFLAGS += -cpp +MG_CXXFLAGS += -I. -# Compile counters with -O3 as in the cudacpp makefile (avoid being "unfair" to Fortran #740) -CXXFLAGS = -O3 -Wall -Wshadow -Wextra +all: help cppnative + +# Target if user does not specify target +help: + $(info No target specified.) 
+ $(info Viable targets are 'cppnative' (default), 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z' and 'cuda') + $(info Or 'cppall' for all C++ targets) + $(info Or 'ALL' for all C++ and cuda targets) -# Add -std=c++17 explicitly to avoid build errors on macOS -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -std=c++17 -mmacosx-version-min=11.3 -endif -# Enable ccache if USECCACHE=1 +# Enable ccache for C++ if USECCACHE=1 (do not enable it for Fortran since it is not supported for Fortran) ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) override CXX:=ccache $(CXX) endif -ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) - override FC:=ccache $(FC) -endif +###ifeq ($(USECCACHE)$(shell echo $(FC) | grep ccache),1) +### override FC:=ccache $(FC) +###endif # Load additional dependencies of the bias module, if present ifeq (,$(wildcard ../bias_dependencies)) @@ -46,34 +49,25 @@ else MADLOOP_LIB = endif -LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias - -processid_short=$(shell basename $(CURDIR) | awk -F_ '{print $$(NF-1)"_"$$NF}') -CUDACPP_MAKEFILE=cudacpp.mk -# NB1 Using ":=" below instead of "=" is much faster (it only runs the subprocess once instead of many times) -# NB2 Use '|&' in CUDACPP_BUILDDIR to avoid confusing errors about googletest #507 -# NB3 Do not add a comment inlined "CUDACPP_BUILDDIR=$(shell ...) # comment" as otherwise a trailing space is included... -# NB4 The variables relevant to the cudacpp Makefile must be explicitly passed to $(shell...) 
-CUDACPP_MAKEENV:=$(shell echo '$(.VARIABLES)' | tr " " "\n" | egrep "(USEBUILDDIR|AVX|FPTYPE|HELINL|HRDCOD)") -###$(info CUDACPP_MAKEENV=$(CUDACPP_MAKEENV)) -###$(info $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))")) -CUDACPP_BUILDDIR:=$(shell $(MAKE) $(foreach v,$(CUDACPP_MAKEENV),$(v)="$($(v))") -f $(CUDACPP_MAKEFILE) -pn 2>&1 | awk '/Building/{print $$3}' | sed s/BUILDDIR=//) -ifeq ($(CUDACPP_BUILDDIR),) -$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) -else -$(info CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)') -endif -CUDACPP_COMMONLIB=mg5amc_common -CUDACPP_CXXLIB=mg5amc_$(processid_short)_cpp -CUDACPP_CULIB=mg5amc_$(processid_short)_cuda - +LINKLIBS = $(LINK_MADLOOP_LIB) $(LINK_LOOP_LIBS) -L$(LIBDIR) -ldhelas -ldsample -lmodel -lgeneric -lpdf -lcernlib $(llhapdf) -lbias LIBS = $(LIBDIR)libbias.$(libext) $(LIBDIR)libdhelas.$(libext) $(LIBDIR)libdsample.$(libext) $(LIBDIR)libgeneric.$(libext) $(LIBDIR)libpdf.$(libext) $(LIBDIR)libgammaUPC.$(libext) $(LIBDIR)libmodel.$(libext) $(LIBDIR)libcernlib.$(libext) $(MADLOOP_LIB) $(LOOP_LIBS) ifneq ("$(wildcard ../../Source/RUNNING)","") LINKLIBS += -lrunning - LIBS += $(LIBDIR)librunning.$(libext) + LIBS += $(LIBDIR)librunning.$(libext) endif +SOURCEDIR_GUARD:=../../Source/.timestamp_guard +# We use $(SOURCEDIR_GUARD) to figure out if Source is out of date. The Source makefile doesn't correctly +# update all files, so we need a proxy that is updated every time we run "$(MAKE) -C ../../Source". 
+$(SOURCEDIR_GUARD) ../../Source/discretesampler.mod &: ../../Source/*.f ../../Cards/param_card.dat ../../Cards/run_card.dat +ifneq ($(shell which flock 2>/dev/null),) + flock ../../Source/.lock -c "$(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD)" +else + $(MAKE) -C ../../Source; touch $(SOURCEDIR_GUARD) +endif + +$(LIBS): $(SOURCEDIR_GUARD) # Source files @@ -91,82 +85,83 @@ PROCESS= myamp.o genps.o unwgt.o setcuts.o get_color.o \ DSIG=driver.o $(patsubst %.f, %.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) DSIG_cudacpp=driver_cudacpp.o $(patsubst %.f, %_cudacpp.o, $(filter-out auto_dsig.f, $(wildcard auto_dsig*.f))) -SYMMETRY = symmetry.o idenparts.o +SYMMETRY = symmetry.o idenparts.o -# Binaries +# cudacpp targets: +CUDACPP_MAKEFILE := cudacpp.mk +ifneq (,$(wildcard $(CUDACPP_MAKEFILE))) +include $(CUDACPP_MAKEFILE) +endif -ifeq ($(UNAME),Darwin) -LDFLAGS += -lc++ # avoid 'Undefined symbols' for chrono::steady_clock on macOS (checked with otool -L libmg5amc_gg_ttx_cpp.so) -LDFLAGS += -mmacosx-version-min=11.3 # avoid "ld: warning: object file was built for newer macOS version than being linked" -else -LDFLAGS += -Wl,--no-relax # avoid 'failed to convert GOTPCREL relocation' error #458 (not supported on macOS) +ifeq ($(CUDACPP_BUILDDIR),) +$(error CUDACPP_BUILDDIR='$(CUDACPP_BUILDDIR)' should not be empty!) 
endif +CUDACPP_COMMONLIB=mg5amc_common +CUDACPP_CXXLIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CXXLIB).so +CUDACPP_CULIB := $(CUDACPP_BUILDDIR)/lib$(MG5AMC_CULIB).so -all: $(PROG)_fortran $(CUDACPP_BUILDDIR)/$(PROG)_cpp # also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (#503) +# Set up OpenMP if supported +OMPFLAGS ?= -fopenmp ifneq ($(shell $(CXX) --version | egrep '^Intel'),) -override OMPFLAGS = -fopenmp LINKLIBS += -liomp5 # see #578 LINKLIBS += -lintlc # undefined reference to `_intel_fast_memcpy' else ifneq ($(shell $(CXX) --version | egrep '^clang'),) -override OMPFLAGS = -fopenmp $(CUDACPP_BUILDDIR)/$(PROG)_cpp: LINKLIBS += -L $(shell dirname $(shell $(CXX) -print-file-name=libc++.so)) -lomp # see #604 else ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -override OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang -else -override OMPFLAGS = -fopenmp +OMPFLAGS = # OMP is not supported yet by cudacpp for Apple clang endif -$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o - $(FC) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) - -$(LIBS): .libs -.libs: ../../Cards/param_card.dat ../../Cards/run_card.dat - cd ../../Source; make - touch $@ +# Binaries -$(CUDACPP_BUILDDIR)/.cudacpplibs: - $(MAKE) -f $(CUDACPP_MAKEFILE) - touch $@ +$(PROG)_fortran: $(PROCESS) $(DSIG) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -o $(PROG)_fortran $(PROCESS) $(DSIG) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o $(LDFLAGS) # On Linux, set rpath to LIBDIR to make it unnecessary to use LD_LIBRARY_PATH # Use relative paths with respect to the executables ($ORIGIN on Linux) # On Darwin, building libraries with absolute paths in LIBDIR makes this unnecessary -ifeq ($(UNAME_S),Darwin) - override LIBFLAGSRPATH = -else ifeq ($(USEBUILDDIR),1) 
- override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' -else - override LIBFLAGSRPATH = -Wl,-rpath,'$$ORIGIN/$(LIBDIR)' +ifneq ($(UNAME_S),Darwin) + LIBFLAGSRPATH := -Wl,-rpath,'$$ORIGIN:$$ORIGIN/../$(LIBDIR)/$(CUDACPP_BUILDDIR)' endif -.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link +.PHONY: madevent_fortran_link madevent_cuda_link madevent_cpp_link madevent_cppnone_link madevent_cppsse4_link madevent_cppavx2_link madevent_cpp512y_link madevent_cpp512z_link clean cleanall cleansrc madevent_fortran_link: $(PROG)_fortran rm -f $(PROG) ln -s $(PROG)_fortran $(PROG) -madevent_cpp_link: $(CUDACPP_BUILDDIR)/$(PROG)_cpp - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) +madevent_cppnone_link: AVX=none +madevent_cppnone_link: cppnone + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -madevent_cuda_link: $(CUDACPP_BUILDDIR)/$(PROG)_cuda - rm -f $(PROG) - ln -s $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) +madevent_cppavx2_link: AVX=avx2 +madevent_cppavx2_link: cppavx2 + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512y_link: AVX=512y +madevent_cpp512y_link: cppavx512y + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) + +madevent_cpp512z_link: AVX=512z +madevent_cpp512z_link: cppavx512z + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROG) -# Building $(PROG)_cpp also builds $(PROG)_cuda if $(CUDACPP_CULIB) exists (improved patch for cpp-only builds #503) -$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(LIBS) $(MATRIX) counters.o ompnumthreads.o $(CUDACPP_BUILDDIR)/.cudacpplibs - $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CXXLIB) $(LIBFLAGSRPATH) $(LDFLAGS) - if [ -f $(LIBDIR)/$(CUDACPP_BUILDDIR)/lib$(CUDACPP_CULIB).* ]; then $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROCESS) $(DSIG_cudacpp) 
auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) -l$(CUDACPP_CULIB) $(LIBFLAGSRPATH) $(LDFLAGS); fi +madevent_cuda_link: AVX=cuda +madevent_cuda_link: cuda + ln -sf $(CUDACPP_BUILDDIR)/$(PROG)_cuda $(PROG) -$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(CUDACPP_BUILDDIR)/$(PROG)_cpp +$(CUDACPP_BUILDDIR)/$(PROG)_cpp: $(LIBS) $(CUDACPP_CXXLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cpp -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CXXLIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) + +$(CUDACPP_BUILDDIR)/$(PROG)_cuda: $(LIBS) $(CUDACPP_CULIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) counters.o ompnumthreads.o + $(FC) -o $(CUDACPP_BUILDDIR)/$(PROG)_cuda -L$(LIBDIR)/$(CUDACPP_BUILDDIR) -l$(CUDACPP_COMMONLIB) $(PROCESS) $(DSIG_cudacpp) auto_dsig.o $(MATRIX) $(LINKLIBS) $(BIASDEPENDENCIES) $(OMPFLAGS) counters.o ompnumthreads.o -L$(CUDACPP_BUILDDIR) -l$(MG5AMC_CULIB) $(LIBFLAGSRPATH) $(MG_LDFLAGS) $(LDFLAGS) counters.o: counters.cc timer.h - $(CXX) $(CXXFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ ompnumthreads.o: ompnumthreads.cc ompnumthreads.h - $(CXX) -I. $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) $(OMPFLAGS) -c $< -o $@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) $(FC) -o $(PROG)_forhel $(PROCESS) $(MATRIX_HEL) $(LINKLIBS) $(LDFLAGS) $(BIASDEPENDENCIES) $(OMPFLAGS) @@ -174,27 +169,14 @@ $(PROG)_forhel: $(PROCESS) auto_dsig.o $(LIBS) $(MATRIX_HEL) gensym: $(SYMMETRY) configs.inc $(LIBS) $(FC) -o gensym $(SYMMETRY) -L$(LIBDIR) $(LINKLIBS) $(LDFLAGS) -###ifeq (,$(wildcard fbridge.inc)) # Pointless: fbridge.inc always exists as this is the cudacpp-modified makefile! 
-###$(LIBDIR)libmodel.$(libext): ../../Cards/param_card.dat -### cd ../../Source/MODEL; make -### -###$(LIBDIR)libgeneric.$(libext): ../../Cards/run_card.dat -### cd ../../Source; make -### -###$(LIBDIR)libpdf.$(libext): -### cd ../../Source/PDF; make -### -###$(LIBDIR)libgammaUPC.$(libext): -### cd ../../Source/PDF/gammaUPC; make -###endif # Add source so that the compiler finds the DiscreteSampler module. $(MATRIX): %.o: %.f - $(FC) $(FFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC -%.o: %.f - $(FC) $(FFLAGS) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC + $(FC) $(MG_FCFLAGS) $(FCFLAGS) $(MATRIX_FLAG) -c $< -I../../Source/ -I../../Source/PDF/gammaUPC +%.o $(CUDACPP_BUILDDIR)/%.o: %.f + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -I../../Source/ -I../../Source/PDF/gammaUPC -c $< -o $@ %_cudacpp.o: %.f - $(FC) $(FFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ + $(FC) $(MG_FCFLAGS) $(FCFLAGS) -c -DMG5AMC_MEEXPORTER_CUDACPP $< -I../../Source/ $(OMPFLAGS) -o $@ # Dependencies @@ -215,60 +197,42 @@ unwgt.o: genps.inc nexternal.inc symswap.inc cluster.inc run.inc message.inc \ initcluster.o: message.inc # Extra dependencies on discretesampler.mod +../../Source/discretesampler.mod: ../../Source/DiscreteSampler.f -auto_dsig.o: .libs -driver.o: .libs -driver_cudacpp.o: .libs -$(MATRIX): .libs -genps.o: .libs +auto_dsig.o: ../../Source/discretesampler.mod +driver.o: ../../Source/discretesampler.mod +driver_cudacpp.o: ../../Source/discretesampler.mod +$(MATRIX): ../../Source/discretesampler.mod +genps.o: ../../Source/discretesampler.mod # Cudacpp avxall targets -UNAME_P := $(shell uname -p) ifeq ($(UNAME_P),ppc64le) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else ifeq ($(UNAME_P),arm) -avxall: avxnone avxsse4 +cppall: cppnative cppnone cppsse4 else -avxall: avxnone avxsse4 avxavx2 avx512y avx512z +cppall: cppnative cppnone cppsse4 cppavx2 cppavx512y cppavx512z endif -avxnone: $(PROG)_fortran $(DSIG_cudacpp) - @echo 
- $(MAKE) USEBUILDDIR=1 AVX=none - -avxsse4: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=sse4 - -avxavx2: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=avx2 - -avx512y: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512y - -avx512z: $(PROG)_fortran $(DSIG_cudacpp) - @echo - $(MAKE) USEBUILDDIR=1 AVX=512z - -###endif - -# Clean (NB: 'make clean' in Source calls 'make clean' in all P*) +ALL: cppall cuda -clean: # Clean builds: fortran in this Pn; cudacpp executables for one AVX in this Pn - $(RM) *.o gensym $(PROG) $(PROG)_fortran $(PROG)_forhel $(CUDACPP_BUILDDIR)/$(PROG)_cpp $(CUDACPP_BUILDDIR)/$(PROG)_cuda +# Clean all architecture-specific builds: +clean: + $(RM) *.o gensym $(PROG) $(PROG)_* + $(RM) -rf build.*/*{.o,.so,.exe,.dylib,madevent_*} + @for dir in build.*; do if [ -z "$$(ls -A $${dir})" ]; then rm -r $${dir}; else echo "Not cleaning $${dir}; not empty"; fi; done -cleanavxs: clean # Clean builds: fortran in this Pn; cudacpp for all AVX in this Pn and in src - $(MAKE) -f $(CUDACPP_MAKEFILE) cleanall - rm -f $(CUDACPP_BUILDDIR)/.cudacpplibs - rm -f .libs +cleanall: cleansrc + for PROCESS in ../P[0-9]*; do $(MAKE) -C $${PROCESS} clean; done -cleanall: # Clean builds: fortran in all P* and in Source; cudacpp for all AVX in all P* and in src - make -C ../../Source cleanall - rm -rf $(LIBDIR)libbias.$(libext) - rm -f ../../Source/*.mod ../../Source/*/*.mod +# Clean one architecture-specific build +clean%: + $(RM) -r build.$*_* -distclean: cleanall # Clean all fortran and cudacpp builds as well as the googletest installation - $(MAKE) -f $(CUDACPP_MAKEFILE) distclean +# Clean common source directories (interferes with other P*) +cleansrc: + $(MAKE) -C ../../Source clean + $(RM) -f $(SOURCEDIR_GUARD) ../../Source/{*.mod,.lock} ../../Source/*/*.mod + $(RM) -r $(LIBDIR)libbias.$(libext) + if [ -d ../../src ]; then $(MAKE) -C ../../src -f cudacpp_src.mk clean; fi diff --git 
a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/runTest.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/runTest.cc index d4a760a71b..6c77775fb2 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/runTest.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/runTest.cc @@ -243,18 +243,20 @@ struct CUDATest : public CUDA_CPU_TestBase // Use two levels of macros to force stringification at the right level // (see https://gcc.gnu.org/onlinedocs/gcc-3.0.1/cpp_3.html#SEC17 and https://stackoverflow.com/a/3419392) // Google macro is in https://github.com/google/googletest/blob/master/googletest/include/gtest/gtest-param-test.h +/* clang-format off */ #define TESTID_CPU( s ) s##_CPU #define XTESTID_CPU( s ) TESTID_CPU( s ) #define MG_INSTANTIATE_TEST_SUITE_CPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CPUTest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); #define TESTID_GPU( s ) s##_GPU #define XTESTID_GPU( s ) TESTID_GPU( s ) #define MG_INSTANTIATE_TEST_SUITE_GPU( prefix, test_suite_name ) \ -INSTANTIATE_TEST_SUITE_P( prefix, \ - test_suite_name, \ - testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); + INSTANTIATE_TEST_SUITE_P( prefix, \ + test_suite_name, \ + testing::Values( new CUDATest( MG_EPOCH_REFERENCE_FILE_NAME ) ) ); +/* clang-format on */ #ifdef __CUDACC__ MG_INSTANTIATE_TEST_SUITE_GPU( XTESTID_GPU( MG_EPOCH_PROCESS_ID ), MadgraphTest ); diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/testxxx.cc b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/testxxx.cc index 3361fe5aa9..1d315f6d75 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/testxxx.cc +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/testxxx.cc @@ -40,7 +40,7 @@ namespace mg5amcCpu { std::string FPEhandlerMessage = "unknown"; int FPEhandlerIevt = -1; - inline void FPEhandler( int sig ) + inline void 
FPEhandler( int ) { #ifdef __CUDACC__ std::cerr << "Floating Point Exception (GPU): '" << FPEhandlerMessage << "' ievt=" << FPEhandlerIevt << std::endl; @@ -71,11 +71,10 @@ TEST( XTESTID( MG_EPOCH_PROCESS_ID ), testxxx ) constexpr bool testEvents = !dumpEvents; // run the test? constexpr fptype toleranceXXXs = std::is_same::value ? 1.E-15 : 1.E-5; // Constant parameters - constexpr int neppM = MemoryAccessMomenta::neppM; // AOSOA layout constexpr int np4 = CPPProcess::np4; - const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') - assert( nevt % neppM == 0 ); // nevt must be a multiple of neppM - assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV + const int nevt = 32; // 12 independent tests plus 20 duplicates (need a multiple of 16 for floats '512z') + assert( nevt % MemoryAccessMomenta::neppM == 0 ); // nevt must be a multiple of neppM + assert( nevt % neppV == 0 ); // nevt must be a multiple of neppV // Fill in the input momenta #ifdef __CUDACC__ mg5amcGpu::PinnedHostBufferMomenta hstMomenta( nevt ); // AOSOA[npagM][npar=4][np4=4][neppM] diff --git a/epochX/cudacpp/pp_tt012j.mad/bin/internal/banner.py b/epochX/cudacpp/pp_tt012j.mad/bin/internal/banner.py index bd1517985f..b408679c2f 100755 --- a/epochX/cudacpp/pp_tt012j.mad/bin/internal/banner.py +++ b/epochX/cudacpp/pp_tt012j.mad/bin/internal/banner.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. 
# # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,7 +53,7 @@ MADEVENT = False import madgraph.various.misc as misc import madgraph.iolibs.file_writers as file_writers - import madgraph.iolibs.files as files + import madgraph.iolibs.files as files import models.check_param_card as param_card_reader from madgraph import MG5DIR, MadGraph5Error, InvalidCmd @@ -80,36 +80,36 @@ class Banner(dict): 'mgproccard': 'MGProcCard', 'mgruncard': 'MGRunCard', 'ma5card_parton' : 'MA5Card_parton', - 'ma5card_hadron' : 'MA5Card_hadron', + 'ma5card_hadron' : 'MA5Card_hadron', 'mggenerationinfo': 'MGGenerationInfo', 'mgpythiacard': 'MGPythiaCard', 'mgpgscard': 'MGPGSCard', 'mgdelphescard': 'MGDelphesCard', 'mgdelphestrigger': 'MGDelphesTrigger', 'mgshowercard': 'MGShowerCard' } - + forbid_cdata = ['initrwgt'] - + def __init__(self, banner_path=None): """ """ if isinstance(banner_path, Banner): dict.__init__(self, banner_path) self.lhe_version = banner_path.lhe_version - return + return else: dict.__init__(self) - + #Look at the version if MADEVENT: self['mgversion'] = '#%s\n' % open(pjoin(MEDIR, 'MGMEVersion.txt')).read() else: info = misc.get_pkg_info() self['mgversion'] = info['version']+'\n' - + self.lhe_version = None - + if banner_path: self.read_banner(banner_path) @@ -123,7 +123,7 @@ def __init__(self, banner_path=None): 'mgruncard':'run_card.dat', 'mgpythiacard':'pythia_card.dat', 'mgpgscard' : 'pgs_card.dat', - 'mgdelphescard':'delphes_card.dat', + 'mgdelphescard':'delphes_card.dat', 'mgdelphestrigger':'delphes_trigger.dat', 'mg5proccard':'proc_card_mg5.dat', 'mgproccard': 'proc_card.dat', @@ -137,10 +137,10 @@ def __init__(self, banner_path=None): 'mgshowercard':'shower_card.dat', 'pythia8':'pythia8_card.dat', 'ma5card_parton':'madanalysis5_parton_card.dat', - 'ma5card_hadron':'madanalysis5_hadron_card.dat', + 'ma5card_hadron':'madanalysis5_hadron_card.dat', 'run_settings':'' } - + def read_banner(self, input_path): """read a banner""" 
@@ -151,7 +151,7 @@ def read_banner(self, input_path): def split_iter(string): return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL)) input_path = split_iter(input_path) - + text = '' store = False for line in input_path: @@ -170,13 +170,13 @@ def split_iter(string): text += line else: text += '%s%s' % (line, '\n') - - #reaching end of the banner in a event file avoid to read full file + + #reaching end of the banner in a event file avoid to read full file if "
" in line: break elif "" in line: break - + def __getattribute__(self, attr): """allow auto-build for the run_card/param_card/... """ try: @@ -187,23 +187,23 @@ def __getattribute__(self, attr): return self.charge_card(attr) - + def change_lhe_version(self, version): """change the lhe version associate to the banner""" - + version = float(version) if version < 3: version = 1 elif version > 3: raise Exception("Not Supported version") self.lhe_version = version - + def get_cross(self, witherror=False): """return the cross-section of the file""" if "init" not in self: raise Exception - + text = self["init"].split('\n') cross = 0 error = 0 @@ -217,13 +217,13 @@ def get_cross(self, witherror=False): return cross else: return cross, math.sqrt(error) - + def scale_init_cross(self, ratio): """modify the init information with the associate scale""" assert "init" in self - + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -231,29 +231,29 @@ def scale_init_cross(self, ratio): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break pid = int(pid) - + line = " %+13.7e %+13.7e %+13.7e %i" % \ (ratio*float(xsec), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + def get_pdg_beam(self): """return the pdg of each beam""" - + assert "init" in self - + all_lines = self["init"].split('\n') pdg1,pdg2,_ = all_lines[0].split(None, 2) return int(pdg1), int(pdg2) - + def load_basic(self, medir): """ Load the proc_card /param_card and run_card """ - + self.add(pjoin(medir,'Cards', 'param_card.dat')) self.add(pjoin(medir,'Cards', 'run_card.dat')) if os.path.exists(pjoin(medir, 'SubProcesses', 'procdef_mg5.dat')): @@ -261,29 +261,29 @@ def load_basic(self, medir): self.add(pjoin(medir,'Cards', 'proc_card_mg5.dat')) else: self.add(pjoin(medir,'Cards', 'proc_card.dat')) - + def change_seed(self, 
seed): """Change the seed value in the banner""" # 0 = iseed p = re.compile(r'''^\s*\d+\s*=\s*iseed''', re.M) new_seed_str = " %s = iseed" % seed self['mgruncard'] = p.sub(new_seed_str, self['mgruncard']) - + def add_generation_info(self, cross, nb_event): """add info on MGGeneration""" - + text = """ # Number of Events : %s # Integrated weight (pb) : %s """ % (nb_event, cross) self['MGGenerationInfo'] = text - + ############################################################################ # SPLIT BANNER ############################################################################ def split(self, me_dir, proc_card=True): """write the banner in the Cards directory. - proc_card argument is present to avoid the overwrite of proc_card + proc_card argument is present to avoid the overwrite of proc_card information""" for tag, text in self.items(): @@ -305,37 +305,37 @@ def check_pid(self, pid2label): """special routine removing width/mass of particles not present in the model This is usefull in case of loop model card, when we want to use the non loop model.""" - + if not hasattr(self, 'param_card'): self.charge_card('slha') - + for tag in ['mass', 'decay']: block = self.param_card.get(tag) for data in block: pid = data.lhacode[0] - if pid not in list(pid2label.keys()): + if pid not in list(pid2label.keys()): block.remove((pid,)) def get_lha_strategy(self): """get the lha_strategy: how the weight have to be handle by the shower""" - + if not self["init"]: raise Exception("No init block define") - + data = self["init"].split('\n')[0].split() if len(data) != 10: misc.sprint(len(data), self['init']) raise Exception("init block has a wrong format") return int(float(data[-2])) - + def set_lha_strategy(self, value): """set the lha_strategy: how the weight have to be handle by the shower""" - + if not (-4 <= int(value) <= 4): six.reraise(Exception, "wrong value for lha_strategy", value) if not self["init"]: raise Exception("No init block define") - + all_lines = 
self["init"].split('\n') data = all_lines[0].split() if len(data) != 10: @@ -351,13 +351,13 @@ def modify_init_cross(self, cross, allow_zero=False): assert isinstance(cross, dict) # assert "all" in cross assert "init" in self - + cross = dict(cross) for key in cross.keys(): if isinstance(key, str) and key.isdigit() and int(key) not in cross: cross[int(key)] = cross[key] - - + + all_lines = self["init"].split('\n') new_data = [] new_data.append(all_lines[0]) @@ -365,7 +365,7 @@ def modify_init_cross(self, cross, allow_zero=False): line = all_lines[i] split = line.split() if len(split) == 4: - xsec, xerr, xmax, pid = split + xsec, xerr, xmax, pid = split else: new_data += all_lines[i:] break @@ -383,23 +383,23 @@ def modify_init_cross(self, cross, allow_zero=False): (float(cross[pid]), ratio* float(xerr), ratio*float(xmax), pid) new_data.append(line) self['init'] = '\n'.join(new_data) - + ############################################################################ # WRITE BANNER ############################################################################ def write(self, output_path, close_tag=True, exclude=[]): """write the banner""" - + if isinstance(output_path, str): ff = open(output_path, 'w') else: ff = output_path - + if MADEVENT: header = open(pjoin(MEDIR, 'Source', 'banner_header.txt')).read() else: header = open(pjoin(MG5DIR,'Template', 'LO', 'Source', 'banner_header.txt')).read() - + if not self.lhe_version: self.lhe_version = self.get('run_card', 'lhe_version', default=1.0) if float(self.lhe_version) < 3: @@ -412,7 +412,7 @@ def write(self, output_path, close_tag=True, exclude=[]): for tag in [t for t in self.ordered_items if t in list(self.keys())]+ \ [t for t in self.keys() if t not in self.ordered_items]: - if tag in ['init'] or tag in exclude: + if tag in ['init'] or tag in exclude: continue capitalized_tag = self.capitalized_items[tag] if tag in self.capitalized_items else tag start_data, stop_data = '', '' @@ -422,19 +422,19 @@ def write(self, 
output_path, close_tag=True, exclude=[]): stop_data = ']]>\n' out = '<%(tag)s>%(start_data)s\n%(text)s\n%(stop_data)s\n' % \ {'tag':capitalized_tag, 'text':self[tag].strip(), - 'start_data': start_data, 'stop_data':stop_data} + 'start_data': start_data, 'stop_data':stop_data} try: ff.write(out) except: ff.write(out.encode('utf-8')) - - + + if not '/header' in exclude: out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) if 'init' in self and not 'init' in exclude: text = self['init'] @@ -444,22 +444,22 @@ def write(self, output_path, close_tag=True, exclude=[]): ff.write(out) except: ff.write(out.encode('utf-8')) - + if close_tag: - out = '\n' + out = '\n' try: ff.write(out) except: - ff.write(out.encode('utf-8')) + ff.write(out.encode('utf-8')) return ff - - + + ############################################################################ # BANNER ############################################################################ def add(self, path, tag=None): """Add the content of the file to the banner""" - + if not tag: card_name = os.path.basename(path) if 'param_card' in card_name: @@ -505,33 +505,33 @@ def add_text(self, tag, text): if tag == 'param_card': tag = 'slha' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' - + self[tag.lower()] = text - - + + def charge_card(self, tag): """Build the python object associated to the card""" - + if tag in ['param_card', 'param']: tag = 'slha' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' elif tag == 'shower_card': tag = 'mgshowercard' elif tag == 'FO_analyse_card': tag = 'foanalyse' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag - + if tag == 
'slha': param_card = self[tag].split('\n') self.param_card = param_card_reader.ParamCard(param_card) @@ -544,56 +544,56 @@ def charge_card(self, tag): self.proc_card = ProcCard(proc_card) return self.proc_card elif tag =='mgshowercard': - shower_content = self[tag] + shower_content = self[tag] if MADEVENT: import internal.shower_card as shower_card else: import madgraph.various.shower_card as shower_card self.shower_card = shower_card.ShowerCard(shower_content, True) - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.shower_card.testing = False return self.shower_card elif tag =='foanalyse': - analyse_content = self[tag] + analyse_content = self[tag] if MADEVENT: import internal.FO_analyse_card as FO_analyse_card else: import madgraph.various.FO_analyse_card as FO_analyse_card - # set testing to false (testing = true allow to init using + # set testing to false (testing = true allow to init using # the card content instead of the card path" self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True) self.FOanalyse_card.testing = False return self.FOanalyse_card - + def get_detail(self, tag, *arg, **opt): """return a specific """ - + if tag in ['param_card', 'param']: tag = 'slha' attr_tag = 'param_card' elif tag in ['run_card', 'run']: - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], '%s not recognized' % tag - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) + 
self.charge_card(attr_tag) card = getattr(self, attr_tag) if len(arg) == 0: @@ -613,7 +613,7 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 2 and tag == 'slha': try: return card[arg[0]].get(arg[1:]) @@ -621,15 +621,15 @@ def get_detail(self, tag, *arg, **opt): if 'default' in opt: return opt['default'] else: - raise + raise elif len(arg) == 0: return card else: raise Exception("Unknow command") - + #convenient alias get = get_detail - + def set(self, tag, *args): """modify one of the cards""" @@ -637,27 +637,27 @@ def set(self, tag, *args): tag = 'slha' attr_tag = 'param_card' elif tag == 'run_card': - tag = 'mgruncard' + tag = 'mgruncard' attr_tag = 'run_card' elif tag == 'proc_card': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' elif tag == 'model': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('model',) elif tag == 'generate': - tag = 'mg5proccard' + tag = 'mg5proccard' attr_tag = 'proc_card' arg = ('generate',) elif tag == 'shower_card': tag = 'mgshowercard' attr_tag = 'shower_card' assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized' - + if not hasattr(self, attr_tag): - self.charge_card(attr_tag) - + self.charge_card(attr_tag) + card = getattr(self, attr_tag) if len(args) ==2: if tag == 'mg5proccard': @@ -666,20 +666,20 @@ def set(self, tag, *args): card[args[0]] = args[1] else: card[args[:-1]] = args[-1] - - + + @misc.multiple_try() def add_to_file(self, path, seed=None, out=None): """Add the banner to a file and change the associate seed in the banner""" if seed is not None: self.set("run_card", "iseed", seed) - + if not out: path_out = "%s.tmp" % path else: path_out = out - + ff = self.write(path_out, close_tag=False, exclude=['MGGenerationInfo', '/header', 'init']) ff.write("## END BANNER##\n") @@ -698,44 +698,44 @@ def add_to_file(self, path, seed=None, out=None): files.mv(path_out, path) - + def 
split_banner(banner_path, me_dir, proc_card=True): """a simple way to split a banner""" - + banner = Banner(banner_path) banner.split(me_dir, proc_card) - + def recover_banner(results_object, level, run=None, tag=None): """as input we receive a gen_crossxhtml.AllResults object. This define the current banner and load it """ - + if not run: - try: - _run = results_object.current['run_name'] - _tag = results_object.current['tag'] + try: + _run = results_object.current['run_name'] + _tag = results_object.current['tag'] except Exception: return Banner() else: _run = run if not tag: - try: - _tag = results_object[run].tags[-1] + try: + _tag = results_object[run].tags[-1] except Exception as error: if os.path.exists( pjoin(results_object.path,'Events','%s_banner.txt' % (run))): tag = None else: - return Banner() + return Banner() else: _tag = tag - - path = results_object.path - if tag: + + path = results_object.path + if tag: banner_path = pjoin(path,'Events',run,'%s_%s_banner.txt' % (run, tag)) else: banner_path = pjoin(results_object.path,'Events','%s_banner.txt' % (run)) - + if not os.path.exists(banner_path): if level != "parton" and tag != _tag: return recover_banner(results_object, level, _run, results_object[_run].tags[0]) @@ -754,12 +754,12 @@ def recover_banner(results_object, level, run=None, tag=None): return Banner(lhe.banner) # security if the banner was remove (or program canceled before created it) - return Banner() - + return Banner() + banner = Banner(banner_path) - - - + + + if level == 'pythia': if 'mgpythiacard' in banner: del banner['mgpythiacard'] @@ -768,13 +768,13 @@ def recover_banner(results_object, level, run=None, tag=None): if tag in banner: del banner[tag] return banner - + class InvalidRunCard(InvalidCmd): pass class ProcCard(list): """Basic Proccard object""" - + history_header = \ '#************************************************************\n' + \ '#* MadGraph5_aMC@NLO *\n' + \ @@ -798,10 +798,10 @@ class ProcCard(list): '#* run as 
./bin/mg5_aMC filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - - - - + + + + def __init__(self, init=None): """ initialize a basic proc_card""" self.info = {'model': 'sm', 'generate':None, @@ -810,13 +810,13 @@ def __init__(self, init=None): if init: self.read(init) - + def read(self, init): """read the proc_card and save the information""" - + if isinstance(init, str): #path to file init = open(init, 'r') - + store_line = '' for line in init: line = line.rstrip() @@ -828,28 +828,28 @@ def read(self, init): store_line = "" if store_line: raise Exception("WRONG CARD FORMAT") - - + + def move_to_last(self, cmd): """move an element to the last history.""" for line in self[:]: if line.startswith(cmd): self.remove(line) list.append(self, line) - + def append(self, line): """"add a line in the proc_card perform automatically cleaning""" - + line = line.strip() cmds = line.split() if len(cmds) == 0: return - + list.append(self, line) - + # command type: cmd = cmds[0] - + if cmd == 'output': # Remove previous outputs from history self.clean(allow_for_removal = ['output'], keep_switch=True, @@ -875,7 +875,7 @@ def append(self, line): elif cmds[1] == 'proc_v4': #full cleaning self[:] = [] - + def clean(self, to_keep=['set','add','load'], remove_bef_last=None, @@ -884,13 +884,13 @@ def clean(self, to_keep=['set','add','load'], keep_switch=False): """Remove command in arguments from history. All command before the last occurrence of 'remove_bef_last' - (including it) will be removed (but if another options tells the opposite). + (including it) will be removed (but if another options tells the opposite). 'to_keep' is a set of line to always keep. - 'to_remove' is a set of line to always remove (don't care about remove_bef_ + 'to_remove' is a set of line to always remove (don't care about remove_bef_ status but keep_switch acts.). 
- if 'allow_for_removal' is define only the command in that list can be + if 'allow_for_removal' is define only the command in that list can be remove of the history for older command that remove_bef_lb1. all parameter - present in to_remove are always remove even if they are not part of this + present in to_remove are always remove even if they are not part of this list. keep_switch force to keep the statement remove_bef_??? which changes starts the removal mode. @@ -900,8 +900,8 @@ def clean(self, to_keep=['set','add','load'], if __debug__ and allow_for_removal: for arg in to_keep: assert arg not in allow_for_removal - - + + nline = -1 removal = False #looping backward @@ -912,7 +912,7 @@ def clean(self, to_keep=['set','add','load'], if not removal and remove_bef_last: if self[nline].startswith(remove_bef_last): removal = True - switch = True + switch = True # if this is the switch and is protected pass to the next element if switch and keep_switch: @@ -923,12 +923,12 @@ def clean(self, to_keep=['set','add','load'], if any([self[nline].startswith(arg) for arg in to_remove]): self.pop(nline) continue - + # Only if removal mode is active! 
if removal: if allow_for_removal: # Only a subset of command can be removed - if any([self[nline].startswith(arg) + if any([self[nline].startswith(arg) for arg in allow_for_removal]): self.pop(nline) continue @@ -936,10 +936,10 @@ def clean(self, to_keep=['set','add','load'], # All command have to be remove but protected self.pop(nline) continue - + # update the counter to pass to the next element nline -= 1 - + def get(self, tag, default=None): if isinstance(tag, int): list.__getattr__(self, tag) @@ -954,32 +954,32 @@ def get(self, tag, default=None): except ValueError: name, content = line[7:].split(None,1) out.append((name, content)) - return out + return out else: return self.info[tag] - + def write(self, path): """write the proc_card to a given path""" - + fsock = open(path, 'w') fsock.write(self.history_header) for line in self: while len(line) > 70: - sub, line = line[:70]+"\\" , line[70:] + sub, line = line[:70]+"\\" , line[70:] fsock.write(sub+"\n") else: fsock.write(line+"\n") - -class InvalidCardEdition(InvalidCmd): pass - + +class InvalidCardEdition(InvalidCmd): pass + class ConfigFile(dict): """ a class for storing/dealing with input file. - """ + """ def __init__(self, finput=None, **opt): """initialize a new instance. input can be an instance of MadLoopParam, - a file, a path to a file, or simply Nothing""" - + a file, a path to a file, or simply Nothing""" + if isinstance(finput, self.__class__): dict.__init__(self) for key in finput.__dict__: @@ -989,7 +989,7 @@ def __init__(self, finput=None, **opt): return else: dict.__init__(self) - + # Initialize it with all the default value self.user_set = set() self.auto_set = set() @@ -1000,15 +1000,15 @@ def __init__(self, finput=None, **opt): self.comments = {} # comment associated to parameters. can be display via help message # store the valid options for a given parameter. 
self.allowed_value = {} - + self.default_setup() self.plugin_input(finput) - + # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, **opt) - + @@ -1028,7 +1028,7 @@ def __add__(self, other): base = self.__class__(self) #base = copy.copy(self) base.update((key.lower(),value) for key, value in other.items()) - + return base def __radd__(self, other): @@ -1036,26 +1036,26 @@ def __radd__(self, other): new = copy.copy(other) new.update((key, value) for key, value in self.items()) return new - + def __contains__(self, key): return dict.__contains__(self, key.lower()) def __iter__(self): - + for name in super(ConfigFile, self).__iter__(): yield self.lower_to_case[name.lower()] - - + + #iter = super(ConfigFile, self).__iter__() #misc.sprint(iter) #return (self.lower_to_case[name] for name in iter) - + def keys(self): return [name for name in self] - + def items(self): return [(name,self[name]) for name in self] - + @staticmethod def warn(text, level, raiseerror=False): """convenient proxy to raiseerror/print warning""" @@ -1071,11 +1071,11 @@ def warn(text, level, raiseerror=False): log = lambda t: logger.log(level, t) elif level: log = level - + return log(text) def post_set(self, name, value, change_userdefine, raiseerror): - + if value is None: value = self[name] @@ -1087,25 +1087,25 @@ def post_set(self, name, value, change_userdefine, raiseerror): return getattr(self, 'post_set_%s' % name)(value, change_userdefine, raiseerror) else: raise - + def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): """set the attribute and set correctly the type if the value is a string. change_userdefine on True if we have to add the parameter in user_set """ - + if not len(self): #Should never happen but when deepcopy/pickle self.__init__() - + name = name.strip() - lower_name = name.lower() - + lower_name = name.lower() + # 0. 
check if this parameter is a system only one if change_userdefine and lower_name in self.system_only: text='%s is a private entry which can not be modify by the user. Keep value at %s' % (name,self[name]) self.warn(text, 'critical', raiseerror) return - + #1. check if the parameter is set to auto -> pass it to special if lower_name in self: targettype = type(dict.__getitem__(self, lower_name)) @@ -1115,22 +1115,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): self.user_set.remove(lower_name) #keep old value. self.post_set(lower_name, 'auto', change_userdefine, raiseerror) - return + return elif lower_name in self.auto_set: self.auto_set.remove(lower_name) - + # 2. Find the type of the attribute that we want if lower_name in self.list_parameter: targettype = self.list_parameter[lower_name] - - - + + + if isinstance(value, str): # split for each comma/space value = value.strip() if value.startswith('[') and value.endswith(']'): value = value[1:-1] - #do not perform split within a " or ' block + #do not perform split within a " or ' block data = re.split(r"((? bad input dropped.append(val) - + if not new_values: text= "value '%s' for entry '%s' is not valid. Preserving previous value: '%s'.\n" \ % (value, name, self[lower_name]) text += "allowed values are any list composed of the following entries: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) - return self.warn(text, 'warning', raiseerror) - elif dropped: + return self.warn(text, 'warning', raiseerror) + elif dropped: text = "some value for entry '%s' are not valid. 
Invalid items are: '%s'.\n" \ % (name, dropped) text += "value will be set to %s" % new_values - text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) + text += "allowed items in the list are: %s" % ', '.join([str(i) for i in self.allowed_value[lower_name]]) self.warn(text, 'warning') values = new_values # make the assignment - dict.__setitem__(self, lower_name, values) + dict.__setitem__(self, lower_name, values) if change_userdefine: self.user_set.add(lower_name) #check for specific action - return self.post_set(lower_name, None, change_userdefine, raiseerror) + return self.post_set(lower_name, None, change_userdefine, raiseerror) elif lower_name in self.dict_parameter: - targettype = self.dict_parameter[lower_name] + targettype = self.dict_parameter[lower_name] full_reset = True #check if we just update the current dict or not - + if isinstance(value, str): value = value.strip() # allowed entry: @@ -1209,7 +1209,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): # name , value => just add the entry # name value => just add the entry # {name1:value1, name2:value2} => full reset - + # split for each comma/space if value.startswith('{') and value.endswith('}'): new_value = {} @@ -1219,23 +1219,23 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): x, y = pair.split(':') x, y = x.strip(), y.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] new_value[x] = y value = new_value elif ',' in value: x,y = value.split(',') value = {x.strip():y.strip()} full_reset = False - + elif ':' in value: x,y = value.split(':') value = {x.strip():y.strip()} - full_reset = False + full_reset = False else: x,y = value.split() value = {x:y} - full_reset = False - + full_reset = False + if isinstance(value, dict): for key in value: value[key] = self.format_variable(value[key], targettype, name=name) @@ -1248,7 +1248,7 @@ def __setitem__(self, 
name, value, change_userdefine=False,raiseerror=False): if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - elif name in self: + elif name in self: targettype = type(self[name]) else: logger.debug('Trying to add argument %s in %s. ' % (name, self.__class__.__name__) +\ @@ -1256,22 +1256,22 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): suggestions = [k for k in self.keys() if k.startswith(name[0].lower())] if len(suggestions)>0: logger.debug("Did you mean one of the following: %s"%suggestions) - self.add_param(lower_name, self.format_variable(UnknownType(value), + self.add_param(lower_name, self.format_variable(UnknownType(value), UnknownType, name)) self.lower_to_case[lower_name] = name if change_userdefine: self.user_set.add(lower_name) return self.post_set(lower_name, None, change_userdefine, raiseerror) - + value = self.format_variable(value, targettype, name=name) #check that the value is allowed: if lower_name in self.allowed_value and '*' not in self.allowed_value[lower_name]: valid = False allowed = self.allowed_value[lower_name] - + # check if the current value is allowed or not (set valid to True) if value in allowed: - valid=True + valid=True elif isinstance(value, str): value = value.lower().strip() allowed = [str(v).lower() for v in allowed] @@ -1279,7 +1279,7 @@ def __setitem__(self, name, value, change_userdefine=False,raiseerror=False): i = allowed.index(value) value = self.allowed_value[lower_name][i] valid=True - + if not valid: # act if not valid: text = "value '%s' for entry '%s' is not valid. 
Preserving previous value: '%s'.\n" \ @@ -1303,7 +1303,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, if __debug__: if lower_name in self: raise Exception("Duplicate case for %s in %s" % (name,self.__class__)) - + dict.__setitem__(self, lower_name, value) self.lower_to_case[lower_name] = name if isinstance(value, list): @@ -1318,12 +1318,12 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, elif isinstance(value, dict): allvalues = list(value.values()) if any([type(allvalues[0]) != type(v) for v in allvalues]): - raise Exception("All entry should have the same type") - self.dict_parameter[lower_name] = type(allvalues[0]) + raise Exception("All entry should have the same type") + self.dict_parameter[lower_name] = type(allvalues[0]) if '__type__' in value: del value['__type__'] dict.__setitem__(self, lower_name, value) - + if allowed and allowed != ['*']: self.allowed_value[lower_name] = allowed if lower_name in self.list_parameter: @@ -1333,8 +1333,8 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, assert value in allowed or '*' in allowed #elif isinstance(value, bool) and allowed != ['*']: # self.allowed_value[name] = [True, False] - - + + if system: self.system_only.add(lower_name) if comment: @@ -1342,7 +1342,7 @@ def add_param(self, name, value, system=False, comment=False, typelist=None, def do_help(self, name): """return a minimal help for the parameter""" - + out = "## Information on parameter %s from class %s\n" % (name, self.__class__.__name__) if name.lower() in self: out += "## current value: %s (parameter should be of type %s)\n" % (self[name], type(self[name])) @@ -1351,7 +1351,7 @@ def do_help(self, name): else: out += "## Unknown for this class\n" if name.lower() in self.user_set: - out += "## This value is considered as being set by the user\n" + out += "## This value is considered as being set by the user\n" else: out += "## This value is considered as 
being set by the system\n" if name.lower() in self.allowed_value: @@ -1359,17 +1359,17 @@ def do_help(self, name): out += "Allowed value are: %s\n" % ','.join([str(p) for p in self.allowed_value[name.lower()]]) else: out += "Suggested value are : %s\n " % ','.join([str(p) for p in self.allowed_value[name.lower()] if p!='*']) - + logger.info(out) return out @staticmethod def guess_type_from_value(value): "try to guess the type of the string --do not use eval as it might not be safe" - + if not isinstance(value, str): return str(value.__class__.__name__) - + #use ast.literal_eval to be safe since value is untrusted # add a timeout to mitigate infinite loop, memory stack attack with misc.stdchannel_redirected(sys.stdout, os.devnull): @@ -1388,7 +1388,7 @@ def guess_type_from_value(value): @staticmethod def format_variable(value, targettype, name="unknown"): """assign the value to the attribute for the given format""" - + if isinstance(targettype, str): if targettype in ['str', 'int', 'float', 'bool']: targettype = eval(targettype) @@ -1412,7 +1412,7 @@ def format_variable(value, targettype, name="unknown"): (name, type(value), targettype, value)) else: raise InvalidCmd("Wrong input type for %s found %s and expecting %s for value %s" %\ - (name, type(value), targettype, value)) + (name, type(value), targettype, value)) else: if targettype != UnknownType: value = value.strip() @@ -1441,8 +1441,8 @@ def format_variable(value, targettype, name="unknown"): value = int(value) elif value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} - value =int(value[:-1]) * convert[value[-1]] - elif '/' in value or '*' in value: + value =int(value[:-1]) * convert[value[-1]] + elif '/' in value or '*' in value: try: split = re.split('(\*|/)',value) v = float(split[0]) @@ -1461,7 +1461,7 @@ def format_variable(value, targettype, name="unknown"): try: value = float(value.replace('d','e')) except ValueError: - raise InvalidCmd("%s can not be mapped to an 
integer" % value) + raise InvalidCmd("%s can not be mapped to an integer" % value) try: new_value = int(value) except ValueError: @@ -1471,7 +1471,7 @@ def format_variable(value, targettype, name="unknown"): value = new_value else: raise InvalidCmd("incorect input: %s need an integer for %s" % (value,name)) - + elif targettype == float: if value.endswith(('k', 'M')) and value[:-1].isdigit(): convert = {'k':1000, 'M':1000000} @@ -1496,33 +1496,33 @@ def format_variable(value, targettype, name="unknown"): value = v else: raise InvalidCmd("type %s is not handle by the card" % targettype) - + return value - - + + def __getitem__(self, name): - + lower_name = name.lower() if __debug__: if lower_name not in self: if lower_name in [key.lower() for key in self] : raise Exception("Some key are not lower case %s. Invalid use of the class!"\ % [key for key in self if key.lower() != key]) - + if lower_name in self.auto_set: return 'auto' - + return dict.__getitem__(self, name.lower()) - + get = __getitem__ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): """convenient way to change attribute. changeifuserset=False means that the value is NOT change is the value is not on default. - user=True, means that the value will be marked as modified by the user - (potentially preventing future change to the value) + user=True, means that the value will be marked as modified by the user + (potentially preventing future change to the value) """ # changeifuserset=False -> we need to check if the user force a value. 
@@ -1530,8 +1530,8 @@ def set(self, name, value, changeifuserset=True, user=False, raiseerror=False): if name.lower() in self.user_set: #value modified by the user -> do nothing return - self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) - + self.__setitem__(name, value, change_userdefine=user, raiseerror=raiseerror) + class RivetCard(ConfigFile): @@ -1706,7 +1706,7 @@ def setRelevantParamCard(self, f_params, f_relparams): yexec_dict = {} yexec_line = exec_line + "yaxis_relvar = " + self['yaxis_relvar'] exec(yexec_line, locals(), yexec_dict) - if self['yaxis_label'] == "": + if self['yaxis_label'] == "": self['yaxis_label'] = "yaxis_relvar" f_relparams.write("{0} = {1}\n".format(self['yaxis_label'], yexec_dict['yaxis_relvar'])) else: @@ -1715,11 +1715,11 @@ def setRelevantParamCard(self, f_params, f_relparams): class ProcCharacteristic(ConfigFile): """A class to handle information which are passed from MadGraph to the madevent - interface.""" - + interface.""" + def default_setup(self): """initialize the directory to the default value""" - + self.add_param('loop_induced', False) self.add_param('has_isr', False) self.add_param('has_fsr', False) @@ -1735,16 +1735,16 @@ def default_setup(self): self.add_param('pdg_initial1', [0]) self.add_param('pdg_initial2', [0]) self.add_param('splitting_types',[], typelist=str) - self.add_param('perturbation_order', [], typelist=str) - self.add_param('limitations', [], typelist=str) - self.add_param('hel_recycling', False) + self.add_param('perturbation_order', [], typelist=str) + self.add_param('limitations', [], typelist=str) + self.add_param('hel_recycling', False) self.add_param('single_color', True) - self.add_param('nlo_mixed_expansion', True) + self.add_param('nlo_mixed_expansion', True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if 
isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1752,49 +1752,49 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: if '#' in line: line = line.split('#',1)[0] if not line: continue - + if '=' in line: key, value = line.split('=',1) self[key.strip()] = value - + def write(self, outputpath): """write the file""" template ="# Information about the process #\n" template +="#########################################\n" - + fsock = open(outputpath, 'w') fsock.write(template) - + for key, value in self.items(): fsock.write(" %s = %s \n" % (key, value)) - - fsock.close() - + + fsock.close() + class GridpackCard(ConfigFile): """an object for the GridpackCard""" - + def default_setup(self): """default value for the GridpackCard""" - + self.add_param("GridRun", True) self.add_param("gevents", 2500) self.add_param("gseed", 1) - self.add_param("ngran", -1) - + self.add_param("ngran", -1) + def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -1802,7 +1802,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -1812,19 +1812,19 @@ def read(self, finput): self[line[1].strip()] = line[0].replace('\'','').strip() def write(self, output_file, template=None): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'grid_card_default.dat') else: template = pjoin(MEDIR, 'Cards', 'grid_card_default.dat') - + text = 
"" - for line in open(template,'r'): + for line in open(template,'r'): nline = line.split('#')[0] nline = nline.split('!')[0] comment = line[len(nline):] @@ -1832,19 +1832,19 @@ def write(self, output_file, template=None): if len(nline) != 2: text += line elif nline[1].strip() in self: - text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) + text += ' %s\t= %s %s' % (self[nline[1].strip()],nline[1], comment) else: logger.info('Adding missing parameter %s to current run_card (with default value)' % nline[1].strip()) - text += line - + text += line + if isinstance(output_file, str): fsock = open(output_file,'w') else: fsock = output_file - + fsock.write(text) fsock.close() - + class PY8Card(ConfigFile): """ Implements the Pythia8 card.""" @@ -1868,7 +1868,7 @@ def add_default_subruns(self, type): def default_setup(self): """ Sets up the list of available PY8 parameters.""" - + # Visible parameters # ================== self.add_param("Main:numberOfEvents", -1) @@ -1877,11 +1877,11 @@ def default_setup(self): self.add_param("JetMatching:qCut", -1.0, always_write_to_card=False) self.add_param("JetMatching:doShowerKt",False,always_write_to_card=False) # -1 means that it is automatically set. - self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) + self.add_param("JetMatching:nJetMax", -1, always_write_to_card=False) # for CKKWL merging self.add_param("Merging:TMS", -1.0, always_write_to_card=False) self.add_param("Merging:Process", '', always_write_to_card=False) - # -1 means that it is automatically set. + # -1 means that it is automatically set. self.add_param("Merging:nJetMax", -1, always_write_to_card=False) # for both merging, chose whether to also consider different merging # scale values for the extra weights related to scale and PDF variations. 
@@ -1918,10 +1918,10 @@ def default_setup(self): comment='This allows to turn on/off hadronization alltogether.') self.add_param("partonlevel:mpi", True, hidden=True, always_write_to_card=False, comment='This allows to turn on/off MPI alltogether.') - self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, + self.add_param("Beams:setProductionScalesFromLHEF", False, hidden=True, always_write_to_card=False, comment='This parameter is automatically set to True by MG5aMC when doing MLM merging with PY8.') - + # for MLM merging self.add_param("JetMatching:merge", False, hidden=True, always_write_to_card=False, comment='Specifiy if we are merging sample of different multiplicity.') @@ -1931,9 +1931,9 @@ def default_setup(self): comment='Value of the merging scale below which one does not even write the HepMC event.') self.add_param("JetMatching:doVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. in SysCalc).') - self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) + self.add_param("JetMatching:scheme", 1, hidden=True, always_write_to_card=False) self.add_param("JetMatching:setMad", False, hidden=True, always_write_to_card=False, - comment='Specify one must read inputs from the MadGraph banner.') + comment='Specify one must read inputs from the MadGraph banner.') self.add_param("JetMatching:coneRadius", 1.0, hidden=True, always_write_to_card=False) self.add_param("JetMatching:nQmatch",4,hidden=True, always_write_to_card=False) # for CKKWL merging (common with UMEPS, UNLOPS) @@ -1946,7 +1946,7 @@ def default_setup(self): self.add_param("Merging:applyVeto", False, hidden=True, always_write_to_card=False, comment='Do veto externally (e.g. 
in SysCalc).') self.add_param("Merging:includeWeightInXsection", True, hidden=True, always_write_to_card=False, - comment='If turned off, then the option belows forces PY8 to keep the original weight.') + comment='If turned off, then the option belows forces PY8 to keep the original weight.') self.add_param("Merging:muRen", 91.188, hidden=True, always_write_to_card=False, comment='Set renormalization scales of the 2->2 process.') self.add_param("Merging:muFacInME", 91.188, hidden=True, always_write_to_card=False, @@ -1958,7 +1958,7 @@ def default_setup(self): # To be added in subruns for CKKWL self.add_param("Merging:mayRemoveDecayProducts", False, hidden=True, always_write_to_card=False) self.add_param("Merging:doKTMerging", False, hidden=True, always_write_to_card=False) - self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) + self.add_param("Merging:Dparameter", 0.4, hidden=True, always_write_to_card=False) self.add_param("Merging:doPTLundMerging", False, hidden=True, always_write_to_card=False) # Special Pythia8 paremeters useful to simplify the shower. @@ -1975,33 +1975,33 @@ def default_setup(self): # Add parameters controlling the subruns execution flow. # These parameters should not be part of PY8SubRun daughter. self.add_default_subruns('parameters') - + def __init__(self, *args, **opts): - # Parameters which are not printed in the card unless they are - # 'user_set' or 'system_set' or part of the + # Parameters which are not printed in the card unless they are + # 'user_set' or 'system_set' or part of the # self.hidden_params_to_always_print set. self.hidden_param = [] self.hidden_params_to_always_write = set() self.visible_params_to_always_write = set() # List of parameters that should never be written out given the current context. self.params_to_never_write = set() - + # Parameters which have been set by the system (i.e. 
MG5 itself during # the regular course of the shower interface) self.system_set = set() - + # Add attributes controlling the subruns execution flow. # These attributes should not be part of PY8SubRun daughter. self.add_default_subruns('attributes') - - # Parameters which have been set by the + + # Parameters which have been set by the super(PY8Card, self).__init__(*args, **opts) - def add_param(self, name, value, hidden=False, always_write_to_card=True, + def add_param(self, name, value, hidden=False, always_write_to_card=True, comment=None): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. The option 'hidden' decides whether the parameter should be visible to the user. The option 'always_write_to_card' decides whether it should @@ -2017,7 +2017,7 @@ def add_param(self, name, value, hidden=False, always_write_to_card=True, self.hidden_params_to_always_write.add(name) else: if always_write_to_card: - self.visible_params_to_always_write.add(name) + self.visible_params_to_always_write.add(name) if not comment is None: if not isinstance(comment, str): raise MadGraph5Error("Option 'comment' must be a string, not"+\ @@ -2036,7 +2036,7 @@ def add_subrun(self, py8_subrun): self.subruns[py8_subrun['Main:subrun']] = py8_subrun if not 'LHEFInputs:nSubruns' in self.user_set: self['LHEFInputs:nSubruns'] = max(self.subruns.keys()) - + def userSet(self, name, value, **opts): """Set an attribute of this card, following a user_request""" self.__setitem__(name, value, change_userdefine=True, **opts) @@ -2044,10 +2044,10 @@ def userSet(self, name, value, **opts): self.system_set.remove(name.lower()) def vetoParamWriteOut(self, name): - """ Forbid the writeout of a specific parameter of this card when the + """ Forbid the writeout of a specific parameter of this card when the "write" function will be invoked.""" 
self.params_to_never_write.add(name.lower()) - + def systemSet(self, name, value, **opts): """Set an attribute of this card, independently of a specific user request and only if not already user_set.""" @@ -2058,7 +2058,7 @@ def systemSet(self, name, value, **opts): if force or name.lower() not in self.user_set: self.__setitem__(name, value, change_userdefine=False, **opts) self.system_set.add(name.lower()) - + def MadGraphSet(self, name, value, **opts): """ Sets a card attribute, but only if it is absent or not already user_set.""" @@ -2068,18 +2068,18 @@ def MadGraphSet(self, name, value, **opts): force = False if name.lower() not in self or (force or name.lower() not in self.user_set): self.__setitem__(name, value, change_userdefine=False, **opts) - self.system_set.add(name.lower()) - + self.system_set.add(name.lower()) + def defaultSet(self, name, value, **opts): self.__setitem__(name, value, change_userdefine=False, **opts) - + @staticmethod def pythia8_formatting(value, formatv=None): """format the variable into pythia8 card convention. 
The type is detected by default""" if not formatv: if isinstance(value,UnknownType): - formatv = 'unknown' + formatv = 'unknown' elif isinstance(value, bool): formatv = 'bool' elif isinstance(value, int): @@ -2095,7 +2095,7 @@ def pythia8_formatting(value, formatv=None): formatv = 'str' else: assert formatv - + if formatv == 'unknown': # No formatting then return str(value) @@ -2116,7 +2116,7 @@ def pythia8_formatting(value, formatv=None): elif formatv == 'float': return '%.10e' % float(value) elif formatv == 'shortfloat': - return '%.3f' % float(value) + return '%.3f' % float(value) elif formatv == 'str': return "%s" % value elif formatv == 'list': @@ -2124,9 +2124,9 @@ def pythia8_formatting(value, formatv=None): return ','.join([PY8Card.pythia8_formatting(arg, 'shortfloat') for arg in value]) else: return ','.join([PY8Card.pythia8_formatting(arg) for arg in value]) - - def write(self, output_file, template, read_subrun=False, + + def write(self, output_file, template, read_subrun=False, print_only_visible=False, direct_pythia_input=False, add_missing=True): """ Write the card to output_file using a specific template. 
> 'print_only_visible' specifies whether or not the hidden parameters @@ -2143,28 +2143,28 @@ def write(self, output_file, template, read_subrun=False, or p.lower() in self.user_set] # Filter against list of parameters vetoed for write-out visible_param = [p for p in visible_param if p.lower() not in self.params_to_never_write] - + # Now the hidden param which must be written out if print_only_visible: hidden_output_param = [] else: hidden_output_param = [p for p in self if p.lower() in self.hidden_param and not p.lower() in self.user_set and - (p.lower() in self.hidden_params_to_always_write or + (p.lower() in self.hidden_params_to_always_write or p.lower() in self.system_set)] # Filter against list of parameters vetoed for write-out hidden_output_param = [p for p in hidden_output_param if p not in self.params_to_never_write] - + if print_only_visible: subruns = [] else: if not read_subrun: subruns = sorted(self.subruns.keys()) - + # Store the subruns to write in a dictionary, with its ID in key # and the corresponding stringstream in value subruns_to_write = {} - + # Sort these parameters nicely so as to put together parameters # belonging to the same group (i.e. prefix before the ':' in their name). def group_params(params): @@ -2191,7 +2191,7 @@ def group_params(params): # First dump in a temporary_output (might need to have a second pass # at the very end to update 'LHEFInputs:nSubruns') output = StringIO.StringIO() - + # Setup template from which to read if isinstance(template, str): if os.path.isfile(template): @@ -2199,7 +2199,7 @@ def group_params(params): elif '\n' in template: tmpl = StringIO.StringIO(template) else: - raise Exception("File input '%s' not found." % file_input) + raise Exception("File input '%s' not found." 
% file_input) elif template is None: # Then use a dummy empty StringIO, hence skipping the reading tmpl = StringIO.StringIO() @@ -2257,8 +2257,8 @@ def group_params(params): # Remove all of its variables (so that nothing is overwritten) DummySubrun.clear() DummySubrun.write(subruns_to_write[int(value)], - tmpl, read_subrun=True, - print_only_visible=print_only_visible, + tmpl, read_subrun=True, + print_only_visible=print_only_visible, direct_pythia_input=direct_pythia_input) logger.info('Adding new unknown subrun with ID %d.'% @@ -2267,7 +2267,7 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - + # Change parameters which must be output if param in visible_param: new_value = PY8Card.pythia8_formatting(self[param]) @@ -2286,10 +2286,10 @@ def group_params(params): last_pos = tmpl.tell() line = tmpl.readline() continue - - # Substitute the value. + + # Substitute the value. # If it is directly the pytia input, then don't write the param if it - # is not in the list of visible_params_to_always_write and was + # is not in the list of visible_params_to_always_write and was # not user_set or system_set if ((not direct_pythia_input) or (param.lower() in self.visible_params_to_always_write) or @@ -2304,16 +2304,16 @@ def group_params(params): output.write(template%(param_entry, value_entry.replace(value,new_value))) - + # Proceed to next line last_pos = tmpl.tell() line = tmpl.readline() - + # If add_missing is False, make sure to empty the list of remaining parameters if not add_missing: visible_param = [] hidden_output_param = [] - + # Now output the missing parameters. Warn about visible ones. if len(visible_param)>0 and not template is None: output.write( @@ -2343,12 +2343,12 @@ def group_params(params): """%(' for subrun %d'%self['Main:subrun'] if 'Main:subrun' in self else '')) for param in hidden_output_param: if param.lower() in self.comments: - comment = '\n'.join('! %s'%c for c in + comment = '\n'.join('! 
%s'%c for c in self.comments[param.lower()].split('\n')) output.write(comment+'\n') output.write('%s=%s\n'%(param,PY8Card.pythia8_formatting(self[param]))) - - # Don't close the file if we were reading a subrun, but simply write + + # Don't close the file if we were reading a subrun, but simply write # output and return now if read_subrun: output_file.write(output.getvalue()) @@ -2382,12 +2382,12 @@ def group_params(params): out.close() else: output_file.write(output.getvalue()) - + def read(self, file_input, read_subrun=False, setter='default'): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file. - The setter option choses the authority that sets potential - modified/new parameters. It can be either: + The setter option choses the authority that sets potential + modified/new parameters. It can be either: 'default' or 'user' or 'system'""" if isinstance(file_input, str): if "\n" in file_input: @@ -2423,8 +2423,8 @@ def read(self, file_input, read_subrun=False, setter='default'): raise MadGraph5Error("Could not read line '%s' of Pythia8 card."%\ line) if '!' in value: - value,_ = value.split('!',1) - + value,_ = value.split('!',1) + # Read a subrun if detected: if param=='Main:subrun': if read_subrun: @@ -2451,7 +2451,7 @@ def read(self, file_input, read_subrun=False, setter='default'): last_pos = finput.tell() line = finput.readline() continue - + # Read parameter. The case of a parameter not defined in the card is # handled directly in ConfigFile. @@ -2478,7 +2478,7 @@ def add_default_subruns(self, type): def __init__(self, *args, **opts): """ Initialize a subrun """ - + # Force user to set it manually. 
subrunID = -1 if 'subrun_id' in opts: @@ -2489,7 +2489,7 @@ def __init__(self, *args, **opts): def default_setup(self): """Sets up the list of available PY8SubRun parameters.""" - + # Add all default PY8Card parameters super(PY8SubRun, self).default_setup() # Make sure they are all hidden @@ -2501,33 +2501,33 @@ def default_setup(self): self.add_param("Main:subrun", -1) self.add_param("Beams:LHEF", "events.lhe.gz") - + class RunBlock(object): """ Class for a series of parameter in the run_card that can be either visible or hidden. - name: allow to set in the default run_card $name to set where that + name: allow to set in the default run_card $name to set where that block need to be inserted template_on: information to include is block is active template_off: information to include is block is not active on_fields/off_fields: paramater associated to the block - can be specify but are otherwise automatically but + can be specify but are otherwise automatically but otherwise determined from the template. - + function: status(self,run_card) -> return which template need to be used check_validity(self, runcard) -> sanity check - create_default_for_process(self, run_card, proc_characteristic, - history, proc_def) + create_default_for_process(self, run_card, proc_characteristic, + history, proc_def) post_set_XXXX(card, value, change_userdefine, raiseerror) -> fct called when XXXXX is set post_set(card, value, change_userdefine, raiseerror, **opt) -> fct called when a parameter is changed - -> no access to parameter name + -> no access to parameter name -> not called if post_set_XXXX is defined """ - - + + def __init__(self, name, template_on, template_off, on_fields=False, off_fields=False): self.name = name @@ -2550,7 +2550,7 @@ def fields(self): def find_fields_from_template(template): """ return the list of fields from a template. 
checking line like %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ - + return re.findall(r"^\s*%\((.*)\)s\s*=\s*\1", template, re.M) def get_template(self, card): @@ -2565,7 +2565,7 @@ def get_unused_template(self, card): if self.status(card): return self.template_off else: - return self.template_on + return self.template_on def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -2594,20 +2594,20 @@ def manage_parameters(self, card, written, to_write): written.add(name) if name in to_write: to_write.remove(name) - + def check_validity(self, runcard): """run self consistency check here --avoid to use runcard[''] = xxx here since it can trigger post_set function""" return def create_default_for_process(self, run_card, proc_characteristic, history, proc_def): - return + return # @staticmethod # def post_set(card, value, change_userdefine, raiseerror, **opt): # """default action to run when a parameter of the block is defined. # Here we do not know which parameter is modified. if this is needed. 
# then one need to define post_set_XXXXX(card, value, change_userdefine, raiseerror) -# and then only that function is used +# and then only that function is used # """ # # if 'pdlabel' in card.user_set: @@ -2621,7 +2621,7 @@ class RunCard(ConfigFile): blocks = [] parameter_in_block = {} - allowed_lep_densities = {} + allowed_lep_densities = {} default_include_file = 'run_card.inc' default_autodef_file = 'run.inc' donewarning = [] @@ -2637,7 +2637,7 @@ def plugin_input(self, finput): curr_dir = os.path.dirname(os.path.dirname(finput.name)) elif isinstance(finput, str): curr_dir = os.path.dirname(os.path.dirname(finput)) - + if curr_dir: if os.path.exists(pjoin(curr_dir, 'bin', 'internal', 'plugin_run_card')): # expected format {} passing everything as optional argument @@ -2646,7 +2646,7 @@ def plugin_input(self, finput): continue opts = dict(eval(line)) self.add_param(**opts) - + @classmethod def fill_post_set_from_blocks(cls): """set the post_set function for any parameter defined in a run_block""" @@ -2659,8 +2659,8 @@ def fill_post_set_from_blocks(cls): elif hasattr(block, 'post_set'): setattr(cls, 'post_set_%s' % parameter, block.post_set) cls.parameter_in_block[parameter] = block - - + + def __new__(cls, finput=None, **opt): cls.fill_post_set_from_blocks() @@ -2718,9 +2718,9 @@ def __new__(cls, finput=None, **opt): return super(RunCard, cls).__new__(cls, finput, **opt) def __init__(self, *args, **opts): - + # The following parameter are updated in the defaultsetup stage. - + #parameter for which no warning should be raised if not define self.hidden_param = [] # in which include file the parameer should be written @@ -2739,11 +2739,11 @@ def __init__(self, *args, **opts): self.cuts_parameter = {} # parameter added where legacy requires an older value. 
self.system_default = {} - + self.display_block = [] # set some block to be displayed self.fct_mod = {} # {param: (fct_pointer, *argument, **opts)} - self.cut_class = {} + self.cut_class = {} self.warned=False @@ -2776,11 +2776,11 @@ def get_lepton_densities(cls): else: cls.allowed_lep_densities[identity].append(name) - def add_param(self, name, value, fortran_name=None, include=True, + def add_param(self, name, value, fortran_name=None, include=True, hidden=False, legacy=False, cut=False, system=False, sys_default=None, autodef=False, fct_mod=None, **opts): - """ add a parameter to the card. value is the default value and + """ add a parameter to the card. value is the default value and defines the type (int/float/bool/str) of the input. fortran_name: defines what is the associate name in the f77 code include: defines if we have to put the value in the include file @@ -2795,7 +2795,7 @@ def add_param(self, name, value, fortran_name=None, include=True, fct_mod: defines a function to run if the parameter is modify in the include file options of **opts: - allowed: list of valid options. '*' means anything else should be allowed. - empty list means anything possible as well. + empty list means anything possible as well. 
- comment: add comment for writing/help - typelist: type of the list if default is empty """ @@ -2823,9 +2823,9 @@ def add_param(self, name, value, fortran_name=None, include=True, self.fct_mod[name] = fct_mod def read(self, finput, consistency=True, unknown_warning=True, **opt): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -2836,7 +2836,7 @@ def read(self, finput, consistency=True, unknown_warning=True, **opt): finput = open(finput) else: raise Exception("No such file %s" % finput) - + for line in finput: line = line.split('#')[0] line = line.split('!')[0] @@ -2864,8 +2864,8 @@ def add_unknown_entry(self, name, value, unknow_warning): This is based on the guess_entry_fromname for the various syntax providing input. This then call add_param accordingly. - This function does not returns anything. - """ + This function does not returns anything. + """ if name == "dsqrt_q2fact1" and not self.LO: raise InvalidRunCard("Looks like you passed a LO run_card for a NLO run. Please correct") @@ -2903,7 +2903,7 @@ def add_unknown_entry(self, name, value, unknow_warning): " The type was assigned to %s. 
\n"+\ " The definition of that variable will %sbe automatically added to fortran file %s\n"+\ " The value of that variable will %sbe passed to the fortran code via fortran file %s",\ - name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, + name, value, vartype if vartype != "list" else "list of %s" % opts.get('typelist').__name__, "" if opts.get('autodef', False) else "not", "" if opts.get('autodef', False) in [True,False] else opts.get('autodef'), "" if opts.get('include', True) else "not", "" if opts.get('include', True) in [True,False] else opts.get('include')) RunCard.donewarning.append(name) @@ -2923,19 +2923,19 @@ def valid_line(self, line, tmp): return False elif line.strip().startswith('%'): parameter = line[line.find('(')+1:line.find(')')] - + try: cond = self.cuts_parameter[parameter] except KeyError: return True - - + + if template_options.get(cond, default) or cond is True: return True else: - return False + return False else: - return True + return True def reset_simd(self, old_value, new_value, name, *args, **opts): @@ -2946,28 +2946,28 @@ def make_clean(self,old_value, new_value, name, dir): raise Exception('pass make clean for ', dir) def make_Ptouch(self,old_value, new_value, name, reset): - raise Exception('pass Ptouch for ', reset) - + raise Exception('pass Ptouch for ', reset) + def write(self, output_file, template=None, python_template=False, write_hidden=False, template_options=None, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" - to_write = set(self.user_set) + to_write = set(self.user_set) written = set() if not template: raise Exception if not template_options: template_options = collections.defaultdict(str) - + if python_template: text = open(template,'r').read() - text = text.split('\n') + text = text.split('\n') # remove if templating - text = [l if not l.startswith('#IF') else 
l[l.find(')# ')+2:] + text = [l if not l.startswith('#IF') else l[l.find(')# ')+2:] for l in text if self.valid_line(l, template_options)] text ='\n'.join(text) - + if python_template and not to_write: import string if self.blocks: @@ -2981,14 +2981,14 @@ def write(self, output_file, template=None, python_template=False, if not self.list_parameter: text = text % self else: - data = dict((key.lower(),value) for key, value in self.items()) + data = dict((key.lower(),value) for key, value in self.items()) for name in self.list_parameter: if self.list_parameter[name] != str: data[name] = ', '.join(str(v) for v in data[name]) else: data[name] = "['%s']" % "', '".join(str(v) for v in data[name]) text = text % data - else: + else: text = "" for line in open(template,'r'): nline = line.split('#')[0] @@ -3005,11 +3005,11 @@ def write(self, output_file, template=None, python_template=False, this_group = this_group[0] text += this_group.get_template(self) % self this_group.manage_parameters(self, written, to_write) - + elif len(nline) != 2: text += line elif nline[1].strip() in self: - + name = nline[1].strip().lower() value = self[name] if name in self.list_parameter: @@ -3026,15 +3026,15 @@ def write(self, output_file, template=None, python_template=False, else: endline = '' text += ' %s\t= %s %s%s' % (value, name, comment, endline) - written.add(name) + written.add(name) if name in to_write: to_write.remove(name) else: logger.info('Adding missing parameter %s to current %s (with default value)', (name, self.filename)) - written.add(name) - text += line + written.add(name) + text += line for b in self.blocks: if b.status(self): @@ -3057,7 +3057,7 @@ def write(self, output_file, template=None, python_template=False, else: #partial writting -> add only what is needed to_add = [] - for line in b.get_template(self).split('\n'): + for line in b.get_template(self).split('\n'): nline = line.split('#')[0] nline = nline.split('!')[0] nline = nline.split('=') @@ -3072,8 +3072,8 @@ 
def write(self, output_file, template=None, python_template=False, continue #already include before else: to_add.append(line % {nline[1].strip():value, name:value}) - written.add(name) - + written.add(name) + if name in to_write: to_write.remove(name) else: @@ -3095,13 +3095,13 @@ def write(self, output_file, template=None, python_template=False, text += '\n'.join(to_add) if to_write or write_hidden: - text+="""#********************************************************************* + text+="""#********************************************************************* # Additional hidden parameters #********************************************************************* -""" +""" if write_hidden: # - # do not write hidden parameter not hidden for this template + # do not write hidden parameter not hidden for this template # if python_template: written = written.union(set(re.findall('\%\((\w*)\)s', open(template,'r').read(), re.M))) @@ -3129,7 +3129,7 @@ def get_last_value_include(self, output_dir): if inc file does not exist we will return the current value (i.e. set has no change) """ - #remember that + #remember that # default_include_file is a class variable # self.includepath is on the form include_path : [list of param ] out = {} @@ -3165,7 +3165,7 @@ def get_value_from_include(self, path, list_of_params, output_dir): with open(pjoin(output_dir,path), 'r') as fsock: text = fsock.read() - + for name in list_of_params: misc.sprint(name, name in self.fortran_name) misc.sprint(self.fortran_name[name] if name in self.fortran_name[name] else name) @@ -3191,11 +3191,11 @@ def get_value_from_include(self, path, list_of_params, output_dir): misc.sprint(self.fortran_name) misc.sprint(text) raise Exception - return out + return out def get_default(self, name, default=None, log_level=None): - """return self[name] if exist otherwise default. log control if we + """return self[name] if exist otherwise default. 
log control if we put a warning or not if we use the default value""" lower_name = name.lower() @@ -3216,13 +3216,13 @@ def get_default(self, name, default=None, log_level=None): log_level = 20 if not default: default = dict.__getitem__(self, name.lower()) - + logger.log(log_level, '%s missed argument %s. Takes default: %s' % (self.filename, name, default)) self[name] = default return default else: - return self[name] + return self[name] def mod_inc_pdlabel(self, value): """flag pdlabel has 'dressed' if one of the special lepton PDF with beamstralung. @@ -3237,16 +3237,16 @@ def edit_dummy_fct_from_file(self, filelist, outdir): filelist is a list of input files (given by the user) containing a series of function to be placed in replacement of standard (typically dummy) functions of the code. - This use LO/NLO class attribute that defines which function name need to - be placed in which file. + This use LO/NLO class attribute that defines which function name need to + be placed in which file. First time this is used, a backup of the original file is done in order to - recover if the user remove some of those files. + recover if the user remove some of those files. The function present in the file are determined automatically via regular expression. and only that function is replaced in the associated file. 
- function in the filelist starting with user_ will also be include within the + function in the filelist starting with user_ will also be include within the dummy_fct.f file """ @@ -3269,7 +3269,7 @@ def edit_dummy_fct_from_file(self, filelist, outdir): fsock = file_writers.FortranWriter(tmp,'w') function_text = fsock.remove_routine(text, fct) fsock.close() - test = open(tmp,'r').read() + test = open(tmp,'r').read() if fct not in self.dummy_fct_file: if fct.startswith('user_'): self.dummy_fct_file[fct] = self.dummy_fct_file['user_'] @@ -3315,22 +3315,22 @@ def guess_entry_fromname(self, name, value): - vartype: type of the variable - name: name of the variable (stripped from metadata) - options: additional options for the add_param - rules: - - if name starts with str_, int_, float_, bool_, list_, dict_ then + rules: + - if name starts with str_, int_, float_, bool_, list_, dict_ then - vartype is set accordingly - name is strip accordingly - otherwise guessed from value (which is string) - if name contains min/max - vartype is set to float - options has an added {'cut':True} - - suffixes like + - suffixes like - will be removed from named - will be added in options (for add_param) as {'cut':True} see add_param documentation for the list of supported options - if include is on False set autodef to False (i.e. 
enforce it False for future change) """ - # local function + # local function def update_typelist(value, name, opts): """convert a string to a list and update opts to keep track of the type """ value = value.strip() @@ -3358,7 +3358,7 @@ def update_typelist(value, name, opts): opts[key] = val name = name.replace("<%s=%s>" %(key,val), '') - # get vartype + # get vartype # first check that name does not force it supported_type = ["str", "float", "int", "bool", "list", "dict"] if "_" in name and name.split("_")[0].lower() in supported_type: @@ -3406,13 +3406,13 @@ def f77_formatting(value, formatv=None): value = str(value).lower() else: assert formatv - + if formatv == 'bool': if str(value) in ['1','T','.true.','True']: return '.true.' else: return '.false.' - + elif formatv == 'int': try: return str(int(value)) @@ -3422,12 +3422,12 @@ def f77_formatting(value, formatv=None): return str(int(fl)) else: raise - + elif formatv == 'float': if isinstance(value, str): value = value.replace('d','e') return ('%.10e' % float(value)).replace('e','d') - + elif formatv == 'str': # Check if it is a list if value.strip().startswith('[') and value.strip().endswith(']'): @@ -3437,20 +3437,20 @@ def f77_formatting(value, formatv=None): enumerate(elements)] else: return "'%s'" % value - - + + def check_validity(self, log_level=30): """check that parameter missing in the card are set to the expected value""" for name, value in self.system_default.items(): self.set(name, value, changeifuserset=False) - + for name in self.includepath[False]: to_bypass = self.hidden_param + list(self.legacy_parameter.keys()) if name not in to_bypass: - self.get_default(name, log_level=log_level) + self.get_default(name, log_level=log_level) for name in self.legacy_parameter: if self[name] != self.legacy_parameter[name]: @@ -3458,28 +3458,28 @@ def check_validity(self, log_level=30): for block in self.blocks: block.check_validity(self) - + def update_system_parameter_for_include(self): - """update hidden 
system only parameter for the correct writtin in the + """update hidden system only parameter for the correct writtin in the include""" return - + def write_include_file(self, output_dir, output_file=None): """Write the various include file in output_dir. The entry True of self.includepath will be written in run_card.inc The entry False will not be written anywhere output_file allows testing by providing stream. - This also call the function to add variable definition for the - variable with autodef=True (handle by write_autodef function) + This also call the function to add variable definition for the + variable with autodef=True (handle by write_autodef function) """ - + # ensure that all parameter are coherent and fix those if needed self.check_validity() - + #ensusre that system only parameter are correctly set self.update_system_parameter_for_include() @@ -3490,10 +3490,10 @@ def write_include_file(self, output_dir, output_file=None): self.write_autodef(output_dir, output_file=None) # check/fix status of customised functions self.edit_dummy_fct_from_file(self["custom_fcts"], os.path.dirname(output_dir)) - + for incname in self.includepath: self.write_one_include_file(output_dir, incname, output_file) - + for name,value in value_in_old_include.items(): if value != self[name]: self.fct_mod[name][0](value, self[name], name, *self.fct_mod[name][1],**self.fct_mod[name][2]) @@ -3515,13 +3515,13 @@ def write_one_include_file(self, output_dir, incname, output_file=None): fsock = file_writers.FortranWriter(pjoin(output_dir,pathinc+'.tmp')) - for key in self.includepath[incname]: + for key in self.includepath[incname]: #define the fortran name if key in self.fortran_name: fortran_name = self.fortran_name[key] else: fortran_name = key - + if incname in self.include_as_parameter: fsock.writelines('INTEGER %s\n' % fortran_name) #get the value with warning if the user didn't set it @@ -3534,7 +3534,7 @@ def write_one_include_file(self, output_dir, incname, 
output_file=None): # in case of a list, add the length of the list as 0th # element in fortran. Only in case of integer or float # list (not for bool nor string) - targettype = self.list_parameter[key] + targettype = self.list_parameter[key] if targettype is bool: pass elif targettype is int: @@ -3550,7 +3550,7 @@ def write_one_include_file(self, output_dir, incname, output_file=None): elif isinstance(value, dict): for fortran_name, onevalue in value.items(): line = '%s = %s \n' % (fortran_name, self.f77_formatting(onevalue)) - fsock.writelines(line) + fsock.writelines(line) elif isinstance(incname,str) and 'compile' in incname: if incname in self.include_as_parameter: line = 'PARAMETER (%s=%s)' %( fortran_name, value) @@ -3585,7 +3585,7 @@ def write_autodef(self, output_dir, output_file=None): filetocheck = dict(self.definition_path) if True not in self.definition_path: filetocheck[True] = [] - + for incname in filetocheck: if incname is True: @@ -3598,7 +3598,7 @@ def write_autodef(self, output_dir, output_file=None): if output_file: fsock = output_file input = fsock.getvalue() - + else: input = open(pjoin(output_dir,pathinc),'r').read() # do not define fsock here since we might not need to overwrite it @@ -3608,7 +3608,7 @@ def write_autodef(self, output_dir, output_file=None): previous = re.findall(re_pat, input, re.M) # now check which one needed to be added (and remove those identicaly defined) to_add = [] - for key in filetocheck[incname]: + for key in filetocheck[incname]: curr_type = self[key].__class__.__name__ length = "" if curr_type in [list, "list"]: @@ -3640,10 +3640,10 @@ def write_autodef(self, output_dir, output_file=None): fsock.truncate(0) fsock.seek(0) - # remove outdated lines + # remove outdated lines lines = input.split('\n') if previous: - out = [line for line in lines if not re.search(re_pat, line, re.M) or + out = [line for line in lines if not re.search(re_pat, line, re.M) or re.search(re_pat, line, re.M).groups() not in previous] else: 
out = lines @@ -3662,7 +3662,7 @@ def write_autodef(self, output_dir, output_file=None): stop = out.index('C STOP USER COMMON BLOCK') out = out[:start]+ out[stop+1:] #add new common-block - if self.definition_path[incname]: + if self.definition_path[incname]: out.append("C START USER COMMON BLOCK") if isinstance(pathinc , str): filename = os.path.basename(pathinc).split('.',1)[0] @@ -3675,10 +3675,10 @@ def write_autodef(self, output_dir, output_file=None): filename = filename.upper() out.append(" COMMON/USER_CUSTOM_%s/%s" %(filename,','.join( self.definition_path[incname]))) out.append('C STOP USER COMMON BLOCK') - + if not output_file: fsock.writelines(out) - fsock.close() + fsock.close() else: # for iotest out = ["%s\n" %l for l in out] @@ -3702,7 +3702,7 @@ def get_idbmup(lpp): def get_banner_init_information(self): """return a dictionary with the information needed to write the first line of the block of the lhe file.""" - + output = {} output["idbmup1"] = self.get_idbmup(self['lpp1']) output["idbmup2"] = self.get_idbmup(self['lpp2']) @@ -3713,7 +3713,7 @@ def get_banner_init_information(self): output["pdfsup1"] = self.get_pdf_id(self["pdlabel"]) output["pdfsup2"] = self.get_pdf_id(self["pdlabel"]) return output - + def get_pdf_id(self, pdf): if pdf == "lhapdf": lhaid = self["lhaid"] @@ -3721,19 +3721,19 @@ def get_pdf_id(self, pdf): return lhaid[0] else: return lhaid - else: + else: try: return {'none': 0, 'iww': 0, 'eva':0, 'edff':0, 'chff':0, 'cteq6_m':10000,'cteq6_l':10041,'cteq6l1':10042, 'nn23lo':246800,'nn23lo1':247000,'nn23nlo':244800 - }[pdf] + }[pdf] except: - return 0 - + return 0 + def get_lhapdf_id(self): return self.get_pdf_id(self['pdlabel']) - def remove_all_cut(self): + def remove_all_cut(self): """remove all the cut""" for name in self.cuts_parameter: @@ -3749,7 +3749,7 @@ def remove_all_cut(self): elif 'eta' in name: self[name] = -1 else: - self[name] = 0 + self[name] = 0 
################################################################################################ ### Define various template subpart for the LO Run_card @@ -3767,11 +3767,11 @@ def remove_all_cut(self): %(nb_proton1)s = nb_proton1 # number of proton for the first beam %(nb_neutron1)s = nb_neutron1 # number of neutron for the first beam %(mass_ion1)s = mass_ion1 # mass of the heavy ion (first beam) -# Note that seting differently the two beams only work if you use +# Note that seting differently the two beams only work if you use # group_subprocess=False when generating your matrix-element %(nb_proton2)s = nb_proton2 # number of proton for the second beam %(nb_neutron2)s = nb_neutron2 # number of neutron for the second beam - %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) + %(mass_ion2)s = mass_ion2 # mass of the heavy ion (second beam) """ template_off = "# To see heavy ion options: type \"update ion_pdf\"" @@ -3834,11 +3834,11 @@ def remove_all_cut(self): # Frame for polarization ------------------------------------------------------------------------------------ template_on = \ """#********************************************************************* -# Frame where to evaluate the matrix-element (not the cut!) for polarization +# Frame where to evaluate the matrix-element (not the cut!) for polarization #********************************************************************* %(me_frame)s = me_frame ! list of particles to sum-up to define the rest-frame ! in which to evaluate the matrix-element - ! [1,2] means the partonic center of mass + ! 
[1,2] means the partonic center of mass """ template_off = "" frame_block = RunBlock('frame', template_on=template_on, template_off=template_off) @@ -3891,7 +3891,7 @@ def remove_all_cut(self): # CONTROL The extra running scale (not QCD) * # Such running is NOT include in systematics computation * #*********************************************************************** - %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale + %(fixed_extra_scale)s = fixed_extra_scale ! False means dynamical scale %(mue_ref_fixed)s = mue_ref_fixed ! scale to use if fixed scale mode %(mue_over_ref)s = mue_over_ref ! ratio to mur if dynamical scale """ @@ -3908,10 +3908,10 @@ def remove_all_cut(self): %(tmin_for_channel)s = tmin_for_channel ! limit the non-singular reach of --some-- channel of integration related to T-channel diagram (value between -1 and 0), -1 is no impact %(survey_splitting)s = survey_splitting ! for loop-induced control how many core are used at survey for the computation of a single iteration. %(survey_nchannel_per_job)s = survey_nchannel_per_job ! control how many Channel are integrated inside a single job on cluster/multicore - %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) + %(refine_evt_by_job)s = refine_evt_by_job ! control the maximal number of events for the first iteration of the refine (larger means less jobs) +#********************************************************************* +# Compilation flag. #********************************************************************* -# Compilation flag. -#********************************************************************* %(global_flag)s = global_flag ! fortran optimization flag use for the all code. %(aloha_flag)s = aloha_flag ! fortran optimization flag for aloha function. Suggestions: '-ffast-math' %(matrix_flag)s = matrix_flag ! fortran optimization flag for matrix.f function. 
Suggestions: '-O3' @@ -3948,7 +3948,7 @@ def check_validity(self, card): if card['pdlabel'] != card['pdlabel1']: dict.__setitem__(card, 'pdlabel', card['pdlabel1']) elif card['pdlabel1'] in sum(card.allowed_lep_densities.values(),[]): - raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") + raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel2'] in sum(card.allowed_lep_densities.values(),[]): raise InvalidRunCard("Assymetric beam pdf not supported for e e collision with ISR/bemstralung option") elif card['pdlabel1'] == 'none': @@ -3962,7 +3962,7 @@ def check_validity(self, card): dict.__setitem__(card, 'pdlabel2', card['pdlabel']) if abs(card['lpp1']) == 1 == abs(card['lpp2']) and card['pdlabel1'] != card['pdlabel2']: - raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") + raise InvalidRunCard("Assymetric beam pdf not supported for proton-proton collision") def status(self, card): """return False if template_off to be used, True if template_on to be used""" @@ -4028,7 +4028,7 @@ def post_set(card, value, change_userdefine, raiseerror, name='unknown', **opt): if name == 'fixed_fac_scale2' and 'fixed_fac_scale1' not in card.user_set: dict.__setitem__(card, 'fixed_fac_scale1', card['fixed_fac_scale']) if name == 'fixed_fac_scale1' and 'fixed_fac_scale2' not in card.user_set: - dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) + dict.__setitem__(card, 'fixed_fac_scale2', card['fixed_fac_scale']) def status(self, card): @@ -4061,32 +4061,32 @@ def status(self, card): class RunCardLO(RunCard): """an object to handle in a nice way the run_card information""" - + blocks = [heavy_ion_block, beam_pol_block, syscalc_block, ecut_block, frame_block, eva_scale_block, mlm_block, ckkw_block, psoptim_block, pdlabel_block, fixedfacscale, running_block] dummy_fct_file = {"dummy_cuts": 
pjoin("SubProcesses","dummy_fct.f"), "get_dummy_x1": pjoin("SubProcesses","dummy_fct.f"), - "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), + "get_dummy_x1_x2": pjoin("SubProcesses","dummy_fct.f"), "dummy_boostframe": pjoin("SubProcesses","dummy_fct.f"), "user_dynamical_scale": pjoin("SubProcesses","dummy_fct.f"), "bias_wgt_custom": pjoin("SubProcesses","dummy_fct.f"), "user_": pjoin("SubProcesses","dummy_fct.f") # all function starting by user will be added to that file } - + include_as_parameter = ['vector.inc'] if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_lo.dat") - + def default_setup(self): """default value for the run_card.dat""" - + self.add_param("run_tag", "tag_1", include=False) self.add_param("gridpack", False) self.add_param("time_of_flight", -1.0, include=False) - self.add_param("nevents", 10000) + self.add_param("nevents", 10000) self.add_param("iseed", 0) self.add_param("python_seed", -2, include=False, hidden=True, comment="controlling python seed [handling in particular the final unweighting].\n -1 means use default from random module.\n -2 means set to same value as iseed") self.add_param("lpp1", 1, fortran_name="lpp(1)", allowed=[-1,1,0,2,3,9,-2,-3,4,-4], @@ -4106,7 +4106,7 @@ def default_setup(self): self.add_param('nb_neutron1', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(1)", comment='For heavy ion physics nb of neutron in the ion (for both beam but if group_subprocess was False)') self.add_param('nb_neutron2', 0, hidden=True, allowed=[1,0, 126 , '*'],fortran_name="nb_neutron(2)", - comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') + comment='For heavy ion physics nb of neutron in the ion (of beam 2 if group_subprocess was False )') self.add_param('mass_ion1', -1.0, hidden=True, fortran_name="mass_ion(1)", allowed=[-1,0, 0.938, 207.9766521*0.938, 0.000511, 0.105, '*'], comment='For heavy ion physics mass in GeV of the ion (of beam 1)') 
@@ -4133,11 +4133,11 @@ def default_setup(self): self.add_param("mue_over_ref", 1.0, hidden=True, comment='ratio mu_other/mu for dynamical scale') self.add_param("ievo_eva",0,hidden=True, allowed=[0,1],fortran_name="ievo_eva", comment='eva: 0 for EW pdf muf evolution by q^2; 1 for evo by pT^2') - + # Bias module options self.add_param("bias_module", 'None', include=False, hidden=True) self.add_param('bias_parameters', {'__type__':1.0}, include='BIAS/bias.inc', hidden=True) - + #matching self.add_param("scalefact", 1.0) self.add_param("ickkw", 0, allowed=[0,1], hidden=True, comment="\'0\' for standard fixed order computation.\n\'1\' for MLM merging activates alphas and pdf re-weighting according to a kt clustering of the QCD radiation.") @@ -4221,7 +4221,7 @@ def default_setup(self): self.add_param("mmaa", 0.0, cut='aa') self.add_param("mmll", 0.0, cut='ll') self.add_param("mmjjmax", -1.0, cut='jj') - self.add_param("mmbbmax", -1.0, cut='bb') + self.add_param("mmbbmax", -1.0, cut='bb') self.add_param("mmaamax", -1.0, cut='aa') self.add_param("mmllmax", -1.0, cut='ll') self.add_param("mmnl", 0.0, cut='LL') @@ -4231,9 +4231,9 @@ def default_setup(self): self.add_param("ptllmax", -1.0, cut='ll') self.add_param("xptj", 0.0, cut='jj') self.add_param("xptb", 0.0, cut='bb') - self.add_param("xpta", 0.0, cut='aa') + self.add_param("xpta", 0.0, cut='aa') self.add_param("xptl", 0.0, cut='ll') - # ordered pt jet + # ordered pt jet self.add_param("ptj1min", 0.0, cut='jj') self.add_param("ptj1max", -1.0, cut='jj') self.add_param("ptj2min", 0.0, cut='jj') @@ -4241,7 +4241,7 @@ def default_setup(self): self.add_param("ptj3min", 0.0, cut='jjj') self.add_param("ptj3max", -1.0, cut='jjj') self.add_param("ptj4min", 0.0, cut='j'*4) - self.add_param("ptj4max", -1.0, cut='j'*4) + self.add_param("ptj4max", -1.0, cut='j'*4) self.add_param("cutuse", 0, cut='jj') # ordered pt lepton self.add_param("ptl1min", 0.0, cut='l'*2) @@ -4249,7 +4249,7 @@ def default_setup(self): 
self.add_param("ptl2min", 0.0, cut='l'*2) self.add_param("ptl2max", -1.0, cut='l'*2) self.add_param("ptl3min", 0.0, cut='l'*3) - self.add_param("ptl3max", -1.0, cut='l'*3) + self.add_param("ptl3max", -1.0, cut='l'*3) self.add_param("ptl4min", 0.0, cut='l'*4) self.add_param("ptl4max", -1.0, cut='l'*4) # Ht sum of jets @@ -4257,7 +4257,7 @@ def default_setup(self): self.add_param("htjmax", -1.0, cut='j'*2) self.add_param("ihtmin", 0.0, cut='J'*2) self.add_param("ihtmax", -1.0, cut='J'*2) - self.add_param("ht2min", 0.0, cut='J'*3) + self.add_param("ht2min", 0.0, cut='J'*3) self.add_param("ht3min", 0.0, cut='J'*3) self.add_param("ht4min", 0.0, cut='J'*4) self.add_param("ht2max", -1.0, cut='J'*3) @@ -4267,7 +4267,7 @@ def default_setup(self): self.add_param("ptgmin", 0.0, cut='aj') self.add_param("r0gamma", 0.4, hidden=True) self.add_param("xn", 1.0, hidden=True) - self.add_param("epsgamma", 1.0, hidden=True) + self.add_param("epsgamma", 1.0, hidden=True) self.add_param("isoem", True, hidden=True) self.add_param("xetamin", 0.0, cut='jj') self.add_param("deltaeta", 0.0, cut='j'*2) @@ -4280,7 +4280,7 @@ def default_setup(self): self.add_param("use_syst", True) self.add_param('systematics_program', 'systematics', include=False, hidden=True, comment='Choose which program to use for systematics computation: none, systematics, syscalc') self.add_param('systematics_arguments', ['--mur=0.5,1,2', '--muf=0.5,1,2', '--pdf=errorset'], include=False, hidden=True, comment='Choose the argment to pass to the systematics command. like --mur=0.25,1,4. 
Look at the help of the systematics function for more details.') - + self.add_param("sys_scalefact", "0.5 1 2", include=False, hidden=True) self.add_param("sys_alpsfact", "None", include=False, hidden=True) self.add_param("sys_matchscale", "auto", include=False, hidden=True) @@ -4315,8 +4315,8 @@ def default_setup(self): self.add_param('aloha_flag', '', include=False, hidden=True, comment='global fortran compilation flag, suggestion: -ffast-math', fct_mod=(self.make_clean, ('Source/DHELAS'),{})) self.add_param('matrix_flag', '', include=False, hidden=True, comment='fortran compilation flag for the matrix-element files, suggestion -O3', - fct_mod=(self.make_Ptouch, ('matrix'),{})) - self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', + fct_mod=(self.make_Ptouch, ('matrix'),{})) + self.add_param('vector_size', 1, include='vector.inc', hidden=True, comment='lockstep size for parralelism run', fortran_name='VECSIZE_MEMMAX', fct_mod=(self.reset_simd,(),{})) # parameter allowing to define simple cut via the pdg @@ -4329,24 +4329,24 @@ def default_setup(self): self.add_param('eta_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False) - + self.add_param('pdg_cut',[0], system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], system=True) # store pt min self.add_param('ptmax4pdg',[-1.], system=True) self.add_param('Emin4pdg',[0.], system=True) # store pt min - self.add_param('Emax4pdg',[-1.], system=True) + self.add_param('Emax4pdg',[-1.], system=True) self.add_param('etamin4pdg',[0.], system=True) # store pt min - self.add_param('etamax4pdg',[-1.], system=True) + self.add_param('etamax4pdg',[-1.], system=True) self.add_param('mxxmin4pdg',[-1.], system=True) self.add_param('mxxpart_antipart', [False], system=True) - - - + + + def check_validity(self): """ """ 
- + super(RunCardLO, self).check_validity() - + #Make sure that nhel is only either 0 (i.e. no MC over hel) or #1 (MC over hel with importance sampling). In particular, it can #no longer be > 1. @@ -4357,12 +4357,12 @@ def check_validity(self): "not %s." % self['nhel']) if int(self['maxjetflavor']) > 6: raise InvalidRunCard('maxjetflavor should be lower than 5! (6 is partly supported)') - + if len(self['pdgs_for_merging_cut']) > 1000: raise InvalidRunCard("The number of elements in "+\ "'pdgs_for_merging_cut' should not exceed 1000.") - + # some cut need to be deactivated in presence of isolation if self['ptgmin'] > 0: if self['pta'] > 0: @@ -4370,18 +4370,18 @@ def check_validity(self): self['pta'] = 0.0 if self['draj'] > 0: logger.warning('draj cut discarded since photon isolation is used') - self['draj'] = 0.0 - - # special treatment for gridpack use the gseed instead of the iseed + self['draj'] = 0.0 + + # special treatment for gridpack use the gseed instead of the iseed if self['gridrun']: self['iseed'] = self['gseed'] - + #Some parameter need to be fixed when using syscalc #if self['use_syst']: # if self['scalefact'] != 1.0: # logger.warning('Since use_syst=T, changing the value of \'scalefact\' to 1') # self['scalefact'] = 1.0 - + # CKKW Treatment if self['ickkw'] > 0: if self['ickkw'] != 1: @@ -4399,7 +4399,7 @@ def check_validity(self): raise InvalidRunCard('maxjetflavor at 6 is NOT supported for matching!') if self['ickkw'] == 2: # add warning if ckkw selected but the associate parameter are empty - self.get_default('highestmult', log_level=20) + self.get_default('highestmult', log_level=20) self.get_default('issgridfile', 'issudgrid.dat', log_level=20) if self['xqcut'] > 0: if self['ickkw'] == 0: @@ -4412,13 +4412,13 @@ def check_validity(self): if self['drjl'] != 0: if 'drjl' in self.user_set: logger.warning('Since icckw>0, changing the value of \'drjl\' to 0') - self['drjl'] = 0 - if not self['auto_ptj_mjj']: + self['drjl'] = 0 + if not 
self['auto_ptj_mjj']: if self['mmjj'] > self['xqcut']: logger.warning('mmjj > xqcut (and auto_ptj_mjj = F). MMJJ set to 0') - self['mmjj'] = 0.0 - - # check validity of the pdf set + self['mmjj'] = 0.0 + + # check validity of the pdf set # note that pdlabel is automatically set to lhapdf if pdlabel1 or pdlabel2 is set to lhapdf if self['pdlabel'] == 'lhapdf': #add warning if lhaid not define @@ -4426,7 +4426,7 @@ def check_validity(self): mod = False for i in [1,2]: - lpp = 'lpp%i' %i + lpp = 'lpp%i' %i pdlabelX = 'pdlabel%i' % i if self[lpp] == 0: # nopdf if self[pdlabelX] != 'none': @@ -4459,12 +4459,12 @@ def check_validity(self): raise InvalidRunCard( "Heavy ion mode is only supported for lpp1=1/2") if self['lpp2'] not in [1,2]: if self['nb_proton2'] !=1 or self['nb_neutron2'] !=0: - raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") + raise InvalidRunCard( "Heavy ion mode is only supported for lpp2=1/2") # check that fixed_fac_scale(1/2) is setting as expected # if lpp=2/3/4 -> default is that beam in fixed scale - # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are + # check that fixed_fac_scale is not setup if fixed_fac_scale1/2 are # check that both fixed_fac_scale1/2 are defined together # ensure that fixed_fac_scale1 and fixed_fac_scale2 are setup as needed if 'fixed_fac_scale1' in self.user_set: @@ -4475,13 +4475,13 @@ def check_validity(self): elif 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale1 are defined but not fixed_fac_scale2. The value of fixed_fac_scale2 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale2'] = self['fixed_fac_scale'] - elif self['lpp2'] !=0: + elif self['lpp2'] !=0: raise Exception('fixed_fac_scale2 not defined while fixed_fac_scale1 is. Please fix your run_card.') elif 'fixed_fac_scale2' in self.user_set: if 'fixed_fac_scale' in self.user_set: logger.warning('fixed_fac_scale and fixed_fac_scale2 are defined but not fixed_fac_scale1. 
The value of fixed_fac_scale1 will be set to the one of fixed_fac_scale.') self['fixed_fac_scale1'] = self['fixed_fac_scale'] - elif self['lpp1'] !=0: + elif self['lpp1'] !=0: raise Exception('fixed_fac_scale1 not defined while fixed_fac_scale2 is. Please fix your run_card.') else: if 'fixed_fac_scale' in self.user_set: @@ -4500,12 +4500,12 @@ def check_validity(self): logger.warning('fixed_fac_scale1 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale1']) logger.warning('fixed_fac_scale2 not defined whithin your run_card. Using default value: %s', self['fixed_fac_scale2']) - # check if lpp = + # check if lpp = if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]): for i in [1,2]: if abs(self['lpp%s' % i ]) in [3,4] and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: logger.warning("Vector boson from lepton PDF is using fixed scale value of muf [dsqrt_q2fact%s]. Looks like you kept the default value (Mz). Is this really the cut-off that you want to use?" % i) - + if abs(self['lpp%s' % i ]) == 2 and self['fixed_fac_scale%s' % i] and self['dsqrt_q2fact%s'%i] == 91.188: if self['pdlabel'] in ['edff','chff']: logger.warning("Since 3.5.0 exclusive photon-photon processes in ultraperipheral proton and nuclear collisions from gamma-UPC (arXiv:2207.03012) will ignore the factorisation scale.") @@ -4515,10 +4515,10 @@ def check_validity(self): if six.PY2 and self['hel_recycling']: self['hel_recycling'] = False - logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. + logger.warning("""Helicity recycling optimization requires Python3. This optimzation is therefore deactivated automatically. In general this optimization speeds up the computation by a factor of two.""") - + # check that ebeam is bigger than the associated mass. 
for i in [1,2]: if self['lpp%s' % i ] not in [1,2]: @@ -4529,13 +4529,13 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") - elif self['ebeam%i' % i] < self['mass_ion%i' % i]: + raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + elif self['ebeam%i' % i] < self['mass_ion%i' % i]: if self['ebeam%i' %i] == 0: logger.warning("At rest ion mode set: Energy beam set to %s" % self['mass_ion%i' % i]) self.set('ebeam%i' %i, self['mass_ion%i' % i]) - - + + # check the tmin_for_channel is negative if self['tmin_for_channel'] == 0: raise InvalidRunCard('tmin_for_channel can not be set to 0.') @@ -4543,15 +4543,15 @@ def check_validity(self): logger.warning('tmin_for_channel should be negative. Will be using -%f instead' % self['tmin_for_channel']) self.set('tmin_for_channel', -self['tmin_for_channel']) - + def update_system_parameter_for_include(self): """system parameter need to be setupe""" - + # polarization self['frame_id'] = sum(2**(n) for n in self['me_frame']) - + # set the pdg_for_cut fortran parameter - pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + + pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys()) + list(self['e_min_pdg'].keys()) +list(self['e_max_pdg'].keys()) + list(self['eta_min_pdg'].keys()) +list(self['eta_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys()) + list(self['mxx_only_part_antipart'].keys())) @@ -4559,15 +4559,15 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different pdgs are allowed for pdg specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. 
Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative pdg code') - - + + if any(pdg in pdg_to_cut for pdg in [1,2,3,4,5,21,22,11,13,15]): raise Exception("Can not use PDG related cut for light quark/b quark/lepton/gluon/photon") - + if pdg_to_cut: self['pdg_cut'] = list(pdg_to_cut) self['ptmin4pdg'] = [] @@ -4595,7 +4595,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -4605,11 +4605,11 @@ def update_system_parameter_for_include(self): self['ptmax4pdg'] = [-1.] self['Emax4pdg'] = [-1.] self['etamax4pdg'] =[-1.] - self['mxxmin4pdg'] =[0.] + self['mxxmin4pdg'] =[0.] self['mxxpart_antipart'] = [False] - - - + + + def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules process 1->N all cut set on off. 
@@ -4626,7 +4626,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if proc_characteristic['loop_induced']: self['nhel'] = 1 self['pdgs_for_merging_cut'] = proc_characteristic['colored_pdgs'] - + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() @@ -4636,7 +4636,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): # check for beam_id beam_id = set() beam_id_split = [set(), set()] - for proc in proc_def: + for proc in proc_def: for oneproc in proc: for i,leg in enumerate(oneproc['legs']): if not leg['state']: @@ -4654,20 +4654,20 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): maxjetflavor = max([4]+[abs(i) for i in beam_id if -7< i < 7]) self['maxjetflavor'] = maxjetflavor self['asrwgtflavor'] = maxjetflavor - + if any(i in beam_id for i in [1,-1,2,-2,3,-3,4,-4,5,-5,21,22]): # check for e p collision if any(id in beam_id for id in [11,-11,13,-13]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [11,-11,13,-13]): - self['lpp1'] = 0 - self['lpp2'] = 1 - self['ebeam1'] = '1k' - self['ebeam2'] = '6500' + self['lpp1'] = 0 + self['lpp2'] = 1 + self['ebeam1'] = '1k' + self['ebeam2'] = '6500' else: - self['lpp1'] = 1 - self['lpp2'] = 0 - self['ebeam1'] = '6500' + self['lpp1'] = 1 + self['lpp2'] = 0 + self['ebeam1'] = '6500' self['ebeam2'] = '1k' # UPC for p p collision @@ -4677,7 +4677,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam1'] = '6500' self['ebeam2'] = '6500' self['pdlabel'] = 'edff' - + elif any(id in beam_id for id in [11,-11,13,-13]): self['lpp1'] = 0 self['lpp2'] = 0 @@ -4688,7 +4688,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('ecut') self.display_block.append('beam_pol') - + # check for possibility of eva eva_in_b1 = any(i in beam_id_split[0] for i in [23,24,-24]) #,12,-12,14,-14]) @@ -4701,10 
+4701,10 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['nhel'] = 1 self['pdlabel'] = 'eva' self['fixed_fac_scale'] = True - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') elif eva_in_b1: - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') self['pdlabel1'] = 'eva' self['fixed_fac_scale1'] = True self['nhel'] = 1 @@ -4724,7 +4724,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['pdlabel2'] = 'eva' self['fixed_fac_scale2'] = True self['nhel'] = 1 - self.display_block.append('beam_pol') + self.display_block.append('beam_pol') for i in beam_id_split[0]: if abs(i) == 11: self['lpp1'] = math.copysign(3,i) @@ -4740,34 +4740,34 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if any(i in beam_id for i in [22,23,24,-24,12,-12,14,-14]): self.display_block.append('eva_scale') - # automatic polarisation of the beam if neutrino beam + # automatic polarisation of the beam if neutrino beam if any(id in beam_id for id in [12,-12,14,-14,16,-16]): self.display_block.append('beam_pol') if any(id in beam_id_split[0] for id in [12,14,16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = -100 if not all(id in [12,14,16] for id in beam_id_split[0]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1]. %s') elif any(id in beam_id_split[0] for id in [-12,-14,-16]): - self['lpp1'] = 0 - self['ebeam1'] = '1k' + self['lpp1'] = 0 + self['ebeam1'] = '1k' self['polbeam1'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[0]): - logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam1].') + logger.warning('Issue with default beam setup of neutrino in the run_card. 
Please check it up [polbeam1].') if any(id in beam_id_split[1] for id in [12,14,16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = -100 if not all(id in [12,14,16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') elif any(id in beam_id_split[1] for id in [-12,-14,-16]): - self['lpp2'] = 0 - self['ebeam2'] = '1k' + self['lpp2'] = 0 + self['ebeam2'] = '1k' self['polbeam2'] = 100 if not all(id in [-12,-14,-16] for id in beam_id_split[1]): logger.warning('Issue with default beam setup of neutrino in the run_card. Please check it up [polbeam2].') - + # Check if need matching min_particle = 99 max_particle = 0 @@ -4798,12 +4798,12 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - + break + if matching: self['ickkw'] = 1 self['xqcut'] = 30 - #self['use_syst'] = False + #self['use_syst'] = False self['drjj'] = 0 self['drjl'] = 0 self['sys_alpsfact'] = "0.5 1 2" @@ -4811,8 +4811,8 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self.display_block.append('mlm') self.display_block.append('ckkw') self['dynamical_scale_choice'] = -1 - - + + # For interference module, the systematics are wrong. 
# automatically set use_syst=F and set systematics_program=none no_systematics = False @@ -4826,14 +4826,14 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): continue break - + if interference or no_systematics: self['use_syst'] = False self['systematics_program'] = 'none' if interference: self['dynamical_scale_choice'] = 3 self['sde_strategy'] = 2 - + # set default integration strategy # interference case is already handle above # here pick strategy 2 if only one QCD color flow @@ -4852,7 +4852,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): if pure_lepton and proton_initial: self['sde_strategy'] = 1 else: - # check if multi-jet j + # check if multi-jet j is_multijet = True for proc in proc_def: if any(abs(j.get('id')) not in jet_id for j in proc[0]['legs']): @@ -4860,7 +4860,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): break if is_multijet: self['sde_strategy'] = 2 - + # if polarization is used, set the choice of the frame in the run_card # But only if polarization is used for massive particles for plist in proc_def: @@ -4870,7 +4870,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): model = proc.get('model') particle = model.get_particle(l.get('id')) if particle.get('mass').lower() != 'zero': - self.display_block.append('frame') + self.display_block.append('frame') break else: continue @@ -4894,15 +4894,15 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): proc = proc_list[0] if proc['forbidden_onsh_s_channels']: self['sde_strategy'] = 1 - + if 'fix_scale' in proc_characteristic['limitations']: self['fixed_ren_scale'] = 1 self['fixed_fac_scale'] = 1 if self['ickkw'] == 1: logger.critical("MLM matching/merging not compatible with the model! 
You need to use another method to remove the double counting!") self['ickkw'] = 0 - - # define class of particles present to hide all the cuts associated to + + # define class of particles present to hide all the cuts associated to # not present class cut_class = collections.defaultdict(int) for proc in proc_def: @@ -4925,41 +4925,41 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): one_proc_cut['L'] += 1 elif abs(pdg) in [12,14,16]: one_proc_cut['n'] += 1 - one_proc_cut['L'] += 1 + one_proc_cut['L'] += 1 elif str(oneproc.get('model').get_particle(pdg)['mass']) != 'ZERO': one_proc_cut['H'] += 1 - + for key, nb in one_proc_cut.items(): cut_class[key] = max(cut_class[key], nb) self.cut_class = dict(cut_class) self.cut_class[''] = True #avoid empty - + # If model has running functionality add the additional parameter model = proc_def[0][0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Read file input/default_run_card_lo.dat # This has to be LAST !! 
if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'LO', 'Cards', 'run_card.dat') python_template = True else: template = pjoin(MEDIR, 'Cards', 'run_card_default.dat') python_template = False - + hid_lines = {'default':True}#collections.defaultdict(itertools.repeat(True).next) if isinstance(output_file, str): @@ -4975,9 +4975,9 @@ def write(self, output_file, template=None, python_template=False, hid_lines[k1+k2] = True super(RunCardLO, self).write(output_file, template=template, - python_template=python_template, + python_template=python_template, template_options=hid_lines, - **opt) + **opt) class InvalidMadAnalysis5Card(InvalidCmd): @@ -4986,19 +4986,19 @@ class InvalidMadAnalysis5Card(InvalidCmd): class MadAnalysis5Card(dict): """ A class to store a MadAnalysis5 card. 
Very basic since it is basically free format.""" - + _MG5aMC_escape_tag = '@MG5aMC' - + _default_hadron_inputs = ['*.hepmc', '*.hep', '*.stdhep', '*.lhco','*.root'] _default_parton_inputs = ['*.lhe'] _skip_analysis = False - + @classmethod def events_can_be_reconstructed(cls, file_path): """ Checks from the type of an event file whether it can be reconstructed or not.""" return not (file_path.endswith('.lhco') or file_path.endswith('.lhco.gz') or \ file_path.endswith('.root') or file_path.endswith('.root.gz')) - + @classmethod def empty_analysis(cls): """ A method returning the structure of an empty analysis """ @@ -5012,7 +5012,7 @@ def empty_reconstruction(cls): 'reco_output':'lhe'} def default_setup(self): - """define the default value""" + """define the default value""" self['mode'] = 'parton' self['inputs'] = [] # None is the default stdout level, it will be set automatically by MG5aMC @@ -5025,8 +5025,8 @@ def default_setup(self): # of this class and some other property could be added to this dictionary # in the future. self['analyses'] = {} - # The recasting structure contains on set of commands and one set of - # card lines. + # The recasting structure contains on set of commands and one set of + # card lines. 
self['recasting'] = {'commands':[],'card':[]} # Add the default trivial reconstruction to use an lhco input # This is just for the structure @@ -5035,7 +5035,7 @@ def default_setup(self): 'root_input': MadAnalysis5Card.empty_reconstruction()} self['reconstruction']['lhco_input']['reco_output']='lhco' - self['reconstruction']['root_input']['reco_output']='root' + self['reconstruction']['root_input']['reco_output']='root' # Specify in which order the analysis/recasting were specified self['order'] = [] @@ -5049,7 +5049,7 @@ def __init__(self, finput=None,mode=None): return else: dict.__init__(self) - + # Initialize it with all the default value self.default_setup() if not mode is None: @@ -5058,15 +5058,15 @@ def __init__(self, finput=None,mode=None): # if input is define read that input if isinstance(finput, (file, str, StringIO.StringIO)): self.read(finput, mode=mode) - + def read(self, input, mode=None): """ Read an MA5 card""" - + if mode not in [None,'parton','hadron']: raise MadGraph5Error('A MadAnalysis5Card can be read online the modes'+ "'parton' or 'hadron'") card_mode = mode - + if isinstance(input, (file, StringIO.StringIO)): input_stream = input elif isinstance(input, str): @@ -5099,10 +5099,10 @@ def read(self, input, mode=None): except ValueError: option = line[len(self._MG5aMC_escape_tag):] option = option.strip() - + if option=='inputs': self['inputs'].extend([v.strip() for v in value.split(',')]) - + elif option == 'skip_analysis': self._skip_analysis = True @@ -5118,7 +5118,7 @@ def read(self, input, mode=None): except: raise InvalidMadAnalysis5Card( "MA5 output level specification '%s' is incorrect."%str(value)) - + elif option=='analysis_name': current_type = 'analyses' current_name = value @@ -5127,7 +5127,7 @@ def read(self, input, mode=None): "Analysis '%s' already defined in MadAnalysis5 card"%current_name) else: self[current_type][current_name] = MadAnalysis5Card.empty_analysis() - + elif option=='set_reconstructions': try: reconstructions = 
eval(value) @@ -5142,7 +5142,7 @@ def read(self, input, mode=None): "analysis in a MadAnalysis5 card.") self[current_type][current_name]['reconstructions']=reconstructions continue - + elif option=='reconstruction_name': current_type = 'reconstruction' current_name = value @@ -5161,7 +5161,7 @@ def read(self, input, mode=None): raise InvalidMadAnalysis5Card( "Option '%s' can only take the values 'lhe' or 'root'"%option) self['reconstruction'][current_name]['reco_output'] = value.lower() - + elif option.startswith('recasting'): current_type = 'recasting' try: @@ -5171,11 +5171,11 @@ def read(self, input, mode=None): if len(self['recasting'][current_name])>0: raise InvalidMadAnalysis5Card( "Only one recasting can be defined in MadAnalysis5 hadron card") - + else: raise InvalidMadAnalysis5Card( "Unreckognized MG5aMC instruction in MadAnalysis5 card: '%s'"%option) - + if option in ['analysis_name','reconstruction_name'] or \ option.startswith('recasting'): self['order'].append((current_type,current_name)) @@ -5209,7 +5209,7 @@ def read(self, input, mode=None): self['inputs'] = self._default_hadron_inputs else: self['inputs'] = self._default_parton_inputs - + # Make sure at least one reconstruction is specified for each hadron # level analysis and that it exists. if self['mode']=='hadron': @@ -5221,7 +5221,7 @@ def read(self, input, mode=None): analysis['reconstructions']): raise InvalidMadAnalysis5Card('A reconstructions specified in'+\ " analysis '%s' is not defined."%analysis_name) - + def write(self, output): """ Write an MA5 card.""" @@ -5232,7 +5232,7 @@ def write(self, output): else: raise MadGraph5Error('Incorrect input for the write function of'+\ ' the MadAnalysis5Card card. 
Received argument type is: %s'%str(type(output))) - + output_lines = [] if self._skip_analysis: output_lines.append('%s skip_analysis'%self._MG5aMC_escape_tag) @@ -5240,11 +5240,11 @@ def write(self, output): if not self['stdout_lvl'] is None: output_lines.append('%s stdout_lvl=%s'%(self._MG5aMC_escape_tag,self['stdout_lvl'])) for definition_type, name in self['order']: - + if definition_type=='analyses': output_lines.append('%s analysis_name = %s'%(self._MG5aMC_escape_tag,name)) output_lines.append('%s set_reconstructions = %s'%(self._MG5aMC_escape_tag, - str(self['analyses'][name]['reconstructions']))) + str(self['analyses'][name]['reconstructions']))) elif definition_type=='reconstruction': output_lines.append('%s reconstruction_name = %s'%(self._MG5aMC_escape_tag,name)) elif definition_type=='recasting': @@ -5254,23 +5254,23 @@ def write(self, output): output_lines.extend(self[definition_type][name]) elif definition_type in ['reconstruction']: output_lines.append('%s reco_output = %s'%(self._MG5aMC_escape_tag, - self[definition_type][name]['reco_output'])) + self[definition_type][name]['reco_output'])) output_lines.extend(self[definition_type][name]['commands']) elif definition_type in ['analyses']: - output_lines.extend(self[definition_type][name]['commands']) - + output_lines.extend(self[definition_type][name]['commands']) + output_stream.write('\n'.join(output_lines)) - + return - - def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, + + def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, UFO_model_path=None, run_tag=''): - """ Returns a list of tuples ('AnalysisTag',['commands']) specifying - the commands of the MadAnalysis runs required from this card. - At parton-level, the number of such commands is the number of analysis + """ Returns a list of tuples ('AnalysisTag',['commands']) specifying + the commands of the MadAnalysis runs required from this card. 
+ At parton-level, the number of such commands is the number of analysis asked for. In the future, the idea is that the entire card can be processed in one go from MA5 directly.""" - + if isinstance(inputs_arg, list): inputs = inputs_arg elif isinstance(inputs_arg, str): @@ -5278,21 +5278,21 @@ def get_MA5_cmds(self, inputs_arg, submit_folder, run_dir_path=None, else: raise MadGraph5Error("The function 'get_MA5_cmds' can only take "+\ " a string or a list for the argument 'inputs_arg'") - + if len(inputs)==0: raise MadGraph5Error("The function 'get_MA5_cmds' must have "+\ " at least one input specified'") - + if run_dir_path is None: run_dir_path = os.path.dirname(inputs_arg) - + cmds_list = [] - + UFO_load = [] # first import the UFO if provided if UFO_model_path: UFO_load.append('import %s'%UFO_model_path) - + def get_import(input, type=None): """ Generates the MA5 import commands for that event file. """ dataset_name = os.path.basename(input).split('.')[0] @@ -5304,7 +5304,7 @@ def get_import(input, type=None): if not type is None: res.append('set %s.type = %s'%(dataset_name, type)) return res - + fifo_status = {'warned_fifo':False,'fifo_used_up':False} def warn_fifo(input): if not input.endswith('.fifo'): @@ -5317,7 +5317,7 @@ def warn_fifo(input): logger.warning('Only the first MA5 analysis/reconstructions can be run on a fifo. Subsequent runs will skip fifo inputs.') fifo_status['warned_fifo'] = True return True - + # Then the event file(s) input(s) inputs_load = [] for input in inputs: @@ -5325,16 +5325,16 @@ def warn_fifo(input): if len(inputs) > 1: inputs_load.append('set main.stacking_method = superimpose') - + submit_command = 'submit %s'%submit_folder+'_%s' - + # Keep track of the reconstruction outpus in the MA5 workflow # Keys are reconstruction names and values are .lhe.gz reco file paths. 
# We put by default already the lhco/root ones present reconstruction_outputs = { - 'lhco_input':[f for f in inputs if + 'lhco_input':[f for f in inputs if f.endswith('.lhco') or f.endswith('.lhco.gz')], - 'root_input':[f for f in inputs if + 'root_input':[f for f in inputs if f.endswith('.root') or f.endswith('.root.gz')]} # If a recasting card has to be written out, chose here its path @@ -5343,7 +5343,7 @@ def warn_fifo(input): # Make sure to only run over one analysis over each fifo. for definition_type, name in self['order']: - if definition_type == 'reconstruction': + if definition_type == 'reconstruction': analysis_cmds = list(self['reconstruction'][name]['commands']) reco_outputs = [] for i_input, input in enumerate(inputs): @@ -5365,8 +5365,8 @@ def warn_fifo(input): analysis_cmds.append( submit_command%('reco_%s_%d'%(name,i_input+1))) analysis_cmds.append('remove reco_events') - - reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) + + reconstruction_outputs[name]= [pjoin(run_dir_path,rec_out) for rec_out in reco_outputs] if len(reco_outputs)>0: cmds_list.append(('_reco_%s'%name,analysis_cmds)) @@ -5386,7 +5386,7 @@ def warn_fifo(input): analysis_cmds = ['set main.mode = parton'] else: analysis_cmds = [] - analysis_cmds.extend(sum([get_import(rec_out) for + analysis_cmds.extend(sum([get_import(rec_out) for rec_out in reconstruction_outputs[reco]],[])) analysis_cmds.extend(self['analyses'][name]['commands']) analysis_cmds.append(submit_command%('%s_%s'%(name,reco))) @@ -5427,12 +5427,12 @@ def warn_fifo(input): %(mue_ref_fixed)s = mue_ref_fixed ! 
scale to use if fixed scale mode """ running_block_nlo = RunBlock('RUNNING', template_on=template_on, template_off="") - + class RunCardNLO(RunCard): """A class object for the run_card for a (aMC@)NLO pocess""" - + LO = False - + blocks = [running_block_nlo] dummy_fct_file = {"dummy_cuts": pjoin("SubProcesses","dummy_fct.f"), @@ -5443,11 +5443,11 @@ class RunCardNLO(RunCard): if MG5DIR: default_run_card = pjoin(MG5DIR, "internal", "default_run_card_nlo.dat") - - + + def default_setup(self): """define the default value""" - + self.add_param('run_tag', 'tag_1', include=False) self.add_param('nevents', 10000) self.add_param('req_acc', -1.0, include=False) @@ -5455,27 +5455,27 @@ def default_setup(self): self.add_param("time_of_flight", -1.0, include=False) self.add_param('event_norm', 'average') #FO parameter - self.add_param('req_acc_fo', 0.01, include=False) + self.add_param('req_acc_fo', 0.01, include=False) self.add_param('npoints_fo_grid', 5000, include=False) self.add_param('niters_fo_grid', 4, include=False) - self.add_param('npoints_fo', 10000, include=False) + self.add_param('npoints_fo', 10000, include=False) self.add_param('niters_fo', 6, include=False) #seed and collider self.add_param('iseed', 0) - self.add_param('lpp1', 1, fortran_name='lpp(1)') - self.add_param('lpp2', 1, fortran_name='lpp(2)') + self.add_param('lpp1', 1, fortran_name='lpp(1)') + self.add_param('lpp2', 1, fortran_name='lpp(2)') self.add_param('ebeam1', 6500.0, fortran_name='ebeam(1)') - self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') + self.add_param('ebeam2', 6500.0, fortran_name='ebeam(2)') self.add_param('pdlabel', 'nn23nlo', allowed=['lhapdf', 'emela', 'cteq6_m','cteq6_d','cteq6_l','cteq6l1', 'nn23lo','nn23lo1','nn23nlo','ct14q00','ct14q07','ct14q14','ct14q21'] +\ - sum(self.allowed_lep_densities.values(),[]) ) + sum(self.allowed_lep_densities.values(),[]) ) self.add_param('lhaid', [244600],fortran_name='lhaPDFid') self.add_param('pdfscheme', 0) # whether to include or not 
photon-initiated processes in lepton collisions self.add_param('photons_from_lepton', True) self.add_param('lhapdfsetname', ['internal_use_only'], system=True) - # stuff for lepton collisions - # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set - # whether the current PDF set has or not beamstrahlung + # stuff for lepton collisions + # these parameters are in general set automatically by eMELA in a consistent manner with the PDF set + # whether the current PDF set has or not beamstrahlung self.add_param('has_bstrahl', False, system=True) # renormalisation scheme of alpha self.add_param('alphascheme', 0, system=True) @@ -5486,31 +5486,31 @@ def default_setup(self): # w contribution included or not in the running of alpha self.add_param('w_run', 1, system=True) #shower and scale - self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') + self.add_param('parton_shower', 'HERWIG6', fortran_name='shower_mc') self.add_param('shower_scale_factor',1.0) self.add_param('mcatnlo_delta', False) self.add_param('fixed_ren_scale', False) self.add_param('fixed_fac_scale', False) self.add_param('fixed_extra_scale', True, hidden=True, system=True) # set system since running from Ellis-Sexton scale not implemented - self.add_param('mur_ref_fixed', 91.118) + self.add_param('mur_ref_fixed', 91.118) self.add_param('muf1_ref_fixed', -1.0, hidden=True) - self.add_param('muf_ref_fixed', 91.118) + self.add_param('muf_ref_fixed', 91.118) self.add_param('muf2_ref_fixed', -1.0, hidden=True) - self.add_param('mue_ref_fixed', 91.118, hidden=True) - self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', + self.add_param('mue_ref_fixed', 91.118, hidden=True) + self.add_param("dynamical_scale_choice", [-1],fortran_name='dyn_scale', allowed = [-2,-1,0,1,2,3,10], comment="\'-1\' is based on CKKW back clustering (following feynman diagram).\n \'1\' is the sum of transverse energy.\n '2' is HT (sum of the transverse mass)\n 
'3' is HT/2, '0' allows to use the user_hook definition (need to be defined via custom_fct entry) ") self.add_param('fixed_qes_scale', False, hidden=True) self.add_param('qes_ref_fixed', -1.0, hidden=True) self.add_param('mur_over_ref', 1.0) - self.add_param('muf_over_ref', 1.0) - self.add_param('muf1_over_ref', -1.0, hidden=True) + self.add_param('muf_over_ref', 1.0) + self.add_param('muf1_over_ref', -1.0, hidden=True) self.add_param('muf2_over_ref', -1.0, hidden=True) self.add_param('mue_over_ref', 1.0, hidden=True, system=True) # forbid the user to modigy due to incorrect handling of the Ellis-Sexton scale self.add_param('qes_over_ref', -1.0, hidden=True) self.add_param('reweight_scale', [True], fortran_name='lscalevar') - self.add_param('rw_rscale_down', -1.0, hidden=True) + self.add_param('rw_rscale_down', -1.0, hidden=True) self.add_param('rw_rscale_up', -1.0, hidden=True) - self.add_param('rw_fscale_down', -1.0, hidden=True) + self.add_param('rw_fscale_down', -1.0, hidden=True) self.add_param('rw_fscale_up', -1.0, hidden=True) self.add_param('rw_rscale', [1.0,2.0,0.5], fortran_name='scalevarR') self.add_param('rw_fscale', [1.0,2.0,0.5], fortran_name='scalevarF') @@ -5523,60 +5523,60 @@ def default_setup(self): #technical self.add_param('folding', [1,1,1], include=False) - + #merging self.add_param('ickkw', 0, allowed=[-1,0,3,4], comment=" - 0: No merging\n - 3: FxFx Merging : http://amcatnlo.cern.ch/FxFx_merging.htm\n - 4: UNLOPS merging (No interface within MG5aMC)\n - -1: NNLL+NLO jet-veto computation. 
See arxiv:1412.8408 [hep-ph]") self.add_param('bwcutoff', 15.0) - #cuts + #cuts self.add_param('jetalgo', 1.0) - self.add_param('jetradius', 0.7) + self.add_param('jetradius', 0.7) self.add_param('ptj', 10.0 , cut=True) - self.add_param('etaj', -1.0, cut=True) - self.add_param('gamma_is_j', True) + self.add_param('etaj', -1.0, cut=True) + self.add_param('gamma_is_j', True) self.add_param('ptl', 0.0, cut=True) - self.add_param('etal', -1.0, cut=True) + self.add_param('etal', -1.0, cut=True) self.add_param('drll', 0.0, cut=True) - self.add_param('drll_sf', 0.0, cut=True) + self.add_param('drll_sf', 0.0, cut=True) self.add_param('mll', 0.0, cut=True) - self.add_param('mll_sf', 30.0, cut=True) - self.add_param('rphreco', 0.1) - self.add_param('etaphreco', -1.0) - self.add_param('lepphreco', True) - self.add_param('quarkphreco', True) + self.add_param('mll_sf', 30.0, cut=True) + self.add_param('rphreco', 0.1) + self.add_param('etaphreco', -1.0) + self.add_param('lepphreco', True) + self.add_param('quarkphreco', True) self.add_param('ptgmin', 20.0, cut=True) - self.add_param('etagamma', -1.0) + self.add_param('etagamma', -1.0) self.add_param('r0gamma', 0.4) - self.add_param('xn', 1.0) + self.add_param('xn', 1.0) self.add_param('epsgamma', 1.0) - self.add_param('isoem', True) + self.add_param('isoem', True) self.add_param('maxjetflavor', 4, hidden=True) - self.add_param('pineappl', False) + self.add_param('pineappl', False) self.add_param('lhe_version', 3, hidden=True, include=False) - + # customization self.add_param("custom_fcts",[],typelist="str", include=False, comment="list of files containing function that overwritte dummy function of the code (like adding cuts/...)") #internal variable related to FO_analyse_card self.add_param('FO_LHE_weight_ratio',1e-3, hidden=True, system=True) - self.add_param('FO_LHE_postprocessing',['grouping','random'], + self.add_param('FO_LHE_postprocessing',['grouping','random'], hidden=True, system=True, include=False) - + # parameter 
allowing to define simple cut via the pdg self.add_param('pt_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('pt_max_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_min_pdg',{'__type__':0.}, include=False,cut=True) self.add_param('mxx_only_part_antipart', {'default':False}, include=False, hidden=True) - + #hidden parameter that are transfer to the fortran code self.add_param('pdg_cut',[0], hidden=True, system=True) # store which PDG are tracked self.add_param('ptmin4pdg',[0.], hidden=True, system=True) # store pt min self.add_param('ptmax4pdg',[-1.], hidden=True, system=True) self.add_param('mxxmin4pdg',[0.], hidden=True, system=True) self.add_param('mxxpart_antipart', [False], hidden=True, system=True) - + def check_validity(self): """check the validity of the various input""" - + super(RunCardNLO, self).check_validity() # for lepton-lepton collisions, ignore 'pdlabel' and 'lhaid' @@ -5588,12 +5588,12 @@ def check_validity(self): # for dressed lepton collisions, check that the lhaid is a valid one if self['pdlabel'] not in sum(self.allowed_lep_densities.values(),[]) + ['emela']: raise InvalidRunCard('pdlabel %s not allowed for dressed-lepton collisions' % self['pdlabel']) - + elif self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' self['reweight_pdf']=[False] logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') - + if self['lpp1'] == 0 == self['lpp2']: if self['pdlabel']!='nn23nlo' or self['reweight_pdf']: self['pdlabel']='nn23nlo' @@ -5601,8 +5601,8 @@ def check_validity(self): logger.info('''Lepton-lepton collisions: ignoring PDF related parameters in the run_card.dat (pdlabel, lhaid, reweight_pdf, ...)''') # For FxFx merging, make sure that the following parameters are set correctly: - if self['ickkw'] == 3: - # 1. Renormalization and factorization (and ellis-sexton scales) are not fixed + if self['ickkw'] == 3: + # 1. 
Renormalization and factorization (and ellis-sexton scales) are not fixed scales=['fixed_ren_scale','fixed_fac_scale','fixed_QES_scale'] for scale in scales: if self[scale]: @@ -5615,7 +5615,7 @@ def check_validity(self): self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency in FxFx merging, dynamical_scale_choice has been set to -1 (default)''' ,'$MG:BOLD') - + # 2. Use kT algorithm for jets with pseudo-code size R=1.0 jetparams=['jetradius','jetalgo'] for jetparam in jetparams: @@ -5628,8 +5628,8 @@ def check_validity(self): self["dynamical_scale_choice"] = [-1] self["reweight_scale"]=[self["reweight_scale"][0]] logger.warning('''For consistency with the jet veto, the scale which will be used is ptj. dynamical_scale_choice will be set at -1.''' - ,'$MG:BOLD') - + ,'$MG:BOLD') + # For interface to PINEAPPL, need to use LHAPDF and reweighting to get scale uncertainties if self['pineappl'] and self['pdlabel'].lower() != 'lhapdf': raise InvalidRunCard('PineAPPL generation only possible with the use of LHAPDF') @@ -5661,7 +5661,7 @@ def check_validity(self): if (self['rw_fscale_down'] != -1.0 and ['rw_fscale_down'] not in self['rw_fscale']) or\ (self['rw_fscale_up'] != -1.0 and ['rw_fscale_up'] not in self['rw_fscale']): self['rw_fscale']=[1.0,self['rw_fscale_up'],self['rw_fscale_down']] - + # PDF reweighting check if any(self['reweight_pdf']): # check that we use lhapdf if reweighting is ON @@ -5672,7 +5672,7 @@ def check_validity(self): if self['pdlabel'] != "lhapdf": self['reweight_pdf']=[self['reweight_pdf'][0]] self['lhaid']=[self['lhaid'][0]] - + # make sure set have reweight_scale and dyn_scale_choice of length 1 when fixed scales: if self['fixed_ren_scale'] and self['fixed_fac_scale']: self['reweight_scale']=[self['reweight_scale'][0]] @@ -5685,7 +5685,7 @@ def check_validity(self): self['reweight_pdf']=self['reweight_pdf']*len(self['lhaid']) logger.warning("Setting 'reweight_pdf' for all 'lhaid' to %s" % self['reweight_pdf'][0]) 
if len(self['reweight_scale']) == 1 and len(self['dynamical_scale_choice']) != 1: - self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) + self['reweight_scale']=self['reweight_scale']*len(self['dynamical_scale_choice']) logger.warning("Setting 'reweight_scale' for all 'dynamical_scale_choice' to %s" % self['reweight_pdf'][0]) # Check that there are no identical elements in lhaid or dynamical_scale_choice @@ -5693,7 +5693,7 @@ def check_validity(self): raise InvalidRunCard("'lhaid' has two or more identical entries. They have to be all different for the code to work correctly.") if len(self['dynamical_scale_choice']) != len(set(self['dynamical_scale_choice'])): raise InvalidRunCard("'dynamical_scale_choice' has two or more identical entries. They have to be all different for the code to work correctly.") - + # Check that lenght of lists are consistent if len(self['reweight_pdf']) != len(self['lhaid']): raise InvalidRunCard("'reweight_pdf' and 'lhaid' lists should have the same length") @@ -5730,7 +5730,7 @@ def check_validity(self): if len(self['folding']) != 3: raise InvalidRunCard("'folding' should contain exactly three integers") for ifold in self['folding']: - if ifold not in [1,2,4,8]: + if ifold not in [1,2,4,8]: raise InvalidRunCard("The three 'folding' parameters should be equal to 1, 2, 4, or 8.") # Check MC@NLO-Delta if self['mcatnlo_delta'] and not self['parton_shower'].lower() == 'pythia8': @@ -5746,11 +5746,11 @@ def check_validity(self): logger.warning("At-rest proton mode set: energy beam set to 0.938 GeV") self.set('ebeam%i' %i, 0.938) else: - raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") + raise InvalidRunCard("Energy for beam %i lower than proton mass. 
Please fix this") def update_system_parameter_for_include(self): - + # set the pdg_for_cut fortran parameter pdg_to_cut = set(list(self['pt_min_pdg'].keys()) +list(self['pt_max_pdg'].keys())+ list(self['mxx_min_pdg'].keys())+ list(self['mxx_only_part_antipart'].keys())) @@ -5758,12 +5758,12 @@ def update_system_parameter_for_include(self): pdg_to_cut.discard('default') if len(pdg_to_cut)>25: raise Exception("Maximum 25 different PDGs are allowed for PDG specific cut") - + if any(int(pdg)<0 for pdg in pdg_to_cut): logger.warning('PDG specific cuts are always applied symmetrically on particles/anti-particles. Always use positve PDG codes') raise MadGraph5Error('Some PDG specific cuts are defined using negative PDG codes') - - + + if any(pdg in pdg_to_cut for pdg in [21,22,11,13,15]+ list(range(self['maxjetflavor']+1))): # Note that this will double check in the fortran code raise Exception("Can not use PDG related cuts for massless SM particles/leptons") @@ -5790,7 +5790,7 @@ def update_system_parameter_for_include(self): self[new_var].append(self[old_var][str(pdg)] if str(pdg) in self[old_var] else default) else: if str(pdg) not in self[old_var]: - raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) + raise Exception("no default value defined for %s and no value defined for pdg %s" % (old_var, pdg)) self[new_var].append(self[old_var][str(pdg)]) else: self['pdg_cut'] = [0] @@ -5800,12 +5800,12 @@ def update_system_parameter_for_include(self): self['mxxpart_antipart'] = [False] def write(self, output_file, template=None, python_template=False, **opt): - """Write the run_card in output_file according to template + """Write the run_card in output_file according to template (a path to a valid run_card)""" if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', + template = pjoin(MG5DIR, 'Template', 'NLO', 'Cards', 'run_card.dat') python_template = True else: @@ -5818,7 +5818,7 @@ def 
write(self, output_file, template=None, python_template=False, **opt): def create_default_for_process(self, proc_characteristic, history, proc_def): """Rules - e+ e- beam -> lpp:0 ebeam:500 + e+ e- beam -> lpp:0 ebeam:500 p p beam -> set maxjetflavor automatically process with tagged photons -> gamma_is_j = false process without QED splittings -> gamma_is_j = false, recombination = false @@ -5844,19 +5844,19 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self['ebeam2'] = 500 else: self['lpp1'] = 0 - self['lpp2'] = 0 - + self['lpp2'] = 0 + if proc_characteristic['ninitial'] == 1: #remove all cut self.remove_all_cut() # check for tagged photons tagged_particles = set() - + # If model has running functionality add the additional parameter model = proc_def[0].get('model') if model['running_elements']: - self.display_block.append('RUNNING') + self.display_block.append('RUNNING') # Check if need matching min_particle = 99 @@ -5885,7 +5885,7 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: idsmin = [l['id'] for l in procmin['legs']] break - + for procmax in proc_def: if len(procmax['legs']) != max_particle: continue @@ -5901,9 +5901,9 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): else: # all are jet => matching is ON matching=True - break - - if matching: + break + + if matching: self['ickkw'] = 3 self['fixed_ren_scale'] = False self["fixed_fac_scale"] = False @@ -5911,17 +5911,17 @@ def create_default_for_process(self, proc_characteristic, history, proc_def): self["jetalgo"] = 1 self["jetradius"] = 1 self["parton_shower"] = "PYTHIA8" - + # Read file input/default_run_card_nlo.dat # This has to be LAST !! if os.path.exists(self.default_run_card): self.read(self.default_run_card, consistency=False) - + class MadLoopParam(ConfigFile): """ a class for storing/dealing with the file MadLoopParam.dat contains a parser to read it, facilities to write a new file,... 
""" - + _ID_reduction_tool_map = {1:'CutTools', 2:'PJFry++', 3:'IREGI', @@ -5929,10 +5929,10 @@ class MadLoopParam(ConfigFile): 5:'Samurai', 6:'Ninja', 7:'COLLIER'} - + def default_setup(self): """initialize the directory to the default value""" - + self.add_param("MLReductionLib", "6|7|1") self.add_param("IREGIMODE", 2) self.add_param("IREGIRECY", True) @@ -5954,7 +5954,7 @@ def default_setup(self): self.add_param("HelicityFilterLevel", 2) self.add_param("LoopInitStartOver", False) self.add_param("HelInitStartOver", False) - self.add_param("UseQPIntegrandForNinja", True) + self.add_param("UseQPIntegrandForNinja", True) self.add_param("UseQPIntegrandForCutTools", True) self.add_param("COLLIERMode", 1) self.add_param("COLLIERComputeUVpoles", True) @@ -5966,9 +5966,9 @@ def default_setup(self): self.add_param("COLLIERUseInternalStabilityTest",True) def read(self, finput): - """Read the input file, this can be a path to a file, + """Read the input file, this can be a path to a file, a file object, a str with the content of the file.""" - + if isinstance(finput, str): if "\n" in finput: finput = finput.split('\n') @@ -5976,7 +5976,7 @@ def read(self, finput): finput = open(finput) else: raise Exception("No such file %s" % input) - + previous_line= '' for line in finput: if previous_line.startswith('#'): @@ -5985,20 +5985,20 @@ def read(self, finput): if len(value) and value[0] not in ['#', '!']: self.__setitem__(name, value, change_userdefine=True) previous_line = line - - + + def write(self, outputpath, template=None,commentdefault=False): - + if not template: if not MADEVENT: - template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', + template = pjoin(MG5DIR, 'Template', 'loop_material', 'StandAlone', 'Cards', 'MadLoopParams.dat') else: template = pjoin(MEDIR, 'Cards', 'MadLoopParams_default.dat') fsock = open(template, 'r') template = fsock.readlines() fsock.close() - + if isinstance(outputpath, str): output = open(outputpath, 'w') else: @@ -6019,7 
+6019,7 @@ def f77format(value): return value else: raise Exception("Can not format input %s" % type(value)) - + name = '' done = set() for line in template: @@ -6034,12 +6034,12 @@ def f77format(value): elif line.startswith('#'): name = line[1:].split()[0] output.write(line) - - - - - -class eMELA_info(ConfigFile): + + + + + +class eMELA_info(ConfigFile): """ a class for eMELA (LHAPDF-like) info files """ path = '' @@ -6053,7 +6053,7 @@ def __init__(self, finput, me_dir): def read(self, finput): - if isinstance(finput, file): + if isinstance(finput, file): lines = finput.open().read().split('\n') self.path = finput.name else: @@ -6066,7 +6066,7 @@ def read(self, finput): k, v = l.split(':', 1) # ignore further occurrences of : try: self[k.strip()] = eval(v) - except (NameError, SyntaxError): + except (NameError, SyntaxError): self[k.strip()] = v def default_setup(self): @@ -6091,7 +6091,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): +"powers of alpha should be reweighted a posteriori") - logger.info('Updating variables according to %s' % self.path) + logger.info('Updating variables according to %s' % self.path) # Flavours in the running of alpha nd, nu, nl = self['eMELA_ActiveFlavoursAlpha'] self.log_and_update(banner, 'run_card', 'ndnq_run', nd) @@ -6130,8 +6130,8 @@ def update_epdf_emela_variables(self, banner, uvscheme): logger.warning('Cannot treat the following renormalisation schemes for ME and PDFs: %d, %d' \ % (uvscheme, uvscheme_pdf)) - # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref - # also check that the com energy is equal to qref, otherwise print a + # if PDFs use MSbar with fixed alpha, set the ren scale fixed to Qref + # also check that the com energy is equal to qref, otherwise print a # warning if uvscheme_pdf == 1: qref = self['eMELA_AlphaQref'] @@ -6144,23 +6144,23 @@ def update_epdf_emela_variables(self, banner, uvscheme): # LL / NLL PDF (0/1) pdforder = self['eMELA_PerturbativeOrder'] - # pdfscheme = 
0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) + # pdfscheme = 0->MSbar; 1->DIS; 2->eta (leptonic); 3->beta (leptonic) # 4->mixed (leptonic); 5-> nobeta (leptonic); 6->delta (leptonic) # if LL, use nobeta scheme unless LEGACYLLPDF > 0 if pdforder == 0: if 'eMELA_LEGACYLLPDF' not in self.keys() or self['eMELA_LEGACYLLPDF'] in [-1, 0]: self.log_and_update(banner, 'run_card', 'pdfscheme', 5) - elif self['eMELA_LEGACYLLPDF'] == 1: + elif self['eMELA_LEGACYLLPDF'] == 1: # mixed self.log_and_update(banner, 'run_card', 'pdfscheme', 4) - elif self['eMELA_LEGACYLLPDF'] == 2: + elif self['eMELA_LEGACYLLPDF'] == 2: # eta self.log_and_update(banner, 'run_card', 'pdfscheme', 2) - elif self['eMELA_LEGACYLLPDF'] == 3: + elif self['eMELA_LEGACYLLPDF'] == 3: # beta self.log_and_update(banner, 'run_card', 'pdfscheme', 3) elif pdforder == 1: - # for NLL, use eMELA_FactorisationSchemeInt = 0/1 + # for NLL, use eMELA_FactorisationSchemeInt = 0/1 # for delta/MSbar if self['eMELA_FactorisationSchemeInt'] == 0: # MSbar @@ -6177,7 +6177,7 @@ def update_epdf_emela_variables(self, banner, uvscheme): - + def log_and_update(self, banner, card, par, v): """update the card parameter par to value v diff --git a/epochX/cudacpp/pp_tt012j.mad/bin/internal/gen_ximprove.py b/epochX/cudacpp/pp_tt012j.mad/bin/internal/gen_ximprove.py index 5fd170d18d..cc842aa50f 100755 --- a/epochX/cudacpp/pp_tt012j.mad/bin/internal/gen_ximprove.py +++ b/epochX/cudacpp/pp_tt012j.mad/bin/internal/gen_ximprove.py @@ -2,18 +2,18 @@ # # Copyright (c) 2014 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. 
# -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch # ################################################################################ """ A python file to replace the fortran script gen_ximprove. - This script analyses the result of the survey/ previous refine and + This script analyses the result of the survey/ previous refine and creates the jobs for the following script. """ from __future__ import division @@ -66,77 +66,77 @@ class gensym(object): """a class to call the fortran gensym executable and handle it's output in order to create the various job that are needed for the survey""" - + #convenient shortcut for the formatting of variable @ staticmethod def format_variable(*args): return bannermod.ConfigFile.format_variable(*args) - + combining_job = 2 # number of channel by ajob - splitted_grid = False + splitted_grid = False min_iterations = 3 mode= "survey" - + def __init__(self, cmd, opt=None): - + try: super(gensym, self).__init__(cmd, opt) except TypeError: pass - - # Run statistics, a dictionary of RunStatistics(), with + + # Run statistics, a dictionary of RunStatistics(), with self.run_statistics = {} - + self.cmd = cmd self.run_card = cmd.run_card self.me_dir = cmd.me_dir - - + + # dictionary to keep track of the precision when combining iteration self.cross = collections.defaultdict(int) self.abscross = collections.defaultdict(int) self.sigma = collections.defaultdict(int) self.chi2 = collections.defaultdict(int) - + self.splitted_grid = False if self.cmd.proc_characteristics['loop_induced']: nexternal = self.cmd.proc_characteristics['nexternal'] self.splitted_grid = max(2, (nexternal-2)**2) if hasattr(self.cmd, "opts") and self.cmd.opts['accuracy'] == 0.1: self.cmd.opts['accuracy'] = 0.02 - + if isinstance(cmd.cluster, cluster.MultiCore) and 
self.splitted_grid > 1: self.splitted_grid = int(cmd.cluster.nb_core**0.5) if self.splitted_grid == 1 and cmd.cluster.nb_core >1: self.splitted_grid = 2 - + #if the user defines it in the run_card: if self.run_card['survey_splitting'] != -1: self.splitted_grid = self.run_card['survey_splitting'] if self.run_card['survey_nchannel_per_job'] != 1 and 'survey_nchannel_per_job' in self.run_card.user_set: - self.combining_job = self.run_card['survey_nchannel_per_job'] + self.combining_job = self.run_card['survey_nchannel_per_job'] elif self.run_card['hard_survey'] > 1: self.combining_job = 1 - - + + self.splitted_Pdir = {} self.splitted_for_dir = lambda x,y: self.splitted_grid self.combining_job_for_Pdir = lambda x: self.combining_job self.lastoffset = {} - + done_warning_zero_coupling = False def get_helicity(self, to_submit=True, clean=True): """launch a single call to madevent to get the list of non zero helicity""" - - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc P_zero_result = [] nb_tot_proc = len(subproc) - job_list = {} - - + job_list = {} + + for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.' 
% \ (nb_proc+1,nb_tot_proc), level=None) @@ -154,7 +154,7 @@ def get_helicity(self, to_submit=True, clean=True): p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts - + (stdout, _) = p.communicate(''.encode()) stdout = stdout.decode('ascii',errors='ignore') if stdout: @@ -166,11 +166,11 @@ def get_helicity(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir, 'error')): os.remove(pjoin(self.me_dir, 'error')) continue # bypass bad process - + self.cmd.compile(['madevent_forhel'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'madevent_forhel')): - raise Exception('Error make madevent_forhel not successful') - + raise Exception('Error make madevent_forhel not successful') + if not os.path.exists(pjoin(Pdir, 'Hel')): os.mkdir(pjoin(Pdir, 'Hel')) ff = open(pjoin(Pdir, 'Hel', 'input_app.txt'),'w') @@ -180,15 +180,15 @@ def get_helicity(self, to_submit=True, clean=True): try: os.remove(pjoin(Pdir, 'Hel','results.dat')) except Exception: - pass + pass # Launch gensym - p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, + p = misc.Popen(['../madevent_forhel < input_app.txt'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=pjoin(Pdir,'Hel'), shell=True) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(" ".encode()) stdout = stdout.decode('ascii',errors='ignore') if os.path.exists(pjoin(self.me_dir, 'error')): - raise Exception(pjoin(self.me_dir,'error')) + raise Exception(pjoin(self.me_dir,'error')) # note a continue is not enough here, we have in top to link # the matrixX_optim.f to matrixX_orig.f to let the code to work # after this error. 
@@ -203,7 +203,7 @@ def get_helicity(self, to_submit=True, clean=True): zero_gc = list() all_zampperhel = set() all_bad_amps_perhel = set() - + for line in stdout.splitlines(): if "=" not in line and ":" not in line: continue @@ -229,22 +229,22 @@ def get_helicity(self, to_submit=True, clean=True): "%s\n" % (' '.join(zero_gc)) +\ "This will slow down the computation. Please consider using restricted model:\n" +\ "https://answers.launchpad.net/mg5amcnlo/+faq/2312") - - + + all_good_hels = collections.defaultdict(list) for me_index, hel in all_hel: - all_good_hels[me_index].append(int(hel)) - + all_good_hels[me_index].append(int(hel)) + #print(all_hel) if self.run_card['hel_zeroamp']: all_bad_amps = collections.defaultdict(list) for me_index, amp in all_zamp: all_bad_amps[me_index].append(int(amp)) - + all_bad_amps_perhel = collections.defaultdict(list) for me_index, hel, amp in all_zampperhel: - all_bad_amps_perhel[me_index].append((int(hel),int(amp))) - + all_bad_amps_perhel[me_index].append((int(hel),int(amp))) + elif all_zamp: nb_zero = sum(int(a[1]) for a in all_zamp) if zero_gc: @@ -254,7 +254,7 @@ def get_helicity(self, to_submit=True, clean=True): else: logger.warning("The optimization detected that you have %i zero matrix-element for this SubProcess: %s.\n" % nb_zero +\ "This part can optimize if you set the flag hel_zeroamp to True in the run_card.") - + #check if we need to do something and write associate information" data = [all_hel, all_zamp, all_bad_amps_perhel] if not self.run_card['hel_zeroamp']: @@ -266,14 +266,14 @@ def get_helicity(self, to_submit=True, clean=True): old_data = open(pjoin(Pdir,'Hel','selection')).read() if old_data == data: continue - - + + with open(pjoin(Pdir,'Hel','selection'),'w') as fsock: - fsock.write(data) - - + fsock.write(data) + + for matrix_file in misc.glob('matrix*orig.f', Pdir): - + split_file = matrix_file.split('/') me_index = split_file[-1][len('matrix'):-len('_orig.f')] @@ -289,11 +289,11 @@ def 
get_helicity(self, to_submit=True, clean=True): #good_hels = sorted(list(good_hels)) good_hels = [str(x) for x in sorted(all_good_hels[me_index])] if self.run_card['hel_zeroamp']: - + bad_amps = [str(x) for x in sorted(all_bad_amps[me_index])] bad_amps_perhel = [x for x in sorted(all_bad_amps_perhel[me_index])] else: - bad_amps = [] + bad_amps = [] bad_amps_perhel = [] if __debug__: mtext = open(matrix_file).read() @@ -310,7 +310,7 @@ def get_helicity(self, to_submit=True, clean=True): recycler.set_input(matrix_file) recycler.set_output(out_file) - recycler.set_template(templ_file) + recycler.set_template(templ_file) recycler.generate_output_file() del recycler @@ -321,19 +321,19 @@ def get_helicity(self, to_submit=True, clean=True): return {}, P_zero_result - + def launch(self, to_submit=True, clean=True): """ """ if not hasattr(self, 'subproc'): - self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + self.subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] subproc = self.subproc - + P_zero_result = [] # check the number of times where they are no phase-space - + nb_tot_proc = len(subproc) - job_list = {} + job_list = {} for nb_proc,subdir in enumerate(subproc): self.cmd.update_status('Compiling for process %s/%s.
(previous processes already running)' % \ (nb_proc+1,nb_tot_proc), level=None) @@ -341,7 +341,7 @@ def launch(self, to_submit=True, clean=True): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) logger.info(' %s ' % subdir) - + # clean previous run if clean: for match in misc.glob('*ajob*', Pdir): @@ -349,17 +349,17 @@ def launch(self, to_submit=True, clean=True): os.remove(match) for match in misc.glob('G*', Pdir): if os.path.exists(pjoin(match,'results.dat')): - os.remove(pjoin(match, 'results.dat')) + os.remove(pjoin(match, 'results.dat')) if os.path.exists(pjoin(match, 'ftn25')): - os.remove(pjoin(match, 'ftn25')) - + os.remove(pjoin(match, 'ftn25')) + #compile gensym self.cmd.compile(['gensym'], cwd=Pdir) if not os.path.exists(pjoin(Pdir, 'gensym')): - raise Exception('Error make gensym not successful') - + raise Exception('Error make gensym not successful') + # Launch gensym - p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, + p = misc.Popen(['./gensym'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=Pdir) #sym_input = "%(points)d %(iterations)d %(accuracy)f \n" % self.opts (stdout, _) = p.communicate(''.encode()) @@ -367,8 +367,8 @@ def launch(self, to_submit=True, clean=True): if os.path.exists(pjoin(self.me_dir,'error')): files.mv(pjoin(self.me_dir,'error'), pjoin(Pdir,'ajob.no_ps.log')) P_zero_result.append(subdir) - continue - + continue + jobs = stdout.split() job_list[Pdir] = jobs try: @@ -386,8 +386,8 @@ def launch(self, to_submit=True, clean=True): continue else: if done: - raise Exception('Parsing error in gensym: %s' % stdout) - job_list[Pdir] = l.split() + raise Exception('Parsing error in gensym: %s' % stdout) + job_list[Pdir] = l.split() done = True if not done: raise Exception('Parsing error in gensym: %s' % stdout) @@ -408,16 +408,16 @@ def launch(self, to_submit=True, clean=True): if to_submit: self.submit_to_cluster(job_list) job_list = {} - + return job_list, P_zero_result - + def resubmit(self, 
min_precision=1.0, resubmit_zero=False): """collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - + job_list, P_zero_result = self.launch(to_submit=False, clean=False) - + for P , jobs in dict(job_list).items(): misc.sprint(jobs) to_resub = [] @@ -434,7 +434,7 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): elif max(one_result.xerru, one_result.xerrc)/one_result.xsec > min_precision: to_resub.append(job) else: - to_resub.append(job) + to_resub.append(job) if to_resub: for G in to_resub: try: @@ -442,19 +442,19 @@ def resubmit(self, min_precision=1.0, resubmit_zero=False): except Exception as error: misc.sprint(error) pass - misc.sprint(to_resub) + misc.sprint(to_resub) self.submit_to_cluster({P: to_resub}) - - - - - - - - - - - + + + + + + + + + + + def submit_to_cluster(self, job_list): """ """ @@ -467,7 +467,7 @@ def submit_to_cluster(self, job_list): nexternal = self.cmd.proc_characteristics['nexternal'] current = open(pjoin(path, "nexternal.inc")).read() ext = re.search(r"PARAMETER \(NEXTERNAL=(\d+)\)", current).group(1) - + if self.run_card['job_strategy'] == 2: self.splitted_grid = 2 if nexternal == int(ext): @@ -498,18 +498,18 @@ def submit_to_cluster(self, job_list): return self.submit_to_cluster_no_splitting(job_list) else: return self.submit_to_cluster_splitted(job_list) - - + + def submit_to_cluster_no_splitting(self, job_list): """submit the survey without the parralelization. 
This is the old mode which is still usefull in single core""" - - # write the template file for the parameter file + + # write the template file for the parameter file self.write_parameter(parralelization=False, Pdirs=list(job_list.keys())) - - + + # launch the job with the appropriate grouping - for Pdir, jobs in job_list.items(): + for Pdir, jobs in job_list.items(): jobs = list(jobs) i=0 while jobs: @@ -518,16 +518,16 @@ def submit_to_cluster_no_splitting(self, job_list): for _ in range(self.combining_job_for_Pdir(Pdir)): if jobs: to_submit.append(jobs.pop(0)) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=to_submit, cwd=pjoin(self.me_dir,'SubProcesses' , Pdir)) - + def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): """prepare the input_file for submitting the channel""" - + if 'SubProcesses' not in Pdir: Pdir = pjoin(self.me_dir, 'SubProcesses', Pdir) @@ -535,8 +535,8 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): self.splitted_Pdir[(Pdir, G)] = int(nb_job) - # 1. write the new input_app.txt - run_card = self.cmd.run_card + # 1. write the new input_app.txt + run_card = self.cmd.run_card options = {'event' : submit_ps, 'maxiter': 1, 'miniter': 1, @@ -545,29 +545,29 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): else run_card['nhel'], 'gridmode': -2, 'channel' : G - } - + } + Gdir = pjoin(Pdir, 'G%s' % G) - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + # 2. check that ftn25 exists. - assert os.path.exists(pjoin(Gdir, "ftn25")) - - + assert os.path.exists(pjoin(Gdir, "ftn25")) + + # 3. 
Submit the new jobs #call back function - packet = cluster.Packet((Pdir, G, step+1), + packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, (Pdir, G, step+1)) - + if step ==0: - self.lastoffset[(Pdir, G)] = 0 - - # resubmit the new jobs + self.lastoffset[(Pdir, G)] = 0 + + # resubmit the new jobs for i in range(int(nb_job)): name = "G%s_%s" % (G,i+1) self.lastoffset[(Pdir, G)] += 1 - offset = self.lastoffset[(Pdir, G)] + offset = self.lastoffset[(Pdir, G)] self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'refine_splitted.sh'), argument=[name, 'G%s'%G, offset], cwd= Pdir, @@ -575,9 +575,9 @@ def create_resubmit_one_iter(self, Pdir, G, submit_ps, nb_job, step=0): def submit_to_cluster_splitted(self, job_list): - """ submit the version of the survey with splitted grid creation - """ - + """ submit the version of the survey with splitted grid creation + """ + #if self.splitted_grid <= 1: # return self.submit_to_cluster_no_splitting(job_list) @@ -592,7 +592,7 @@ def submit_to_cluster_splitted(self, job_list): for job in jobs: packet = cluster.Packet((Pdir, job, 1), self.combine_iteration, (Pdir, job, 1)) - for i in range(self.splitted_for_dir(Pdir, job)): + for i in range(self.splitted_for_dir(Pdir, job)): self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[i+1, job], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), @@ -601,15 +601,15 @@ def submit_to_cluster_splitted(self, job_list): def combine_iteration(self, Pdir, G, step): grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - - # Compute the number of events used for this run. + + # Compute the number of events used for this run. nb_events = grid_calculator.target_evt Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) - + # 4. 
make the submission of the next iteration # Three cases - less than 3 iteration -> continue # - more than 3 and less than 5 -> check error @@ -627,15 +627,15 @@ def combine_iteration(self, Pdir, G, step): need_submit = False else: need_submit = True - + elif step >= self.cmd.opts['iterations']: need_submit = False elif self.cmd.opts['accuracy'] < 0: #check for luminosity raise Exception("Not Implemented") elif self.abscross[(Pdir,G)] == 0: - need_submit = False - else: + need_submit = False + else: across = self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) tot_across = self.get_current_axsec() if across == 0: @@ -646,20 +646,20 @@ def combine_iteration(self, Pdir, G, step): need_submit = True else: need_submit = False - - + + if cross: grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_events,mode=self.mode, conservative_factor=5.0) - - xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) - if float(cross)!=0.0 and float(error)!=0.0 else 8) + + xsec_format = '.%ig'%(max(3,int(math.log10(1.0/float(error)))+2) + if float(cross)!=0.0 and float(error)!=0.0 else 8) if need_submit: message = "%%s/G%%s is at %%%s +- %%.3g pb. 
Now submitting iteration #%s."%(xsec_format, step+1) logger.info(message%\ - (os.path.basename(Pdir), G, float(cross), + (os.path.basename(Pdir), G, float(cross), float(error)*float(cross))) self.resubmit_survey(Pdir,G, Gdirs, step) elif cross: @@ -670,26 +670,26 @@ def combine_iteration(self, Pdir, G, step): newGpath = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(newGpath): os.mkdir(newGpath) - + # copy the new grid: - files.cp(pjoin(Gdirs[0], 'ftn25'), + files.cp(pjoin(Gdirs[0], 'ftn25'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, 'ftn26')) - + # copy the events fsock = open(pjoin(newGpath, 'events.lhe'), 'w') for Gdir in Gdirs: - fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) - + fsock.write(open(pjoin(Gdir, 'events.lhe')).read()) + # copy one log - files.cp(pjoin(Gdirs[0], 'log.txt'), + files.cp(pjoin(Gdirs[0], 'log.txt'), pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G)) - - + + # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) else: logger.info("Survey finished for %s/G%s [0 cross]", os.path.basename(Pdir),G) - + Gdir = pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G) if not os.path.exists(Gdir): os.mkdir(Gdir) @@ -697,21 +697,21 @@ def combine_iteration(self, Pdir, G, step): files.cp(pjoin(Gdirs[0], 'log.txt'), Gdir) # create the appropriate results.dat self.write_results(grid_calculator, cross, error, Pdir, G, step) - + return 0 def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): """ exclude_sub_jobs is to remove some of the subjobs if a numerical issue is detected in one of them. Warning is issue when this occurs. """ - + # 1. 
create an object to combine the grid information and fill it grid_calculator = combine_grid.grid_information(self.run_card['nhel']) - + for i in range(self.splitted_for_dir(Pdir, G)): if i in exclude_sub_jobs: continue - path = pjoin(Pdir, "G%s_%s" % (G, i+1)) + path = pjoin(Pdir, "G%s_%s" % (G, i+1)) fsock = misc.mult_try_open(pjoin(path, 'results.dat')) one_result = grid_calculator.add_results_information(fsock) fsock.close() @@ -723,9 +723,9 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): fsock.close() os.remove(pjoin(path, 'results.dat')) #os.remove(pjoin(path, 'grid_information')) - - - + + + #2. combine the information about the total crossection / error # start by keep the interation in memory cross, across, sigma = grid_calculator.get_cross_section() @@ -736,12 +736,12 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): if maxwgt: nunwgt = grid_calculator.get_nunwgt(maxwgt) # Make sure not to apply the security below during the first step of the - # survey. Also, disregard channels with a contribution relative to the + # survey. Also, disregard channels with a contribution relative to the # total cross-section smaller than 1e-8 since in this case it is unlikely # that this channel will need more than 1 event anyway. 
apply_instability_security = False rel_contrib = 0.0 - if (self.__class__ != gensym or step > 1): + if (self.__class__ != gensym or step > 1): Pdir_across = 0.0 Gdir_across = 0.0 for (mPdir,mG) in self.abscross.keys(): @@ -750,7 +750,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): (self.sigma[(mPdir,mG)]+1e-99)) if mG == G: Gdir_across += (self.abscross[(mPdir,mG)]/ - (self.sigma[(mPdir,mG)]+1e-99)) + (self.sigma[(mPdir,mG)]+1e-99)) rel_contrib = abs(Gdir_across/(Pdir_across+1e-99)) if rel_contrib > (1.0e-8) and \ nunwgt < 2 and len(grid_calculator.results) > 1: @@ -770,14 +770,14 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): exclude_sub_jobs = list(exclude_sub_jobs) exclude_sub_jobs.append(th_maxwgt[-1][1]) grid_calculator.results.run_statistics['skipped_subchannel'] += 1 - + # Add some monitoring of the problematic events - gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) + gPath = pjoin(Pdir, "G%s_%s" % (G, th_maxwgt[-1][1]+1)) if os.path.isfile(pjoin(gPath,'events.lhe')): lhe_file = lhe_parser.EventFile(pjoin(gPath,'events.lhe')) discardedPath = pjoin(Pdir,'DiscardedUnstableEvents') if not os.path.exists(discardedPath): - os.mkdir(discardedPath) + os.mkdir(discardedPath) if os.path.isdir(discardedPath): # Keep only the event with a maximum weight, as it surely # is the problematic one. 
@@ -790,10 +790,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): lhe_file.close() evtRecord.write(pjoin(gPath,'events.lhe').read()) evtRecord.close() - + return self.combine_grid(Pdir, G, step, exclude_sub_jobs) - + if across !=0: if sigma != 0: self.cross[(Pdir,G)] += cross**3/sigma**2 @@ -814,10 +814,10 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): self.chi2[(Pdir,G)] = 0 cross = self.cross[(Pdir,G)] error = 0 - + else: error = 0 - + grid_calculator.results.compute_values(update_statistics=True) if (str(os.path.basename(Pdir)), G) in self.run_statistics: self.run_statistics[(str(os.path.basename(Pdir)), G)]\ @@ -825,8 +825,8 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): else: self.run_statistics[(str(os.path.basename(Pdir)), G)] = \ grid_calculator.results.run_statistics - - self.warnings_from_statistics(G, grid_calculator.results.run_statistics) + + self.warnings_from_statistics(G, grid_calculator.results.run_statistics) stats_msg = grid_calculator.results.run_statistics.nice_output( '/'.join([os.path.basename(Pdir),'G%s'%G])) @@ -836,7 +836,7 @@ def combine_grid(self, Pdir, G, step, exclude_sub_jobs=[]): # Clean up grid_information to avoid border effects in case of a crash for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) - try: + try: os.remove(pjoin(path, 'grid_information')) except OSError as oneerror: if oneerror.errno != 2: @@ -850,7 +850,7 @@ def warnings_from_statistics(self,G,stats): return EPS_fraction = float(stats['exceptional_points'])/stats['n_madloop_calls'] - + msg = "Channel %s has encountered a fraction of %.3g\n"+ \ "of numerically unstable loop matrix element computations\n"+\ "(which could not be rescued using quadruple precision).\n"+\ @@ -861,16 +861,16 @@ def warnings_from_statistics(self,G,stats): elif EPS_fraction > 0.01: logger.critical((msg%(G,EPS_fraction)).replace('might', 'can')) raise Exception((msg%(G,EPS_fraction)).replace('might', 'can')) 
- + def get_current_axsec(self): - + across = 0 for (Pdir,G) in self.abscross: across += self.abscross[(Pdir,G)]/(self.sigma[(Pdir,G)]+1e-99) return across - + def write_results(self, grid_calculator, cross, error, Pdir, G, step): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -888,7 +888,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step): maxwgt = grid_calculator.get_max_wgt() nunwgt = grid_calculator.get_nunwgt() luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -897,20 +897,20 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s %s 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross), fstr(maxwgt)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - + def resubmit_survey(self, Pdir, G, Gdirs, step): """submit the next iteration of the survey""" # 1. write the new input_app.txt to double the number of points - run_card = self.cmd.run_card + run_card = self.cmd.run_card options = {'event' : 2**(step) * self.cmd.opts['points'] / self.splitted_grid, 'maxiter': 1, 'miniter': 1, @@ -919,18 +919,18 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): else run_card['nhel'], 'gridmode': -2, 'channel' : '' - } - + } + if int(options['helicity']) == 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + for Gdir in Gdirs: - self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) - - + self.write_parameter_file(pjoin(Gdir, 'input_app.txt'), options) + + #2. 
resubmit the new jobs packet = cluster.Packet((Pdir, G, step+1), self.combine_iteration, \ - (Pdir, G, step+1)) + (Pdir, G, step+1)) nb_step = len(Gdirs) * (step+1) for i,subdir in enumerate(Gdirs): subdir = subdir.rsplit('_',1)[1] @@ -938,34 +938,34 @@ def resubmit_survey(self, Pdir, G, Gdirs, step): offset = nb_step+i+1 offset=str(offset) tag = "%s.%s" % (subdir, offset) - + self.cmd.launch_job(pjoin(self.me_dir, 'SubProcesses', 'survey.sh'), argument=[tag, G], cwd=pjoin(self.me_dir,'SubProcesses' , Pdir), packet_member=packet) - + def write_parameter_file(self, path, options): """ """ - + template =""" %(event)s %(maxiter)s %(miniter)s !Number of events and max and min iterations %(accuracy)s !Accuracy %(gridmode)s !Grid Adjustment 0=none, 2=adjust 1 !Suppress Amplitude 1=yes %(helicity)s !Helicity Sum/event 0=exact - %(channel)s """ + %(channel)s """ options['event'] = int(options['event']) open(path, 'w').write(template % options) - - + + def write_parameter(self, parralelization, Pdirs=None): """Write the parameter of the survey run""" run_card = self.cmd.run_card - + options = {'event' : self.cmd.opts['points'], 'maxiter': self.cmd.opts['iterations'], 'miniter': self.min_iterations, @@ -975,36 +975,36 @@ def write_parameter(self, parralelization, Pdirs=None): 'gridmode': 2, 'channel': '' } - + if int(options['helicity'])== 1: options['event'] = options['event'] * 2**(self.cmd.proc_characteristics['nexternal']//3) - + if parralelization: options['gridmode'] = -2 options['maxiter'] = 1 #this is automatic in dsample anyway options['miniter'] = 1 #this is automatic in dsample anyway options['event'] /= self.splitted_grid - + if not Pdirs: Pdirs = self.subproc - + for Pdir in Pdirs: - path =pjoin(Pdir, 'input_app.txt') + path =pjoin(Pdir, 'input_app.txt') self.write_parameter_file(path, options) - - -class gen_ximprove(object): - - + + +class gen_ximprove(object): + + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the 
number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job @@ -1022,7 +1022,7 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_gridpack) elif cls.force_class == 'loop_induced': return super(gen_ximprove, cls).__new__(gen_ximprove_share) - + if cmd.proc_characteristics['loop_induced']: return super(gen_ximprove, cls).__new__(gen_ximprove_share) elif gen_ximprove.format_variable(cmd.run_card['gridpack'], bool): @@ -1031,31 +1031,31 @@ def __new__(cls, cmd, opt): return super(gen_ximprove, cls).__new__(gen_ximprove_share) else: return super(gen_ximprove, cls).__new__(gen_ximprove_v4) - - + + def __init__(self, cmd, opt=None): - + try: super(gen_ximprove, self).__init__(cmd, opt) except TypeError: pass - + self.run_statistics = {} self.cmd = cmd self.run_card = cmd.run_card run_card = self.run_card self.me_dir = cmd.me_dir - + #extract from the run_card the information that we need. 
self.gridpack = run_card['gridpack'] self.nhel = run_card['nhel'] if "nhel_refine" in run_card: self.nhel = run_card["nhel_refine"] - + if self.run_card['refine_evt_by_job'] != -1: self.max_request_event = run_card['refine_evt_by_job'] - - + + # Default option for the run self.gen_events = True self.parralel = False @@ -1066,7 +1066,7 @@ def __init__(self, cmd, opt=None): # parameter for the gridpack run self.nreq = 2000 self.iseed = 4321 - + # placeholder for information self.results = 0 #updated in launch/update_html @@ -1074,16 +1074,16 @@ def __init__(self, cmd, opt=None): self.configure(opt) elif isinstance(opt, bannermod.GridpackCard): self.configure_gridpack(opt) - + def __call__(self): return self.launch() - + def launch(self): - """running """ - + """running """ + #start the run self.handle_seed() - self.results = sum_html.collect_result(self.cmd, + self.results = sum_html.collect_result(self.cmd, main_dir=pjoin(self.cmd.me_dir,'SubProcesses')) #main_dir is for gridpack readonly mode if self.gen_events: # We run to provide a given number of events @@ -1095,15 +1095,15 @@ def launch(self): def configure(self, opt): """Defines some parameter of the run""" - + for key, value in opt.items(): if key in self.__dict__: targettype = type(getattr(self, key)) setattr(self, key, self.format_variable(value, targettype, key)) else: raise Exception('%s not define' % key) - - + + # special treatment always do outside the loop to avoid side effect if 'err_goal' in opt: if self.err_goal < 1: @@ -1113,24 +1113,24 @@ def configure(self, opt): logger.info("Generating %s unweighted events." 
% self.err_goal) self.gen_events = True self.err_goal = self.err_goal * self.gen_events_security # security - + def handle_seed(self): """not needed but for gridpack --which is not handle here for the moment""" return - - + + def find_job_for_event(self): """return the list of channel that need to be improved""" - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) - - goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 + + goal_lum = self.err_goal/(self.results.axsec+1e-99) #pb^-1 logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) - all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) - + all_channels.sort(key= lambda x:x.get('luminosity'), reverse=True) + to_refine = [] for C in all_channels: if C.get('axsec') == 0: @@ -1141,61 +1141,61 @@ def find_job_for_event(self): elif C.get('xerr') > max(C.get('axsec'), (1/(100*math.sqrt(self.err_goal)))*all_channels[-1].get('axsec')): to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 
'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru + - class gen_ximprove_v4(gen_ximprove): - + # some hardcoded value which impact the generation gen_events_security = 1.2 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 1000 # split jobs if a channel if it needs more than that + max_request_event = 1000 # split jobs if a channel if it needs more than that max_event_in_iter = 5000 min_event_in_iter = 1000 - max_splitting = 130 # maximum duplication of a given channel - min_iter = 3 + max_splitting = 130 # maximum duplication of a given channel + min_iter = 3 max_iter = 9 keep_grid_for_refine = False # only apply if needed to split the job - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + super(gen_ximprove_v4, self).__init__(cmd, opt) - + if cmd.opts['accuracy'] < cmd._survey_options['accuracy'][1]: self.increase_precision(cmd._survey_options['accuracy'][1]/cmd.opts['accuracy']) @@ -1203,7 +1203,7 @@ def reset_multijob(self): for path in misc.glob(pjoin('*', '*','multijob.dat'), pjoin(self.me_dir, 'SubProcesses')): open(path,'w').write('0\n') - + def write_multijob(self, Channel, nb_split): """ """ if nb_split <=1: @@ -1211,7 +1211,7 @@ def write_multijob(self, Channel, nb_split): f = open(pjoin(self.me_dir, 'SubProcesses', Channel.get('name'), 'multijob.dat'), 'w') f.write('%i\n' % nb_split) f.close() - + def increase_precision(self, rate=3): #misc.sprint(rate) if rate < 3: @@ -1222,25 +1222,25 @@ def increase_precision(self, rate=3): rate = rate -2 self.max_event_in_iter = int((rate+1) * 10000) self.min_events = int(rate+2) * 2500 - self.gen_events_security = 1 + 0.1 * (rate+2) - + 
self.gen_events_security = 1 + 0.1 * (rate+2) + if int(self.nhel) == 1: self.min_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//3) self.max_event_in_iter *= 2**(self.cmd.proc_characteristics['nexternal']//2) - - + + alphabet = "abcdefghijklmnopqrstuvwxyz" def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() #reset the potential multijob of previous run self.reset_multijob() - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. @@ -1257,17 +1257,17 @@ def get_job_for_event(self): else: for i in range(len(to_refine) //3): new_order.append(to_refine[i]) - new_order.append(to_refine[-2*i-1]) + new_order.append(to_refine[-2*i-1]) new_order.append(to_refine[-2*i-2]) if len(to_refine) % 3 == 1: - new_order.append(to_refine[i+1]) + new_order.append(to_refine[i+1]) elif len(to_refine) % 3 == 2: - new_order.append(to_refine[i+2]) + new_order.append(to_refine[i+2]) #ensure that the reordering is done nicely assert set([id(C) for C in to_refine]) == set([id(C) for C in new_order]) - to_refine = new_order - - + to_refine = new_order + + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target @@ -1279,7 +1279,7 @@ def get_job_for_event(self): nb_split = self.max_splitting nb_split=max(1, nb_split) - + #2. estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1296,21 +1296,21 @@ def get_job_for_event(self): nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. 
Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + # write the multi-job information self.write_multijob(C, nb_split) - + packet = cluster.Packet((C.parent_name, C.name), combine_runs.CombineRuns, (pjoin(self.me_dir, 'SubProcesses', C.parent_name)), {"subproc": C.name, "nb_split":nb_split}) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1321,7 +1321,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': packet, + 'packet': packet, } if nb_split == 1: @@ -1334,19 +1334,19 @@ def get_job_for_event(self): if self.keep_grid_for_refine: new_info['base_directory'] = info['directory'] jobs.append(new_info) - - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def create_ajob(self, template, jobs, write_dir=None): """create the ajob""" - + if not jobs: return if not write_dir: write_dir = pjoin(self.me_dir, 'SubProcesses') - + #filter the job according to their SubProcess directory # no mix submition P2job= collections.defaultdict(list) for j in jobs: @@ -1355,11 +1355,11 @@ def create_ajob(self, template, jobs, write_dir=None): for P in P2job.values(): self.create_ajob(template, P, write_dir) return - - + + #Here we can assume that all job are for the same directory. 
path = pjoin(write_dir, jobs[0]['P_dir']) - + template_text = open(template, 'r').read() # special treatment if needed to combine the script # computes how many submition miss one job @@ -1384,8 +1384,8 @@ def create_ajob(self, template, jobs, write_dir=None): skip1=0 combining_job =1 nb_sub = len(jobs) - - + + nb_use = 0 for i in range(nb_sub): script_number = i+1 @@ -1404,14 +1404,14 @@ def create_ajob(self, template, jobs, write_dir=None): info["base_directory"] = "./" fsock.write(template_text % info) nb_use += nb_job - + fsock.close() return script_number def get_job_for_precision(self): """create the ajob to achieve a give precision on the total cross-section""" - + assert self.err_goal <=1 xtot = abs(self.results.xsec) logger.info("Working on precision: %s %%" %(100*self.err_goal)) @@ -1428,46 +1428,46 @@ def get_job_for_precision(self): rerr *=rerr if not len(to_refine): return - - # change limit since most don't contribute + + # change limit since most don't contribute limit = math.sqrt((self.err_goal * xtot)**2 - rerr/math.sqrt(len(to_refine))) for C in to_refine[:]: cerr = C.mfactor*(C.xerru + len(to_refine)*C.xerrc) if cerr < limit: to_refine.remove(C) - + # all the channel are now selected. create the channel information logger.info('need to improve %s channels' % len(to_refine)) - + jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. # loop over the channel to refine for C in to_refine: - + #1. 
Determine how many events we need in each iteration yerr = C.mfactor*(C.xerru+len(to_refine)*C.xerrc) nevents = 0.2*C.nevents*(yerr/limit)**2 - + nb_split = int((nevents*(C.nunwgt/C.nevents)/self.max_request_event/ (2**self.min_iter-1))**(2/3)) nb_split = max(nb_split, 1) - # **(2/3) to slow down the increase in number of jobs + # **(2/3) to slow down the increase in number of jobs if nb_split > self.max_splitting: nb_split = self.max_splitting - + if nb_split >1: nevents = nevents / nb_split self.write_multijob(C, nb_split) # forbid too low/too large value nevents = min(self.min_event_in_iter, max(self.max_event_in_iter, nevents)) - - + + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': C.parent_name, + 'P_dir': C.parent_name, 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'offset': 1, # need to be change for splitted job 'nevents': nevents, @@ -1487,38 +1487,38 @@ def get_job_for_precision(self): new_info['offset'] = i+1 new_info['directory'] += self.alphabet[i % 26] + str((i+1)//26) jobs.append(new_info) - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs) + def update_html(self): """update the html from this object since it contains all the information""" - + run = self.cmd.results.current['run_name'] if not os.path.exists(pjoin(self.cmd.me_dir, 'HTML', run)): os.mkdir(pjoin(self.cmd.me_dir, 'HTML', run)) - + unit = self.cmd.results.unit - P_text = "" - if self.results: - Presults = self.results + P_text = "" + if self.results: + Presults = self.results else: self.results = sum_html.collect_result(self.cmd, None) Presults = self.results - + for P_comb in Presults: - P_text += P_comb.get_html(run, unit, self.cmd.me_dir) - - Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 
'results.dat')) - + P_text += P_comb.get_html(run, unit, self.cmd.me_dir) + + Presults.write_results_dat(pjoin(self.cmd.me_dir,'SubProcesses', 'results.dat')) + fsock = open(pjoin(self.cmd.me_dir, 'HTML', run, 'results.html'),'w') fsock.write(sum_html.results_header) fsock.write('%s
' % Presults.get_html(run, unit, self.cmd.me_dir)) - fsock.write('%s
' % P_text) - + fsock.write('%s ' % P_text) + self.cmd.results.add_detail('cross', Presults.xsec) - self.cmd.results.add_detail('error', Presults.xerru) - - return Presults.xsec, Presults.xerru + self.cmd.results.add_detail('error', Presults.xerru) + + return Presults.xsec, Presults.xerru @@ -1528,27 +1528,27 @@ class gen_ximprove_v4_nogridupdate(gen_ximprove_v4): # some hardcoded value which impact the generation gen_events_security = 1.1 # multiply the number of requested event by this number for security combining_job = 0 # allow to run multiple channel in sequence - max_request_event = 400 # split jobs if a channel if it needs more than that + max_request_event = 400 # split jobs if a channel if it needs more than that max_event_in_iter = 500 min_event_in_iter = 250 - max_splitting = 260 # maximum duplication of a given channel - min_iter = 2 + max_splitting = 260 # maximum duplication of a given channel + min_iter = 2 max_iter = 6 keep_grid_for_refine = True - def __init__(self, cmd, opt=None): - + def __init__(self, cmd, opt=None): + gen_ximprove.__init__(cmd, opt) - + if cmd.proc_characteristics['loopinduced'] and \ cmd.proc_characteristics['nexternal'] > 2: self.increase_parralelization(cmd.proc_characteristics['nexternal']) - + def increase_parralelization(self, nexternal): - self.max_splitting = 1000 - + self.max_splitting = 1000 + if self.run_card['refine_evt_by_job'] != -1: pass elif nexternal == 3: @@ -1563,27 +1563,27 @@ def increase_parralelization(self, nexternal): class gen_ximprove_share(gen_ximprove, gensym): """Doing the refine in multicore. Each core handle a couple of PS point.""" - nb_ps_by_job = 2000 + nb_ps_by_job = 2000 mode = "refine" gen_events_security = 1.15 # Note the real security is lower since we stop the jobs if they are at 96% # of this target. 
def __init__(self, *args, **opts): - + super(gen_ximprove_share, self).__init__(*args, **opts) self.generated_events = {} self.splitted_for_dir = lambda x,y : self.splitted_Pdir[(x,y)] - + def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - + goal_lum, to_refine = self.find_job_for_event() self.goal_lum = goal_lum - + # loop over the channel to refine to find the number of PS point to launch total_ps_points = 0 channel_to_ps_point = [] @@ -1593,7 +1593,7 @@ def get_job_for_event(self): os.remove(pjoin(self.me_dir, "SubProcesses",C.parent_name, C.name, "events.lhe")) except: pass - + #1. Compute the number of points are needed to reach target needed_event = goal_lum*C.get('axsec') if needed_event == 0: @@ -1609,18 +1609,18 @@ def get_job_for_event(self): nb_split = 1 if nb_split > self.max_splitting: nb_split = self.max_splitting - nevents = self.max_event_in_iter * self.max_splitting + nevents = self.max_event_in_iter * self.max_splitting else: nevents = self.max_event_in_iter * nb_split if nevents > self.max_splitting*self.max_event_in_iter: logger.warning("Channel %s/%s has a very low efficiency of unweighting. 
Might not be possible to reach target" % \ (C.name, C.parent_name)) - nevents = self.max_event_in_iter * self.max_splitting - - total_ps_points += nevents - channel_to_ps_point.append((C, nevents)) - + nevents = self.max_event_in_iter * self.max_splitting + + total_ps_points += nevents + channel_to_ps_point.append((C, nevents)) + if self.cmd.options["run_mode"] == 1: if self.cmd.options["cluster_size"]: nb_ps_by_job = total_ps_points /int(self.cmd.options["cluster_size"]) @@ -1634,7 +1634,7 @@ def get_job_for_event(self): nb_ps_by_job = total_ps_points / self.cmd.options["nb_core"] else: nb_ps_by_job = self.nb_ps_by_job - + nb_ps_by_job = int(max(nb_ps_by_job, 500)) for C, nevents in channel_to_ps_point: @@ -1648,20 +1648,20 @@ def get_job_for_event(self): self.create_resubmit_one_iter(C.parent_name, C.name[1:], submit_ps, nb_job, step=0) needed_event = goal_lum*C.get('xsec') logger.debug("%s/%s : need %s event. Need %s split job of %s points", C.parent_name, C.name, needed_event, nb_job, submit_ps) - - + + def combine_iteration(self, Pdir, G, step): - + grid_calculator, cross, error = self.combine_grid(Pdir, G, step) - + # collect all the generated_event Gdirs = [] #build the the list of directory for i in range(self.splitted_for_dir(Pdir, G)): path = pjoin(Pdir, "G%s_%s" % (G, i+1)) Gdirs.append(path) assert len(grid_calculator.results) == len(Gdirs) == self.splitted_for_dir(Pdir, G) - - + + # Check how many events are going to be kept after un-weighting. needed_event = cross * self.goal_lum if needed_event == 0: @@ -1671,19 +1671,19 @@ def combine_iteration(self, Pdir, G, step): if self.err_goal >=1: if needed_event > self.gen_events_security * self.err_goal: needed_event = int(self.gen_events_security * self.err_goal) - + if (Pdir, G) in self.generated_events: old_nunwgt, old_maxwgt = self.generated_events[(Pdir, G)] else: old_nunwgt, old_maxwgt = 0, 0 - + if old_nunwgt == 0 and os.path.exists(pjoin(Pdir,"G%s" % G, "events.lhe")): # possible for second refine. 
lhe = lhe_parser.EventFile(pjoin(Pdir,"G%s" % G, "events.lhe")) old_nunwgt = lhe.unweight(None, trunc_error=0.005, log_level=0) old_maxwgt = lhe.max_wgt - - + + maxwgt = max(grid_calculator.get_max_wgt(), old_maxwgt) new_evt = grid_calculator.get_nunwgt(maxwgt) @@ -1695,35 +1695,35 @@ def combine_iteration(self, Pdir, G, step): one_iter_nb_event = max(grid_calculator.get_nunwgt(),1) drop_previous_iteration = False # compare the number of events to generate if we discard the previous iteration - n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + n_target_one_iter = (needed_event-one_iter_nb_event) / ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) n_target_combined = (needed_event-nunwgt) / efficiency if n_target_one_iter < n_target_combined: # the last iteration alone has more event that the combine iteration. - # it is therefore interesting to drop previous iteration. + # it is therefore interesting to drop previous iteration. drop_previous_iteration = True nunwgt = one_iter_nb_event maxwgt = grid_calculator.get_max_wgt() new_evt = nunwgt - efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) - + efficiency = ( one_iter_nb_event/ sum([R.nevents for R in grid_calculator.results])) + try: if drop_previous_iteration: raise IOError output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'a') except IOError: output_file = open(pjoin(Pdir,"G%s" % G, "events.lhe"), 'w') - + misc.call(["cat"] + [pjoin(d, "events.lhe") for d in Gdirs], stdout=output_file) output_file.close() # For large number of iteration. check the number of event by doing the # real unweighting. 
- if nunwgt < 0.6 * needed_event and step > self.min_iter: + if nunwgt < 0.6 * needed_event and step > self.min_iter: lhe = lhe_parser.EventFile(output_file.name) old_nunwgt =nunwgt nunwgt = lhe.unweight(None, trunc_error=0.01, log_level=0) - - + + self.generated_events[(Pdir, G)] = (nunwgt, maxwgt) # misc.sprint("Adding %s event to %s. Currently at %s" % (new_evt, G, nunwgt)) @@ -1742,21 +1742,21 @@ def combine_iteration(self, Pdir, G, step): nevents = grid_calculator.results[0].nevents if nevents == 0: # possible if some integral returns 0 nevents = max(g.nevents for g in grid_calculator.results) - + need_ps_point = (needed_event - nunwgt)/(efficiency+1e-99) - need_job = need_ps_point // nevents + 1 - + need_job = need_ps_point // nevents + 1 + if step < self.min_iter: # This is normal but check if we are on the good track - job_at_first_iter = nb_split_before/2**(step-1) + job_at_first_iter = nb_split_before/2**(step-1) expected_total_job = job_at_first_iter * (2**self.min_iter-1) done_job = job_at_first_iter * (2**step-1) expected_remaining_job = expected_total_job - done_job - logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) + logger.debug("efficiency status (smaller is better): %s", need_job/expected_remaining_job) # increase if needed but not too much need_job = min(need_job, expected_remaining_job*1.25) - + nb_job = (need_job-0.5)//(2**(self.min_iter-step)-1) + 1 nb_job = max(1, nb_job) grid_calculator.write_grid_for_submission(Pdir,G, @@ -1768,7 +1768,7 @@ def combine_iteration(self, Pdir, G, step): nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) #self.create_job(Pdir, G, nb_job, nevents, step) - + elif step < self.max_iter: if step + 1 == self.max_iter: need_job = 1.20 * need_job # avoid to have just too few event. 
@@ -1777,21 +1777,21 @@ def combine_iteration(self, Pdir, G, step): grid_calculator.write_grid_for_submission(Pdir,G, self.splitted_for_dir(Pdir, G), nb_job*nevents ,mode=self.mode, conservative_factor=self.max_iter) - - + + logger.info("%s/G%s is at %i/%i ('%.2g%%') event. Resubmit %i job at iteration %i." \ % (os.path.basename(Pdir), G, int(nunwgt),int(needed_event)+1, (float(nunwgt)/needed_event)*100.0 if needed_event>0.0 else 0.0, nb_job, step)) self.create_resubmit_one_iter(Pdir, G, nevents, nb_job, step) - - + + return 0 - - + + def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency): - + #compute the value if cross == 0: abscross,nw, luminosity = 0, 0, 0 @@ -1807,7 +1807,7 @@ def write_results(self, grid_calculator, cross, error, Pdir, G, step, efficiency nevents = nunwgt # make the unweighting to compute the number of events: luminosity = nunwgt/cross - + #format the results.dat def fstr(nb): data = '%E' % nb @@ -1816,23 +1816,23 @@ def fstr(nb): power = int(power) + 1 return '%.5fE%+03i' %(nb,power) line = '%s %s %s %i %i %i %i %s %s %s 0.0 0.0 0\n' % \ - (fstr(cross), fstr(error*cross), fstr(error*cross), + (fstr(cross), fstr(error*cross), fstr(error*cross), nevents, nw, maxit,nunwgt, fstr(luminosity), fstr(wgt), fstr(abscross)) - + fsock = open(pjoin(self.me_dir,'SubProcesses' , Pdir, 'G%s' % G, - 'results.dat'),'w') + 'results.dat'),'w') fsock.writelines(line) fsock.close() - - - + + + class gen_ximprove_gridpack(gen_ximprove_v4): - - min_iter = 1 + + min_iter = 1 max_iter = 13 - max_request_event = 1e12 # split jobs if a channel if it needs more than that + max_request_event = 1e12 # split jobs if a channel if it needs more than that max_event_in_iter = 4000 min_event_in_iter = 500 combining_job = sys.maxsize @@ -1844,7 +1844,7 @@ def __new__(cls, *args, **opts): return super(gen_ximprove_gridpack, cls).__new__(cls, *args, **opts) def __init__(self, *args, **opts): - + self.ngran = -1 self.gscalefact = {} self.readonly = False 
@@ -1855,23 +1855,23 @@ def __init__(self, *args, **opts): self.readonly = opts['readonly'] super(gen_ximprove_gridpack,self).__init__(*args, **opts) if self.ngran == -1: - self.ngran = 1 - + self.ngran = 1 + def find_job_for_event(self): """return the list of channel that need to be improved""" import random - + assert self.err_goal >=1 self.err_goal = int(self.err_goal) self.gscalefact = {} - + xtot = self.results.axsec - goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 + goal_lum = self.err_goal/(xtot+1e-99) #pb^-1 # logger.info('Effective Luminosity %s pb^-1', goal_lum) - + all_channels = sum([list(P) for P in self.results],[]) all_channels.sort(key=lambda x : x.get('luminosity'), reverse=True) - + to_refine = [] for C in all_channels: tag = C.get('name') @@ -1885,27 +1885,27 @@ def find_job_for_event(self): #need to generate events logger.debug('request events for ', C.get('name'), 'cross=', C.get('axsec'), 'needed events = ', goal_lum * C.get('axsec')) - to_refine.append(C) - - logger.info('need to improve %s channels' % len(to_refine)) + to_refine.append(C) + + logger.info('need to improve %s channels' % len(to_refine)) return goal_lum, to_refine def get_job_for_event(self): """generate the script in order to generate a given number of event""" # correspond to write_gen in the fortran version - - + + goal_lum, to_refine = self.find_job_for_event() jobs = [] # list of the refine if some job are split is list of # dict with the parameter of the run. - + # loop over the channel to refine for C in to_refine: #1. Compute the number of points are needed to reach target needed_event = max(goal_lum*C.get('axsec'), self.ngran) nb_split = 1 - + #2. 
estimate how many points we need in each iteration if C.get('nunwgt') > 0: nevents = needed_event / nb_split * (C.get('nevents') / C.get('nunwgt')) @@ -1920,13 +1920,13 @@ def get_job_for_event(self): # forbid too low/too large value nevents = max(self.min_event_in_iter, min(self.max_event_in_iter, nevents)) logger.debug("%s : need %s event. Need %s split job of %s points", C.name, needed_event, nb_split, nevents) - + #create the info dict assume no splitting for the default info = {'name': self.cmd.results.current['run_name'], 'script_name': 'unknown', 'directory': C.name, # need to be change for splitted job - 'P_dir': os.path.basename(C.parent_name), + 'P_dir': os.path.basename(C.parent_name), 'offset': 1, # need to be change for splitted job 'Ppath': pjoin(self.cmd.me_dir, 'SubProcesses', C.parent_name), 'nevents': nevents, #int(nevents*self.gen_events_security)+1, @@ -1938,7 +1938,7 @@ def get_job_for_event(self): 'channel': C.name.replace('G',''), 'grid_refinment' : 0, #no refinment of the grid 'base_directory': '', #should be change in splitted job if want to keep the grid - 'packet': None, + 'packet': None, } if self.readonly: @@ -1946,11 +1946,11 @@ def get_job_for_event(self): info['base_directory'] = basedir jobs.append(info) - - write_dir = '.' if self.readonly else None - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) - + + write_dir = '.' if self.readonly else None + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), jobs, write_dir) + done = [] for j in jobs: if j['P_dir'] in done: @@ -1967,22 +1967,22 @@ def get_job_for_event(self): write_dir = '.' 
if self.readonly else pjoin(self.me_dir, 'SubProcesses') self.check_events(goal_lum, to_refine, jobs, write_dir) - + def check_events(self, goal_lum, to_refine, jobs, Sdir): """check that we get the number of requested events if not resubmit.""" - + new_jobs = [] - + for C, job_info in zip(to_refine, jobs): - P = job_info['P_dir'] + P = job_info['P_dir'] G = job_info['channel'] axsec = C.get('axsec') - requested_events= job_info['requested_event'] - + requested_events= job_info['requested_event'] + new_results = sum_html.OneResult((P,G)) new_results.read_results(pjoin(Sdir,P, 'G%s'%G, 'results.dat')) - + # need to resubmit? if new_results.get('nunwgt') < requested_events: pwd = pjoin(os.getcwd(),job_info['P_dir'],'G%s'%G) if self.readonly else \ @@ -1992,10 +1992,10 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): job_info['offset'] += 1 new_jobs.append(job_info) files.mv(pjoin(pwd, 'events.lhe'), pjoin(pwd, 'events.lhe.previous')) - + if new_jobs: - self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) - + self.create_ajob(pjoin(self.me_dir, 'SubProcesses', 'refine.sh'), new_jobs, Sdir) + done = [] for j in new_jobs: if j['P_dir'] in done: @@ -2015,9 +2015,9 @@ def check_events(self, goal_lum, to_refine, jobs, Sdir): files.put_at_end(pjoin(pwd, 'events.lhe'),pjoin(pwd, 'events.lhe.previous')) return self.check_events(goal_lum, to_refine, new_jobs, Sdir) - - - - + + + + diff --git a/epochX/cudacpp/pp_tt012j.mad/bin/internal/madevent_interface.py b/epochX/cudacpp/pp_tt012j.mad/bin/internal/madevent_interface.py index cb6bf4ca57..8abba3f33f 100755 --- a/epochX/cudacpp/pp_tt012j.mad/bin/internal/madevent_interface.py +++ b/epochX/cudacpp/pp_tt012j.mad/bin/internal/madevent_interface.py @@ -2,11 +2,11 @@ # # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors # -# This file is a part of the MadGraph5_aMC@NLO project, an application which +# This file is a part of the MadGraph5_aMC@NLO project, an application 
which # automatically generates Feynman diagrams and matrix elements for arbitrary # high-energy processes in the Standard Model and beyond. # -# It is subject to the MadGraph5_aMC@NLO license which should accompany this +# It is subject to the MadGraph5_aMC@NLO license which should accompany this # distribution. # # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch @@ -53,10 +53,10 @@ # Special logger for the Cmd Interface logger = logging.getLogger('madevent.stdout') # -> stdout logger_stderr = logging.getLogger('madevent.stderr') # ->stderr - + try: import madgraph -except ImportError as error: +except ImportError as error: # import from madevent directory MADEVENT = True import internal.extended_cmd as cmd @@ -92,7 +92,7 @@ import madgraph.various.lhe_parser as lhe_parser # import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code import models.check_param_card as check_param_card - from madgraph.iolibs.files import ln + from madgraph.iolibs.files import ln from madgraph import InvalidCmd, MadGraph5Error, MG5DIR, ReadWrite @@ -113,10 +113,10 @@ class CmdExtended(common_run.CommonRunCmd): next_possibility = { 'start': [], } - + debug_output = 'ME5_debug' error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n' - error_debug += 'More information is found in \'%(debug)s\'.\n' + error_debug += 'More information is found in \'%(debug)s\'.\n' error_debug += 'Please attach this file to your report.' 
config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n' @@ -124,18 +124,18 @@ class CmdExtended(common_run.CommonRunCmd): keyboard_stop_msg = """stopping all operation in order to quit MadGraph5_aMC@NLO please enter exit""" - + # Define the Error InvalidCmd = InvalidCmd ConfigurationError = MadGraph5Error def __init__(self, me_dir, options, *arg, **opt): """Init history and line continuation""" - + # Tag allowing/forbiding question self.force = False - - # If possible, build an info line with current version number + + # If possible, build an info line with current version number # and date, from the VERSION text file info = misc.get_pkg_info() info_line = "" @@ -150,7 +150,7 @@ def __init__(self, me_dir, options, *arg, **opt): else: version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip() info_line = "#* VERSION %s %s *\n" % \ - (version, (24 - len(version)) * ' ') + (version, (24 - len(version)) * ' ') # Create a header for the history file. # Remember to fill in time at writeout time! 
@@ -177,7 +177,7 @@ def __init__(self, me_dir, options, *arg, **opt): '#* run as ./bin/madevent.py filename *\n' + \ '#* *\n' + \ '#************************************************************\n' - + if info_line: info_line = info_line[1:] @@ -203,11 +203,11 @@ def __init__(self, me_dir, options, *arg, **opt): "* *\n" + \ "************************************************************") super(CmdExtended, self).__init__(me_dir, options, *arg, **opt) - + def get_history_header(self): - """return the history header""" + """return the history header""" return self.history_header % misc.get_time_info() - + def stop_on_keyboard_stop(self): """action to perform to close nicely on a keyboard interupt""" try: @@ -219,20 +219,20 @@ def stop_on_keyboard_stop(self): self.add_error_log_in_html(KeyboardInterrupt) except: pass - + def postcmd(self, stop, line): """ Update the status of the run for finishing interactive command """ - - stop = super(CmdExtended, self).postcmd(stop, line) + + stop = super(CmdExtended, self).postcmd(stop, line) # relaxing the tag forbidding question self.force = False - + if not self.use_rawinput: return stop - + if self.results and not self.results.current: return stop - + arg = line.split() if len(arg) == 0: return stop @@ -240,41 +240,41 @@ def postcmd(self, stop, line): return stop if isinstance(self.results.status, str) and self.results.status == 'Stop by the user': self.update_status('%s Stop by the user' % arg[0], level=None, error=True) - return stop + return stop elif not self.results.status: return stop elif str(arg[0]) in ['exit','quit','EOF']: return stop - + try: - self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], + self.update_status('Command \'%s\' done.
Waiting for instruction.' % arg[0], level=None, error=True) except Exception: misc.sprint('update_status fails') pass - - + + def nice_user_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() - return cmd.Cmd.nice_user_error(self, error, line) - + return cmd.Cmd.nice_user_error(self, error, line) + def nice_config_error(self, error, line): """If a ME run is currently running add a link in the html output""" self.add_error_log_in_html() stop = cmd.Cmd.nice_config_error(self, error, line) - - + + try: debug_file = open(self.debug_output, 'a') debug_file.write(open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat'))) debug_file.close() except: - pass + pass return stop - + def nice_error_handling(self, error, line): """If a ME run is currently running add a link in the html output""" @@ -294,7 +294,7 @@ def nice_error_handling(self, error, line): proc_card = pjoin(self.me_dir,'Cards','proc_card_mg5.dat') if os.path.exists(proc_card): self.banner.add(proc_card) - + out_dir = pjoin(self.me_dir, 'Events', self.run_name) if not os.path.isdir(out_dir): os.mkdir(out_dir) @@ -307,7 +307,7 @@ def nice_error_handling(self, error, line): else: pass else: - self.add_error_log_in_html() + self.add_error_log_in_html() stop = cmd.Cmd.nice_error_handling(self, error, line) try: debug_file = open(self.debug_output, 'a') @@ -316,14 +316,14 @@ def nice_error_handling(self, error, line): except: pass return stop - - + + #=============================================================================== # HelpToCmd #=============================================================================== class HelpToCmd(object): """ The Series of help routine for the MadEventCmd""" - + def help_pythia(self): logger.info("syntax: pythia [RUN] [--run_options]") logger.info("-- run pythia on RUN (current one by default)") @@ -352,29 +352,29 @@ def help_banner_run(self): logger.info(" Path should be the path of a valid banner.") 
logger.info(" RUN should be the name of a run of the current directory") self.run_options_help([('-f','answer all question by default'), - ('--name=X', 'Define the name associated with the new run')]) - + ('--name=X', 'Define the name associated with the new run')]) + def help_open(self): logger.info("syntax: open FILE ") logger.info("-- open a file with the appropriate editor.") logger.info(' If FILE belongs to index.html, param_card.dat, run_card.dat') logger.info(' the path to the last created/used directory is used') logger.info(' The program used to open those files can be chosen in the') - logger.info(' configuration file ./input/mg5_configuration.txt') - - + logger.info(' configuration file ./input/mg5_configuration.txt') + + def run_options_help(self, data): if data: logger.info('-- local options:') for name, info in data: logger.info(' %s : %s' % (name, info)) - + logger.info("-- session options:") - logger.info(" Note that those options will be kept for the current session") + logger.info(" Note that those options will be kept for the current session") logger.info(" --cluster : Submit to the cluster. 
Current cluster: %s" % self.options['cluster_type']) logger.info(" --multicore : Run in multi-core configuration") logger.info(" --nb_core=X : limit the number of core to use to X.") - + def help_generate_events(self): logger.info("syntax: generate_events [run_name] [options]",) @@ -398,16 +398,16 @@ def help_initMadLoop(self): logger.info(" -f : Bypass the edition of MadLoopParams.dat.",'$MG:color:BLUE') logger.info(" -r : Refresh of the existing filters (erasing them if already present).",'$MG:color:BLUE') logger.info(" --nPS= : Specify how many phase-space points should be tried to set up the filters.",'$MG:color:BLUE') - + def help_calculate_decay_widths(self): - + if self.ninitial != 1: logger.warning("This command is only valid for processes of type A > B C.") logger.warning("This command can not be run in current context.") logger.warning("") - + logger.info("syntax: calculate_decay_widths [run_name] [options])") logger.info("-- Calculate decay widths and enter widths and BRs in param_card") logger.info(" for a series of processes of type A > B C ...") @@ -428,8 +428,8 @@ def help_survey(self): logger.info("-- evaluate the different channel associate to the process") self.run_options_help([("--" + key,value[-1]) for (key,value) in \ self._survey_options.items()]) - - + + def help_restart_gridpack(self): logger.info("syntax: restart_gridpack --precision= --restart_zero") @@ -439,14 +439,14 @@ def help_launch(self): logger.info("syntax: launch [run_name] [options])") logger.info(" --alias for either generate_events/calculate_decay_widths") logger.info(" depending of the number of particles in the initial state.") - + if self.ninitial == 1: logger.info("For this directory this is equivalent to calculate_decay_widths") self.help_calculate_decay_widths() else: logger.info("For this directory this is equivalent to $generate_events") self.help_generate_events() - + def help_refine(self): logger.info("syntax: refine require_precision [max_channel] [--run_options]") 
logger.info("-- refine the LAST run to achieve a given precision.") @@ -454,14 +454,14 @@ def help_refine(self): logger.info(' or the required relative error') logger.info(' max_channel:[5] maximal number of channel per job') self.run_options_help([]) - + def help_combine_events(self): """ """ logger.info("syntax: combine_events [run_name] [--tag=tag_name] [--run_options]") logger.info("-- Combine the last run in order to write the number of events") logger.info(" asked in the run_card.") self.run_options_help([]) - + def help_store_events(self): """ """ logger.info("syntax: store_events [--run_options]") @@ -481,7 +481,7 @@ def help_import(self): logger.info("syntax: import command PATH") logger.info("-- Execute the command present in the file") self.run_options_help([]) - + def help_syscalc(self): logger.info("syntax: syscalc [RUN] [%s] [-f | --tag=]" % '|'.join(self._plot_mode)) logger.info("-- calculate systematics information for the RUN (current run by default)") @@ -506,18 +506,18 @@ class AskRun(cmd.ControlSwitch): ('madspin', 'Decay onshell particles'), ('reweight', 'Add weights to events for new hypp.') ] - + def __init__(self, question, line_args=[], mode=None, force=False, *args, **opt): - + self.check_available_module(opt['mother_interface'].options) self.me_dir = opt['mother_interface'].me_dir super(AskRun,self).__init__(self.to_control, opt['mother_interface'], *args, **opt) - - + + def check_available_module(self, options): - + self.available_module = set() if options['pythia-pgs_path']: self.available_module.add('PY6') @@ -540,32 +540,32 @@ def check_available_module(self, options): self.available_module.add('Rivet') else: logger.warning("Rivet program installed but no parton shower with hepmc output detected.\n Please install pythia8") - + if not MADEVENT or ('mg5_path' in options and options['mg5_path']): self.available_module.add('MadSpin') if misc.has_f2py() or options['f2py_compiler']: self.available_module.add('reweight') -# old mode to 
activate the shower +# old mode to activate the shower def ans_parton(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if value is None: self.set_all_off() else: logger.warning('Invalid command: parton=%s' % value) - - + + # -# HANDLING SHOWER +# HANDLING SHOWER # def get_allowed_shower(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_shower'): return self.allowed_shower - + self.allowed_shower = [] if 'PY6' in self.available_module: self.allowed_shower.append('Pythia6') @@ -574,9 +574,9 @@ def get_allowed_shower(self): if self.allowed_shower: self.allowed_shower.append('OFF') return self.allowed_shower - + def set_default_shower(self): - + if 'PY6' in self.available_module and\ os.path.exists(pjoin(self.me_dir,'Cards','pythia_card.dat')): self.switch['shower'] = 'Pythia6' @@ -590,10 +590,10 @@ def set_default_shower(self): def check_value_shower(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_shower(): return True - + value =value.lower() if value in ['py6','p6','pythia_6'] and 'PY6' in self.available_module: return 'Pythia6' @@ -601,13 +601,13 @@ def check_value_shower(self, value): return 'Pythia8' else: return False - - -# old mode to activate the shower + + +# old mode to activate the shower def ans_pythia(self, value=None): """None: means that the user type 'pythia' value: means that the user type pythia=value""" - + if 'PY6' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return @@ -621,13 +621,13 @@ def ans_pythia(self, value=None): self.set_switch('shower', 'OFF') else: logger.warning('Invalid command: pythia=%s' % value) - - + + def consistency_shower_detector(self, vshower, vdetector): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower == 'OFF': @@ -635,35 +635,35 @@ def consistency_shower_detector(self, vshower, vdetector): return 'OFF' if vshower == 'Pythia8' and vdetector == 'PGS': return 'OFF' - + return None - + # # HANDLING DETECTOR # def get_allowed_detector(self): """return valid entry for the switch""" - + if hasattr(self, 'allowed_detector'): - return self.allowed_detector - + return self.allowed_detector + self.allowed_detector = [] if 'PGS' in self.available_module: self.allowed_detector.append('PGS') if 'Delphes' in self.available_module: self.allowed_detector.append('Delphes') - + if self.allowed_detector: self.allowed_detector.append('OFF') - return self.allowed_detector + return self.allowed_detector def set_default_detector(self): - + self.set_default_shower() #ensure that this one is called first! - + if 'PGS' in self.available_module and self.switch['shower'] == 'Pythia6'\ and os.path.exists(pjoin(self.me_dir,'Cards','pgs_card.dat')): self.switch['detector'] = 'PGS' @@ -674,16 +674,16 @@ def set_default_detector(self): self.switch['detector'] = 'OFF' else: self.switch['detector'] = 'Not Avail.' - -# old mode to activate pgs + +# old mode to activate pgs def ans_pgs(self, value=None): """None: means that the user type 'pgs' - value: means that the user type pgs=value""" - + value: means that the user type pgs=value""" + if 'PGS' not in self.available_module: logger.info('pythia-pgs not available. 
Ignore commmand') return - + if value is None: self.set_all_off() self.switch['shower'] = 'Pythia6' @@ -696,16 +696,16 @@ def ans_pgs(self, value=None): else: logger.warning('Invalid command: pgs=%s' % value) - + # old mode to activate Delphes def ans_delphes(self, value=None): """None: means that the user type 'delphes' - value: means that the user type delphes=value""" - + value: means that the user type delphes=value""" + if 'Delphes' not in self.available_module: logger.warning('Delphes not available. Ignore commmand') return - + if value is None: self.set_all_off() if 'PY6' in self.available_module: @@ -718,15 +718,15 @@ def ans_delphes(self, value=None): elif value == 'off': self.set_switch('detector', 'OFF') else: - logger.warning('Invalid command: pgs=%s' % value) + logger.warning('Invalid command: pgs=%s' % value) def consistency_detector_shower(self,vdetector, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ - + if vdetector == 'PGS' and vshower != 'Pythia6': return 'Pythia6' if vdetector == 'Delphes' and vshower not in ['Pythia6', 'Pythia8']: @@ -744,28 +744,28 @@ def consistency_detector_shower(self,vdetector, vshower): # def get_allowed_analysis(self): """return valid entry for the shower switch""" - + if hasattr(self, 'allowed_analysis'): return self.allowed_analysis - + self.allowed_analysis = [] if 'ExRoot' in self.available_module: self.allowed_analysis.append('ExRoot') if 'MA4' in self.available_module: self.allowed_analysis.append('MadAnalysis4') if 'MA5' in self.available_module: - self.allowed_analysis.append('MadAnalysis5') + self.allowed_analysis.append('MadAnalysis5') if 'Rivet' in self.available_module: - self.allowed_analysis.append('Rivet') - + self.allowed_analysis.append('Rivet') + if self.allowed_analysis: self.allowed_analysis.append('OFF') - + return 
self.allowed_analysis - + def check_analysis(self, value): """check an entry is valid. return the valid entry in case of shortcut""" - + if value in self.get_allowed_analysis(): return True if value.lower() in ['ma4', 'madanalysis4', 'madanalysis_4','4']: @@ -786,30 +786,30 @@ def consistency_shower_analysis(self, vshower, vanalysis): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'OFF' #new value for analysis - + return None - + def consistency_analysis_shower(self, vanalysis, vshower): """consistency_XX_YY(val_XX, val_YY) -> XX is the new key set by the user to a new value val_XX -> YY is another key - -> return value should be None or "replace_YY" + -> return value should be None or "replace_YY" """ if vshower != 'Pythia8' and vanalysis == 'Rivet': return 'Pythia8' #new value for analysis - + return None def set_default_analysis(self): """initialise the switch for analysis""" - + if 'MA4' in self.available_module and \ os.path.exists(pjoin(self.me_dir,'Cards','plot_card.dat')): self.switch['analysis'] = 'MadAnalysis4' @@ -818,46 +818,46 @@ def set_default_analysis(self): or os.path.exists(pjoin(self.me_dir,'Cards', 'madanalysis5_hadron_card.dat'))): self.switch['analysis'] = 'MadAnalysis5' elif 'ExRoot' in self.available_module: - self.switch['analysis'] = 'ExRoot' - elif self.get_allowed_analysis(): + self.switch['analysis'] = 'ExRoot' + elif self.get_allowed_analysis(): self.switch['analysis'] = 'OFF' else: self.switch['analysis'] = 'Not Avail.' 
- + # # MADSPIN handling # def get_allowed_madspin(self): """ ON|OFF|onshell """ - + if hasattr(self, 'allowed_madspin'): return self.allowed_madspin - + self.allowed_madspin = [] if 'MadSpin' in self.available_module: self.allowed_madspin = ['OFF',"ON",'onshell',"full"] return self.allowed_madspin - + def check_value_madspin(self, value): """handle alias and valid option not present in get_allowed_madspin""" - + if value.upper() in self.get_allowed_madspin(): return True elif value.lower() in self.get_allowed_madspin(): return True - + if 'MadSpin' not in self.available_module: return False - + if value.lower() in ['madspin', 'full']: return 'full' elif value.lower() in ['none']: return 'none' - - + + def set_default_madspin(self): """initialise the switch for madspin""" - + if 'MadSpin' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): self.switch['madspin'] = 'ON' @@ -865,10 +865,10 @@ def set_default_madspin(self): self.switch['madspin'] = 'OFF' else: self.switch['madspin'] = 'Not Avail.' 
- + def get_cardcmd_for_madspin(self, value): """set some command to run before allowing the user to modify the cards.""" - + if value == 'onshell': return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"] elif value in ['full', 'madspin']: @@ -877,36 +877,36 @@ def get_cardcmd_for_madspin(self, value): return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"] else: return [] - + # # ReWeight handling # def get_allowed_reweight(self): """ return the list of valid option for reweight=XXX """ - + if hasattr(self, 'allowed_reweight'): return getattr(self, 'allowed_reweight') - + if 'reweight' not in self.available_module: self.allowed_reweight = [] return self.allowed_reweight = ['OFF', 'ON'] - + # check for plugin mode plugin_path = self.mother_interface.plugin_path opts = misc.from_plugin_import(plugin_path, 'new_reweight', warning=False) self.allowed_reweight += opts - + def set_default_reweight(self): """initialise the switch for reweight""" - + if 'reweight' in self.available_module: if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')): self.switch['reweight'] = 'ON' else: self.switch['reweight'] = 'OFF' else: - self.switch['reweight'] = 'Not Avail.' + self.switch['reweight'] = 'Not Avail.' 
#=============================================================================== # CheckValidForCmd @@ -916,14 +916,14 @@ class CheckValidForCmd(object): def check_banner_run(self, args): """check the validity of line""" - + if len(args) == 0: self.help_banner_run() raise self.InvalidCmd('banner_run requires at least one argument.') - + tag = [a[6:] for a in args if a.startswith('--tag=')] - - + + if os.path.exists(args[0]): type ='banner' format = self.detect_card_type(args[0]) @@ -931,7 +931,7 @@ def check_banner_run(self, args): raise self.InvalidCmd('The file is not a valid banner.') elif tag: args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) + (args[0], tag)) if not os.path.exists(args[0]): raise self.InvalidCmd('No banner associates to this name and tag.') else: @@ -939,7 +939,7 @@ def check_banner_run(self, args): type = 'run' banners = misc.glob('*_banner.txt', pjoin(self.me_dir,'Events', args[0])) if not banners: - raise self.InvalidCmd('No banner associates to this name.') + raise self.InvalidCmd('No banner associates to this name.') elif len(banners) == 1: args[0] = banners[0] else: @@ -947,8 +947,8 @@ def check_banner_run(self, args): tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners] tag = self.ask('which tag do you want to use?', tags[0], tags) args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \ - (args[0], tag)) - + (args[0], tag)) + run_name = [arg[7:] for arg in args if arg.startswith('--name=')] if run_name: try: @@ -970,14 +970,14 @@ def check_banner_run(self, args): except Exception: pass self.set_run_name(name) - + def check_history(self, args): """check the validity of line""" - + if len(args) > 1: self.help_history() raise self.InvalidCmd('\"history\" command takes at most one argument') - + if not len(args): return elif args[0] != 'clean': @@ -985,16 +985,16 @@ def check_history(self, args): if dirpath and not os.path.exists(dirpath) or \ os.path.isdir(args[0]): raise 
self.InvalidCmd("invalid path %s " % dirpath) - + def check_save(self, args): """ check the validity of the line""" - + if len(args) == 0: args.append('options') if args[0] not in self._save_opts: raise self.InvalidCmd('wrong \"save\" format') - + if args[0] != 'options' and len(args) != 2: self.help_save() raise self.InvalidCmd('wrong \"save\" format') @@ -1003,7 +1003,7 @@ def check_save(self, args): if not os.path.exists(basename): raise self.InvalidCmd('%s is not a valid path, please retry' % \ args[1]) - + if args[0] == 'options': has_path = None for arg in args[1:]: @@ -1024,9 +1024,9 @@ def check_save(self, args): has_path = True if not has_path: if '--auto' in arg and self.options['mg5_path']: - args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) + args.insert(1, pjoin(self.options['mg5_path'],'input','mg5_configuration.txt')) else: - args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) + args.insert(1, pjoin(self.me_dir,'Cards','me5_configuration.txt')) def check_set(self, args): """ check the validity of the line""" @@ -1039,20 +1039,20 @@ def check_set(self, args): self.help_set() raise self.InvalidCmd('Possible options for set are %s' % \ self._set_options) - + if args[0] in ['stdout_level']: if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \ and not args[1].isdigit(): raise self.InvalidCmd('output_level needs ' + \ - 'a valid level') - + 'a valid level') + if args[0] in ['timeout']: if not args[1].isdigit(): - raise self.InvalidCmd('timeout values should be a integer') - + raise self.InvalidCmd('timeout values should be a integer') + def check_open(self, args): """ check the validity of the line """ - + if len(args) != 1: self.help_open() raise self.InvalidCmd('OPEN command requires exactly one argument') @@ -1069,7 +1069,7 @@ def check_open(self, args): raise self.InvalidCmd('No MadEvent path defined. 
Unable to associate this name to a file') else: return True - + path = self.me_dir if os.path.isfile(os.path.join(path,args[0])): args[0] = os.path.join(path,args[0]) @@ -1078,7 +1078,7 @@ def check_open(self, args): elif os.path.isfile(os.path.join(path,'HTML',args[0])): args[0] = os.path.join(path,'HTML',args[0]) # special for card with _default define: copy the default and open it - elif '_card.dat' in args[0]: + elif '_card.dat' in args[0]: name = args[0].replace('_card.dat','_card_default.dat') if os.path.isfile(os.path.join(path,'Cards', name)): files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0])) @@ -1086,13 +1086,13 @@ def check_open(self, args): else: raise self.InvalidCmd('No default path for this file') elif not os.path.isfile(args[0]): - raise self.InvalidCmd('No default path for this file') - + raise self.InvalidCmd('No default path for this file') + def check_initMadLoop(self, args): """ check initMadLoop command arguments are valid.""" - + opt = {'refresh': False, 'nPS': None, 'force': False} - + for arg in args: if arg in ['-r','--refresh']: opt['refresh'] = True @@ -1105,14 +1105,14 @@ def check_initMadLoop(self, args): except ValueError: raise InvalidCmd("The number of attempts specified "+ "'%s' is not a valid integer."%n_attempts) - + return opt - + def check_treatcards(self, args): """check that treatcards arguments are valid [param|run|all] [--output_dir=] [--param_card=] [--run_card=] """ - + opt = {'output_dir':pjoin(self.me_dir,'Source'), 'param_card':pjoin(self.me_dir,'Cards','param_card.dat'), 'run_card':pjoin(self.me_dir,'Cards','run_card.dat'), @@ -1129,14 +1129,14 @@ def check_treatcards(self, args): if os.path.isfile(value): card_name = self.detect_card_type(value) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' % (card_name, key)) opt[key] = value elif 
os.path.isfile(pjoin(self.me_dir,value)): card_name = self.detect_card_type(pjoin(self.me_dir,value)) if card_name != key: - raise self.InvalidCmd('Format for input file detected as %s while expecting %s' - % (card_name, key)) + raise self.InvalidCmd('Format for input file detected as %s while expecting %s' + % (card_name, key)) opt[key] = value else: raise self.InvalidCmd('No such file: %s ' % value) @@ -1154,14 +1154,14 @@ def check_treatcards(self, args): else: self.help_treatcards() raise self.InvalidCmd('Unvalid argument %s' % arg) - - return mode, opt - - + + return mode, opt + + def check_survey(self, args, cmd='survey'): """check that the argument for survey are valid""" - - + + self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) @@ -1183,41 +1183,41 @@ def check_survey(self, args, cmd='survey'): self.help_survey() raise self.InvalidCmd('Too many argument for %s command' % cmd) elif not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], None,'parton', True) args.pop(0) - + return True def check_generate_events(self, args): """check that the argument for generate_events are valid""" - + run = None if args and args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['auto','parton', 'pythia', 'pgs', 'delphes']: self.help_generate_events() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. 
Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + #if len(args) > 1: # self.help_generate_events() # raise self.InvalidCmd('Too many argument for generate_events command: %s' % cmd) - + return run def check_calculate_decay_widths(self, args): """check that the argument for calculate_decay_widths are valid""" - + if self.ninitial != 1: raise self.InvalidCmd('Can only calculate decay widths for decay processes A > B C ...') @@ -1232,7 +1232,7 @@ def check_calculate_decay_widths(self, args): if len(args) > 1: self.help_calculate_decay_widths() raise self.InvalidCmd('Too many argument for calculate_decay_widths command: %s' % cmd) - + return accuracy @@ -1241,25 +1241,25 @@ def check_multi_run(self, args): """check that the argument for survey are valid""" run = None - + if not len(args): self.help_multi_run() raise self.InvalidCmd("""multi_run command requires at least one argument for the number of times that it call generate_events command""") - + if args[-1].startswith('--laststep='): run = args[-1].split('=')[-1] if run not in ['parton', 'pythia', 'pgs', 'delphes']: self.help_multi_run() raise self.InvalidCmd('invalid %s argument'% args[-1]) - if run != 'parton' and not self.options['pythia-pgs_path']: - raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. + if run != 'parton' and not self.options['pythia-pgs_path']: + raise self.InvalidCmd('''pythia-pgs not install. Please install this package first. To do so type: \'install pythia-pgs\' in the mg5 interface''') if run == 'delphes' and not self.options['delphes_path']: - raise self.InvalidCmd('''delphes not install. Please install this package first. + raise self.InvalidCmd('''delphes not install. Please install this package first. 
To do so type: \'install Delphes\' in the mg5 interface''') del args[-1] - + elif not args[0].isdigit(): self.help_multi_run() @@ -1267,7 +1267,7 @@ def check_multi_run(self, args): #pass nb run to an integer nb_run = args.pop(0) args.insert(0, int(nb_run)) - + return run @@ -1284,7 +1284,7 @@ def check_refine(self, args): self.help_refine() raise self.InvalidCmd('require_precision argument is require for refine cmd') - + if not self.run_name: if self.results.lastrun: self.set_run_name(self.results.lastrun) @@ -1296,17 +1296,17 @@ def check_refine(self, args): else: try: [float(arg) for arg in args] - except ValueError: - self.help_refine() + except ValueError: + self.help_refine() raise self.InvalidCmd('refine arguments are suppose to be number') - + return True - + def check_combine_events(self, arg): """ Check the argument for the combine events command """ - + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] elif not self.run_tag: @@ -1314,53 +1314,53 @@ def check_combine_events(self, arg): else: tag = self.run_tag self.run_tag = tag - + if len(arg) > 1: self.help_combine_events() raise self.InvalidCmd('Too many argument for combine_events command') - + if len(arg) == 1: self.set_run_name(arg[0], self.run_tag, 'parton', True) - + if not self.run_name: if not self.results.lastrun: raise self.InvalidCmd('No run_name currently define. 
Unable to run combine') else: self.set_run_name(self.results.lastrun) - + return True - + def check_pythia(self, args): """Check the argument for pythia command - syntax: pythia [NAME] + syntax: pythia [NAME] Note that other option are already removed at this point """ - + mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia', 'pgs', 'delphes']: self.help_pythia() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') - + if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' raise self.InvalidCmd(error_msg) - - - + + + tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1368,8 +1368,8 @@ def check_pythia(self, args): if self.results.lastrun: args.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): @@ -1388,21 +1388,21 @@ def check_pythia(self, args): files.ln(input_file, os.path.dirname(output_file)) else: misc.gunzip(input_file, keep=True, stdout=output_file) - + args.append(mode) - + def check_pythia8(self, args): """Check the argument for pythia command - syntax: pythia8 [NAME] + syntax: pythia8 [NAME] Note that other option are already removed at this point - """ + """ mode = None laststep = [arg for arg in args if arg.startswith('--laststep=')] if laststep and len(laststep)==1: mode = laststep[0].split('=')[-1] if mode not in ['auto', 'pythia','pythia8','delphes']: self.help_pythia8() - raise self.InvalidCmd('invalid %s argument'% args[-1]) + raise self.InvalidCmd('invalid %s argument'% args[-1]) elif laststep: raise self.InvalidCmd('only one laststep argument is allowed') @@ -1410,7 +1410,7 @@ def check_pythia8(self, args): if not self.options['pythia8_path']: logger.info('Retry reading configuration file to find pythia8 path') self.set_configuration() - + if not self.options['pythia8_path'] or not \ os.path.exists(pjoin(self.options['pythia8_path'],'bin','pythia8-config')): error_msg = 'No valid pythia8 path set.\n' @@ -1421,7 +1421,7 @@ def check_pythia8(self, args): raise self.InvalidCmd(error_msg) tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] @@ -1430,11 +1430,11 @@ def check_pythia8(self, args): args.insert(0, self.results.lastrun) else: raise self.InvalidCmd('No run name currently define. '+ - 'Please add this information.') - + 'Please add this information.') + if len(args) >= 1: if args[0] != self.run_name and\ - not os.path.exists(pjoin(self.me_dir,'Events',args[0], + not os.path.exists(pjoin(self.me_dir,'Events',args[0], 'unweighted_events.lhe.gz')): raise self.InvalidCmd('No events file corresponding to %s run. 
' % args[0]) @@ -1451,9 +1451,9 @@ def check_pythia8(self, args): else: raise self.InvalidCmd('No event file corresponding to %s run. ' % self.run_name) - + args.append(mode) - + def check_remove(self, args): """Check that the remove command is valid""" @@ -1484,33 +1484,33 @@ def check_plot(self, args): madir = self.options['madanalysis_path'] td = self.options['td_path'] - + if not madir or not td: logger.info('Retry to read configuration file to find madanalysis/td') self.set_configuration() madir = self.options['madanalysis_path'] - td = self.options['td_path'] - + td = self.options['td_path'] + if not madir: error_msg = 'No valid MadAnalysis path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) + raise self.InvalidCmd(error_msg) if not td: error_msg = 'No valid td path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') + raise self.InvalidCmd('No run name currently define. Please add this information.') args.append('all') return - + if args[0] not in self._plot_mode: self.set_run_name(args[0], level='plot') del args[0] @@ -1518,45 +1518,45 @@ def check_plot(self, args): args.append('all') elif not self.run_name: self.help_plot() - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + for arg in args: if arg not in self._plot_mode and arg != self.run_name: self.help_plot() - raise self.InvalidCmd('unknown options %s' % arg) - + raise self.InvalidCmd('unknown options %s' % arg) + def check_syscalc(self, args): """Check the argument for the syscalc command syscalc run_name modes""" scdir = self.options['syscalc_path'] - + if not scdir: logger.info('Retry to read configuration file to find SysCalc') self.set_configuration() scdir = self.options['syscalc_path'] - + if not scdir: error_msg = 'No valid SysCalc path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' error_msg += 'Please note that you need to compile SysCalc first.' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + if len(args) == 0: if not hasattr(self, 'run_name') or not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. Please add this information.') args.append('all') return #deal options tag = [a for a in args if a.startswith('--tag=')] - if tag: + if tag: args.remove(tag[0]) tag = tag[0][6:] - + if args[0] not in self._syscalc_mode: self.set_run_name(args[0], tag=tag, level='syscalc') del args[0] @@ -1564,61 +1564,61 @@ def check_syscalc(self, args): args.append('all') elif not self.run_name: self.help_syscalc() - raise self.InvalidCmd('No run name currently defined. Please add this information.') + raise self.InvalidCmd('No run name currently defined. 
Please add this information.') elif tag and tag != self.run_tag: self.set_run_name(self.run_name, tag=tag, level='syscalc') - + for arg in args: if arg not in self._syscalc_mode and arg != self.run_name: self.help_syscalc() - raise self.InvalidCmd('unknown options %s' % arg) + raise self.InvalidCmd('unknown options %s' % arg) if self.run_card['use_syst'] not in self.true: raise self.InvalidCmd('Run %s does not include ' % self.run_name + \ 'systematics information needed for syscalc.') - - + + def check_pgs(self, arg, no_default=False): """Check the argument for pythia command - syntax is "pgs [NAME]" + syntax is "pgs [NAME]" Note that other option are already remove at this point """ - + # If not pythia-pgs path if not self.options['pythia-pgs_path']: logger.info('Retry to read configuration file to find pythia-pgs path') self.set_configuration() - + if not self.options['pythia-pgs_path'] or not \ os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')): error_msg = 'No valid pythia-pgs path set.\n' error_msg += 'Please use the set command to define the path and retry.\n' error_msg += 'You can also define it in the configuration file.\n' - raise self.InvalidCmd(error_msg) - + raise self.InvalidCmd(error_msg) + tag = [a for a in arg if a.startswith('--tag=')] - if tag: + if tag: arg.remove(tag[0]) tag = tag[0][6:] - - + + if len(arg) == 0 and not self.run_name: if self.results.lastrun: arg.insert(0, self.results.lastrun) else: - raise self.InvalidCmd('No run name currently define. Please add this information.') - + raise self.InvalidCmd('No run name currently define. 
Please add this information.') + if len(arg) == 1 and self.run_name == arg[0]: arg.pop(0) - + if not len(arg) and \ not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): if not no_default: self.help_pgs() raise self.InvalidCmd('''No file file pythia_events.hep currently available Please specify a valid run_name''') - - lock = None + + lock = None if len(arg) == 1: prev_tag = self.set_run_name(arg[0], tag, 'pgs') if not os.path.exists(pjoin(self.me_dir,'Events',self.run_name,'%s_pythia_events.hep.gz' % prev_tag)): @@ -1626,25 +1626,25 @@ def check_pgs(self, arg, no_default=False): else: input_file = pjoin(self.me_dir,'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag) output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep') - lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), + lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), argument=['-c', input_file]) else: - if tag: + if tag: self.run_card['run_tag'] = tag self.set_run_name(self.run_name, tag, 'pgs') - - return lock + + return lock def check_display(self, args): """check the validity of line syntax is "display XXXXX" """ - + if len(args) < 1 or args[0] not in self._display_opts: self.help_display() raise self.InvalidCmd - + if args[0] == 'variable' and len(args) !=2: raise self.InvalidCmd('variable need a variable name') @@ -1654,39 +1654,39 @@ def check_display(self, args): def check_import(self, args): """check the validity of line""" - + if not args: self.help_import() raise self.InvalidCmd('wrong \"import\" format') - + if args[0] != 'command': args.insert(0,'command') - - + + if not len(args) == 2 or not os.path.exists(args[1]): raise self.InvalidCmd('PATH is mandatory for import command\n') - + #=============================================================================== # CompleteForCmd #=============================================================================== class CompleteForCmd(CheckValidForCmd): """ The Series of help 
routine for the MadGraphCmd""" - - + + def complete_banner_run(self, text, line, begidx, endidx, formatting=True): "Complete the banner run command" try: - - + + args = self.split_arg(line[0:begidx], error=False) - + if args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args \ - if a.endswith(os.path.sep)])) - - + if a.endswith(os.path.sep)])) + + if len(args) > 1: # only options are possible tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events' , args[1])) @@ -1697,9 +1697,9 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): else: return self.list_completion(text, tags) return self.list_completion(text, tags +['--name=','-f'], line) - + # First argument - possibilites = {} + possibilites = {} comp = self.path_completion(text, os.path.join('.',*[a for a in args \ if a.endswith(os.path.sep)])) @@ -1711,10 +1711,10 @@ def complete_banner_run(self, text, line, begidx, endidx, formatting=True): run_list = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) run_list = [n.rsplit('/',2)[1] for n in run_list] possibilites['RUN Name'] = self.list_completion(text, run_list) - + return self.deal_multiple_categories(possibilites, formatting) - - + + except Exception as error: print(error) @@ -1732,12 +1732,12 @@ def complete_history(self, text, line, begidx, endidx): if len(args) == 1: return self.path_completion(text) - - def complete_open(self, text, line, begidx, endidx): + + def complete_open(self, text, line, begidx, endidx): """ complete the open command """ args = self.split_arg(line[0:begidx]) - + # Directory continuation if os.path.sep in args[-1] + text: return self.path_completion(text, @@ -1751,10 +1751,10 @@ def complete_open(self, text, line, begidx, endidx): if os.path.isfile(os.path.join(path,'README')): possibility.append('README') if os.path.isdir(os.path.join(path,'Cards')): - possibility += [f for f in os.listdir(os.path.join(path,'Cards')) + 
possibility += [f for f in os.listdir(os.path.join(path,'Cards')) if f.endswith('.dat')] if os.path.isdir(os.path.join(path,'HTML')): - possibility += [f for f in os.listdir(os.path.join(path,'HTML')) + possibility += [f for f in os.listdir(os.path.join(path,'HTML')) if f.endswith('.html') and 'default' not in f] else: possibility.extend(['./','../']) @@ -1763,7 +1763,7 @@ def complete_open(self, text, line, begidx, endidx): if os.path.exists('MG5_debug'): possibility.append('MG5_debug') return self.list_completion(text, possibility) - + def complete_set(self, text, line, begidx, endidx): "Complete the set command" @@ -1784,27 +1784,27 @@ def complete_set(self, text, line, begidx, endidx): elif len(args) >2 and args[-1].endswith(os.path.sep): return self.path_completion(text, os.path.join('.',*[a for a in args if a.endswith(os.path.sep)]), - only_dirs = True) - + only_dirs = True) + def complete_survey(self, text, line, begidx, endidx): """ Complete the survey command """ - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + return self.list_completion(text, self._run_options, line) - + complete_refine = complete_survey complete_combine_events = complete_survey complite_store = complete_survey complete_generate_events = complete_survey complete_create_gridpack = complete_survey - + def complete_generate_events(self, text, line, begidx, endidx): """ Complete the generate events""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() @@ -1813,17 +1813,17 @@ def complete_generate_events(self, text, line, begidx, endidx): return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) def 
complete_initMadLoop(self, text, line, begidx, endidx): "Complete the initMadLoop command" - + numbers = [str(i) for i in range(10)] opts = ['-f','-r','--nPS='] - + args = self.split_arg(line[0:begidx], error=False) if len(line) >=6 and line[begidx-6:begidx]=='--nPS=': return self.list_completion(text, numbers, line) @@ -1840,18 +1840,18 @@ def complete_launch(self, *args, **opts): def complete_calculate_decay_widths(self, text, line, begidx, endidx): """ Complete the calculate_decay_widths command""" - + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] - + opts = self._run_options + self._calculate_decay_options return self.list_completion(text, opts, line) - + def complete_display(self, text, line, begidx, endidx): - """ Complete the display command""" - + """ Complete the display command""" + args = self.split_arg(line[0:begidx], error=False) if len(args) >= 2 and args[1] =='results': start = line.find('results') @@ -1860,44 +1860,44 @@ def complete_display(self, text, line, begidx, endidx): def complete_multi_run(self, text, line, begidx, endidx): """complete multi run command""" - + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: data = [str(i) for i in range(0,20)] return self.list_completion(text, data, line) - + if line.endswith('run=') and not text: return ['parton','pythia','pgs','delphes'] elif '--laststep=' in line.split()[-1] and line and line[-1] != ' ': return self.list_completion(text,['parton','pythia','pgs','delphes'],line) - + opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - - - + + + if line.endswith('nb_core=') and not text: import multiprocessing max = multiprocessing.cpu_count() return [str(i) for i in range(2,max+1)] opts = self._run_options + self._generate_options return self.list_completion(text, opts, line) - + def complete_plot(self, text, line, begidx, endidx): """ Complete 
the plot command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1: return self.list_completion(text, self._plot_mode) else: return self.list_completion(text, self._plot_mode + list(self.results.keys())) - + def complete_syscalc(self, text, line, begidx, endidx, formatting=True): """ Complete the syscalc command """ - + output = {} args = self.split_arg(line[0:begidx], error=False) - + if len(args) <=1: output['RUN_NAME'] = self.list_completion(list(self.results.keys())) output['MODE'] = self.list_completion(text, self._syscalc_mode) @@ -1907,12 +1907,12 @@ def complete_syscalc(self, text, line, begidx, endidx, formatting=True): if run in self.results: tags = ['--tag=%s' % tag['tag'] for tag in self.results[run]] output['options'] += tags - + return self.deal_multiple_categories(output, formatting) - + def complete_remove(self, text, line, begidx, endidx): """Complete the remove command """ - + args = self.split_arg(line[0:begidx], error=False) if len(args) > 1 and (text.startswith('--t')): run = args[1] @@ -1932,8 +1932,8 @@ def complete_remove(self, text, line, begidx, endidx): data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1] for n in data] return self.list_completion(text, ['all'] + data) - - + + def complete_shower(self,text, line, begidx, endidx): "Complete the shower command" args = self.split_arg(line[0:begidx], error=False) @@ -1941,7 +1941,7 @@ def complete_shower(self,text, line, begidx, endidx): return self.list_completion(text, self._interfaced_showers) elif len(args)>1 and args[1] in self._interfaced_showers: return getattr(self, 'complete_%s' % text)\ - (text, args[1],line.replace(args[0]+' ',''), + (text, args[1],line.replace(args[0]+' ',''), begidx-len(args[0])-1, endidx-len(args[0])-1) def complete_pythia8(self,text, line, begidx, endidx): @@ -1955,11 +1955,11 @@ def complete_pythia8(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = 
self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_madanalysis5_parton(self,text, line, begidx, endidx): @@ -1978,19 +1978,19 @@ def complete_madanalysis5_parton(self,text, line, begidx, endidx): else: tmp2 = self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) - return tmp1 + tmp2 + return tmp1 + tmp2 elif '--MA5_stdout_lvl=' in line and not any(arg.startswith( '--MA5_stdout_lvl=') for arg in args): - return self.list_completion(text, - ['--MA5_stdout_lvl=%s'%opt for opt in + return self.list_completion(text, + ['--MA5_stdout_lvl=%s'%opt for opt in ['logging.INFO','logging.DEBUG','logging.WARNING', 'logging.CRITICAL','90']], line) else: - return self.list_completion(text, ['-f', + return self.list_completion(text, ['-f', '--MA5_stdout_lvl=','--no_default','--tag='], line) def complete_pythia(self,text, line, begidx, endidx): - "Complete the pythia command" + "Complete the pythia command" args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: @@ -2001,16 +2001,16 @@ def complete_pythia(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--no_default', '--tag='], line) return tmp1 + tmp2 elif line[-1] != '=': - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--no_default','--tag='], line) def complete_pgs(self,text, line, begidx, endidx): "Complete the pythia command" - args = self.split_arg(line[0:begidx], error=False) + args = self.split_arg(line[0:begidx], error=False) if len(args) == 1: 
#return valid run_name data = misc.glob(pjoin('*', '*_pythia_events.hep.gz'), pjoin(self.me_dir, 'Events')) @@ -2019,23 +2019,23 @@ def complete_pgs(self,text, line, begidx, endidx): if not self.run_name: return tmp1 else: - tmp2 = self.list_completion(text, self._run_options + ['-f', + tmp2 = self.list_completion(text, self._run_options + ['-f', '--tag=' ,'--no_default'], line) - return tmp1 + tmp2 + return tmp1 + tmp2 else: - return self.list_completion(text, self._run_options + ['-f', + return self.list_completion(text, self._run_options + ['-f', '--tag=','--no_default'], line) - complete_delphes = complete_pgs - complete_rivet = complete_pgs + complete_delphes = complete_pgs + complete_rivet = complete_pgs #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCmd): - """The command line processor of Mad Graph""" - + """The command line processor of Mad Graph""" + LO = True # Truth values @@ -2063,7 +2063,7 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm cluster_mode = 0 queue = 'madgraph' nb_core = None - + next_possibility = { 'start': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]', 'calculate_decay_widths [OPTIONS]', @@ -2080,9 +2080,9 @@ class MadEventCmd(CompleteForCmd, CmdExtended, HelpToCmd, common_run.CommonRunCm 'pgs': ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'], 'delphes' : ['generate_events [OPTIONS]', 'multi_run [OPTIONS]'] } - + asking_for_run = AskRun - + ############################################################################ def __init__(self, me_dir = None, options={}, *completekey, **stdin): """ add information to the cmd """ @@ -2095,16 +2095,16 @@ def __init__(self, me_dir = None, options={}, *completekey, **stdin): if self.web: os.system('touch %s' % pjoin(self.me_dir,'Online')) - 
self.load_results_db() + self.load_results_db() self.results.def_web_mode(self.web) self.Gdirs = None - + self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) self.configured = 0 # time for reading the card self._options = {} # for compatibility with extended_cmd - - + + def pass_in_web_mode(self): """configure web data""" self.web = True @@ -2113,22 +2113,22 @@ def pass_in_web_mode(self): if os.environ['MADGRAPH_BASE']: self.options['mg5_path'] = pjoin(os.environ['MADGRAPH_BASE'],'MG5') - ############################################################################ + ############################################################################ def check_output_type(self, path): """ Check that the output path is a valid madevent directory """ - + bin_path = os.path.join(path,'bin') if os.path.isfile(os.path.join(bin_path,'generate_events')): return True - else: + else: return False ############################################################################ def set_configuration(self, amcatnlo=False, final=True, **opt): - """assign all configuration variable from file + """assign all configuration variable from file loop over the different config file if config_file not define """ - - super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, + + super(MadEventCmd,self).set_configuration(amcatnlo=amcatnlo, final=final, **opt) if not final: @@ -2171,24 +2171,24 @@ def set_configuration(self, amcatnlo=False, final=True, **opt): if not os.path.exists(pjoin(path, 'sys_calc')): logger.info("No valid SysCalc path found") continue - # No else since the next line reinitialize the option to the + # No else since the next line reinitialize the option to the #previous value anyway self.options[key] = os.path.realpath(path) continue else: self.options[key] = None - - + + return self.options ############################################################################ - def do_banner_run(self, line): + def do_banner_run(self, line): """Make a run from the banner file""" - + 
args = self.split_arg(line) #check the validity of the arguments - self.check_banner_run(args) - + self.check_banner_run(args) + # Remove previous cards for name in ['delphes_trigger.dat', 'delphes_card.dat', 'pgs_card.dat', 'pythia_card.dat', 'madspin_card.dat', @@ -2197,20 +2197,20 @@ def do_banner_run(self, line): os.remove(pjoin(self.me_dir, 'Cards', name)) except Exception: pass - + banner_mod.split_banner(args[0], self.me_dir, proc_card=False) - + # Check if we want to modify the run if not self.force: ans = self.ask('Do you want to modify the Cards?', 'n', ['y','n']) if ans == 'n': self.force = True - + # Call Generate events self.exec_cmd('generate_events %s %s' % (self.run_name, self.force and '-f' or '')) - - - + + + ############################################################################ def do_display(self, line, output=sys.stdout): """Display current internal status""" @@ -2223,7 +2223,7 @@ def do_display(self, line, output=sys.stdout): #return valid run_name data = misc.glob(pjoin('*','*_banner.txt'), pjoin(self.me_dir, 'Events')) data = [n.rsplit('/',2)[1:] for n in data] - + if data: out = {} for name, tag in data: @@ -2235,11 +2235,11 @@ def do_display(self, line, output=sys.stdout): print('the runs available are:') for run_name, tags in out.items(): print(' run: %s' % run_name) - print(' tags: ', end=' ') + print(' tags: ', end=' ') print(', '.join(tags)) else: print('No run detected.') - + elif args[0] == 'options': outstr = " Run Options \n" outstr += " ----------- \n" @@ -2260,8 +2260,8 @@ def do_display(self, line, output=sys.stdout): if value == default: outstr += " %25s \t:\t%s\n" % (key,value) else: - outstr += " %25s \t:\t%s (user set)\n" % (key,value) - outstr += "\n" + outstr += " %25s \t:\t%s (user set)\n" % (key,value) + outstr += "\n" outstr += " Configuration Options \n" outstr += " --------------------- \n" for key, default in self.options_configuration.items(): @@ -2275,15 +2275,15 @@ def do_display(self, line, 
output=sys.stdout): self.do_print_results(' '.join(args[1:])) else: super(MadEventCmd, self).do_display(line, output) - + def do_save(self, line, check=True, to_keep={}): - """Not in help: Save information to file""" + """Not in help: Save information to file""" args = self.split_arg(line) # Check argument validity if check: self.check_save(args) - + if args[0] == 'options': # First look at options which should be put in MG5DIR/input to_define = {} @@ -2295,7 +2295,7 @@ def do_save(self, line, check=True, to_keep={}): for key, default in self.options_madevent.items(): if self.options[key] != self.options_madevent[key]: to_define[key] = self.options[key] - + if '--all' in args: for key, default in self.options_madgraph.items(): if self.options[key] != self.options_madgraph[key]: @@ -2312,12 +2312,12 @@ def do_save(self, line, check=True, to_keep={}): filepath = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basefile = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt') basedir = self.me_dir - + if to_keep: to_define = to_keep self.write_configuration(filepath, basefile, basedir, to_define) - - + + def do_edit_cards(self, line): @@ -2326,80 +2326,80 @@ def do_edit_cards(self, line): # Check argument's validity mode = self.check_generate_events(args) self.ask_run_configuration(mode) - + return ############################################################################ - + ############################################################################ def do_restart_gridpack(self, line): """ syntax restart_gridpack --precision=1.0 --restart_zero collect the result of the current run and relaunch each channel - not completed or optionally a completed one with a precision worse than + not completed or optionally a completed one with a precision worse than a threshold (and/or the zero result channel)""" - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) - + # initialize / remove lhapdf mode #self.run_card = 
banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) #self.configure_directory() - + gensym = gen_ximprove.gensym(self) - + min_precision = 1.0 resubmit_zero=False if '--precision=' in line: s = line.index('--precision=') + len('--precision=') arg=line[s:].split(1)[0] min_precision = float(arg) - + if '--restart_zero' in line: resubmit_zero = True - - + + gensym.resubmit(min_precision, resubmit_zero) self.monitor(run_type='All jobs submitted for gridpack', html=True) #will be done during the refine (more precisely in gen_ximprove) cross, error = sum_html.make_all_html_results(self) self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(gensym.run_statistics)) - + #self.exec_cmd('combine_events', postcmd=False) #self.exec_cmd('store_events', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) self.exec_cmd('create_gridpack', postcmd=False) - - - ############################################################################ + + + ############################################################################ ############################################################################ def do_generate_events(self, line): """Main Commands: launch the full chain """ - + self.banner = None self.Gdirs = None - + args = self.split_arg(line) # Check argument's validity mode = self.check_generate_events(args) switch_mode = self.ask_run_configuration(mode, args) if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir), None, 'parton') else: self.set_run_name(args[0], None, 'parton', True) args.pop(0) - + 
self.run_generate_events(switch_mode, args) self.postprocessing() @@ -2420,8 +2420,8 @@ def postprocessing(self): def rivet_postprocessing(self, rivet_config, postprocess_RIVET, postprocess_CONTUR): - # Check number of Rivet jobs to run - run_dirs = [pjoin(self.me_dir, 'Events',run_name) + # Check number of Rivet jobs to run + run_dirs = [pjoin(self.me_dir, 'Events',run_name) for run_name in self.postprocessing_dirs] nb_rivet = len(run_dirs) @@ -2550,10 +2550,10 @@ def wait_monitoring(Idle, Running, Done): wrapper = open(pjoin(self.me_dir, "Analysis", "contur", "run_contur.sh"), "w") wrapper.write(set_env) - + wrapper.write('{0}\n'.format(contur_cmd)) wrapper.close() - + misc.call(["run_contur.sh"], cwd=(pjoin(self.me_dir, "Analysis", "contur"))) logger.info("Contur outputs are stored in {0}".format(pjoin(self.me_dir, "Analysis", "contur","conturPlot"))) @@ -2572,7 +2572,7 @@ def run_generate_events(self, switch_mode, args): self.do_set('run_mode 2') self.do_set('nb_core 1') - if self.run_card['gridpack'] in self.true: + if self.run_card['gridpack'] in self.true: # Running gridpack warmup gridpack_opts=[('accuracy', 0.01), ('points', 2000), @@ -2593,7 +2593,7 @@ def run_generate_events(self, switch_mode, args): # Regular run mode logger.info('Generating %s events with run name %s' % (self.run_card['nevents'], self.run_name)) - + self.exec_cmd('survey %s %s' % (self.run_name,' '.join(args)), postcmd=False) nb_event = self.run_card['nevents'] @@ -2601,7 +2601,7 @@ def run_generate_events(self, switch_mode, args): self.exec_cmd('refine %s' % nb_event, postcmd=False) if not float(self.results.current['cross']): # Zero cross-section. Try to guess why - text = '''Survey return zero cross section. + text = '''Survey return zero cross section. Typical reasons are the following: 1) A massive s-channel particle has a width set to zero. 
2) The pdf are zero for at least one of the initial state particles @@ -2613,17 +2613,17 @@ def run_generate_events(self, switch_mode, args): raise ZeroResult('See https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/FAQ-General-14') else: bypass_run = True - + #we can bypass the following if scan and first result is zero if not bypass_run: self.exec_cmd('refine %s --treshold=%s' % (nb_event,self.run_card['second_refine_treshold']) , postcmd=False) - + self.exec_cmd('combine_events', postcmd=False,printcmd=False) self.print_results_in_shell(self.results.current) if self.run_card['use_syst']: - if self.run_card['systematics_program'] == 'auto': + if self.run_card['systematics_program'] == 'auto': scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): to_use = 'systematics' @@ -2634,26 +2634,26 @@ def run_generate_events(self, switch_mode, args): else: logger.critical('Unvalid options for systematics_program: bypass computation of systematics variations.') to_use = 'none' - + if to_use == 'systematics': if self.run_card['systematics_arguments'] != ['']: self.exec_cmd('systematics %s %s ' % (self.run_name, - ' '.join(self.run_card['systematics_arguments'])), + ' '.join(self.run_card['systematics_arguments'])), postcmd=False, printcmd=False) else: self.exec_cmd('systematics %s --from_card' % self.run_name, - postcmd=False,printcmd=False) + postcmd=False,printcmd=False) elif to_use == 'syscalc': self.run_syscalc('parton') - - - self.create_plot('parton') - self.exec_cmd('store_events', postcmd=False) + + + self.create_plot('parton') + self.exec_cmd('store_events', postcmd=False) if self.run_card['boost_event'].strip() and self.run_card['boost_event'] != 'False': self.boost_events() - - - self.exec_cmd('reweight -from_cards', postcmd=False) + + + self.exec_cmd('reweight -from_cards', postcmd=False) self.exec_cmd('decay_events -from_cards', postcmd=False) if self.run_card['time_of_flight']>=0: self.exec_cmd("add_time_of_flight --threshold=%s" % 
self.run_card['time_of_flight'] ,postcmd=False) @@ -2664,43 +2664,43 @@ def run_generate_events(self, switch_mode, args): self.create_root_file(input , output) self.exec_cmd('madanalysis5_parton --no_default', postcmd=False, printcmd=False) - # shower launches pgs/delphes if needed + # shower launches pgs/delphes if needed self.exec_cmd('shower --no_default', postcmd=False, printcmd=False) self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) self.exec_cmd('rivet --no_default', postcmd=False, printcmd=False) self.store_result() - - if self.allow_notification_center: - misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), - '%s: %s +- %s ' % (self.results.current['run_name'], + + if self.allow_notification_center: + misc.system_notify('Run %s finished' % os.path.basename(self.me_dir), + '%s: %s +- %s ' % (self.results.current['run_name'], self.results.current['cross'], self.results.current['error'])) - + def boost_events(self): - + if not self.run_card['boost_event']: return - + if self.run_card['boost_event'].startswith('lambda'): if not isinstance(self, cmd.CmdShell): raise Exception("boost not allowed online") filter = eval(self.run_card['boost_event']) else: raise Exception - + path = [pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'), pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')] - + for p in path: if os.path.exists(p): event_path = p break else: raise Exception("fail to find event file for the boost") - - + + lhe = lhe_parser.EventFile(event_path) with misc.TMP_directory() as tmp_dir: output = lhe_parser.EventFile(pjoin(tmp_dir, os.path.basename(event_path)), 'w') @@ -2711,28 +2711,28 @@ def boost_events(self): event.boost(filter) #write this modify event output.write(str(event)) - output.write('\n') + output.write('\n') lhe.close() - 
files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) - - - - - + files.mv(pjoin(tmp_dir, os.path.basename(event_path)), event_path) + + + + + def do_initMadLoop(self,line): - """Compile and run MadLoop for a certain number of PS point so as to + """Compile and run MadLoop for a certain number of PS point so as to initialize MadLoop (setup the zero helicity and loop filter.)""" - + args = line.split() # Check argument's validity options = self.check_initMadLoop(args) - + if not options['force']: self.ask_edit_cards(['MadLoopParams.dat'], mode='fixed', plot=False) self.exec_cmd('treatcards loop --no_MadLoopInit') if options['refresh']: - for filter in misc.glob('*Filter*', + for filter in misc.glob('*Filter*', pjoin(self.me_dir,'SubProcesses','MadLoop5_resources')): logger.debug("Resetting filter '%s'."%os.path.basename(filter)) os.remove(filter) @@ -2753,14 +2753,14 @@ def do_initMadLoop(self,line): def do_launch(self, line, *args, **opt): """Main Commands: exec generate_events for 2>N and calculate_width for 1>N""" - + if self.ninitial == 1: logger.info("Note that since 2.3. 
The launch for 1>N pass in event generation\n"+ " To have the previous behavior use the calculate_decay_widths function") # self.do_calculate_decay_widths(line, *args, **opt) #else: self.do_generate_events(line, *args, **opt) - + def print_results_in_shell(self, data): """Have a nice results prints in the shell, data should be of type: gen_crossxhtml.OneTagResults""" @@ -2770,7 +2770,7 @@ def print_results_in_shell(self, data): if data['run_statistics']: globalstat = sum_html.RunStatistics() - + logger.info(" " ) logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2786,13 +2786,13 @@ def print_results_in_shell(self, data): logger.warning(globalstat.get_warning_text()) logger.info(" ") - + logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) - + total_time = int(sum(_['cumulative_timing'] for _ in data['run_statistics'].values())) if total_time > 0: logger.info(" Cumulative sequential time for this run: %s"%misc.format_time(total_time)) - + if self.ninitial == 1: logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) else: @@ -2810,18 +2810,18 @@ def print_results_in_shell(self, data): if len(split)!=3: continue scale, cross, error = split - cross_sections[float(scale)] = (float(cross), float(error)) + cross_sections[float(scale)] = (float(cross), float(error)) if len(cross_sections)>0: logger.info(' Pythia8 merged cross-sections are:') for scale in sorted(cross_sections.keys()): logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ (scale,cross_sections[scale][0],cross_sections[scale][1])) - + else: if self.ninitial == 1: logger.info(" Matched width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) else: - logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) + logger.info(" Matched cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) logger.info(" 
Nb of events after matching/merging : %d" % int(data['nb_event_pythia'])) if self.run_card['use_syst'] in self.true and \ (int(self.run_card['ickkw'])==1 or self.run_card['ktdurham']>0.0 @@ -2838,9 +2838,9 @@ def print_results_in_file(self, data, path, mode='w', format='full'): data should be of type: gen_crossxhtml.OneTagResults""" if not data: return - + fsock = open(path, mode) - + if data['run_statistics']: logger.debug(" === Run statistics summary ===") for key, value in data['run_statistics'].items(): @@ -2851,7 +2851,7 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if format == "full": fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ (data['run_name'],data['tag'], os.path.basename(self.me_dir))) - + if self.ninitial == 1: fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) else: @@ -2861,20 +2861,20 @@ def print_results_in_file(self, data, path, mode='w', format='full'): if self.ninitial == 1: fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) else: - fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) + fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) fsock.write(" \n" ) elif format == "short": if mode == "w": fsock.write("# run_name tag cross error Nb_event cross_after_matching nb_event_after matching\n") - + if data['cross_pythia'] and data['nb_event_pythia']: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s %(cross_pythia)s %(nb_event_pythia)s\n" else: text = "%(run_name)s %(tag)s %(cross)s %(error)s %(nb_event)s\n" fsock.write(text % data) - - ############################################################################ + + ############################################################################ def do_calculate_decay_widths(self, 
line): """Main Commands: launch decay width calculation and automatic inclusion of calculated widths and BRs in the param_card.""" @@ -2887,21 +2887,21 @@ def do_calculate_decay_widths(self, line): self.Gdirs = None if not args: - # No run name assigned -> assigned one automaticaly + # No run name assigned -> assigned one automaticaly self.set_run_name(self.find_available_run_name(self.me_dir)) else: self.set_run_name(args[0], reload_card=True) args.pop(0) self.configure_directory() - + # Running gridpack warmup opts=[('accuracy', accuracy), # default 0.01 ('points', 1000), ('iterations',9)] logger.info('Calculating decay widths with run name %s' % self.run_name) - + self.exec_cmd('survey %s %s' % \ (self.run_name, " ".join(['--' + opt + '=' + str(val) for (opt,val) \ @@ -2910,26 +2910,26 @@ def do_calculate_decay_widths(self, line): self.refine_mode = "old" # specify how to combine event self.exec_cmd('combine_events', postcmd=False) self.exec_cmd('store_events', postcmd=False) - + self.collect_decay_widths() self.print_results_in_shell(self.results.current) - self.update_status('calculate_decay_widths done', - level='parton', makehtml=False) + self.update_status('calculate_decay_widths done', + level='parton', makehtml=False) + - ############################################################################ def collect_decay_widths(self): - """ Collect the decay widths and calculate BRs for all particles, and put - in param_card form. + """ Collect the decay widths and calculate BRs for all particles, and put + in param_card form. """ - + particle_dict = {} # store the results run_name = self.run_name # Looping over the Subprocesses for P_path in SubProcesses.get_subP(self.me_dir): ids = SubProcesses.get_subP_ids(P_path) - # due to grouping we need to compute the ratio factor for the + # due to grouping we need to compute the ratio factor for the # ungroup resutls (that we need here). 
Note that initial particles # grouping are not at the same stage as final particle grouping nb_output = len(ids) / (len(set([p[0] for p in ids]))) @@ -2940,30 +2940,30 @@ def collect_decay_widths(self): particle_dict[particles[0]].append([particles[1:], result/nb_output]) except KeyError: particle_dict[particles[0]] = [[particles[1:], result/nb_output]] - + self.update_width_in_param_card(particle_dict, initial = pjoin(self.me_dir, 'Cards', 'param_card.dat'), output=pjoin(self.me_dir, 'Events', run_name, "param_card.dat")) - + @staticmethod def update_width_in_param_card(decay_info, initial=None, output=None): # Open the param_card.dat and insert the calculated decays and BRs - + if not output: output = initial - + param_card_file = open(initial) param_card = param_card_file.read().split('\n') param_card_file.close() decay_lines = [] line_number = 0 - # Read and remove all decays from the param_card + # Read and remove all decays from the param_card while line_number < len(param_card): line = param_card[line_number] if line.lower().startswith('decay'): - # Read decay if particle in decay_info - # DECAY 6 1.455100e+00 + # Read decay if particle in decay_info + # DECAY 6 1.455100e+00 line = param_card.pop(line_number) line = line.split() particle = 0 @@ -2996,7 +2996,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): break line=param_card[line_number] if particle and particle not in decay_info: - # No decays given, only total width + # No decays given, only total width decay_info[particle] = [[[], width]] else: # Not decay line_number += 1 @@ -3004,7 +3004,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): while not param_card[-1] or param_card[-1].startswith('#'): param_card.pop(-1) - # Append calculated and read decays to the param_card + # Append calculated and read decays to the param_card param_card.append("#\n#*************************") param_card.append("# Decay widths *") 
param_card.append("#*************************") @@ -3018,7 +3018,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): param_card.append("# BR NDA ID1 ID2 ...") brs = [[(val[1]/width).real, val[0]] for val in decay_info[key] if val[1]] for val in sorted(brs, reverse=True): - param_card.append(" %e %i %s # %s" % + param_card.append(" %e %i %s # %s" % (val[0].real, len(val[1]), " ".join([str(v) for v in val[1]]), val[0] * width @@ -3031,7 +3031,7 @@ def update_width_in_param_card(decay_info, initial=None, output=None): ############################################################################ def do_multi_run(self, line): - + args = self.split_arg(line) # Check argument's validity mode = self.check_multi_run(args) @@ -3047,7 +3047,7 @@ def do_multi_run(self, line): self.check_param_card(path, run=False) #store it locally to avoid relaunch param_card_iterator, self.param_card_iterator = self.param_card_iterator, [] - + crossoversig = 0 inv_sq_err = 0 nb_event = 0 @@ -3055,8 +3055,8 @@ def do_multi_run(self, line): self.nb_refine = 0 self.exec_cmd('generate_events %s_%s -f' % (main_name, i), postcmd=False) # Update collected value - nb_event += int(self.results[self.run_name][-1]['nb_event']) - self.results.add_detail('nb_event', nb_event , run=main_name) + nb_event += int(self.results[self.run_name][-1]['nb_event']) + self.results.add_detail('nb_event', nb_event , run=main_name) cross = self.results[self.run_name][-1]['cross'] error = self.results[self.run_name][-1]['error'] + 1e-99 crossoversig+=cross/error**2 @@ -3070,7 +3070,7 @@ def do_multi_run(self, line): os.mkdir(pjoin(self.me_dir,'Events', self.run_name)) except Exception: pass - os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' + os.system('%(bin)s/merge.pl %(event)s/%(name)s_*/unweighted_events.lhe.gz %(event)s/%(name)s/unweighted_events.lhe.gz %(event)s/%(name)s_banner.txt' % {'bin': 
self.dirbin, 'event': pjoin(self.me_dir,'Events'), 'name': self.run_name}) @@ -3084,19 +3084,19 @@ def do_multi_run(self, line): self.create_root_file('%s/unweighted_events.lhe' % self.run_name, '%s/unweighted_events.root' % self.run_name) - - path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") + + path = pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe") self.create_plot('parton', path, pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') ) - - if not os.path.exists('%s.gz' % path): + + if not os.path.exists('%s.gz' % path): misc.gzip(path) self.update_status('', level='parton') - self.print_results_in_shell(self.results.current) - + self.print_results_in_shell(self.results.current) + cpath = pjoin(self.me_dir,'Cards','param_card.dat') if param_card_iterator: @@ -3112,21 +3112,21 @@ def do_multi_run(self, line): path = pjoin(self.me_dir, 'Events','scan_%s.txt' % scan_name) logger.info("write all cross-section results in %s" % path, '$MG:BOLD') param_card_iterator.write_summary(path) - - ############################################################################ + + ############################################################################ def do_treatcards(self, line, mode=None, opt=None): """Advanced commands: create .inc files from param_card.dat/run_card.dat""" if not mode and not opt: args = self.split_arg(line) mode, opt = self.check_treatcards(args) - + # To decide whether to refresh MadLoop's helicity filters, it is necessary # to check if the model parameters where modified or not, before doing - # anything else. + # anything else. need_MadLoopFilterUpdate = False - # Just to record what triggered the reinitialization of MadLoop for a + # Just to record what triggered the reinitialization of MadLoop for a # nice debug message. 
type_of_change = '' if not opt['forbid_MadLoopInit'] and self.proc_characteristics['loop_induced'] \ @@ -3137,10 +3137,10 @@ def do_treatcards(self, line, mode=None, opt=None): (os.path.getmtime(paramDat)-os.path.getmtime(paramInc)) > 0.0: need_MadLoopFilterUpdate = True type_of_change = 'model' - + ML_in = pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat') ML_out = pjoin(self.me_dir,"SubProcesses", - "MadLoop5_resources", "MadLoopParams.dat") + "MadLoop5_resources", "MadLoopParams.dat") if (not os.path.isfile(ML_in)) or (not os.path.isfile(ML_out)) or \ (os.path.getmtime(ML_in)-os.path.getmtime(ML_out)) > 0.0: need_MadLoopFilterUpdate = True @@ -3148,7 +3148,7 @@ def do_treatcards(self, line, mode=None, opt=None): #check if no 'Auto' are present in the file self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) - + if mode in ['param', 'all']: model = self.find_model_name() tmp_model = os.path.basename(model) @@ -3160,9 +3160,9 @@ def do_treatcards(self, line, mode=None, opt=None): check_param_card.check_valid_param_card(mg5_param) opt['param_card'] = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') else: - check_param_card.check_valid_param_card(opt['param_card']) - - logger.debug('write compile file for card: %s' % opt['param_card']) + check_param_card.check_valid_param_card(opt['param_card']) + + logger.debug('write compile file for card: %s' % opt['param_card']) param_card = check_param_card.ParamCard(opt['param_card']) outfile = pjoin(opt['output_dir'], 'param_card.inc') ident_card = pjoin(self.me_dir,'Cards','ident_card.dat') @@ -3185,10 +3185,10 @@ def do_treatcards(self, line, mode=None, opt=None): devnull.close() default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat') - need_mp = self.proc_characteristics['loop_induced'] + need_mp = self.proc_characteristics['loop_induced'] param_card.write_inc_file(outfile, ident_card, default, need_mp=need_mp) - - + + if mode in ['run', 'all']: if not hasattr(self, 'run_card'): 
run_card = banner_mod.RunCard(opt['run_card'], path=pjoin(self.me_dir, 'Cards', 'run_card.dat')) @@ -3202,7 +3202,7 @@ def do_treatcards(self, line, mode=None, opt=None): run_card['lpp2'] = 0 run_card['ebeam1'] = 0 run_card['ebeam2'] = 0 - + # Ensure that the bias parameters has all the required input from the # run_card if run_card['bias_module'].lower() not in ['dummy','none']: @@ -3219,7 +3219,7 @@ def do_treatcards(self, line, mode=None, opt=None): mandatory_file,run_card['bias_module'])) misc.copytree(run_card['bias_module'], pjoin(self.me_dir,'Source','BIAS', os.path.basename(run_card['bias_module']))) - + #check expected parameters for the module. default_bias_parameters = {} start, last = False,False @@ -3244,50 +3244,50 @@ def do_treatcards(self, line, mode=None, opt=None): for pair in line.split(','): if not pair.strip(): continue - x,y =pair.split(':') + x,y =pair.split(':') x=x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y elif ':' in line: x,y = line.split(':') x = x.strip() if x.startswith(('"',"'")) and x.endswith(x[0]): - x = x[1:-1] + x = x[1:-1] default_bias_parameters[x] = y for key,value in run_card['bias_parameters'].items(): if key not in default_bias_parameters: logger.warning('%s not supported by the bias module. We discard this entry.', key) else: default_bias_parameters[key] = value - run_card['bias_parameters'] = default_bias_parameters - - - # Finally write the include file + run_card['bias_parameters'] = default_bias_parameters + + + # Finally write the include file run_card.write_include_file(opt['output_dir']) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: - self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, + self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat')) # The writing out of MadLoop filter is potentially dangerous # when running in multi-core with a central disk. 
So it is turned - # off here. If these filters were not initialized then they will + # off here. If these filters were not initialized then they will # have to be re-computed at the beginning of each run. if 'WriteOutFilters' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('WriteOutFilters'): logger.info( -"""You chose to have MadLoop writing out filters. +"""You chose to have MadLoop writing out filters. Beware that this can be dangerous for local multicore runs.""") self.MadLoopparam.set('WriteOutFilters',False, changeifuserset=False) - + # The conservative settings below for 'CTModeInit' and 'ZeroThres' # help adress issues for processes like g g > h z, and g g > h g - # where there are some helicity configuration heavily suppressed - # (by several orders of magnitude) so that the helicity filter + # where there are some helicity configuration heavily suppressed + # (by several orders of magnitude) so that the helicity filter # needs high numerical accuracy to correctly handle this spread in # magnitude. Also, because one cannot use the Born as a reference - # scale, it is better to force quadruple precision *for the + # scale, it is better to force quadruple precision *for the # initialization points only*. This avoids numerical accuracy issues # when setting up the helicity filters and does not significantly # slow down the run. @@ -3298,21 +3298,21 @@ def do_treatcards(self, line, mode=None, opt=None): # It is a bit superficial to use the level 2 which tries to numerically # map matching helicities (because of CP symmetry typically) together. -# It is useless in the context of MC over helicities and it can +# It is useless in the context of MC over helicities and it can # potentially make the helicity double checking fail. self.MadLoopparam.set('HelicityFilterLevel',1, changeifuserset=False) # To be on the safe side however, we ask for 4 consecutive matching # helicity filters. 
self.MadLoopparam.set('CheckCycle',4, changeifuserset=False) - + # For now it is tricky to have each channel performing the helicity # double check. What we will end up doing is probably some kind # of new initialization round at the beginning of each launch - # command, to reset the filters. + # command, to reset the filters. self.MadLoopparam.set('DoubleCheckHelicityFilter',False, changeifuserset=False) - + # Thanks to TIR recycling, TIR is typically much faster for Loop-induced # processes when not doing MC over helicities, so that we place OPP last. if not hasattr(self, 'run_card'): @@ -3349,7 +3349,7 @@ def do_treatcards(self, line, mode=None, opt=None): logger.warning( """You chose to also use a lorentz rotation for stability tests (see parameter NRotations_[DP|QP]). Beware that, for optimization purposes, MadEvent uses manual TIR cache clearing which is not compatible - with the lorentz rotation stability test. The number of these rotations to be used will be reset to + with the lorentz rotation stability test. The number of these rotations to be used will be reset to zero by MadLoop. You can avoid this by changing the parameter 'FORCE_ML_HELICITY_SUM' int he matrix.f files to be .TRUE. so that the sum over helicity configurations is performed within MadLoop (in which case the helicity of final state particles cannot be speicfied in the LHE file.""") @@ -3363,15 +3363,15 @@ def do_treatcards(self, line, mode=None, opt=None): # self.MadLoopparam.set('NRotations_DP',0,changeifuserset=False) # Revert to the above to be slightly less robust but twice faster. 
self.MadLoopparam.set('NRotations_DP',1,changeifuserset=False) - self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) - + self.MadLoopparam.set('NRotations_QP',0,changeifuserset=False) + # Finally, the stability tests are slightly less reliable for process - # with less or equal than 4 final state particles because the + # with less or equal than 4 final state particles because the # accessible kinematic is very limited (i.e. lorentz rotations don't # shuffle invariants numerics much). In these cases, we therefore # increase the required accuracy to 10^-7. # This is important for getting g g > z z [QCD] working with a - # ptheavy cut as low as 1 GeV. + # ptheavy cut as low as 1 GeV. if self.proc_characteristics['nexternal']<=4: if ('MLStabThres' in self.MadLoopparam.user_set and \ self.MadLoopparam.get('MLStabThres')>1.0e-7): @@ -3381,12 +3381,12 @@ def do_treatcards(self, line, mode=None, opt=None): than four external legs, so this is not recommended (especially not for g g > z z).""") self.MadLoopparam.set('MLStabThres',1.0e-7,changeifuserset=False) else: - self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) + self.MadLoopparam.set('MLStabThres',1.0e-4,changeifuserset=False) #write the output file self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses","MadLoop5_resources", "MadLoopParams.dat")) - + if self.proc_characteristics['loop_induced'] and mode in ['loop', 'all']: # Now Update MadLoop filters if necessary (if modifications were made to # the model parameters). 
@@ -3403,12 +3403,12 @@ def do_treatcards(self, line, mode=None, opt=None): elif not opt['forbid_MadLoopInit'] and \ MadLoopInitializer.need_MadLoopInit(self.me_dir): self.exec_cmd('initMadLoop -f') - - ############################################################################ + + ############################################################################ def do_survey(self, line): """Advanced commands: launch survey for the current process """ - - + + args = self.split_arg(line) # Check argument's validity self.check_survey(args) @@ -3416,7 +3416,7 @@ def do_survey(self, line): if os.path.exists(pjoin(self.me_dir,'error')): os.remove(pjoin(self.me_dir,'error')) - + self.configure_directory() # Save original random number self.random_orig = self.random @@ -3435,9 +3435,9 @@ def do_survey(self, line): P_zero_result = [] # check the number of times where they are no phase-space # File for the loop (for loop induced) - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources')) and cluster.need_transfer(self.options): - tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', + tf=tarfile.open(pjoin(self.me_dir, 'SubProcesses', 'MadLoop5_resources.tar.gz'), 'w:gz', dereference=True) tf.add(pjoin(self.me_dir,'SubProcesses','MadLoop5_resources'), arcname='MadLoop5_resources') @@ -3467,7 +3467,7 @@ def do_survey(self, line): except Exception as error: logger.debug(error) pass - + jobs, P_zero_result = ajobcreator.launch() # Check if all or only some fails if P_zero_result: @@ -3481,60 +3481,60 @@ def do_survey(self, line): self.get_Gdir() for P in P_zero_result: self.Gdirs[0][pjoin(self.me_dir,'SubProcesses',P)] = [] - + self.monitor(run_type='All jobs submitted for survey', html=True) if not self.history or 'survey' in self.history[-1] or self.ninitial ==1 or \ self.run_card['gridpack']: #will be done during the refine (more precisely in gen_ximprove) cross, error = self.make_make_all_html_results() 
self.results.add_detail('cross', cross) - self.results.add_detail('error', error) + self.results.add_detail('error', error) self.exec_cmd("print_results %s" % self.run_name, - errorhandling=False, printcmd=False, precmd=False, postcmd=False) - + errorhandling=False, printcmd=False, precmd=False, postcmd=False) + self.results.add_detail('run_statistics', dict(ajobcreator.run_statistics)) self.update_status('End survey', 'parton', makehtml=False) ############################################################################ def pass_in_difficult_integration_mode(self, rate=1): """be more secure for the integration to not miss it due to strong cut""" - + # improve survey options if default if self.opts['points'] == self._survey_options['points'][1]: self.opts['points'] = (rate+2) * self._survey_options['points'][1] if self.opts['iterations'] == self._survey_options['iterations'][1]: self.opts['iterations'] = 1 + rate + self._survey_options['iterations'][1] if self.opts['accuracy'] == self._survey_options['accuracy'][1]: - self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) - + self.opts['accuracy'] = self._survey_options['accuracy'][1]/(rate+2) + # Modify run_config.inc in order to improve the refine conf_path = pjoin(self.me_dir, 'Source','run_config.inc') files.cp(conf_path, conf_path + '.bk') # text = open(conf_path).read() - min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) - + min_evt, max_evt = 2500 *(2+rate), 10000*(rate+1) + text = re.sub('''\(min_events = \d+\)''', '(min_events = %i )' % min_evt, text) text = re.sub('''\(max_events = \d+\)''', '(max_events = %i )' % max_evt, text) fsock = open(conf_path, 'w') fsock.write(text) fsock.close() - + # Compile for name in ['../bin/internal/gen_ximprove', 'all']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) - - - ############################################################################ + + + ############################################################################ def 
do_refine(self, line): """Advanced commands: launch survey for the current process """ - devnull = open(os.devnull, 'w') + devnull = open(os.devnull, 'w') self.nb_refine += 1 args = self.split_arg(line) treshold=None - - + + for a in args: if a.startswith('--treshold='): treshold = float(a.split('=',1)[1]) @@ -3548,8 +3548,8 @@ def do_refine(self, line): break # Check argument's validity self.check_refine(args) - - refine_opt = {'err_goal': args[0], 'split_channels': True} + + refine_opt = {'err_goal': args[0], 'split_channels': True} precision = args[0] if len(args) == 2: refine_opt['max_process']= args[1] @@ -3560,15 +3560,15 @@ def do_refine(self, line): # Update random number self.update_random() self.save_random() - + if self.cluster_mode: logger.info('Creating Jobs') self.update_status('Refine results to %s' % precision, level=None) - + self.total_jobs = 0 - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + # cleanning the previous job for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() @@ -3589,14 +3589,14 @@ def do_refine(self, line): level = 5 if value.has_warning(): level = 10 - logger.log(level, + logger.log(level, value.nice_output(str('/'.join([key[0],'G%s'%key[1]]))). replace(' statistics','')) logger.debug(globalstat.nice_output('combined', no_warning=True)) - + if survey_statistics: x_improve.run_statistics = survey_statistics - + x_improve.launch() # create the ajob for the refinment. 
if not self.history or 'refine' not in self.history[-1]: cross, error = x_improve.update_html() #update html results for survey @@ -3610,9 +3610,9 @@ def do_refine(self, line): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) - + if os.path.exists(pjoin(Pdir, 'ajob1')): cudacpp_backend = self.run_card['cudacpp_backend'] # the default value is defined in banner.py @@ -3629,7 +3629,7 @@ def do_refine(self, line): ###self.compile(['all'], cwd=Pdir) alljobs = misc.glob('ajob*', Pdir) - + #remove associated results.dat (ensure to not mix with all data) Gre = re.compile("\s*j=(G[\d\.\w]+)") for job in alljobs: @@ -3637,49 +3637,49 @@ def do_refine(self, line): for Gdir in Gdirs: if os.path.exists(pjoin(Pdir, Gdir, 'results.dat')): os.remove(pjoin(Pdir, Gdir,'results.dat')) - - nb_tot = len(alljobs) + + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), - run_type='Refine number %s on %s (%s/%s)' % + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) - self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine, html=True) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except Exception: pass - + if isinstance(x_improve, gen_ximprove.gen_ximprove_v4): # the merge of the events.lhe is handle in the x_improve class - # for splitted runs. (and partly in store_events). + # for splitted runs. (and partly in store_events). 
combine_runs.CombineRuns(self.me_dir) self.refine_mode = "old" else: self.refine_mode = "new" - + cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - self.results.add_detail('run_statistics', + self.results.add_detail('run_statistics', dict(self.results.get_detail('run_statistics'))) self.update_status('finish refine', 'parton', makehtml=False) devnull.close() - - ############################################################################ + + ############################################################################ def do_comine_iteration(self, line): """Not in help: Combine a given iteration combine_iteration Pdir Gdir S|R step - S is for survey + S is for survey R is for refine - step is the iteration number (not very critical)""" + step is the iteration number (not very critical)""" self.set_run_name("tmp") self.configure_directory(html_opening=False) @@ -3695,12 +3695,12 @@ def do_comine_iteration(self, line): gensym.combine_iteration(Pdir, Gdir, int(step)) elif mode == "R": refine = gen_ximprove.gen_ximprove_share(self) - refine.combine_iteration(Pdir, Gdir, int(step)) - - + refine.combine_iteration(Pdir, Gdir, int(step)) - - ############################################################################ + + + + ############################################################################ def do_combine_events(self, line): """Advanced commands: Launch combine events""" start=time.time() @@ -3710,11 +3710,11 @@ def do_combine_events(self, line): self.check_combine_events(args) self.update_status('Combining Events', level='parton') - + if self.run_card['gridpack'] and isinstance(self, GridPackCmd): return GridPackCmd.do_combine_events(self, line) - + # Define The Banner tag = self.run_card['run_tag'] # Update the banner with the pythia card @@ -3727,14 +3727,14 @@ def do_combine_events(self, line): self.banner.change_seed(self.random_orig) if not os.path.exists(pjoin(self.me_dir, 
'Events', self.run_name)): os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) - self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, + self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -3751,12 +3751,12 @@ def do_combine_events(self, line): os.remove(pjoin(Gdir, 'events.lhe')) continue - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec'), result.get('xerru'), result.get('axsec') ) - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.run_card['nevents']) @@ -3765,13 +3765,13 @@ def do_combine_events(self, line): AllEvent.add(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() if len(AllEvent) == 0: - nb_event = 0 + nb_event = 0 else: nb_event = AllEvent.unweight(pjoin(self.me_dir, "Events", self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.run_card['nevents'], @@ -3791,22 +3791,22 @@ def do_combine_events(self, line): os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(self.me_dir, "Events", self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) - + if self.run_card['bias_module'].lower() not in ['dummy', 'none'] and nb_event: self.correct_bias() elif 
self.run_card['custom_fcts']: self.correct_bias() logger.info("combination of events done in %s s ", time.time()-start) - + self.to_store.append('event') - - ############################################################################ + + ############################################################################ def correct_bias(self): - """check the first event and correct the weight by the bias + """check the first event and correct the weight by the bias and correct the cross-section. - If the event do not have the bias tag it means that the bias is + If the event do not have the bias tag it means that the bias is one modifying the cross-section/shape so we have nothing to do """ @@ -3834,7 +3834,7 @@ def correct_bias(self): output.write('') output.close() lhe.close() - + # MODIFY THE BANNER i.e. INIT BLOCK # ensure information compatible with normalisation choice total_cross = sum(cross[key] for key in cross) @@ -3846,8 +3846,8 @@ def correct_bias(self): elif self.run_card['event_norm'] == 'unity': total_cross = self.results.current['cross'] * total_cross / nb_event for key in cross: - cross[key] *= total_cross / nb_event - + cross[key] *= total_cross / nb_event + bannerfile = lhe_parser.EventFile(pjoin(self.me_dir, 'Events', self.run_name, '.banner.tmp.gz'),'w') banner = banner_mod.Banner(lhe.banner) banner.modify_init_cross(cross) @@ -3862,12 +3862,12 @@ def correct_bias(self): os.remove(lhe.name) os.remove(bannerfile.name) os.remove(output.name) - - + + self.results.current['cross'] = total_cross self.results.current['error'] = 0 - - ############################################################################ + + ############################################################################ def do_store_events(self, line): """Advanced commands: Launch store events""" @@ -3883,16 +3883,16 @@ def do_store_events(self, line): if not os.path.exists(pjoin(self.me_dir, 'Events', run)): os.mkdir(pjoin(self.me_dir, 'Events', run)) if not 
os.path.exists(pjoin(self.me_dir, 'HTML', run)): - os.mkdir(pjoin(self.me_dir, 'HTML', run)) - + os.mkdir(pjoin(self.me_dir, 'HTML', run)) + # 1) Store overall process information #input = pjoin(self.me_dir, 'SubProcesses', 'results.dat') #output = pjoin(self.me_dir, 'SubProcesses', '%s_results.dat' % run) - #files.cp(input, output) + #files.cp(input, output) # 2) Treat the files present in the P directory - # Ensure that the number of events is different of 0 + # Ensure that the number of events is different of 0 if self.results.current['nb_event'] == 0 and not self.run_card['gridpack']: logger.warning("No event detected. No cleaning performed! This should allow to run:\n" + " cd Subprocesses; ../bin/internal/combine_events\n"+ @@ -3910,18 +3910,18 @@ def do_store_events(self, line): # if os.path.exists(pjoin(G_path, 'results.dat')): # input = pjoin(G_path, 'results.dat') # output = pjoin(G_path, '%s_results.dat' % run) - # files.cp(input, output) + # files.cp(input, output) #except Exception: - # continue + # continue # Store log try: if os.path.exists(pjoin(G_path, 'log.txt')): input = pjoin(G_path, 'log.txt') output = pjoin(G_path, '%s_log.txt' % run) - files.mv(input, output) + files.mv(input, output) except Exception: continue - #try: + #try: # # Grid # for name in ['ftn26']: # if os.path.exists(pjoin(G_path, name)): @@ -3930,7 +3930,7 @@ def do_store_events(self, line): # input = pjoin(G_path, name) # output = pjoin(G_path, '%s_%s' % (run,name)) # files.mv(input, output) - # misc.gzip(pjoin(G_path, output), error=None) + # misc.gzip(pjoin(G_path, output), error=None) #except Exception: # continue # Delete ftn25 to ensure reproducible runs @@ -3940,11 +3940,11 @@ def do_store_events(self, line): # 3) Update the index.html self.gen_card_html() - + # 4) Move the Files present in Events directory E_path = pjoin(self.me_dir, 'Events') O_path = pjoin(self.me_dir, 'Events', run) - + # The events file for name in ['events.lhe', 'unweighted_events.lhe']: finput = 
pjoin(E_path, name) @@ -3960,30 +3960,30 @@ def do_store_events(self, line): # os.remove(pjoin(O_path, '%s.gz' % name)) # input = pjoin(E_path, name) ## output = pjoin(O_path, name) - + self.update_status('End Parton', level='parton', makehtml=False) devnull.close() - - - ############################################################################ + + + ############################################################################ def do_create_gridpack(self, line): """Advanced commands: Create gridpack from present run""" self.update_status('Creating gridpack', level='parton') # compile gen_ximprove misc.compile(['../bin/internal/gen_ximprove'], cwd=pjoin(self.me_dir, "Source")) - + Gdir = self.get_Gdir() Pdir = set([os.path.dirname(G) for G in Gdir]) - for P in Pdir: + for P in Pdir: allG = misc.glob('G*', path=P) for G in allG: if pjoin(P, G) not in Gdir: logger.debug('removing %s', pjoin(P,G)) shutil.rmtree(pjoin(P,G)) - - + + args = self.split_arg(line) self.check_combine_events(args) if not self.run_tag: self.run_tag = 'tag_1' @@ -3996,13 +3996,13 @@ def do_create_gridpack(self, line): cwd=self.me_dir) misc.call(['./bin/internal/clean'], cwd=self.me_dir) misc.call(['./bin/internal/make_gridpack'], cwd=self.me_dir) - files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), + files.mv(pjoin(self.me_dir, 'gridpack.tar.gz'), pjoin(self.me_dir, '%s_gridpack.tar.gz' % self.run_name)) os.system("sed -i.bak \"s/\s*.true.*=.*GridRun/ .false. 
= GridRun/g\" %s/Cards/grid_card.dat" \ % self.me_dir) self.update_status('gridpack created', level='gridpack') - - ############################################################################ + + ############################################################################ def do_shower(self, line): """launch the shower""" @@ -4010,7 +4010,7 @@ def do_shower(self, line): if len(args)>1 and args[0] in self._interfaced_showers: chosen_showers = [args.pop(0)] elif '--no_default' in line: - # If '--no_default' was specified in the arguments, then only one + # If '--no_default' was specified in the arguments, then only one # shower will be run, depending on which card is present. # but we each of them are called. (each of them check if the file exists) chosen_showers = list(self._interfaced_showers) @@ -4021,9 +4021,9 @@ def do_shower(self, line): shower_priority = ['pythia8','pythia'] chosen_showers = [sorted(chosen_showers,key=lambda sh: shower_priority.index(sh) if sh in shower_priority else len(shower_priority)+1)[0]] - + for shower in chosen_showers: - self.exec_cmd('%s %s'%(shower,' '.join(args)), + self.exec_cmd('%s %s'%(shower,' '.join(args)), postcmd=False, printcmd=False) def do_madanalysis5_parton(self, line): @@ -4039,11 +4039,11 @@ def do_madanalysis5_parton(self, line): def mg5amc_py8_interface_consistency_warning(options): """ Check the consistency of the mg5amc_py8_interface installed with the current MG5 and Pythia8 versions. 
""" - + # All this is only relevant is Pythia8 is interfaced to MG5 if not options['pythia8_path']: return None - + if not options['mg5amc_py8_interface_path']: return \ """ @@ -4053,7 +4053,7 @@ def mg5amc_py8_interface_consistency_warning(options): Consider installing the MG5_aMC-PY8 interface with the following command: MG5_aMC>install mg5amc_py8_interface """ - + mg5amc_py8_interface_path = options['mg5amc_py8_interface_path'] py8_path = options['pythia8_path'] # If the specified interface path is relative, make it absolut w.r.t MGDIR if @@ -4062,7 +4062,7 @@ def mg5amc_py8_interface_consistency_warning(options): mg5amc_py8_interface_path = pjoin(MG5DIR,mg5amc_py8_interface_path) py8_path = pjoin(MG5DIR,py8_path) - # Retrieve all the on-install and current versions + # Retrieve all the on-install and current versions fsock = open(pjoin(mg5amc_py8_interface_path, 'MG5AMC_VERSION_ON_INSTALL')) MG5_version_on_install = fsock.read().replace('\n','') fsock.close() @@ -4074,7 +4074,7 @@ def mg5amc_py8_interface_consistency_warning(options): MG5_curr_version =misc.get_pkg_info()['version'] try: p = subprocess.Popen(['./get_pythia8_version.py',py8_path], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mg5amc_py8_interface_path) (out, err) = p.communicate() out = out.decode(errors='ignore').replace('\n','') @@ -4084,37 +4084,37 @@ def mg5amc_py8_interface_consistency_warning(options): float(out) except: PY8_curr_version = None - + if not MG5_version_on_install is None and not MG5_curr_version is None: if MG5_version_on_install != MG5_curr_version: return \ """ The current version of MG5_aMC (v%s) is different than the one active when - installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). + installing the 'mg5amc_py8_interface_path' (which was MG5aMC v%s). 
Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(MG5_curr_version, MG5_version_on_install) - + if not PY8_version_on_install is None and not PY8_curr_version is None: if PY8_version_on_install != PY8_curr_version: return \ """ The current version of Pythia8 (v%s) is different than the one active when - installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). + installing the 'mg5amc_py8_interface' tool (which was Pythia8 v%s). Please consider refreshing the installation of this interface with the command: MG5_aMC>install mg5amc_py8_interface """%(PY8_curr_version,PY8_version_on_install) - + return None def setup_Pythia8RunAndCard(self, PY8_Card, run_type): """ Setup the Pythia8 Run environment and card. In particular all the process and run specific parameters of the card are automatically set here. This function returns the path where HEPMC events will be output, if any.""" - + HepMC_event_output = None tag = self.run_tag - + PY8_Card.subruns[0].systemSet('Beams:LHEF',"unweighted_events.lhe.gz") hepmc_format = PY8_Card['HEPMCoutput:file'].lower() @@ -4185,7 +4185,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): misc.mkfifo(fifo_path) # Use defaultSet not to overwrite the current userSet status PY8_Card.defaultSet('HEPMCoutput:file',fifo_path) - HepMC_event_output=fifo_path + HepMC_event_output=fifo_path elif hepmc_format in ['','/dev/null','None']: logger.warning('User disabled the HepMC output of Pythia8.') HepMC_event_output = None @@ -4206,7 +4206,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): # only if it is not already user_set. 
if PY8_Card['JetMatching:qCut']==-1.0: PY8_Card.MadGraphSet('JetMatching:qCut',1.5*self.run_card['xqcut'], force=True) - + if PY8_Card['JetMatching:qCut']<(1.5*self.run_card['xqcut']): logger.error( 'The MLM merging qCut parameter you chose (%f) is less than '%PY8_Card['JetMatching:qCut']+ @@ -4233,7 +4233,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): if PY8_Card['JetMatching:qCut'] not in qCutList: qCutList.append(PY8_Card['JetMatching:qCut']) PY8_Card.MadGraphSet('SysCalc:qCutList', qCutList, force=True) - + if PY8_Card['SysCalc:qCutList']!='auto': for scale in PY8_Card['SysCalc:qCutList']: @@ -4244,7 +4244,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): "'sys_matchscale' in the run_card) is less than 1.5*xqcut, where xqcut is"+ ' the run_card parameter (=%f)\n'%self.run_card['xqcut']+ 'It would be better/safer to use a larger qCut or a smaller xqcut.') - + # Specific MLM settings # PY8 should not implement the MLM veto since the driver should do it # if merging scale variation is turned on @@ -4294,18 +4294,18 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): CKKW_cut = 'ktdurham' elif self.run_card['ptlund']>0.0 and self.run_card['ktdurham']<=0.0: PY8_Card.subruns[0].MadGraphSet('Merging:doPTLundMerging',True) - CKKW_cut = 'ptlund' + CKKW_cut = 'ptlund' else: raise InvalidCmd("*Either* the 'ptlund' or 'ktdurham' cut in "+\ " the run_card must be turned on to activate CKKW(L) merging"+ " with Pythia8, but *both* cuts cannot be turned on at the same time."+ "\n ptlund=%f, ktdurham=%f."%(self.run_card['ptlund'],self.run_card['ktdurham'])) - + # Automatically set qWeed to the CKKWL cut if not defined by the user. if PY8_Card['SysCalc:qWeed']==-1.0: PY8_Card.MadGraphSet('SysCalc:qWeed',self.run_card[CKKW_cut], force=True) - + # MadGraphSet sets the corresponding value (in system mode) # only if it is not already user_set. 
if PY8_Card['Merging:TMS']==-1.0: @@ -4319,7 +4319,7 @@ def setup_Pythia8RunAndCard(self, PY8_Card, run_type): 'The CKKWl merging scale you chose (%f) is less than '%PY8_Card['Merging:TMS']+ 'the %s cut specified in the run_card parameter (=%f).\n'%(CKKW_cut,self.run_card[CKKW_cut])+ 'It is incorrect to use a smaller CKKWl scale than the generation-level %s cut!'%CKKW_cut) - + PY8_Card.MadGraphSet('TimeShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:pTmaxMatch',1) PY8_Card.MadGraphSet('SpaceShower:rapidityOrder',False) @@ -4381,7 +4381,7 @@ def do_pythia8(self, line): try: import madgraph - except ImportError: + except ImportError: import internal.histograms as histograms else: import madgraph.various.histograms as histograms @@ -4400,16 +4400,16 @@ def do_pythia8(self, line): self.check_pythia8(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) - self.check_pythia8(args) + self.check_pythia8(args) # Update the banner with the pythia card if not self.banner or len(self.banner) <=1: # Here the level keyword 'pythia' must not be changed to 'pythia8'. self.banner = banner_mod.recover_banner(self.results, 'pythia') - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1], pythia_version=8, banner=self.banner) @@ -4425,7 +4425,7 @@ def do_pythia8(self, line): #"Please use 'event_norm = average' in the run_card to avoid this problem.") - + if not self.options['mg5amc_py8_interface_path'] or not \ os.path.exists(pjoin(self.options['mg5amc_py8_interface_path'], 'MG5aMC_PY8_interface')): @@ -4444,16 +4444,16 @@ def do_pythia8(self, line): # Again here 'pythia' is just a keyword for the simulation level. 
self.update_status('\033[92mRunning Pythia8 [arXiv:1410.3012]\033[0m', 'pythia8') - - tag = self.run_tag + + tag = self.run_tag # Now write Pythia8 card # Start by reading, starting from the default one so that the 'user_set' # tag are correctly set. - PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', + PY8_Card = banner_mod.PY8Card(pjoin(self.me_dir, 'Cards', 'pythia8_card_default.dat')) PY8_Card.read(pjoin(self.me_dir, 'Cards', 'pythia8_card.dat'), setter='user') - + run_type = 'default' merged_run_types = ['MLM','CKKW'] if int(self.run_card['ickkw'])==1: @@ -4471,7 +4471,7 @@ def do_pythia8(self, line): cmd_card = StringIO.StringIO() PY8_Card.write(cmd_card,pjoin(self.me_dir,'Cards','pythia8_card_default.dat'), direct_pythia_input=True) - + # Now setup the preamble to make sure that everything will use the locally # installed tools (if present) even if the user did not add it to its # environment variables. @@ -4486,13 +4486,13 @@ def do_pythia8(self, line): preamble = misc.get_HEPTools_location_setter( pjoin(MG5DIR,'HEPTools'),'lib') preamble += "\n unset PYTHIA8DATA\n" - + open(pythia_cmd_card,'w').write("""! ! It is possible to run this card manually with: ! %s %s ! 
"""%(preamble+pythia_main,os.path.basename(pythia_cmd_card))+cmd_card.getvalue()) - + # launch pythia8 pythia_log = pjoin(self.me_dir , 'Events', self.run_name , '%s_pythia8.log' % tag) @@ -4504,13 +4504,13 @@ def do_pythia8(self, line): shell_exe = None if os.path.exists('/usr/bin/env'): shell_exe = '/usr/bin/env %s'%shell - else: + else: shell_exe = misc.which(shell) if not shell_exe: raise self.InvalidCmd('No s hell could be found in your environment.\n'+ "Make sure that either '%s' is in your path or that the"%shell+\ " command '/usr/bin/env %s' exists and returns a valid path."%shell) - + exe_cmd = "#!%s\n%s"%(shell_exe,' '.join( [preamble+pythia_main, os.path.basename(pythia_cmd_card)])) @@ -4528,7 +4528,7 @@ def do_pythia8(self, line): ( os.path.exists(HepMC_event_output) and \ stat.S_ISFIFO(os.stat(HepMC_event_output).st_mode)) startPY8timer = time.time() - + # Information that will be extracted from this PY8 run PY8_extracted_information={ 'sigma_m':None, 'Nacc':None, 'Ntry':None, 'cross_sections':{} } @@ -4556,7 +4556,7 @@ def do_pythia8(self, line): n_cores = max(int(self.options['cluster_size']),1) elif self.options['run_mode']==2: n_cores = max(int(self.cluster.nb_core),1) - + lhe_file_name = os.path.basename(PY8_Card.subruns[0]['Beams:LHEF']) lhe_file = lhe_parser.EventFile(pjoin(self.me_dir,'Events', self.run_name,PY8_Card.subruns[0]['Beams:LHEF'])) @@ -4574,7 +4574,7 @@ def do_pythia8(self, line): if self.options['run_mode']==2: min_n_events_per_job = 100 elif self.options['run_mode']==1: - min_n_events_per_job = 1000 + min_n_events_per_job = 1000 min_n_core = n_events//min_n_events_per_job n_cores = max(min(min_n_core,n_cores),1) @@ -4584,8 +4584,8 @@ def do_pythia8(self, line): logger.info('Follow Pythia8 shower by running the '+ 'following command (in a separate terminal):\n tail -f %s'%pythia_log) - if self.options['run_mode']==2 and self.options['nb_core']>1: - ret_code = self.cluster.launch_and_wait(wrapper_path, + if 
self.options['run_mode']==2 and self.options['nb_core']>1: + ret_code = self.cluster.launch_and_wait(wrapper_path, argument= [], stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events',self.run_name)) else: @@ -4630,10 +4630,10 @@ def do_pythia8(self, line): wrapper = open(wrapper_path,'w') if self.options['cluster_temp_path'] is None: exe_cmd = \ -"""#!%s +"""#!%s ./%s PY8Card.dat >& PY8_log.txt """ - else: + else: exe_cmd = \ """#!%s ln -s ./events_$1.lhe.gz ./events.lhe.gz @@ -4663,21 +4663,21 @@ def do_pythia8(self, line): # Set it as executable st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) - + # Split the .lhe event file, create event partition partition=[n_available_events//n_cores]*n_cores for i in range(n_available_events%n_cores): partition[i] += 1 - + # Splitting according to the total number of events requested by the user # Will be used to determine the number of events to indicate in the PY8 split cards. partition_for_PY8=[n_events//n_cores]*n_cores for i in range(n_events%n_cores): partition_for_PY8[i] += 1 - - logger.info('Splitting .lhe event file for PY8 parallelization...') - n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) - + + logger.info('Splitting .lhe event file for PY8 parallelization...') + n_splits = lhe_file.split(partition=partition, cwd=parallelization_dir, zip=True) + if n_splits!=len(partition): raise MadGraph5Error('Error during lhe file splitting. Expected %d files but obtained %d.' 
%(len(partition),n_splits)) @@ -4690,7 +4690,7 @@ def do_pythia8(self, line): # Add the necessary run content shutil.move(pjoin(parallelization_dir,lhe_file.name+'_%d.lhe.gz'%split_id), pjoin(parallelization_dir,split_files[-1])) - + logger.info('Submitting Pythia8 jobs...') for i, split_file in enumerate(split_files): # We must write a PY8Card tailored for each split so as to correct the normalization @@ -4706,7 +4706,7 @@ def do_pythia8(self, line): split_PY8_Card.write(pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,'PY8Card.dat'), add_missing=False) in_files = [pjoin(parallelization_dir,os.path.basename(pythia_main)), - pjoin(parallelization_dir,'PY8Card_%d.dat'%i), + pjoin(parallelization_dir,'PY8Card_%d.dat'%i), pjoin(parallelization_dir,split_file)] if self.options['cluster_temp_path'] is None: out_files = [] @@ -4718,35 +4718,35 @@ def do_pythia8(self, line): if os.path.basename(in_file)==split_file: ln(in_file,selected_cwd,name='events.lhe.gz') elif os.path.basename(in_file).startswith('PY8Card'): - ln(in_file,selected_cwd,name='PY8Card.dat') + ln(in_file,selected_cwd,name='PY8Card.dat') else: - ln(in_file,selected_cwd) + ln(in_file,selected_cwd) in_files = [] wrapper_path = os.path.basename(wrapper_path) else: out_files = ['split_%d.tar.gz'%i] selected_cwd = parallelization_dir - self.cluster.submit2(wrapper_path, - argument=[str(i)], cwd=selected_cwd, + self.cluster.submit2(wrapper_path, + argument=[str(i)], cwd=selected_cwd, input_files=in_files, output_files=out_files, required_output=out_files) - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return logger.info('Pythia8 shower jobs: %d Idle, %d Running, %d Done [%s]'\ %(Idle, Running, Done, misc.format_time(time.time() - startPY8timer))) self.cluster.wait(parallelization_dir,wait_monitoring) - + logger.info('Merging results from the split PY8 runs...') if self.options['cluster_temp_path']: # Decompressing the output for i, split_file in 
enumerate(split_files): misc.call(['tar','-xzf','split_%d.tar.gz'%i],cwd=parallelization_dir) os.remove(pjoin(parallelization_dir,'split_%d.tar.gz'%i)) - + # Now merge logs pythia_log_file = open(pythia_log,'w') n_added = 0 @@ -4778,7 +4778,7 @@ def wait_monitoring(Idle, Running, Done): if n_added>0: PY8_extracted_information['sigma_m'] /= float(n_added) pythia_log_file.close() - + # djr plots djr_HwU = None n_added = 0 @@ -4845,7 +4845,7 @@ def wait_monitoring(Idle, Running, Done): if not os.path.isfile(hepmc_file): continue all_hepmc_files.append(hepmc_file) - + if len(all_hepmc_files)>0: hepmc_output = pjoin(self.me_dir,'Events',self.run_name,HepMC_event_output) with misc.TMP_directory() as tmp_dir: @@ -4860,8 +4860,8 @@ def wait_monitoring(Idle, Running, Done): break header.close() tail = open(pjoin(tmp_dir,'tail.hepmc'),'w') - n_tail = 0 - + n_tail = 0 + for line in misc.reverse_readline(all_hepmc_files[-1]): if line.startswith('HepMC::'): n_tail += 1 @@ -4871,7 +4871,7 @@ def wait_monitoring(Idle, Running, Done): tail.close() if n_tail>1: raise MadGraph5Error('HEPMC files should only have one trailing command.') - ###################################################################### + ###################################################################### # This is the most efficient way of putting together HEPMC's, *BUT* # # WARNING: NEED TO RENDER THE CODE BELOW SAFE TOWARDS INJECTION # ###################################################################### @@ -4888,12 +4888,12 @@ def wait_monitoring(Idle, Running, Done): elif sys.platform == 'darwin': # sed on MAC has slightly different synthax than on os.system(' '.join(['sed','-i',"''","'%s;$d'"% - (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) - else: - # other UNIX systems + (';'.join('%id'%(i+1) for i in range(n_head))),hepmc_file])) + else: + # other UNIX systems os.system(' '.join(['sed','-i']+["-e '%id'"%(i+1) for i in range(n_head)]+ ["-e '$d'",hepmc_file])) - + os.system(' 
'.join(['cat',pjoin(tmp_dir,'header.hepmc')]+all_hepmc_files+ [pjoin(tmp_dir,'tail.hepmc'),'>',hepmc_output])) @@ -4915,12 +4915,12 @@ def wait_monitoring(Idle, Running, Done): 'Inclusive cross section:' not in '\n'.join(open(pythia_log,'r').readlines()[-20:]): logger.warning('Fail to produce a pythia8 output. More info in \n %s'%pythia_log) return - + # Plot for Pythia8 successful = self.create_plot('Pythia8') if not successful: logger.warning('Failed to produce Pythia8 merging plots.') - + self.to_store.append('pythia8') # Study matched cross-sections @@ -4931,7 +4931,7 @@ def wait_monitoring(Idle, Running, Done): if self.options['run_mode']==0 or (self.options['run_mode']==2 and self.options['nb_core']==1): PY8_extracted_information['sigma_m'],PY8_extracted_information['Nacc'],\ PY8_extracted_information['Ntry'] = self.parse_PY8_log_file( - pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) + pjoin(self.me_dir,'Events', self.run_name,'%s_pythia8.log' % tag)) else: logger.warning('Pythia8 cross-section could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1. 
YYYYY') @@ -4944,8 +4944,8 @@ def wait_monitoring(Idle, Running, Done): Ntry = PY8_extracted_information['Ntry'] sigma_m = PY8_extracted_information['sigma_m'] # Compute pythia error - error = self.results[self.run_name].return_tag(self.run_tag)['error'] - try: + error = self.results[self.run_name].return_tag(self.run_tag)['error'] + try: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) except ZeroDivisionError: # Cannot compute error @@ -4966,31 +4966,31 @@ def wait_monitoring(Idle, Running, Done): else: logger.warning('Pythia8 merged cross-sections could not be retreived.\n'+ 'Try turning parallelization off by setting the option nb_core to 1.XXXXX') - PY8_extracted_information['cross_sections'] = {} - + PY8_extracted_information['cross_sections'] = {} + cross_sections = PY8_extracted_information['cross_sections'] if cross_sections: - # Filter the cross_sections specified an keep only the ones + # Filter the cross_sections specified an keep only the ones # with central parameters and a different merging scale a_float_re = '[\+|-]?\d+(\.\d*)?([EeDd][\+|-]?\d+)?' 
central_merging_re = re.compile( '^\s*Weight_MERGING\s*=\s*(?P%s)\s*$'%a_float_re, - re.IGNORECASE) + re.IGNORECASE) cross_sections = dict( (float(central_merging_re.match(xsec).group('merging')),value) - for xsec, value in cross_sections.items() if not + for xsec, value in cross_sections.items() if not central_merging_re.match(xsec) is None) central_scale = PY8_Card['JetMatching:qCut'] if \ int(self.run_card['ickkw'])==1 else PY8_Card['Merging:TMS'] if central_scale in cross_sections: self.results.add_detail('cross_pythia8', cross_sections[central_scale][0]) self.results.add_detail('error_pythia8', cross_sections[central_scale][1]) - + #logger.info('Pythia8 merged cross-sections are:') #for scale in sorted(cross_sections.keys()): # logger.info(' > Merging scale = %-6.4g : %-11.5g +/- %-7.2g [pb]'%\ - # (scale,cross_sections[scale][0],cross_sections[scale][1])) - + # (scale,cross_sections[scale][0],cross_sections[scale][1])) + xsecs_file = open(pjoin(self.me_dir,'Events',self.run_name, '%s_merged_xsecs.txt'%tag),'w') if cross_sections: @@ -5003,9 +5003,9 @@ def wait_monitoring(Idle, Running, Done): xsecs_file.write('Cross-sections could not be read from the'+\ "XML node 'xsection' of the .dat file produced by Pythia8.") xsecs_file.close() - + #Update the banner - # We add directly the pythia command card because it has the full + # We add directly the pythia command card because it has the full # information self.banner.add(pythia_cmd_card) @@ -5022,13 +5022,13 @@ def wait_monitoring(Idle, Running, Done): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + def parse_PY8_log_file(self, log_file_path): """ Parse a log file to extract number of event and cross-section. 
""" pythiare = re.compile("Les Houches User Process\(es\)\s*\d+\s*\|\s*(?P\d+)\s*(?P\d+)\s*(?P\d+)\s*\|\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") pythia_xsec_re = re.compile("Inclusive cross section\s*:\s*(?P[\d\.e\-\+]+)\s*(?P[\d\.e\-\+]+)") sigma_m, Nacc, Ntry = None, None, None - for line in misc.BackRead(log_file_path): + for line in misc.BackRead(log_file_path): info = pythiare.search(line) if not info: # Also try to obtain the cross-section and error from the final xsec line of pythia8 log @@ -5058,7 +5058,7 @@ def parse_PY8_log_file(self, log_file_path): raise self.InvalidCmd("Could not find cross-section and event number information "+\ "in Pythia8 log\n '%s'."%log_file_path) - + def extract_cross_sections_from_DJR(self,djr_output): """Extract cross-sections from a djr XML output.""" import xml.dom.minidom as minidom @@ -5075,11 +5075,11 @@ def extract_cross_sections_from_DJR(self,djr_output): [float(xsec.childNodes[0].data.split()[0]), float(xsec.childNodes[0].data.split()[1])]) for xsec in xsections) - + def do_pythia(self, line): """launch pythia""" - - + + # Check argument's validity args = self.split_arg(line) if '--no_default' in args: @@ -5089,12 +5089,12 @@ def do_pythia(self, line): args.remove('--no_default') else: no_default = False - + if not self.run_name: self.check_pythia(args) self.configure_directory(html_opening =False) else: - # initialize / remove lhapdf mode + # initialize / remove lhapdf mode self.configure_directory(html_opening =False) self.check_pythia(args) @@ -5102,7 +5102,7 @@ def do_pythia(self, line): logger.error('pythia-pgs require event_norm to be on sum. 
Do not run pythia6') return - # the args are modify and the last arg is always the mode + # the args are modify and the last arg is always the mode if not no_default: self.ask_pythia_run_configuration(args[-1]) if self.options['automatic_html_opening']: @@ -5114,35 +5114,35 @@ def do_pythia(self, line): self.banner = banner_mod.recover_banner(self.results, 'pythia') pythia_src = pjoin(self.options['pythia-pgs_path'],'src') - + self.results.add_detail('run_mode', 'madevent') self.update_status('Running Pythia', 'pythia') try: os.remove(pjoin(self.me_dir,'Events','pythia.done')) except Exception: - pass - + pass + ## LAUNCHING PYTHIA # check that LHAPATH is define. if not re.search(r'^\s*LHAPATH=%s/PDFsets' % pythia_src, - open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), + open(pjoin(self.me_dir,'Cards','pythia_card.dat')).read(), re.M): f = open(pjoin(self.me_dir,'Cards','pythia_card.dat'),'a') f.write('\n LHAPATH=%s/PDFsets' % pythia_src) f.close() tag = self.run_tag pythia_log = pjoin(self.me_dir, 'Events', self.run_name , '%s_pythia.log' % tag) - #self.cluster.launch_and_wait('../bin/internal/run_pythia', + #self.cluster.launch_and_wait('../bin/internal/run_pythia', # argument= [pythia_src], stdout= pythia_log, # stderr=subprocess.STDOUT, # cwd=pjoin(self.me_dir,'Events')) output_files = ['pythia_events.hep'] if self.run_card['use_syst']: output_files.append('syst.dat') - if self.run_card['ickkw'] == 1: + if self.run_card['ickkw'] == 1: output_files += ['beforeveto.tree', 'xsecs.tree', 'events.tree'] - + os.environ['PDG_MASS_TBL'] = pjoin(pythia_src,'mass_width_2004.mc') self.cluster.launch_and_wait(pjoin(pythia_src, 'pythia'), input_files=[pjoin(self.me_dir, "Events", "unweighted_events.lhe"), @@ -5152,23 +5152,23 @@ def do_pythia(self, line): stdout= pythia_log, stderr=subprocess.STDOUT, cwd=pjoin(self.me_dir,'Events')) - + os.remove(pjoin(self.me_dir, "Events", "unweighted_events.lhe")) if not 
os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): logger.warning('Fail to produce pythia output. More info in \n %s' % pythia_log) return - + self.to_store.append('pythia') - + # Find the matched cross-section if int(self.run_card['ickkw']): # read the line from the bottom of the file - #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, + #pythia_log = misc.BackRead(pjoin(self.me_dir,'Events', self.run_name, # '%s_pythia.log' % tag)) - pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") - for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, + pythiare = re.compile("\s*I\s+0 All included subprocesses\s+I\s+(?P\d+)\s+(?P\d+)\s+I\s+(?P[\d\.D\-+]+)\s+I") + for line in misc.reverse_readline(pjoin(self.me_dir,'Events', self.run_name, '%s_pythia.log' % tag)): info = pythiare.search(line) if not info: @@ -5188,16 +5188,16 @@ def do_pythia(self, line): self.results.add_detail('nb_event_pythia', Nacc) #compute pythia error error = self.results[self.run_name].return_tag(self.run_tag)['error'] - if Nacc: + if Nacc: error_m = math.sqrt((error * Nacc/Ntry)**2 + sigma_m**2 *(1-Nacc/Ntry)/Nacc) else: error_m = 10000 * sigma_m # works both for fixed number of generated events and fixed accepted events self.results.add_detail('error_pythia', error_m) - break + break #pythia_log.close() - + pydir = pjoin(self.options['pythia-pgs_path'], 'src') eradir = self.options['exrootanalysis_path'] madir = self.options['madanalysis_path'] @@ -5216,12 +5216,12 @@ def do_pythia(self, line): # Creating LHE file self.run_hep2lhe(banner_path) - + if int(self.run_card['ickkw']): misc.gzip(pjoin(self.me_dir,'Events','beforeveto.tree'), - stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + stdout=pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_beforeveto.tree.gz')) + - if self.run_card['use_syst'] in self.true: # Calculate syscalc info based on 
syst.dat try: @@ -5233,7 +5233,7 @@ def do_pythia(self, line): # Store syst.dat misc.gzip(pjoin(self.me_dir,'Events', 'syst.dat'), stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_syst.dat.gz')) - + # Store syscalc.dat if os.path.exists(pjoin(self.me_dir, 'Events', 'syscalc.dat')): filename = pjoin(self.me_dir, 'Events' ,self.run_name, @@ -5253,7 +5253,7 @@ def do_pythia(self, line): if self.options['delphes_path']: self.exec_cmd('delphes --no_default', postcmd=False, printcmd=False) self.print_results_in_shell(self.results.current) - + ################################################################################ def do_remove(self, line): @@ -5263,8 +5263,8 @@ def do_remove(self, line): run, tag, mode = self.check_remove(args) if 'banner' in mode: mode.append('all') - - + + if run == 'all': # Check first if they are not a run with a name run. if os.path.exists(pjoin(self.me_dir, 'Events', 'all')): @@ -5280,7 +5280,7 @@ def do_remove(self, line): logger.info(error) pass # run already clear return - + # Check that run exists if not os.path.exists(pjoin(self.me_dir, 'Events', run)): raise self.InvalidCmd('No run \'%s\' detected' % run) @@ -5294,7 +5294,7 @@ def do_remove(self, line): # Found the file to delete - + to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) to_delete += misc.glob('*', pjoin(self.me_dir, 'HTML', run)) # forbid the banner to be removed @@ -5314,7 +5314,7 @@ def do_remove(self, line): if os.path.exists(pjoin(self.me_dir, 'Events', run, 'unweighted_events.lhe.gz')): to_delete.append('unweighted_events.lhe.gz') if os.path.exists(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')): - to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) + to_delete.append(pjoin(self.me_dir, 'HTML', run,'plots_parton.html')) if nb_rm != len(to_delete): logger.warning('Be carefull that partonic information are on the point to be removed.') if 'all' in mode: @@ -5327,8 +5327,8 @@ def do_remove(self, line): if 'delphes' not 
in mode: to_delete = [f for f in to_delete if 'delphes' not in f] if 'parton' not in mode: - to_delete = [f for f in to_delete if 'delphes' in f - or 'pgs' in f + to_delete = [f for f in to_delete if 'delphes' in f + or 'pgs' in f or 'pythia' in f] if not self.force and len(to_delete): question = 'Do you want to delete the following files?\n %s' % \ @@ -5336,7 +5336,7 @@ def do_remove(self, line): ans = self.ask(question, 'y', choices=['y','n']) else: ans = 'y' - + if ans == 'y': for file2rm in to_delete: if os.path.exists(pjoin(self.me_dir, 'Events', run, file2rm)): @@ -5374,7 +5374,7 @@ def do_remove(self, line): if ans == 'y': for file2rm in to_delete: os.remove(file2rm) - + if 'banner' in mode: to_delete = misc.glob('*', pjoin(self.me_dir, 'Events', run)) if tag: @@ -5389,8 +5389,8 @@ def do_remove(self, line): return elif any(['banner' not in os.path.basename(p) for p in to_delete]): if to_delete: - raise MadGraph5Error('''Some output still exists for this run. - Please remove those output first. Do for example: + raise MadGraph5Error('''Some output still exists for this run. + Please remove those output first. Do for example: remove %s all banner ''' % run) else: @@ -5400,7 +5400,7 @@ def do_remove(self, line): return else: logger.info('''The banner is not removed. In order to remove it run: - remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) + remove %s all banner %s''' % (run, tag and '--tag=%s ' % tag or '')) # update database. 
self.results.clean(mode, run, tag) @@ -5420,7 +5420,7 @@ def do_plot(self, line): logger.info('plot for run %s' % self.run_name) if not self.force: self.ask_edit_cards(['plot_card.dat'], args, plot=True) - + if any([arg in ['all','parton'] for arg in args]): filename = pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe') if os.path.exists(filename+'.gz'): @@ -5438,8 +5438,8 @@ def do_plot(self, line): except Exception: pass else: - logger.info('No valid files for partonic plot') - + logger.info('No valid files for partonic plot') + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_events.lhe' % self.run_tag) @@ -5452,10 +5452,10 @@ def do_plot(self, line): stdout= "%s.gz" % filename) else: logger.info('No valid files for pythia plot') - - + + if any([arg in ['all','pgs'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) @@ -5464,15 +5464,15 @@ def do_plot(self, line): misc.gzip(filename) else: logger.info('No valid files for pgs plot') - + if any([arg in ['all','delphes'] for arg in args]): - filename = pjoin(self.me_dir, 'Events', self.run_name, + filename = pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % self.run_tag) if os.path.exists(filename+'.gz'): misc.gunzip("%s.gz" % filename) if os.path.exists(filename): self.create_plot('Delphes') - misc.gzip(filename) + misc.gzip(filename) else: logger.info('No valid files for delphes plot') @@ -5488,9 +5488,9 @@ def do_syscalc(self, line): if self.ninitial == 1: logger.error('SysCalc can\'t be run for decay processes') return - + logger.info('Calculating systematics for run %s' % self.run_name) - + self.ask_edit_cards(['run_card.dat'], args, plot=False) self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat')) 
if any([arg in ['all','parton'] for arg in args]): @@ -5504,7 +5504,7 @@ def do_syscalc(self, line): stdout="%s.gz" % filename) else: logger.info('No valid files for parton level systematics run.') - + if any([arg in ['all','pythia'] for arg in args]): filename = pjoin(self.me_dir, 'Events' ,self.run_name, '%s_pythia_syst.dat' % self.run_tag) @@ -5525,17 +5525,17 @@ def do_syscalc(self, line): else: logger.info('No valid files for pythia level') - + def store_result(self): - """ tar the pythia results. This is done when we are quite sure that + """ tar the pythia results. This is done when we are quite sure that the pythia output will not be use anymore """ if not self.run_name: return - + if not self.to_store: - return - + return + tag = self.run_card['run_tag'] self.update_status('storing files of previous run', level=None,\ error=True) @@ -5546,14 +5546,14 @@ def store_result(self): misc.gzip(pjoin(self.me_dir,'Events',self.run_name,"unweighted_events.lhe")) if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) - + if 'pythia' in self.to_store: self.update_status('Storing Pythia files of previous run', level='pythia', error=True) p = pjoin(self.me_dir,'Events') n = self.run_name t = tag self.to_store.remove('pythia') - misc.gzip(pjoin(p,'pythia_events.hep'), + misc.gzip(pjoin(p,'pythia_events.hep'), stdout=pjoin(p, str(n),'%s_pythia_events.hep' % t),forceexternal=True) if 'pythia8' in self.to_store: @@ -5581,26 +5581,26 @@ def store_result(self): os.system("mv " + file_path + hepmc_fileformat + " " + move_hepmc_path) self.update_status('Done', level='pythia',makehtml=False,error=True) - self.results.save() - + self.results.save() + self.to_store = [] - def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, + def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, run_type='', mode=None, **opt): """ """ argument = [str(arg) for arg in argument] if mode is 
None: mode = self.cluster_mode - + # ensure that exe is executable if os.path.exists(exe) and not os.access(exe, os.X_OK): os.system('chmod +x %s ' % exe) elif (cwd and os.path.exists(pjoin(cwd, exe))) and not \ os.access(pjoin(cwd, exe), os.X_OK): os.system('chmod +x %s ' % pjoin(cwd, exe)) - + if mode == 0: - self.update_status((remaining, 1, + self.update_status((remaining, 1, self.total_jobs - remaining -1, run_type), level=None, force=False) start = time.time() #os.system('cd %s; ./%s' % (cwd,exe)) @@ -5613,24 +5613,24 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, elif mode in [1,2]: exename = os.path.basename(exe) # For condor cluster, create the input/output files - if 'ajob' in exename: + if 'ajob' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat','dname.mg', pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) - + output_files = [] required_output = [] - + #Find the correct PDF input file input_files.append(self.get_pdf_input_filename()) - + #Find the correct ajob Gre = re.compile("\s*j=(G[\d\.\w]+)") origre = re.compile("grid_directory=(G[\d\.\w]+)") - try : + try : fsock = open(exe) except Exception: fsock = open(pjoin(cwd,exe)) @@ -5648,21 +5648,21 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if os.path.isdir(pjoin(cwd,G)): input_files.append(G) required_output.append('%s/results.dat' % G) - + if origre.search(text): G_grid = origre.search(text).groups()[0] input_files.append(pjoin(G_grid, 'ftn26')) - + #submitting - self.cluster.submit2(exe, stdout=stdout, cwd=cwd, + self.cluster.submit2(exe, stdout=stdout, cwd=cwd, input_files=input_files, output_files=output_files, required_output=required_output) elif 
'survey' in exename: input_files = ['madevent','input_app.txt','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - if os.path.exists(pjoin(self.me_dir,'SubProcesses', + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5671,7 +5671,7 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [] required_output = [] - + #Find the correct ajob suffix = "_%s" % int(float(argument[0])) if suffix == '_0': @@ -5685,12 +5685,12 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, if '.' in argument[0]: offset = int(str(argument[0]).split('.')[1]) else: - offset = 0 - + offset = 0 + if offset ==0 or offset == int(float(argument[0])): if os.path.exists(pjoin(cwd, G, 'input_app.txt')): os.remove(pjoin(cwd, G, 'input_app.txt')) - + if os.path.exists(os.path.realpath(pjoin(cwd, G, 'ftn25'))): if offset == 0 or offset == int(float(argument[0])): os.remove(pjoin(cwd, G, 'ftn25')) @@ -5706,16 +5706,16 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, pass #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, required_output=required_output, **opt) elif "refine_splitted.sh" in exename: input_files = ['madevent','symfact.dat','iproc.dat', 'dname.mg', - pjoin(self.me_dir, 'SubProcesses','randinit')] - + pjoin(self.me_dir, 'SubProcesses','randinit')] + if os.path.exists(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) and cluster.need_transfer(self.options): - 
input_files.append(pjoin(self.me_dir,'SubProcesses', + input_files.append(pjoin(self.me_dir,'SubProcesses', 'MadLoop5_resources.tar.gz')) #Find the correct PDF input file @@ -5725,25 +5725,25 @@ def launch_job(self,exe, cwd=None, stdout=None, argument = [], remaining=0, output_files = [argument[0]] required_output = [] for G in output_files: - required_output.append('%s/results.dat' % G) + required_output.append('%s/results.dat' % G) input_files.append(pjoin(argument[1], "input_app.txt")) input_files.append(pjoin(argument[1], "ftn26")) - + #submitting - self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, + self.cluster.cluster_submit(exe, stdout=stdout, cwd=cwd, argument=argument, input_files=input_files, output_files=output_files, - required_output=required_output, **opt) + required_output=required_output, **opt) + + - - else: self.cluster.submit(exe, argument=argument, stdout=stdout, cwd=cwd, **opt) - + ############################################################################ def find_madevent_mode(self): """Find if Madevent is in Group mode or not""" - + # The strategy is too look in the files Source/run_configs.inc # if we found: ChanPerJob=3 then it's a group mode. 
file_path = pjoin(self.me_dir, 'Source', 'run_config.inc') @@ -5752,11 +5752,11 @@ def find_madevent_mode(self): return 'group' else: return 'v4' - + ############################################################################ def monitor(self, run_type='monitor', mode=None, html=False): """ monitor the progress of running job """ - + starttime = time.time() if mode is None: @@ -5772,8 +5772,8 @@ def monitor(self, run_type='monitor', mode=None, html=False): else: update_status = lambda idle, run, finish: None update_first = None - try: - self.cluster.wait(self.me_dir, update_status, update_first=update_first) + try: + self.cluster.wait(self.me_dir, update_status, update_first=update_first) except Exception as error: logger.info(error) if not self.force: @@ -5788,24 +5788,24 @@ def monitor(self, run_type='monitor', mode=None, html=False): raise except KeyboardInterrupt as error: self.cluster.remove() - raise - - + raise + - ############################################################################ + + ############################################################################ def configure_directory(self, html_opening=True): - """ All action require before any type of run """ + """ All action require before any type of run """ # Basic check assert os.path.exists(pjoin(self.me_dir,'SubProcesses')) # environmental variables to be included in make_opts self.make_opts_var = {} - + #see when the last file was modified time_mod = max([os.path.getmtime(pjoin(self.me_dir,'Cards','run_card.dat')), os.path.getmtime(pjoin(self.me_dir,'Cards','param_card.dat'))]) - + if self.configured >= time_mod and hasattr(self, 'random') and hasattr(self, 'run_card'): #just ensure that cluster specific are correctly handled if self.cluster: @@ -5820,7 +5820,7 @@ def configure_directory(self, html_opening=True): #open only once the web page # Change current working directory self.launching_dir = os.getcwd() - + # Check if we need the MSSM special treatment model = self.find_model_name() 
if model == 'mssm' or model.startswith('mssm-'): @@ -5828,14 +5828,14 @@ def configure_directory(self, html_opening=True): mg5_param = pjoin(self.me_dir, 'Source', 'MODEL', 'MG5_param.dat') check_param_card.convert_to_mg5card(param_card, mg5_param) check_param_card.check_valid_param_card(mg5_param) - + # limit the number of event to 100k self.check_nb_events() # this is in order to avoid conflicts between runs with and without # lhapdf. not needed anymore the makefile handles it automaticallu #misc.compile(['clean4pdf'], cwd = pjoin(self.me_dir, 'Source')) - + self.make_opts_var['pdlabel1'] = '' self.make_opts_var['pdlabel2'] = '' if self.run_card['pdlabel1'] in ['eva', 'iww']: @@ -5866,7 +5866,7 @@ def configure_directory(self, html_opening=True): self.copy_lep_densities(self.run_card['pdlabel'], pjoin(self.me_dir, 'Source')) self.make_opts_var['pdlabel1'] = 'ee' self.make_opts_var['pdlabel2'] = 'ee' - + # set random number if self.run_card['iseed'] != 0: self.random = int(self.run_card['iseed']) @@ -5885,18 +5885,18 @@ def configure_directory(self, html_opening=True): break else: self.random = random.randint(1, 30107) - + #set random seed for python part of the code if self.run_card['python_seed'] == -2: #-2 means same as run_card import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] elif self.run_card['python_seed'] >= 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] if self.run_card['ickkw'] == 2: logger.info('Running with CKKW matching') self.treat_ckkw_matching() @@ -5905,12 +5905,12 @@ def configure_directory(self, html_opening=True): self.update_make_opts(self.run_card) # reset 
list of Gdirectory self.Gdirs = None - + # create param_card.inc and run_card.inc self.do_treatcards('') - + logger.info("compile Source Directory") - + # Compile for name in [ 'all']:#, '../bin/internal/combine_events']: self.compile(arg=[name], cwd=os.path.join(self.me_dir, 'Source')) @@ -5933,7 +5933,7 @@ def configure_directory(self, html_opening=True): os.remove(pjoin(self.me_dir, 'lib','libbias.a')) force_subproc_clean = True - + # Finally compile the bias module as well if self.run_card['bias_module'] not in ['dummy',None]: logger.debug("Compiling the bias module '%s'"%bias_name) @@ -5945,7 +5945,7 @@ def configure_directory(self, html_opening=True): 'INVALID' in str(bias_module_valid).upper(): raise InvalidCmd("The bias module '%s' cannot be used because of:\n%s"% (bias_name,bias_module_valid)) - + self.compile(arg=[], cwd=os.path.join(self.me_dir, 'Source','BIAS',bias_name)) self.proc_characteristics['bias_module']=bias_name # Update the proc_characterstics file @@ -5954,7 +5954,7 @@ def configure_directory(self, html_opening=True): if force_subproc_clean: # Make sure that madevent will be recompiled - subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', + subproc = [l.strip() for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] for nb_proc,subdir in enumerate(subproc): Pdir = pjoin(self.me_dir, 'SubProcesses',subdir.strip()) @@ -5971,20 +5971,20 @@ def configure_directory(self, html_opening=True): ############################################################################ @staticmethod def check_dir(path, default=''): - """check if the directory exists. if so return the path otherwise the + """check if the directory exists. 
if so return the path otherwise the default""" - + if os.path.isdir(path): return path else: return default - + ############################################################################ def get_Gdir(self, Pdir=None, symfact=None): """get the list of Gdirectory if not yet saved.""" - + if hasattr(self, "Gdirs") and self.Gdirs: if self.me_dir in self.Gdirs[0]: if Pdir is None: @@ -6000,8 +6000,8 @@ def get_Gdir(self, Pdir=None, symfact=None): Pdirs = self.get_Pdir() - Gdirs = {self.me_dir:[]} - mfactors = {} + Gdirs = {self.me_dir:[]} + mfactors = {} for P in Pdirs: Gdirs[P] = [] #for the next line do not use P, since in readonly mode it might not have symfact @@ -6012,7 +6012,7 @@ def get_Gdir(self, Pdir=None, symfact=None): mfactors[pjoin(P, "G%s" % tag)] = mfactor self.Gdirs = (Gdirs, mfactors) return self.get_Gdir(Pdir, symfact=symfact) - + ############################################################################ def set_run_name(self, name, tag=None, level='parton', reload_card=False, allow_new_tag=True): @@ -6030,8 +6030,8 @@ def get_last_tag(self, level): tagRun = self.results[self.run_name][i] if tagRun.pythia or tagRun.shower or tagRun.pythia8 : return tagRun['tag'] - - + + # when are we force to change the tag new_run:previous run requiring changes upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','madanalysis5_hadron','madanalysis5_parton', 'rivet'], 'pythia': ['pythia','pgs','delphes','madanalysis5_hadron'], @@ -6044,7 +6044,7 @@ def get_last_tag(self, level): 'syscalc':[], 'rivet':['rivet']} - if name == self.run_name: + if name == self.run_name: if reload_card: run_card = pjoin(self.me_dir, 'Cards','run_card.dat') self.run_card = banner_mod.RunCard(run_card) @@ -6064,13 +6064,13 @@ def get_last_tag(self, level): break return get_last_tag(self, level) - + # save/clean previous run if self.run_name: self.store_result() # store new name self.run_name = name - + new_tag = False # First call for this run -> set the banner self.banner = 
banner_mod.recover_banner(self.results, level, name) @@ -6079,8 +6079,8 @@ def get_last_tag(self, level): else: # Read run_card run_card = pjoin(self.me_dir, 'Cards','run_card.dat') - self.run_card = banner_mod.RunCard(run_card) - + self.run_card = banner_mod.RunCard(run_card) + if tag: self.run_card['run_tag'] = tag new_tag = True @@ -6093,7 +6093,7 @@ def get_last_tag(self, level): self.results.update('add run %s' % name, 'all', makehtml=False) else: for tag in upgrade_tag[level]: - + if getattr(self.results[self.run_name][-1], tag): # LEVEL is already define in the last tag -> need to switch tag tag = self.get_available_tag() @@ -6103,8 +6103,8 @@ def get_last_tag(self, level): if not new_tag: # We can add the results to the current run tag = self.results[self.run_name][-1]['tag'] - self.run_card['run_tag'] = tag # ensure that run_tag is correct - + self.run_card['run_tag'] = tag # ensure that run_tag is correct + if allow_new_tag and (name in self.results and not new_tag): self.results.def_current(self.run_name) else: @@ -6113,15 +6113,15 @@ def get_last_tag(self, level): self.run_tag = self.run_card['run_tag'] return get_last_tag(self, level) - - - + + + ############################################################################ def check_nb_events(self): - """Find the number of event in the run_card, and check that this is not + """Find the number of event in the run_card, and check that this is not too large""" - + nb_event = int(self.run_card['nevents']) if nb_event > 1000000: logger.warning("Attempting to generate more than 1M events") @@ -6133,20 +6133,20 @@ def check_nb_events(self): return - - ############################################################################ + + ############################################################################ def update_random(self): """ change random number""" - + self.random += 3 if self.random > 30081*30081: # can't use too big random number raise MadGraph5Error('Random seed too large ' + str(self.random) + ' 
> 30081*30081') - if self.run_card['python_seed'] == -2: + if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.random) + random.seed(self.random) random.mg_seedset = self.random - + ############################################################################ def save_random(self): """save random number in appropirate file""" @@ -6155,14 +6155,14 @@ def save_random(self): fsock.writelines('r=%s\n' % self.random) def do_quit(self, *args, **opts): - + return common_run.CommonRunCmd.do_quit(self, *args, **opts) #return CmdExtended.do_quit(self, *args, **opts) - + ############################################################################ def treat_CKKW_matching(self): """check for ckkw""" - + lpp1 = self.run_card['lpp1'] lpp2 = self.run_card['lpp2'] e1 = self.run_card['ebeam1'] @@ -6170,19 +6170,19 @@ def treat_CKKW_matching(self): pd = self.run_card['pdlabel'] lha = self.run_card['lhaid'] xq = self.run_card['xqcut'] - translation = {'e1': e1, 'e2':e2, 'pd':pd, + translation = {'e1': e1, 'e2':e2, 'pd':pd, 'lha':lha, 'xq':xq} if lpp1 or lpp2: - # Remove ':s from pd + # Remove ':s from pd if pd.startswith("'"): pd = pd[1:] if pd.endswith("'"): - pd = pd[:-1] + pd = pd[:-1] if xq >2 or xq ==2: xq = 2 - + # find data file if pd == "lhapdf": issudfile = 'lib/issudgrid-%(e1)s-%(e2)s-%(pd)s-%(lha)s-%(xq)s.dat.gz' @@ -6192,9 +6192,9 @@ def treat_CKKW_matching(self): issudfile = pjoin(self.webbin, issudfile % translation) else: issudfile = pjoin(self.me_dir, issudfile % translation) - + logger.info('Sudakov grid file: %s' % issudfile) - + # check that filepath exists if os.path.exists(issudfile): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') @@ -6203,20 +6203,20 @@ def treat_CKKW_matching(self): msg = 'No sudakov grid file for parameter choice. Start to generate it. 
This might take a while' logger.info(msg) self.update_status('GENERATE SUDAKOV GRID', level='parton') - + for i in range(-2,6): - self.cluster.submit('%s/gensudgrid ' % self.dirbin, + self.cluster.submit('%s/gensudgrid ' % self.dirbin, argument = ['%d'%i], - cwd=self.me_dir, + cwd=self.me_dir, stdout=open(pjoin(self.me_dir, 'gensudgrid%s.log' % i),'w')) self.monitor() for i in range(-2,6): path = pjoin(self.me_dir, 'lib', 'issudgrid.dat') os.system('cat %s/gensudgrid%s.log >> %s' % (self.me_dir, path)) misc.gzip(path, stdout=issudfile) - + ############################################################################ - def create_root_file(self, input='unweighted_events.lhe', + def create_root_file(self, input='unweighted_events.lhe', output='unweighted_events.root' ): """create the LHE root file """ self.update_status('Creating root files', level='parton') @@ -6233,14 +6233,14 @@ def create_root_file(self, input='unweighted_events.lhe', totar = False torm = True input = input[:-3] - + try: - misc.call(['%s/ExRootLHEFConverter' % eradir, + misc.call(['%s/ExRootLHEFConverter' % eradir, input, output], cwd=pjoin(self.me_dir, 'Events')) except Exception: logger.warning('fail to produce Root output [problem with ExRootAnalysis]') - + if totar: if os.path.exists('%s.gz' % input): try: @@ -6251,13 +6251,13 @@ def create_root_file(self, input='unweighted_events.lhe', misc.gzip(input) if torm: os.remove(input) - + def run_syscalc(self, mode='parton', event_path=None, output=None): - """create the syscalc output""" + """create the syscalc output""" if self.run_card['use_syst'] not in self.true: return - + scdir = self.options['syscalc_path'] if not scdir or not os.path.exists(scdir): return @@ -6265,12 +6265,12 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): if self.run_card['event_norm'] != 'sum': logger.critical('SysCalc works only when event_norm is on \'sum\'.') return - logger.info('running SysCalc on mode %s' % mode) - + logger.info('running 
SysCalc on mode %s' % mode) + # Restore the old default for SysCalc+PY6 if self.run_card['sys_matchscale']=='auto': self.run_card['sys_matchscale'] = "30 50" - + # Check that all pdfset are correctly installed lhaid = [self.run_card.get_lhapdf_id()] if '&&' in self.run_card['sys_pdf']: @@ -6285,20 +6285,20 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.debug(str(error)) logger.warning('Systematic computation requires lhapdf to run. Bypass SysCalc') return - + # Copy all the relevant PDF sets [self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid] - + to_syscalc={'sys_scalefact': self.run_card['sys_scalefact'], 'sys_alpsfact': self.run_card['sys_alpsfact'], 'sys_matchscale': self.run_card['sys_matchscale'], 'sys_scalecorrelation': self.run_card['sys_scalecorrelation'], 'sys_pdf': self.run_card['sys_pdf']} - - tag = self.run_card['run_tag'] + + tag = self.run_card['run_tag'] card = pjoin(self.me_dir, 'bin','internal', 'syscalc_card.dat') template = open(pjoin(self.me_dir, 'bin','internal', 'syscalc_template.dat')).read() - + if '&&' in to_syscalc['sys_pdf']: to_syscalc['sys_pdf'] = to_syscalc['sys_pdf'].split('#',1)[0].replace('&&',' \n ') else: @@ -6311,8 +6311,8 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): new.append(d) else: new[-1] += ' %s' % d - to_syscalc['sys_pdf'] = '\n'.join(new) - + to_syscalc['sys_pdf'] = '\n'.join(new) + if to_syscalc['sys_pdf'].lower() in ['', 'f', 'false', 'none', '.false.']: to_syscalc['sys_pdf'] = '' if to_syscalc['sys_alpsfact'].lower() in ['', 'f', 'false', 'none','.false.']: @@ -6320,17 +6320,17 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): - + # check if the scalecorrelation parameter is define: if not 'sys_scalecorrelation' in self.run_card: self.run_card['sys_scalecorrelation'] = -1 open(card,'w').write(template % self.run_card) - + if not os.path.exists(card): return False - - + + event_dir = pjoin(self.me_dir, 'Events') if not 
event_path: @@ -6353,19 +6353,19 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): raise SysCalcError('qcut value for sys_matchscale lower than qcut in pythia_card. Bypass syscalc') if float(value) < xqcut: raise SysCalcError('qcut value for sys_matchscale lower than xqcut in run_card. Bypass syscalc') - - + + event_path = pjoin(event_dir,'syst.dat') output = pjoin(event_dir, 'syscalc.dat') else: raise self.InvalidCmd('Invalid mode %s' % mode) - + if not os.path.exists(event_path): if os.path.exists(event_path+'.gz'): misc.gunzip(event_path+'.gz') else: raise SysCalcError('Events file %s does not exits' % event_path) - + self.update_status('Calculating systematics for %s level' % mode, level = mode.lower()) try: proc = misc.call([os.path.join(scdir, 'sys_calc'), @@ -6374,7 +6374,7 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): stderr = subprocess.STDOUT, cwd=event_dir) # Wait 5 s to make sure file is finished writing - time.sleep(5) + time.sleep(5) except OSError as error: logger.error('fail to run syscalc: %s. Please check that SysCalc is correctly installed.' % error) else: @@ -6382,11 +6382,11 @@ def run_syscalc(self, mode='parton', event_path=None, output=None): logger.warning('SysCalc Failed. Please read the associate log to see the reason. 
Did you install the associate PDF set?') elif mode == 'parton': files.mv(output, event_path) - + self.update_status('End syscalc for %s level' % mode, level = mode.lower(), makehtml=False) - - return True + + return True action_switcher = AskRun @@ -6399,23 +6399,23 @@ def ask_run_configuration(self, mode=None, args=[]): passing_cmd.append('reweight=ON') if '-M' in args or '--madspin' in args: passing_cmd.append('madspin=ON') - + switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, mode=mode, line_args=args, force=self.force, first_cmd=passing_cmd, return_instance=True) # - self.switch = switch # store the value of the switch for plugin purpose + self.switch = switch # store the value of the switch for plugin purpose if 'dynamical' in switch: mode = 'auto' - + # Now that we know in which mode we are check that all the card #exists (copy default if needed) - + cards = ['param_card.dat', 'run_card.dat'] if switch['shower'] == 'Pythia6': cards.append('pythia_card.dat') if switch['shower'] == 'Pythia8': - cards.append('pythia8_card.dat') + cards.append('pythia8_card.dat') if switch['detector'] in ['PGS','DELPHES+PGS']: cards.append('pgs_card.dat') if switch['detector'] in ['Delphes', 'DELPHES+PGS']: @@ -6438,29 +6438,29 @@ def ask_run_configuration(self, mode=None, args=[]): cards.append('rivet_card.dat') self.keep_cards(cards) - + first_cmd = cmd_switch.get_cardcmd() - + if os.path.isfile(pjoin(self.me_dir,'Cards','MadLoopParams.dat')): cards.append('MadLoopParams.dat') - + if self.force: self.check_param_card(pjoin(self.me_dir,'Cards','param_card.dat' )) return switch - + if 'dynamical' in switch and switch['dynamical']: self.ask_edit_cards(cards, plot=False, mode='auto', first_cmd=first_cmd) else: self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) return switch - + ############################################################################ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None): """Ask the 
question when launching pythia""" - + pythia_suffix = '' if pythia_version==6 else '%d'%pythia_version - + available_mode = ['0', '1'] if pythia_version==6: available_mode.append('2') @@ -6485,10 +6485,10 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = self.ask(question, '0', options) elif not mode: mode = 'auto' - + if mode.isdigit(): mode = name[mode] - + auto = False if mode == 'auto': auto = True @@ -6497,7 +6497,7 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) mode = 'pgs' elif os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')): mode = 'delphes' - else: + else: mode = 'pythia%s'%pythia_suffix logger.info('Will run in mode %s' % mode) # Now that we know in which mode we are check that all the card @@ -6513,15 +6513,15 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) cards.append('delphes_trigger.dat') self.keep_cards(cards, ignore=['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat', 'plot_card.dat']) - + if self.force: return mode - + if not banner: banner = self.banner - + if auto: - self.ask_edit_cards(cards, from_banner=['param', 'run'], + self.ask_edit_cards(cards, from_banner=['param', 'run'], mode='auto', plot=(pythia_version==6), banner=banner ) else: @@ -6529,12 +6529,12 @@ def ask_pythia_run_configuration(self, mode=None, pythia_version=6, banner=None) plot=(pythia_version==6), banner=banner) return mode - + #=============================================================================== # MadEventCmd #=============================================================================== class MadEventCmdShell(MadEventCmd, cmd.CmdShell): - """The command line processor of MadGraph""" + """The command line processor of MadGraph""" @@ -6548,11 +6548,11 @@ class SubProcesses(object): @classmethod def clean(cls): cls.name_to_pdg = {} - + @staticmethod def get_subP(me_dir): """return the list of Subprocesses""" - + out = [] for 
line in open(pjoin(me_dir,'SubProcesses', 'subproc.mg')): if not line: @@ -6560,9 +6560,9 @@ def get_subP(me_dir): name = line.strip() if os.path.exists(pjoin(me_dir, 'SubProcesses', name)): out.append(pjoin(me_dir, 'SubProcesses', name)) - + return out - + @staticmethod @@ -6623,9 +6623,9 @@ def get_subP_ids(path): particles = re.search("/([\d,-]+)/", line) all_ids.append([int(p) for p in particles.group(1).split(',')]) return all_ids - - -#=============================================================================== + + +#=============================================================================== class GridPackCmd(MadEventCmd): """The command for the gridpack --Those are not suppose to be use interactively--""" @@ -6639,7 +6639,7 @@ def __init__(self, me_dir = None, nb_event=0, seed=0, gran=-1, *completekey, **s self.random = seed self.random_orig = self.random self.granularity = gran - + self.options['automatic_html_opening'] = False #write the grid_card.dat on disk self.nb_event = int(nb_event) @@ -6680,7 +6680,7 @@ def write_RunWeb(self, me_dir): def write_gridcard(self, nb_event, seed, gran): """write the grid_card.dat file at appropriate location""" - + # first try to write grid_card within the gridpack. 
print("WRITE GRIDCARD", self.me_dir) if self.readonly: @@ -6689,35 +6689,35 @@ def write_gridcard(self, nb_event, seed, gran): fsock = open('grid_card.dat','w') else: fsock = open(pjoin(self.me_dir, 'Cards', 'grid_card.dat'),'w') - + gridpackcard = banner_mod.GridpackCard() gridpackcard['GridRun'] = True gridpackcard['gevents'] = nb_event gridpackcard['gseed'] = seed gridpackcard['ngran'] = gran - + gridpackcard.write(fsock) ############################################################################ def get_Pdir(self): """get the list of Pdirectory if not yet saved.""" - + if hasattr(self, "Pdirs"): if self.me_dir in self.Pdirs[0]: return self.Pdirs - + if not self.readonly: - self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) + self.Pdirs = [pjoin(self.me_dir, 'SubProcesses', l.strip()) for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] else: - self.Pdirs = [l.strip() - for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] - + self.Pdirs = [l.strip() + for l in open(pjoin(self.me_dir,'SubProcesses', 'subproc.mg'))] + return self.Pdirs - + def prepare_local_dir(self): """create the P directory structure in the local directory""" - + if not self.readonly: os.chdir(self.me_dir) else: @@ -6726,7 +6726,7 @@ def prepare_local_dir(self): os.mkdir(p) files.cp(pjoin(self.me_dir,'SubProcesses',p,'symfact.dat'), pjoin(p, 'symfact.dat')) - + def launch(self, nb_event, seed): """ launch the generation for the grid """ @@ -6742,13 +6742,13 @@ def launch(self, nb_event, seed): if self.run_card['python_seed'] == -2: import random if not hasattr(random, 'mg_seedset'): - random.seed(seed) + random.seed(seed) random.mg_seedset = seed elif self.run_card['python_seed'] > 0: import random if not hasattr(random, 'mg_seedset'): - random.seed(self.run_card['python_seed']) - random.mg_seedset = self.run_card['python_seed'] + random.seed(self.run_card['python_seed']) + random.mg_seedset = self.run_card['python_seed'] # 2) Run the refine for the grid 
self.update_status('Generating Events', level=None) #misc.call([pjoin(self.me_dir,'bin','refine4grid'), @@ -6767,70 +6767,70 @@ def launch(self, nb_event, seed): self.exec_cmd('decay_events -from_cards', postcmd=False) elif self.run_card['use_syst'] and self.run_card['systematics_program'] == 'systematics': self.options['nb_core'] = 1 - self.exec_cmd('systematics %s --from_card' % + self.exec_cmd('systematics %s --from_card' % pjoin('Events', self.run_name, 'unweighted_events.lhe.gz'), postcmd=False,printcmd=False) - + def refine4grid(self, nb_event): """Special refine for gridpack run.""" self.nb_refine += 1 - + precision = nb_event self.opts = dict([(key,value[1]) for (key,value) in \ self._survey_options.items()]) - + # initialize / remove lhapdf mode # self.configure_directory() # All this has been done before self.cluster_mode = 0 # force single machine # Store seed in randinit file, to be read by ranmar.f self.save_random() - + self.update_status('Refine results to %s' % precision, level=None) logger.info("Using random number seed offset = %s" % self.random) refine_opt = {'err_goal': nb_event, 'split_channels': False, - 'ngran':self.granularity, 'readonly': self.readonly} + 'ngran':self.granularity, 'readonly': self.readonly} x_improve = gen_ximprove.gen_ximprove_gridpack(self, refine_opt) x_improve.launch() # create the ajob for the refinment and run those! - self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack - - + self.gscalefact = x_improve.gscalefact #store jacobian associate to the gridpack + + #bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) #print 'run combine!!!' 
#combine_runs.CombineRuns(self.me_dir) - + return #update html output Presults = sum_html.collect_result(self) cross, error = Presults.xsec, Presults.xerru self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + #self.update_status('finish refine', 'parton', makehtml=False) #devnull.close() - - - + + + return self.total_jobs = 0 - subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if + subproc = [P for P in os.listdir(pjoin(self.me_dir,'SubProcesses')) if P.startswith('P') and os.path.isdir(pjoin(self.me_dir,'SubProcesses', P))] devnull = open(os.devnull, 'w') for nb_proc,subdir in enumerate(subproc): subdir = subdir.strip() Pdir = pjoin(self.me_dir, 'SubProcesses',subdir) bindir = pjoin(os.path.relpath(self.dirbin, Pdir)) - + logger.info(' %s ' % subdir) # clean previous run for match in misc.glob('*ajob*', Pdir): if os.path.basename(match)[:4] in ['ajob', 'wait', 'run.', 'done']: os.remove(pjoin(Pdir, match)) - + logfile = pjoin(Pdir, 'gen_ximprove.log') misc.call([pjoin(bindir, 'gen_ximprove')], @@ -6840,40 +6840,40 @@ def refine4grid(self, nb_event): if os.path.exists(pjoin(Pdir, 'ajob1')): alljobs = misc.glob('ajob*', Pdir) - nb_tot = len(alljobs) + nb_tot = len(alljobs) self.total_jobs += nb_tot for i, job in enumerate(alljobs): job = os.path.basename(job) - self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), + self.launch_job('%s' % job, cwd=Pdir, remaining=(nb_tot-i-1), run_type='Refine number %s on %s (%s/%s)' % (self.nb_refine, subdir, nb_proc+1, len(subproc))) if os.path.exists(pjoin(self.me_dir,'error')): self.monitor(html=True) raise MadEventError('Error detected in dir %s: %s' % \ (Pdir, open(pjoin(self.me_dir,'error')).read())) - self.monitor(run_type='All job submitted for refine number %s' % + self.monitor(run_type='All job submitted for refine number %s' % self.nb_refine) - + self.update_status("Combining runs", level='parton') try: os.remove(pjoin(Pdir, 'combine_runs.log')) except 
Exception: pass - + bindir = pjoin(os.path.relpath(self.dirbin, pjoin(self.me_dir,'SubProcesses'))) combine_runs.CombineRuns(self.me_dir) - + #update html output cross, error = self.make_make_all_html_results() self.results.add_detail('cross', cross) self.results.add_detail('error', error) - - + + self.update_status('finish refine', 'parton', makehtml=False) devnull.close() def do_combine_events(self, line): - """Advanced commands: Launch combine events""" + """Advanced commands: Launch combine events""" if self.readonly: outdir = 'Events' @@ -6895,17 +6895,17 @@ def do_combine_events(self, line): self.banner.add_generation_info(self.results.current['cross'], self.run_card['nevents']) if not hasattr(self, 'random_orig'): self.random_orig = 0 self.banner.change_seed(self.random_orig) - - + + if not os.path.exists(pjoin(outdir, self.run_name)): os.mkdir(pjoin(outdir, self.run_name)) - self.banner.write(pjoin(outdir, self.run_name, + self.banner.write(pjoin(outdir, self.run_name, '%s_%s_banner.txt' % (self.run_name, tag))) - - get_wgt = lambda event: event.wgt + + get_wgt = lambda event: event.wgt AllEvent = lhe_parser.MultiEventFile() AllEvent.banner = self.banner - + partials = 0 # if too many file make some partial unweighting sum_xsec, sum_xerru, sum_axsec = 0,[],0 Gdirs = self.get_Gdir() @@ -6915,7 +6915,7 @@ def do_combine_events(self, line): if os.path.exists(pjoin(Gdir, 'events.lhe')): result = sum_html.OneResult('') result.read_results(pjoin(Gdir, 'results.dat')) - AllEvent.add(pjoin(Gdir, 'events.lhe'), + AllEvent.add(pjoin(Gdir, 'events.lhe'), result.get('xsec')*gscalefact[Gdir], result.get('xerru')*gscalefact[Gdir], result.get('axsec')*gscalefact[Gdir] @@ -6924,7 +6924,7 @@ def do_combine_events(self, line): sum_xsec += result.get('xsec')*gscalefact[Gdir] sum_xerru.append(result.get('xerru')*gscalefact[Gdir]) sum_axsec += result.get('axsec')*gscalefact[Gdir] - + if len(AllEvent) >= 80: #perform a partial unweighting AllEvent.unweight(pjoin(outdir, 
self.run_name, "partials%s.lhe.gz" % partials), get_wgt, log_level=5, trunc_error=1e-2, event_target=self.nb_event) @@ -6933,26 +6933,26 @@ def do_combine_events(self, line): AllEvent.add(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % partials), sum_xsec, math.sqrt(sum(x**2 for x in sum_xerru)), - sum_axsec) + sum_axsec) partials +=1 - + if not hasattr(self,'proc_characteristic'): self.proc_characteristic = self.get_characteristics() - + self.banner.add_generation_info(sum_xsec, self.nb_event) nb_event = AllEvent.unweight(pjoin(outdir, self.run_name, "unweighted_events.lhe.gz"), get_wgt, trunc_error=1e-2, event_target=self.nb_event, log_level=logging.DEBUG, normalization=self.run_card['event_norm'], proc_charac=self.proc_characteristic) - - + + if partials: for i in range(partials): try: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe.gz" % i)) except Exception: os.remove(pjoin(outdir, self.run_name, "partials%s.lhe" % i)) - + self.results.add_detail('nb_event', nb_event) self.banner.add_generation_info(sum_xsec, nb_event) if self.run_card['bias_module'].lower() not in ['dummy', 'none']: @@ -6961,7 +6961,7 @@ def do_combine_events(self, line): class MadLoopInitializer(object): """ A container class for the various methods for initializing MadLoop. It is - placed in MadEventInterface because it is used by Madevent for loop-induced + placed in MadEventInterface because it is used by Madevent for loop-induced simulations. 
""" @staticmethod @@ -6974,7 +6974,7 @@ def make_and_run(dir_name,checkRam=False): if os.path.isfile(pjoin(dir_name,'check')): os.remove(pjoin(dir_name,'check')) os.remove(pjoin(dir_name,'check_sa.o')) - os.remove(pjoin(dir_name,'loop_matrix.o')) + os.remove(pjoin(dir_name,'loop_matrix.o')) # Now run make devnull = open(os.devnull, 'w') start=time.time() @@ -6996,7 +6996,7 @@ def make_and_run(dir_name,checkRam=False): stdout=devnull, stderr=devnull, close_fds=True) try: ptimer.execute() - #poll as often as possible; otherwise the subprocess might + #poll as often as possible; otherwise the subprocess might # "sneak" in some extra memory usage while you aren't looking # Accuracy of .2 seconds is enough for the timing. while ptimer.poll(): @@ -7028,7 +7028,7 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, If mu_r > 0.0, then the renormalization constant value will be hardcoded directly in check_sa.f, if is is 0 it will be set to Sqrt(s) and if it is < 0.0 the value in the param_card.dat is used. - If the split_orders target (i.e. the target squared coupling orders for + If the split_orders target (i.e. the target squared coupling orders for the computation) is != -1, it will be changed in check_sa.f via the subroutine CALL SET_COUPLINGORDERS_TARGET(split_orders).""" @@ -7043,12 +7043,12 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, file_path = pjoin(directories[0],'check_sa.f') if not os.path.isfile(file_path): raise MadGraph5Error('Could not find the location of check_sa.f'+\ - ' from the specified path %s.'%str(file_path)) + ' from the specified path %s.'%str(file_path)) file = open(file_path, 'r') check_sa = file.read() file.close() - + file = open(file_path, 'w') check_sa = re.sub(r"READPS = \S+\)","READPS = %s)"%('.TRUE.' 
if read_ps \ else '.FALSE.'), check_sa) @@ -7064,42 +7064,42 @@ def fix_PSPoint_in_check(dir_path, read_ps = True, npoints = 1, (("%.17e"%mu_r).replace('e','d')),check_sa) elif mu_r < 0.0: check_sa = re.sub(r"MU_R=SQRTS","",check_sa) - + if split_orders > 0: check_sa = re.sub(r"SET_COUPLINGORDERS_TARGET\(-?\d+\)", - "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) - + "SET_COUPLINGORDERS_TARGET(%d)"%split_orders,check_sa) + file.write(check_sa) file.close() - @staticmethod + @staticmethod def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ req_files = ['HelFilter.dat','LoopFilter.dat'], attempts = [4,15]): - """ Run the initialization of the process in 'run_dir' with success + """ Run the initialization of the process in 'run_dir' with success characterized by the creation of the files req_files in this directory. The directory containing the driving source code 'check_sa.f'. - The list attempt gives the successive number of PS points the + The list attempt gives the successive number of PS points the initialization should be tried with before calling it failed. Returns the number of PS points which were necessary for the init. Notice at least run_dir or SubProc_dir must be provided. A negative attempt number given in input means that quadprec will be forced for initialization.""" - + # If the user does not want detailed info, then set the dictionary # to a dummy one. 
if infos is None: infos={} - + if SubProc_dir is None and run_dir is None: raise MadGraph5Error('At least one of [SubProc_dir,run_dir] must'+\ ' be provided in run_initialization.') - + # If the user does not specify where is check_sa.f, then it is assumed # to be one levels above run_dir if SubProc_dir is None: SubProc_dir = os.path.abspath(pjoin(run_dir,os.pardir)) - + if run_dir is None: directories =[ dir for dir in misc.glob('P[0-9]*', SubProc_dir) if os.path.isdir(dir) ] @@ -7109,7 +7109,7 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find a valid running directory'+\ ' in %s.'%str(SubProc_dir)) - # Use the presence of the file born_matrix.f to decide if it is a + # Use the presence of the file born_matrix.f to decide if it is a # loop-induced process or not. It's not crucial, but just that because # of the dynamic adjustment of the ref scale used for deciding what are # the zero contributions, more points are neeeded for loop-induced. @@ -7128,9 +7128,9 @@ def run_initialization(run_dir=None, SubProc_dir=None, infos=None,\ raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ %MLCardPath) else: - MLCard = banner_mod.MadLoopParam(MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) MLCard_orig = banner_mod.MadLoopParam(MLCard) - + # Make sure that LoopFilter really is needed. 
if not MLCard['UseLoopFilter']: try: @@ -7153,11 +7153,11 @@ def need_init(): proc_prefix+fname)) for fname in my_req_files]) or \ not os.path.isfile(pjoin(run_dir,'check')) or \ not os.access(pjoin(run_dir,'check'), os.X_OK) - + # Check if this is a process without born by checking the presence of the # file born_matrix.f is_loop_induced = os.path.exists(pjoin(run_dir,'born_matrix.f')) - + # For loop induced processes, always attempt quadruple precision if # double precision attempts fail and the user didn't specify himself # quadruple precision initializations attempts @@ -7166,11 +7166,11 @@ def need_init(): use_quad_prec = 1 curr_attempt = 1 - MLCard.set('WriteOutFilters',True) - + MLCard.set('WriteOutFilters',True) + while to_attempt!=[] and need_init(): curr_attempt = to_attempt.pop() - # if the attempt is a negative number it means we must force + # if the attempt is a negative number it means we must force # quadruple precision at initialization time if curr_attempt < 0: use_quad_prec = -1 @@ -7183,11 +7183,11 @@ def need_init(): MLCard.set('ZeroThres',1e-9) # Plus one because the filter are written on the next PS point after curr_attempt = abs(curr_attempt+1) - MLCard.set('MaxAttempts',curr_attempt) + MLCard.set('MaxAttempts',curr_attempt) MLCard.write(pjoin(SubProc_dir,'MadLoopParams.dat')) # initialization is performed. 
- MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, + MadLoopInitializer.fix_PSPoint_in_check(run_dir, read_ps = False, npoints = curr_attempt) compile_time, run_time, ram_usage = \ MadLoopInitializer.make_and_run(run_dir) @@ -7200,7 +7200,7 @@ def need_init(): infos['Process_compilation']==None: infos['Process_compilation'] = compile_time infos['Initialization'] = run_time - + MLCard_orig.write(pjoin(SubProc_dir,'MadLoopParams.dat')) if need_init(): return None @@ -7219,8 +7219,8 @@ def need_init(ML_resources_path, proc_prefix, r_files): MLCardPath = pjoin(proc_dir,'SubProcesses','MadLoopParams.dat') if not os.path.isfile(MLCardPath): raise MadGraph5Error('Could not find MadLoopParams.dat at %s.'\ - %MLCardPath) - MLCard = banner_mod.MadLoopParam(MLCardPath) + %MLCardPath) + MLCard = banner_mod.MadLoopParam(MLCardPath) req_files = ['HelFilter.dat','LoopFilter.dat'] # Make sure that LoopFilter really is needed. @@ -7234,9 +7234,9 @@ def need_init(ML_resources_path, proc_prefix, r_files): req_files.remove('HelFilter.dat') except ValueError: pass - + for v_folder in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)): + '%s*'%subproc_prefix)): # Make sure it is a valid MadLoop directory if not os.path.isdir(v_folder) or not os.path.isfile(\ pjoin(v_folder,'loop_matrix.f')): @@ -7247,7 +7247,7 @@ def need_init(ML_resources_path, proc_prefix, r_files): if need_init(pjoin(proc_dir,'SubProcesses','MadLoop5_resources'), proc_prefix, req_files): return True - + return False @staticmethod @@ -7265,7 +7265,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['treatCardsLoopNoInit'], cwd=pjoin(proc_dir,'Source')) else: interface.do_treatcards('all --no_MadLoopInit') - + # First make sure that IREGI and CUTTOOLS are compiled if needed if os.path.exists(pjoin(proc_dir,'Source','CutTools')): misc.compile(arg=['libcuttools'],cwd=pjoin(proc_dir,'Source')) @@ -7273,8 +7273,8 @@ def init_MadLoop(proc_dir, 
n_PS=None, subproc_prefix='PV', MG_options=None, misc.compile(arg=['libiregi'],cwd=pjoin(proc_dir,'Source')) # Then make sure DHELAS and MODEL are compiled misc.compile(arg=['libmodel'],cwd=pjoin(proc_dir,'Source')) - misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) - + misc.compile(arg=['libdhelas'],cwd=pjoin(proc_dir,'Source')) + # Now initialize the MadLoop outputs logger.info('Initializing MadLoop loop-induced matrix elements '+\ '(this can take some time)...') @@ -7283,7 +7283,7 @@ def init_MadLoop(proc_dir, n_PS=None, subproc_prefix='PV', MG_options=None, if MG_options: if interface and hasattr(interface, 'cluster') and isinstance(interface.cluster, cluster.MultiCore): mcore = interface.cluster - else: + else: mcore = cluster.MultiCore(**MG_options) else: mcore = cluster.onecore @@ -7294,10 +7294,10 @@ def run_initialization_wrapper(run_dir, infos, attempts): run_dir=run_dir, infos=infos) else: n_PS = MadLoopInitializer.run_initialization( - run_dir=run_dir, infos=infos, attempts=attempts) + run_dir=run_dir, infos=infos, attempts=attempts) infos['nPS'] = n_PS return 0 - + def wait_monitoring(Idle, Running, Done): if Idle+Running+Done == 0: return @@ -7307,21 +7307,21 @@ def wait_monitoring(Idle, Running, Done): init_info = {} # List all virtual folders while making sure they are valid MadLoop folders VirtualFolders = [f for f in glob.iglob(pjoin(proc_dir,'SubProcesses', - '%s*'%subproc_prefix)) if (os.path.isdir(f) or + '%s*'%subproc_prefix)) if (os.path.isdir(f) or os.path.isfile(pjoin(f,'loop_matrix.f')))] logger.debug("Now Initializing MadLoop matrix element in %d folder%s:"%\ (len(VirtualFolders),'s' if len(VirtualFolders)>1 else '')) - logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in + logger.debug(', '.join("'%s'"%os.path.basename(v_folder) for v_folder in VirtualFolders)) for v_folder in VirtualFolders: init_info[v_folder] = {} - + # We try all multiples of n_PS from 1 to max_mult, first in DP and then # in QP before 
giving up, or use default values if n_PS is None. max_mult = 3 if n_PS is None: # Then use the default list of number of PS points to try - mcore.submit(run_initialization_wrapper, + mcore.submit(run_initialization_wrapper, [pjoin(v_folder), init_info[v_folder], None]) else: # Use specific set of PS points @@ -7348,8 +7348,8 @@ def wait_monitoring(Idle, Running, Done): '%d PS points (%s), in %.3g(compil.) + %.3g(init.) secs.'%( abs(init['nPS']),'DP' if init['nPS']>0 else 'QP', init['Process_compilation'],init['Initialization'])) - - logger.info('MadLoop initialization finished.') + + logger.info('MadLoop initialization finished.') AskforEditCard = common_run.AskforEditCard @@ -7364,16 +7364,16 @@ def wait_monitoring(Idle, Running, Done): import os import optparse - # Get the directory of the script real path (bin) - # and add it to the current PYTHONPATH + # Get the directory of the script real path (bin) + # and add it to the current PYTHONPATH #root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))) sys.path.insert(0, root_path) - class MyOptParser(optparse.OptionParser): + class MyOptParser(optparse.OptionParser): class InvalidOption(Exception): pass def error(self, msg=''): raise MyOptParser.InvalidOption(msg) - # Write out nice usage message if called with -h or --help + # Write out nice usage message if called with -h or --help usage = "usage: %prog [options] [FILE] " parser = MyOptParser(usage=usage) parser.add_option("-l", "--logging", default='INFO', @@ -7384,7 +7384,7 @@ def error(self, msg=''): help='force to launch debug mode') parser_error = '' done = False - + for i in range(len(sys.argv)-1): try: (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i]) @@ -7394,7 +7394,7 @@ def error(self, msg=''): else: args += sys.argv[len(sys.argv)-i:] if not done: - # raise correct error: + # raise correct error: try: (options, args) = parser.parse_args() except MyOptParser.InvalidOption as error: @@ -7407,8 +7407,8 @@ 
def error(self, msg=''): import subprocess import logging import logging.config - # Set logging level according to the logging level given by options - #logging.basicConfig(level=vars(logging)[options.logging]) + # Set logging level according to the logging level given by options + #logging.basicConfig(level=vars(logging)[options.logging]) import internal import internal.coloring_logging # internal.file = XXX/bin/internal/__init__.py @@ -7431,13 +7431,13 @@ def error(self, msg=''): raise pass - # Call the cmd interface main loop + # Call the cmd interface main loop try: if args: # a single command is provided if '--web' in args: - i = args.index('--web') - args.pop(i) + i = args.index('--web') + args.pop(i) cmd_line = MadEventCmd(me_dir, force_run=True) else: cmd_line = MadEventCmdShell(me_dir, force_run=True) @@ -7457,13 +7457,13 @@ def error(self, msg=''): pass - - - - - - - - + + + + + + + + diff --git a/epochX/cudacpp/pp_tt012j.mad/src/cudacpp_src.mk b/epochX/cudacpp/pp_tt012j.mad/src/cudacpp_src.mk index d4cc628aec..b4e446bc45 100644 --- a/epochX/cudacpp/pp_tt012j.mad/src/cudacpp_src.mk +++ b/epochX/cudacpp/pp_tt012j.mad/src/cudacpp_src.mk @@ -1,12 +1,7 @@ # Copyright (C) 2020-2023 CERN and UCLouvain. # Licensed under the GNU Lesser General Public License (version 3 or later). # Created by: S. Roiser (Feb 2020) for the MG5aMC CUDACPP plugin. -# Further modified by: O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. - -#=== Determine the name of this makefile (https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html) -#=== NB: assume that the same name (e.g. cudacpp.mk, Makefile...) is used in the Subprocess and src directories - -THISMK = $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)) +# Further modified by: S. Hageboeck, J. Teig, O. Mattelaer, S. Roiser, A. Valassi (2020-2023) for the MG5aMC CUDACPP plugin. 
#------------------------------------------------------------------------------- @@ -16,165 +11,24 @@ SHELL := /bin/bash #------------------------------------------------------------------------------- -#=== Configure common compiler flags for CUDA and C++ - -INCFLAGS = -I. -OPTFLAGS = -O3 # this ends up in CUFLAGS too (should it?), cannot add -Ofast or -ffast-math here - -#------------------------------------------------------------------------------- - #=== Configure the C++ compiler -CXXFLAGS = $(OPTFLAGS) -std=c++17 $(INCFLAGS) $(USE_NVTX) -fPIC -Wall -Wshadow -Wextra +include ../Source/make_opts + +MG_CXXFLAGS += -fPIC -I. $(USE_NVTX) ifeq ($(shell $(CXX) --version | grep ^nvc++),) -CXXFLAGS+= -ffast-math # see issue #117 +MG_CXXFLAGS += -ffast-math # see issue #117 endif -###CXXFLAGS+= -Ofast # performance is not different from --fast-math -###CXXFLAGS+= -g # FOR DEBUGGING ONLY # Note: AR, CXX and FC are implicitly defined if not set externally # See https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html ###RANLIB = ranlib -# Add -mmacosx-version-min=11.3 to avoid "ld: warning: object file was built for newer macOS version than being linked" -LDFLAGS = -ifneq ($(shell $(CXX) --version | egrep '^Apple clang'),) -CXXFLAGS += -mmacosx-version-min=11.3 -LDFLAGS += -mmacosx-version-min=11.3 -endif - -#------------------------------------------------------------------------------- - -#=== Configure the CUDA compiler (note: NVCC is already exported including ccache) - -###$(info NVCC=$(NVCC)) - -#------------------------------------------------------------------------------- - -#=== Configure ccache for C++ builds (note: NVCC is already exported including ccache) - -# Enable ccache if USECCACHE=1 -ifeq ($(USECCACHE)$(shell echo $(CXX) | grep ccache),1) - override CXX:=ccache $(CXX) -endif -#ifeq ($(USECCACHE)$(shell echo $(AR) | grep ccache),1) -# override AR:=ccache $(AR) -#endif - 
-#------------------------------------------------------------------------------- - -#=== Configure PowerPC-specific compiler flags for CUDA and C++ - -# Assuming uname is available, detect if architecture is PowerPC -UNAME_P := $(shell uname -p) - -# PowerPC-specific CXX compiler flags (being reviewed) -ifeq ($(UNAME_P),ppc64le) - CXXFLAGS+= -mcpu=power9 -mtune=power9 # gains ~2-3% both for none and sse4 - # Throughput references without the extra flags below: none=1.41-1.42E6, sse4=2.15-2.19E6 - ###CXXFLAGS+= -DNO_WARN_X86_INTRINSICS # no change - ###CXXFLAGS+= -fpeel-loops # no change - ###CXXFLAGS+= -funroll-loops # gains ~1% for none, loses ~1% for sse4 - ###CXXFLAGS+= -ftree-vectorize # no change - ###CXXFLAGS+= -flto # BUILD ERROR IF THIS ADDED IN SRC?! -else - ###AR=gcc-ar # needed by -flto - ###RANLIB=gcc-ranlib # needed by -flto - ###CXXFLAGS+= -flto # NB: build error from src/Makefile unless gcc-ar and gcc-ranlib are used - ######CXXFLAGS+= -fno-semantic-interposition # no benefit (neither alone, nor combined with -flto) -endif - -#------------------------------------------------------------------------------- - #=== Set the CUDA/C++ compiler flags appropriate to user-defined choices of AVX, FPTYPE, HELINL, HRDCOD, RNDGEN # Set the build flags appropriate to OMPFLAGS ###$(info OMPFLAGS=$(OMPFLAGS)) -CXXFLAGS += $(OMPFLAGS) - -# Set the build flags appropriate to each AVX choice (example: "make AVX=none") -# [NB MGONGPU_PVW512 is needed because "-mprefer-vector-width=256" is not exposed in a macro] -# [See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96476] -$(info AVX=$(AVX)) -ifeq ($(UNAME_P),ppc64le) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # Power9 VSX with 128 width (VSR registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on PowerPC for the moment) - endif -else ifeq ($(UNAME_P),arm) - ifeq ($(AVX),sse4) - override AVXFLAGS = -D__SSE4_2__ # ARM NEON with 128 width (Q/quadword 
registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none' and 'sse4' are supported on ARM for the moment) - endif -else ifneq ($(shell $(CXX) --version | grep ^nvc++),) # support nvc++ #531 - ifeq ($(AVX),none) - override AVXFLAGS = -mno-sse3 # no SIMD - else ifeq ($(AVX),sse4) - override AVXFLAGS = -mno-avx # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -else - ifeq ($(AVX),none) - override AVXFLAGS = -march=x86-64 # no SIMD (see #588) - else ifeq ($(AVX),sse4) - override AVXFLAGS = -march=nehalem # SSE4.2 with 128 width (xmm registers) - else ifeq ($(AVX),avx2) - override AVXFLAGS = -march=haswell # AVX2 with 256 width (ymm registers) [DEFAULT for clang] - else ifeq ($(AVX),512y) - override AVXFLAGS = -march=skylake-avx512 -mprefer-vector-width=256 # AVX512 with 256 width (ymm registers) [DEFAULT for gcc] - else ifeq ($(AVX),512z) - override AVXFLAGS = -march=skylake-avx512 -DMGONGPU_PVW512 # AVX512 with 512 width (zmm registers) - else ifneq ($(AVX),none) - $(error Unknown AVX='$(AVX)': only 'none', 'sse4', 'avx2', '512y' and '512z' are supported) - endif -endif -# For the moment, use AVXFLAGS everywhere: eventually, use them only in encapsulated implementations? 
-CXXFLAGS+= $(AVXFLAGS) - -# Set the build flags appropriate to each FPTYPE choice (example: "make FPTYPE=f") -###$(info FPTYPE=$(FPTYPE)) -ifeq ($(FPTYPE),d) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_DOUBLE -else ifeq ($(FPTYPE),f) - CXXFLAGS += -DMGONGPU_FPTYPE_FLOAT -DMGONGPU_FPTYPE2_FLOAT -else ifeq ($(FPTYPE),m) - CXXFLAGS += -DMGONGPU_FPTYPE_DOUBLE -DMGONGPU_FPTYPE2_FLOAT -else - $(error Unknown FPTYPE='$(FPTYPE)': only 'd', 'f' and 'm' are supported) -endif - -# Set the build flags appropriate to each HELINL choice (example: "make HELINL=1") -###$(info HELINL=$(HELINL)) -ifeq ($(HELINL),1) - CXXFLAGS += -DMGONGPU_INLINE_HELAMPS -else ifneq ($(HELINL),0) - $(error Unknown HELINL='$(HELINL)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each HRDCOD choice (example: "make HRDCOD=1") -###$(info HRDCOD=$(HRDCOD)) -ifeq ($(HRDCOD),1) - CXXFLAGS += -DMGONGPU_HARDCODE_PARAM -else ifneq ($(HRDCOD),0) - $(error Unknown HRDCOD='$(HRDCOD)': only '0' and '1' are supported) -endif - -# Set the build flags appropriate to each RNDGEN choice (example: "make RNDGEN=hasNoCurand") -###$(info RNDGEN=$(RNDGEN)) -ifeq ($(RNDGEN),hasNoCurand) - CXXFLAGS += -DMGONGPU_HAS_NO_CURAND -else ifneq ($(RNDGEN),hasCurand) - $(error Unknown RNDGEN='$(RNDGEN)': only 'hasCurand' and 'hasNoCurand' are supported) -endif +MG_CXXFLAGS += $(OMPFLAGS) #------------------------------------------------------------------------------- @@ -182,28 +36,18 @@ endif # Build directory "short" tag (defines target and path to the optional build directory) # (Rationale: keep directory names shorter, e.g. 
do not include random number generator choice) -override DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) +DIRTAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD) # Build lockfile "full" tag (defines full specification of build options that cannot be intermixed) # (Rationale: avoid mixing of CUDA and no-CUDA environment builds with different random number generators) -override TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) +TAG = $(AVX)_$(FPTYPE)_inl$(HELINL)_hrd$(HRDCOD)_$(RNDGEN) -# Build directory: current directory by default, or build.$(DIRTAG) if USEBUILDDIR==1 -###$(info Current directory is $(shell pwd)) -ifeq ($(USEBUILDDIR),1) - override BUILDDIR = build.$(DIRTAG) - override LIBDIRREL = ../lib/$(BUILDDIR) - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR=1 is set)) -else - override BUILDDIR = . - override LIBDIRREL = ../lib - ###$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG) (USEBUILDDIR is not set)) -endif -######$(info Building in BUILDDIR=$(BUILDDIR) for tag=$(TAG)) +# Build directory: +BUILDDIR := build.$(DIRTAG) +LIBDIRREL := ../lib/$(BUILDDIR) # Workaround for Mac #375 (I did not manage to fix rpath with @executable_path): use absolute paths for LIBDIR # (NB: this is quite ugly because it creates the directory if it does not exist - to avoid removing src by mistake) -UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Darwin) override LIBDIR = $(shell mkdir -p $(LIBDIRREL); cd $(LIBDIRREL); pwd) ifeq ($(wildcard $(LIBDIR)),) @@ -223,55 +67,35 @@ endif MG5AMC_COMMONLIB = mg5amc_common # First target (default goal) -all.$(TAG): $(BUILDDIR)/.build.$(TAG) $(LIBDIR)/.build.$(TAG) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - -# Target (and build options): debug -debug: OPTFLAGS = -g -O0 -debug: all.$(TAG) - -# Target: tag-specific build lockfiles -override oldtagsb=`if [ -d $(BUILDDIR) ]; then find $(BUILDDIR) -maxdepth 1 -name '.build.*' ! 
-name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` -override oldtagsl=`if [ -d $(LIBDIR) ]; then find $(LIBDIR) -maxdepth 1 -name '.build.*' ! -name '.build.$(TAG)' -exec echo $(shell pwd)/{} \; ; fi` - -$(BUILDDIR)/.build.$(TAG): $(LIBDIR)/.build.$(TAG) - -$(LIBDIR)/.build.$(TAG): - @if [ "$(oldtagsl)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(LIBDIR) for other tags:\n$(oldtagsl)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ "$(oldtagsb)" != "" ]; then echo -e "Cannot build for tag=$(TAG) as old builds exist in $(BUILDDIR) for other tags:\n$(oldtagsb)\nPlease run 'make clean' first\nIf 'make clean' is not enough: run 'make clean USEBUILDDIR=1 AVX=$(AVX) FPTYPE=$(FPTYPE)' or 'make cleanall'"; exit 1; fi - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - @touch $(LIBDIR)/.build.$(TAG) - @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - @touch $(BUILDDIR)/.build.$(TAG) +all.$(TAG): $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so #------------------------------------------------------------------------------- # Generic target and build rules: objects from C++ compilation -$(BUILDDIR)/%.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%.o : %.cc *.h @if [ ! -d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -fPIC -c $< -o $@ + $(CXX) $(MG_CXXFLAGS) $(CXXFLAGS) -c $< -o $@ # Generic target and build rules: objects from CUDA compilation -$(BUILDDIR)/%_cu.o : %.cc *.h $(BUILDDIR)/.build.$(TAG) +$(BUILDDIR)/%_cu.o : %.cc *.h @if [ ! 
-d $(BUILDDIR) ]; then echo "mkdir -p $(BUILDDIR)"; mkdir -p $(BUILDDIR); fi - $(NVCC) $(CPPFLAGS) $(CUFLAGS) -Xcompiler -fPIC -c -x cu $< -o $@ + $(NVCC) $(MG_NVCCFLAGS) $(NVCCFLAGS) -c -x cu $< -o $@ #------------------------------------------------------------------------------- cxx_objects=$(addprefix $(BUILDDIR)/, Parameters_sm.o read_slha.o) -ifneq ($(NVCC),) +ifeq ($(AVX),cuda) +COMPILER=$(NVCC) cu_objects=$(addprefix $(BUILDDIR)/, Parameters_sm_cu.o) +else +COMPILER=$(CXX) +cu_objects= endif # Target (and build rules): common (src) library -ifneq ($(NVCC),) $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) $(cu_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(NVCC) -shared -o $@ $(cxx_objects) $(cu_objects) $(LDFLAGS) -else -$(LIBDIR)/lib$(MG5AMC_COMMONLIB).so : $(cxx_objects) - @if [ ! -d $(LIBDIR) ]; then echo "mkdir -p $(LIBDIR)"; mkdir -p $(LIBDIR); fi - $(CXX) -shared -o $@ $(cxx_objects) $(LDFLAGS) -endif + mkdir -p $(LIBDIR) + $(COMPILER) -shared -o $@ $(cxx_objects) $(cu_objects) $(MG_LDFLAGS) $(LDFLAGS) #------------------------------------------------------------------------------- @@ -279,19 +103,7 @@ endif .PHONY: clean clean: -ifeq ($(USEBUILDDIR),1) - rm -rf $(LIBDIR) - rm -rf $(BUILDDIR) -else - rm -f $(LIBDIR)/.build.* $(LIBDIR)/lib$(MG5AMC_COMMONLIB).so - rm -f $(BUILDDIR)/.build.* $(BUILDDIR)/*.o $(BUILDDIR)/*.exe -endif - -cleanall: - @echo - $(MAKE) clean -f $(THISMK) - @echo - rm -rf $(LIBDIR)/build.* - rm -rf build.* + $(RM) -f ../lib/build.*/*.so + $(RM) -rf build.* #------------------------------------------------------------------------------- diff --git a/epochX/cudacpp/pp_tt012j.mad/src/mgOnGpuCxtypes.h b/epochX/cudacpp/pp_tt012j.mad/src/mgOnGpuCxtypes.h index ca9a9f00c0..3290d314d6 100644 --- a/epochX/cudacpp/pp_tt012j.mad/src/mgOnGpuCxtypes.h +++ b/epochX/cudacpp/pp_tt012j.mad/src/mgOnGpuCxtypes.h @@ -21,10 +21,14 @@ // Complex type in cuda: thrust or cucomplex or cxsmpl #ifdef 
__CUDACC__ #if defined MGONGPU_CUCXTYPE_THRUST +#ifdef __CLANG__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" // for icpx2021/clang13 (https://stackoverflow.com/a/15864661) +#endif #include +#ifdef __CLANG__ #pragma clang diagnostic pop +#endif #elif defined MGONGPU_CUCXTYPE_CUCOMPLEX #include #elif not defined MGONGPU_CUCXTYPE_CXSMPL