From 2145e0501d8d14d18254fc754ba5c884f68f9400 Mon Sep 17 00:00:00 2001 From: Adriano Di Florio Date: Mon, 7 Jul 2025 11:53:31 +0200 Subject: [PATCH 1/4] Lighter das-up-to-nevents when running tests, moving limited matrix data wfs to fixed events --- .../python/relval_steps.py | 4 +-- .../scripts/das-up-to-nevents.py | 27 +++++++++++++------ .../scripts/runTheMatrix.py | 26 ++++++++++-------- 3 files changed, 36 insertions(+), 21 deletions(-) diff --git a/Configuration/PyReleaseValidation/python/relval_steps.py b/Configuration/PyReleaseValidation/python/relval_steps.py index 1820b0e2f63c5..3f1aa1e2f8269 100644 --- a/Configuration/PyReleaseValidation/python/relval_steps.py +++ b/Configuration/PyReleaseValidation/python/relval_steps.py @@ -2340,11 +2340,11 @@ def lhegensim2018ml(fragment,howMuch): steps['HLTDR3_2023B']=merge( [ {'-s':'L1REPACK:Full,HLT:@%s'%hltKey2023,},{'--conditions':'auto:run3_hlt_relval'},{'--era' : 'Run3'},steps['HLTD'] ] ) -steps['HLTDR3_2024']=merge( [ {'-s':'L1REPACK:Full,HLT:@%s'%hltKey2025,},{'--conditions':'auto:run3_hlt_relval'},{'--era' : 'Run3_2024'},steps['HLTD'] ] ) +steps['HLTDR3_2024']=merge( [ {'-s':'L1REPACK:Full,HLT:@%s'%hltKey2024,},{'--conditions':'auto:run3_hlt_relval'},{'--era' : 'Run3_2024'},steps['HLTD'] ] ) steps['HLTDR3_2025']=merge( [ {'-s':'L1REPACK:Full,HLT:@%s'%hltKey2025,},{'--conditions':'auto:run3_hlt_relval'},{'--era' : 'Run3_2025'},steps['HLTD'] ] ) -steps['HLTDR3_ScoutingPFMonitor_2024']=merge( [ {'-s':'L1REPACK:Full,HLT:@%s'%hltKey2025,}, +steps['HLTDR3_ScoutingPFMonitor_2024']=merge( [ {'-s':'L1REPACK:Full,HLT:@%s'%hltKey2024,}, {'--conditions':'auto:run3_hlt_relval'}, {'--era' : 'Run3_2024'}, {'--filein' : '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/Scouting/Run3/ScoutingPFMonitor/300684ed-1a51-474f-8c4f-b3bf1e1f5044_skimmed.root'}, diff --git a/Configuration/PyReleaseValidation/scripts/das-up-to-nevents.py b/Configuration/PyReleaseValidation/scripts/das-up-to-nevents.py index 
8bf702db82a32..c4d7018a099f8 100755 --- a/Configuration/PyReleaseValidation/scripts/das-up-to-nevents.py +++ b/Configuration/PyReleaseValidation/scripts/das-up-to-nevents.py @@ -37,6 +37,7 @@ def get_lumi_ranges(i): return result def das_do_command(cmd): + print( "Running DAS command: %s"%cmd) ##TODO: remove me out = subprocess.check_output(cmd, shell=True, executable="/bin/bash").decode('utf8') return out.split("\n") @@ -197,30 +198,38 @@ def no_intersection(): if (len(golden_data_runs)==0): no_intersection() + if testing: + golden_data_runs = golden_data_runs[:1] # take only the first run # building the dataframe, cleaning for bad lumis golden_data_runs_tocheck = golden_data_runs - if testing or args.precheck: + if args.precheck and not testing: golden_data_runs_tocheck = [] # Here we check run per run. # This implies more dasgoclient queries, but smaller outputs # useful when running the IB/PR tests not to have huge # query results that have to be cached. - sum_events = 0 - for r in golden_data_runs: sum_events = sum_events + int(das_run_events_data(dataset,r)) golden_data_runs_tocheck.append(r) if events > 0 and sum_events > events: break - das_opt = "run in %s"%(str([int(g) for g in golden_data_runs_tocheck])) - - df = das_lumi_data(dataset,opt=das_opt).merge(das_file_data(dataset,opt=das_opt),on="file",how="inner") # merge file informations with run and lumis + + if testing: + golden_data_runs_tocheck = golden_data_runs[:1] # take only the first run + # in testing mode we just take the first file + das_opt = "run=%s"%(golden_data_runs_tocheck[0]) + + if not testing: + df = das_lumi_data(dataset,opt=das_opt).merge(das_file_data(dataset,opt=das_opt),on="file",how="inner") # merge file informations with run and lumis + else: + df = das_lumi_data(dataset,opt=das_opt) + df["lumis"] = [[int(ff) for ff in f.replace("[","").replace("]","").split(",")] for f in df.lumis.values] - if not args.nogolden: + if not args.nogolden and not testing: df_rs = [] for r in 
golden_data_runs_tocheck: @@ -247,13 +256,15 @@ def no_intersection(): df.loc[:,"max_lumi"] = [max(f) for f in df.lumis] df = df.sort_values(["run","min_lumi","max_lumi"]) + if testing: + df = df.head(1) # take only the first file if site is not None: df = df.merge(das_file_site(dataset,site),on="file",how="inner") if args.pandas: df.to_csv(dataset.replace("/","")+".csv") - if events > 0: + if events > 0 and not testing: df = df[df["events"] <= events] #jump too big files df.loc[:,"sum_evs"] = df.loc[:,"events"].cumsum() df = df[df["sum_evs"] < events] diff --git a/Configuration/PyReleaseValidation/scripts/runTheMatrix.py b/Configuration/PyReleaseValidation/scripts/runTheMatrix.py index 2596efbe7c018..f1614d2365735 100755 --- a/Configuration/PyReleaseValidation/scripts/runTheMatrix.py +++ b/Configuration/PyReleaseValidation/scripts/runTheMatrix.py @@ -99,7 +99,7 @@ def runSelected(opt): 12846.0, # RelValZEE_13 2024 13034.0, # RelValTTbar_14TeV 2024 PU = Run3_Flat55To75_PoissonOOTPU 16834.0, # RelValTTbar_14TeV 2025 - 17034.0, # RelValTTbar_14TeV 2025 PU = Run3_Flat55To75_PoissonOOTPU + 17034.0, # RelValTTbar_14TeV 2025 PU = Run3_Flat55To75_PoissonOOTPU 14034.0, # RelValTTbar_14TeV Run3_2023_FastSim 14234.0, # RelValTTbar_14TeV Run3_2023_FastSim PU = Run3_Flat55To75_PoissonOOTPU 2500.3001, # RelValTTbar_14TeV NanoAOD from existing MINI @@ -110,20 +110,24 @@ def runSelected(opt): 139.001, # Run2021 MinimumBias Commissioning2021 # 2022 - 140.045, # Run2022C JetHT + 2022.106001, # Run2022C JetHT # 2023 - 141.042, # Run2023D ZeroBias + 2023.211001, # Run2023D ZeroBias # 2024 - 145.014, # Run2024B ZeroBias - 145.104, # Run2024C JetMet0 - 145.202, # Run2024D EGamma0 - 145.301, # Run2024E DisplacedJet - 145.408, # Run2024F ParkingDoubleMuonLowMass0 - 145.500, # Run2024G BTagMu - 145.604, # Run2024H JetMET0 - 145.713, # Run2024I Tau + 2024.014001, # Run2024B ZeroBias + 2024.204001, # Run2024C JetMet0 + 2024.202001, # Run2024D EGamma0 + 2024.301001, # Run2024E DisplacedJet + 
2024.408001, # Run2024F ParkingDoubleMuonLowMass0 + 2024.500001, # Run2024G BTagMu + 2024.504001, # Run2024H JetMET0 + 2024.713001, # Run2024I Tau + + # 2025 + 2025.014001, # Run2025B ZeroBias + 2025.104001, # Run2025C JetMet0 ], 'phase2' : [ From 35712263dfdf486e6b627b51168e81d3832d5bc7 Mon Sep 17 00:00:00 2001 From: Adriano Di Florio Date: Mon, 7 Jul 2025 11:53:56 +0200 Subject: [PATCH 2/4] Simplifying the definition of fixed sizes data wfs --- .../PyReleaseValidation/python/MatrixUtil.py | 7 +- .../python/relval_data_highstats.py | 162 ++++++------------ .../python/relval_standard.py | 70 ++++---- .../python/relval_steps.py | 147 ++++++++-------- .../PyReleaseValidation/scripts/README.md | 25 +-- .../scripts/das-up-to-nevents.py | 8 +- .../scripts/runTheMatrix.py | 26 +-- 7 files changed, 196 insertions(+), 249 deletions(-) diff --git a/Configuration/PyReleaseValidation/python/MatrixUtil.py b/Configuration/PyReleaseValidation/python/MatrixUtil.py index 42256047a5098..c45437f8a66cf 100644 --- a/Configuration/PyReleaseValidation/python/MatrixUtil.py +++ b/Configuration/PyReleaseValidation/python/MatrixUtil.py @@ -133,12 +133,7 @@ def das(self, das_options, dataset): elif not self.skimEvents: command = "dasgoclient %s --query '%s'" % (das_options, self.queries(dataset)[0]) elif self.skimEvents: - from os import getenv - if getenv("JENKINS_PREFIX") is not None: - # to be sure that whatever happens the files are only those at CERN - command = "das-up-to-nevents.py -d %s -e %d -pc -l lumi_ranges.txt"%(dataset,self.events) - else: - command = "das-up-to-nevents.py -d %s -e %d -l lumi_ranges.txt"%(dataset,self.events) + command = "das-up-to-nevents.py -d %s -e %d -l lumi_ranges.txt"%(dataset,self.events) # Run filter on DAS output if self.ib_blacklist: command += " | grep -E -v " diff --git a/Configuration/PyReleaseValidation/python/relval_data_highstats.py b/Configuration/PyReleaseValidation/python/relval_data_highstats.py index 6cf17099dcefd..60e8881d6b621 100644 --- 
a/Configuration/PyReleaseValidation/python/relval_data_highstats.py +++ b/Configuration/PyReleaseValidation/python/relval_data_highstats.py @@ -1,5 +1,6 @@ # import the definition of the steps and input files: from Configuration.PyReleaseValidation.relval_steps import * +from functools import partial # here only define the workflows as a combination of the steps defined above: workflows = Matrix() @@ -7,110 +8,57 @@ ## Here we define fixed high stats data workflows ## not to be run as default. 10k, 50k, 150k, 250k, 500k or 1M events each -offset_era = 0.1 # less than 10 eras per year (hopefully!) -offset_pd = 0.001 # less than 100 pds per year -offset_events = 0.0001 # less than 10 event setups (10k,50k,150k,250k,500k,1M) - -## 2025 -base_wf = 2025.0 -for e_n,era in enumerate(eras_2025): - for p_n,pd in enumerate(pds_2025): - for e_key,evs in event_steps_dict.items(): - wf_number = base_wf - wf_number = wf_number + offset_era * e_n - wf_number = wf_number + offset_pd * p_n - wf_number = wf_number + offset_events * evs - wf_number = round(wf_number,6) - - ## ZeroBias has its own RECO and HARVESTING setup - ## ScoutingPFMonitor has its own HLT, RECO and HARVESTING setup - recoharv = hlt = '' - if 'ZeroBias' in pd: - recoharv = 'ZB_' - elif 'ScoutingPFMonitor' in pd: - hlt = recoharv = 'ScoutingPFMonitor_' - - recosetup = 'RECONANORUN3_' + recoharv + 'reHLT_2025' - - y = str(int(base_wf)) - - ## this is because ParkingDouble* PDs would end up with a too long name for the submission infrastructure - step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] + '_' + e_key - - workflows[wf_number] = ['',[step_name,'HLTDR3_' + hlt + y,'RECONANORUN3_' + recoharv + 'reHLT_'+y,'HARVESTRUN3_' + recoharv + y]] - -## 2024 -base_wf = 2024.0 -for e_n,era in enumerate(eras_2024): - for p_n,pd in enumerate(pds_2024): - for e_key,evs in event_steps_dict.items(): - wf_number = base_wf - wf_number = wf_number + offset_era * e_n - wf_number = wf_number + offset_pd * 
p_n - wf_number = wf_number + offset_events * evs - wf_number = round(wf_number,6) - - ## Here we use JetMET1 PD to run the TeVJet skims - skim = 'TeVJet' if pd == 'JetMET1' else '' - - ## ZeroBias has its own RECO and HARVESTING setup - suff = 'ZB_' if 'ZeroBias' in pd else '' - - # Running C,D,E with the offline GT. - # Could be removed once 2025 wfs are in and we'll test the online GT with them - recosetup = 'RECONANORUN3_' + suff + 'reHLT_2024' - recosetup = recosetup if era[-1] > 'E' else recosetup + '_Offline' - - y = str(int(base_wf)) - - ## this is because ParkingDouble* PDs would end up with a too long name for the submission infrastructure - step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] + skim + '_' + e_key - - workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]] - -## 2023 -base_wf = 2023.0 -for e_n,era in enumerate(eras_2023): - for p_n,pd in enumerate(pds_2023): - for e_key,evs in event_steps_dict.items(): - wf_number = base_wf - wf_number = wf_number + offset_era * e_n - wf_number = wf_number + offset_pd * p_n - wf_number = wf_number + offset_events * evs - wf_number = round(wf_number,6) - - ## this is because ParkingDouble* PDs would end up with a too long name for the submission infrastructure - step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] + '_' + e_key - - y = str(int(base_wf)) + 'B' if '2023B' in era else str(int(base_wf)) - suff = 'ZB_' if 'ZeroBias' in step_name else '' - workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]] - -## 2022 -base_wf = 2022.0 -for e_n,era in enumerate(eras_2022_1): - for p_n,pd in enumerate(pds_2022_1): - for e_key,evs in event_steps_dict.items(): - wf_number = base_wf - wf_number = wf_number + offset_era * e_n - wf_number = wf_number + offset_pd * p_n - wf_number = wf_number + offset_events * evs - wf_number = round(wf_number,6) - 
step_name = 'Run' + pd + era.split('Run')[1] + '_' + e_key - y = str(int(base_wf)) - suff = 'ZB_' if 'ZeroBias' in step_name else '' - workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]] - -# PD names changed during 2022 -for e_n,era in enumerate(eras_2022_2): - for p_n,pd in enumerate(pds_2022_2): - for e_key,evs in event_steps_dict.items(): - wf_number = base_wf - wf_number = wf_number + offset_era * (e_n + len(eras_2022_1)) - wf_number = wf_number + offset_pd * (p_n + len(pds_2022_1)) - wf_number = wf_number + offset_events * evs - wf_number = round(wf_number,6) - step_name = 'Run' + pd + era.split('Run')[1] + '_' + e_key - y = str(int(base_wf)) - suff = 'ZB_' if 'ZeroBias' in step_name else '' - workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]] +def run3NameMod(name): + # ParkingDouble* PDs would end up with a too long name for the submission infrastructure + return name.replace('ParkingDouble','Park2') + +def run3RecoMod(pd): + ## ZeroBias and ScoutingPFMonitor have + ## their own RECO and HARVESTING setup + if 'ZeroBias' in pd: + return 'ZB_' + elif 'ScoutingPFMonitor' in pd: + return 'ScoutingPFMonitor_' + else: + return '' + +def run3HLTMod(pd): + ## ScoutingPFMonitor has its own HLT setup + if 'ScoutingPFMonitor' in pd: + return 'ScoutingPFMonitor_' + else: + return '' + +def addFixedEventsWfs(years, pds, eras, suffreco = None, suffhlt = None, namemod = None): + + for y in years: + for era in eras: + for pd in pds: + for e_key,evs in event_steps_dict.items(): + ## ZeroBias have their own HARVESTING + suff = 'ZB_' if 'ZeroBias' in pd else '' + + wf_number = float(y) + offset_pd * pds.index(pd) + wf_number = wf_number + offset_era * eras.index(era) + wf_number = round(wf_number + offset_events * evs, 6) + + # Here we customise the steps depending on the PD name + recoharv = suffreco(pd) if suffreco is not None else '' + hlt = 
suffhlt(pd) if suffhlt is not None else '' + + recosetup = 'RECONANORUN3_' + recoharv + 'reHLT_' + y + harvsetup = 'HARVESTRUN3_' + recoharv + y + hltsetup = 'HLTDR3_' + hlt + y + + step_name = 'Run' + pd.replace('ParkingDouble','Park2') + y + era + '_' + e_key + if namemod is not None: + step_name = namemod(step_name) + + workflows[wf_number] = ['',[step_name, hltsetup, recosetup, harvsetup]] + +run3FixedWfs = partial(addFixedEventsWfs,suffreco = run3RecoMod, suffhlt = run3HLTMod, namemod = run3NameMod) +run3FixedWfs(['2025'],pds_2025,eras_2025) +run3FixedWfs(['2024'],pds_2024,eras_2024) +run3FixedWfs(['2023'],pds_2023,eras_2023) +run3FixedWfs(['2022'],pds_2022_2,eras_2022_2) +run3FixedWfs(['2022'],pds_2022_1,eras_2022_1) diff --git a/Configuration/PyReleaseValidation/python/relval_standard.py b/Configuration/PyReleaseValidation/python/relval_standard.py index 7d4475168a392..094bf86910f7c 100644 --- a/Configuration/PyReleaseValidation/python/relval_standard.py +++ b/Configuration/PyReleaseValidation/python/relval_standard.py @@ -575,40 +575,48 @@ workflows[143.912] = ['',['RunUPC2024','RECODR3_2025_UPC_OXY','HARVESTDPROMPTR3']] workflows[143.921] = ['',['RunUPC2024','RECODR3_2025_OXY_SKIMIONPHYSICS0','HARVESTDPROMPTR3']] -## Lumi mask fixed 2024 wfs -base_wf = 145.0 -offset_era = 0.1 # less than 10 eras per year (hopefully) -offset_pd = 0.001 # less than 100 pds per year - -for e_n,era in enumerate(era_mask_2024): - for p_n,pd in enumerate(pds_2024): - - # JetMET1 PD is used to run the TeVJet skims - # we don't really need it here - # (also as is the numbering conflicts with - # the scouting wf below, so if we really want to - # extend the pds for standar relvals for 2024 data - # one needs to change the 145.415 below) - if pd == 'JetMET1': - continue - - wf_number = round(base_wf + offset_era * e_n + offset_pd * p_n,3) - dataset = '/' + pd + '/' + era + '-v1/RAW' - - ## ZeroBias have their own HARVESTING - suff = 'ZB_' if 'ZeroBias' in step_name else '' - - #
Running C,D,E with the offline GT. - # Could be removed once 2025 wfs are in and we'll test the online GT with them - recosetup = 'RECONANORUN3_' + suff + 'reHLT_2024' - recosetup = recosetup if era[-1] > 'E' else recosetup + '_Offline' - - step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] - workflows[wf_number] = ['',[step_name,'HLTDR3_2024',recosetup,'HARVESTRUN3_' + suff + '2024']] - ## special HLT scouting workflow (with hardcoded private input file from ScoutingPFMonitor skimmed to remove all events without scouting) workflows[145.415] = ['',['HLTDR3_ScoutingPFMonitor_2024','RECONANORUN3_ScoutingPFMonitor_reHLT_2024','HARVESTRUN3_ScoutingPFMonitor_2024']] +###################################################################################################################################### +###################################################################################################################################### + +## Run3 Fixed Events for Testing and IBs +## (with 1k events in input, cut to 100 at step2) + +fixed_events_offset = 1e-7 # to have it unique + +def addFixedEventsTestingWfs(years, pds, eras): + + for y in years: + for era,pd in zip(eras, pds): + + ## ZeroBias have their own HARVESTING + suff = 'ZB_' if 'ZeroBias' in pd else '' + + wf_number = round(float(y) + offset_pd * pds.index(pd) + fixed_events_offset, 7) + step_name = 'Run' + pd.replace('ParkingDouble','Park2') + y + era + "_10k" + + workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_reHLT_' + y,'HARVESTRUN3_' + suff + y]] + +## 2024/2025 +pds = ['ZeroBias', 'JetMET0', 'EGamma0', 'DisplacedJet', 'ParkingDoubleMuonLowMass0', 'BTagMu', 'Muon0', 'Tau'] +eras = ['B','C','D','E','F','G','H','I'] +addFixedEventsTestingWfs(['2024','2025'], pds, eras) + +## 2023 +pds = ['ZeroBias', 'EGamma0', 'JetMET0'] +eras = ['B','C','D'] +addFixedEventsTestingWfs(['2023'], pds, eras) + +## 2022 +pds = ['ZeroBias', 'JetHT', 'Tau', 'BTagMu'] +eras = 
['B','C','D','E'] +addFixedEventsTestingWfs(['2022'], pds, eras) + +###################################################################################################################################### +###################################################################################################################################### + ################################################################## ### run3 (2024) skims - Era F ### workflows[146.101] = ['',['RunZeroBias2024F','HLTDR3_2024','SKIMZEROBIASRUN3_reHLT_2024','HARVESTRUN3_ZB_2024']] diff --git a/Configuration/PyReleaseValidation/python/relval_steps.py b/Configuration/PyReleaseValidation/python/relval_steps.py index 3f1aa1e2f8269..974050973cdcd 100644 --- a/Configuration/PyReleaseValidation/python/relval_steps.py +++ b/Configuration/PyReleaseValidation/python/relval_steps.py @@ -1,4 +1,5 @@ import sys +from functools import partial from .MatrixUtil import * @@ -51,10 +52,6 @@ steps = Steps() -#### Event to runs -event_steps = [0.01,0.05,0.1,0.15,0.25,0.5,1] #in millions -event_steps_k = ["10k","50k","100k","150k","250k","500k","1M"] ##TODO add an helper to convert the numbers to strings -event_steps_dict = dict(zip(event_steps_k,event_steps)) #### Production test section #### steps['ProdMinBias']=merge([{'cfg':'MinBias_8TeV_pythia8_TuneCUETP8M1_cff','--relval':'9000,300'},step1Defaults]) steps['ProdTTbar']=merge([{'cfg':'TTbar_8TeV_TuneCUETP8M1_cfi','--relval':'9000,100'},step1Defaults]) @@ -649,105 +646,105 @@ steps['RunHIPhysicsRawPrime2023A']={'INPUT':InputInfo(dataSet='/HIPhysicsRawPrime0/HIRun2023A-v1/RAW',label='HI2023A',events=100000,location='STD', ls=RunHI2023)} steps['RunHLTMonitor2024I']={'INPUT':InputInfo(dataSet='/HLTMonitor/Run2024I-Express-v2/FEVTHLTALL',label='2024I',events=100000,location='STD', ls={386801: [[32, 111]]})} -################################################################## + 
+#################################################################################################################################### +#################################################################################################################################### + ### Golden Data Steps # Reading good runs directly from the latest golden json # in https://cms-service-dqmdc.web.cern.ch/CAF/certification/ -# or (if available) from eos. the number of events limits -# the files used as input +# or (if available) from eos. The number of events limits +# the files used as input. -###2025 -pds_2025 = ['BTagMu', 'DisplacedJet', 'EGamma0', 'HcalNZS', 'JetMET0', 'Muon0', 'MuonEG', 'NoBPTX', 'ParkingDoubleMuonLowMass0', 'ParkingHH', 'ParkingLLP', 'ParkingSingleMuon0', 'ParkingVBF0', 'Tau', 'ZeroBias','JetMET1','ScoutingPFMonitor'] -eras_2025 = ['Run2025B', 'Run2025C','Run2025D','Run2025E', 'Run2025F','Run2025G','Run2025H','Run2025I'] -for era in eras_2025: - for pd in pds_2025: - dataset = "/" + pd + "/" + era - skim = '' +offset_era = 0.1 # less than 10 eras per year (hopefully!) +offset_pd = 0.001 # less than 100 pds per year +offset_events = 0.0001 # less than 10 event setups (10k,50k,150k,250k,500k,1M) - dataset = dataset + '-v1/RAW' - - for e_key,evs in event_steps_dict.items(): - step_name = "Run" + pd.replace("ParkingDouble","Park2") + era.split("Run")[1] + skim + "_" + e_key - steps[step_name] = {'INPUT':InputInfo(dataSet=dataset,label=era.split("Run")[1],events=int(evs*1e6), skimEvents=True, location='STD')} - - -###2024 -## N.B. 
here we use JetMet0 as "starndard" PD and JetMET1 for the TeVJet skims +#### PDs to run +pds_2025 = ['BTagMu', 'DisplacedJet', 'EGamma0', 'HcalNZS', 'JetMET0', 'Muon0', 'MuonEG', 'NoBPTX', 'ParkingDoubleMuonLowMass0', 'ParkingHH', 'ParkingLLP', 'ParkingSingleMuon0', 'ParkingVBF0', 'Tau', 'ZeroBias','JetMET1','ScoutingPFMonitor'] pds_2024 = ['BTagMu', 'DisplacedJet', 'EGamma0', 'HcalNZS', 'JetMET0', 'Muon0', 'MuonEG', 'NoBPTX', 'ParkingDoubleMuonLowMass0', 'ParkingHH', 'ParkingLLP', 'ParkingSingleMuon0', 'ParkingVBF0', 'Tau', 'ZeroBias','JetMET1'] -eras_2024 = ['Run2024B', 'Run2024C', 'Run2024D', 'Run2024E', 'Run2024F','Run2024G','Run2024H','Run2024I'] -for era in eras_2024: - for pd in pds_2024: - dataset = "/" + pd + "/" + era - skim = '' - - if pd == 'JetMET1': - dataset = dataset + '-TeVJet-PromptReco-v1/RAW-RECO' - skim = 'TeVJet' - else: - dataset = dataset + '-v1/RAW' - - for e_key,evs in event_steps_dict.items(): - step_name = "Run" + pd.replace("ParkingDouble","Park2") + era.split("Run")[1] + skim + "_" + e_key - steps[step_name] = {'INPUT':InputInfo(dataSet=dataset,label=era.split("Run")[1],events=int(evs*1e6), skimEvents=True, location='STD')} - -###2023 - pds_2023 = ['BTagMu', 'DisplacedJet', 'EGamma0', 'HcalNZS', 'JetMET0', 'Muon0', 'MuonEG', 'NoBPTX', 'ParkingDoubleElectronLowMass', 'ParkingDoubleMuonLowMass0', 'Tau', 'ZeroBias'] -eras_2023 = ['Run2023B', 'Run2023C', 'Run2023D'] -# 'MinimumBias' is excluded since apprently no Golden run for /MinimumBias/Run2023{B,C,D}-v1/RAW -for era in eras_2023: - for pd in pds_2023: - dataset = "/" + pd + "/" + era + "-v1/RAW" - for e_key,evs in event_steps_dict.items(): - step_name = "Run" + pd.replace("ParkingDouble","Park2") + era.split("Run")[1] + "_" + e_key - steps[step_name] = {'INPUT':InputInfo(dataSet=dataset,label=era.split("Run")[1],events=int(evs*1e6), skimEvents=True, location='STD')} - -###2022 - pds_2022_1 = ['BTagMu', 'DisplacedJet', 'DoubleMuon', 'SingleMuon', 'EGamma', 'HcalNZS', 'JetHT', 'MET', 
'MinimumBias', 'MuonEG', 'NoBPTX', 'Tau', 'ZeroBias'] -eras_2022_1 = ['Run2022B', 'Run2022C'] -for era in eras_2022_1: - for pd in pds_2022_1: - dataset = "/" + pd + "/" + era + "-v1/RAW" - for e_key,evs in event_steps_dict.items(): - step_name = "Run" + pd + era.split("Run")[1] + "_" + e_key - steps[step_name] = {'INPUT':InputInfo(dataSet=dataset,label=era.split("Run")[1],events=int(evs*1e6), skimEvents=True, location='STD')} - # PD names changed during the year (!) pds_2022_2 = ['BTagMu', 'DisplacedJet', 'Muon', 'EGamma', 'HcalNZS', 'JetMET', 'MuonEG', 'NoBPTX', 'Tau', 'ZeroBias'] -# Note: 'MinimumBias' is excluded since apprently no Golden run for /MinimumBias/Run2022{D,E}-v1/RAW -eras_2022_2 = ['Run2022D', 'Run2022E'] -for era in eras_2022_2: - for pd in pds_2022_2: - dataset = "/" + pd + "/" + era + "-v1/RAW" - for e_key,evs in event_steps_dict.items(): - step_name = "Run" + pd + era.split("Run")[1] + "_" + e_key - steps[step_name] = {'INPUT':InputInfo(dataSet=dataset,label=era.split("Run")[1],events=int(evs*1e6), skimEvents=True, location='STD')} +#### Eras to run +eras_2025 = ['B','C','D','E','F','G','H','I'] +eras_2024 = eras_2025 +eras_2023 = ['B','C','D'] +eras_2022_1 = ['B','C'] +eras_2022_2 = ['D','E'] +#### Event to run +event_steps = [0.01,0.05,0.1,0.15,0.25,0.5,1] #in millions +event_steps_k = ["10k","50k","100k","150k","250k","500k","1M"] ##TODO add an helper to convert the numbers to strings +event_steps_dict = dict(zip(event_steps_k,event_steps)) +def run3SkimMod(pd): + ## We use JetMET1 to run the TevJet Skims + if pd == 'JetMET1': + return '-TeVJet-PromptReco-v1/RAW-RECO' + else: + return '-v1/RAW' + + +def addFixedEventsInputs(years, pds, eras, evstep = event_steps_dict, skimmod = None): + + ''' Add fixed events inputs for Run3 data + year : list of years to consider, strings. + pds : list of PDs to consider, strings. + eras : list of eras to consider, strings. + evstep : dictionary of event string and number e.g. 
{'10k': 10000} + skimmod : function to modify the skim name (optional) based on the PD input: + e.g. skimmod = lambda pd: '-TeVJet-PromptReco-v1/RAW-RECO' if pd=='JetMET1' else '-v1/RAW' + ''' + + for y in years: + for pd in pds: + pd_name = pd.replace('ParkingDouble','Park2') + for era in eras: + + dataset = '/' + pd + '/' + 'Run' + y + era + + dataset = dataset + '-v1/RAW' if skimmod is None else dataset + skimmod(pd) + skim = '' if skimmod is None else skimmod(pd).split('/')[0].split('-')[0] + + for e_key,evs in event_steps_dict.items(): + step_name = 'Run' + pd_name + y + era + skim + '_' + e_key + steps[step_name] = {'INPUT':InputInfo(dataSet=dataset,label=pd_name+y+era,events=int(evs*1e6), skimEvents=True, location='STD')} + + +run3FixedSteps = partial(addFixedEventsInputs,evstep = event_steps_dict, skimmod = run3SkimMod) +run3FixedSteps(['2025'],pds_2025,eras_2025) +run3FixedSteps(['2024'],pds_2024,eras_2024) +run3FixedSteps(['2023'],pds_2023,eras_2023) +run3FixedSteps(['2022'],pds_2022_2,eras_2022_2) +run3FixedSteps(['2022'],pds_2022_1,eras_2022_1) + +#################################################################################################################################### +### TODO: if everything works fine for testing using the fixed steps wfs, remove these ### 2024 single lumi mask wfs for the limited matrix only ### Mask chosen from golden json away from run start good_runs_2024 = [379238,379454,380360,381079,382258,383814,385889,386593] lumi_mask_2024 = [{ r : [[110, 111]]} for r in good_runs_2024] -era_mask_2024 = dict(zip(eras_2024,lumi_mask_2024)) +era_mask_2024 = dict(zip(['B','C','D','E','F','G','H','I'],lumi_mask_2024)) for era in era_mask_2024: for pd in pds_2024: - dataset = '/' + pd + '/' + era + dataset = '/' + pd + '/' + 'Run2024' + era lm = era_mask_2024[era] ## Here we use JetMET1 PD to run the TeVJet skims dataset = dataset + '-TeVJet-PromptReco-v1/RAW-RECO' if pd == 'JetMET1' else dataset + '-v1/RAW' skim = 'TeVJet' if pd == 
'JetMET1' else '' + pd_name = pd.replace('ParkingDouble','Park2') + step_name = 'Run' + pd_name + '2024' + era + skim - step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] + skim - - steps[step_name]={'INPUT':InputInfo(dataSet=dataset,label=era.split('Run')[1],events=100000,location='STD', ls=lm)} + steps[step_name]={'INPUT':InputInfo(dataSet=dataset,label=pd_name+'2024'+era,events=100000,location='STD', ls=lm)} -################################################################## +#################################################################################################################################### # Highstat HLTPhysics Run2015DHS=selectedLS([258712,258713,258714,258741,258742,258745,258749,258750,259626,259637,259683,259685,259686,259721,259809,259810,259818,259820,259821,259822,259862,259890,259891]) @@ -2909,7 +2906,7 @@ def lhegensim2018ml(fragment,howMuch): steps['RECODR3_reHLT_2022']=merge([{'--conditions':'auto:run3_data_relval', '--hltProcess':'reHLT'},steps['RECODR3']]) steps['RECODR3_reHLT_2023']=merge([{'--conditions':'auto:run3_data_relval', '--hltProcess':'reHLT'},steps['RECODR3_2023']]) steps['RECODR3_reHLT_2023B']=merge([{'--conditions':'auto:run3_data_relval', '--hltProcess':'reHLT'},steps['RECODR3']]) -steps['RECODR3_reHLT_2024']=merge([{'--conditions':'auto:run3_data_prompt_relval', '--hltProcess':'reHLT'},steps['RECODR3_2024']]) +steps['RECODR3_reHLT_2024']=merge([{'--conditions':'auto:run3_data_relval', '--hltProcess':'reHLT'},steps['RECODR3_2024']]) steps['RECODR3_reHLT_2025']=merge([{'--conditions':'auto:run3_data_prompt_relval', '--hltProcess':'reHLT'},steps['RECODR3_2025']]) # Added to run with the offline GT on few 2024 Eras. 
# Could be removed once 2025 wfs are in and we'll test the online GT with them diff --git a/Configuration/PyReleaseValidation/scripts/README.md b/Configuration/PyReleaseValidation/scripts/README.md index ccc12bf65a9e7..1e32d3100494f 100644 --- a/Configuration/PyReleaseValidation/scripts/README.md +++ b/Configuration/PyReleaseValidation/scripts/README.md @@ -367,20 +367,21 @@ pp Data reRECO workflows: | 2021 | | | | | | 139.001 | Run2021 MinimumBias | run3_hlt_relval | Run3 | HLT@relval2022 (Commissioning2021) | | 2022 | | | | | -| 2022.002001 | Run2022D ZeroBias | run3_hlt_relval + run3_data_relval | Run3 | HLT:@relval2022 | -| 2022.000001 | Run2022D JetHT | run3_hlt_relval + run3_data_relval | Run3 | HLT:@relval2022 | +| 2022.0030001 | Run2022D JetHT | run3_hlt_relval + run3_data_relval | Run3 | HLT:@relval2022 | | 2023 | | | | | -| 2023.002001 | Run2023D ZeroBias | run3_hlt_relval + run3_data_relval| Run3_2023 | HLT:@relval2023 | -| 2023.000001 | Run2023D MuonEG | run3_hlt_relval + run3_data_relval| Run3_2023 | HLT:@relval2023 | +| 2023.0020001 | Run2023D JetMET0 | run3_hlt_relval + run3_data_relval| Run3_2023 | HLT:@relval2023 | | 2024 | | | | | -| 145.014 | Run2024B ZeroBias | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2025 | -| 145.104 | Run2024C JetMet0 | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2025 | -| 145.202 | Run2024D EGamma0 | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2025 | -| 145.301 | Run2024E DisplacedJet | run3_hlt_relval + run3_data_prompt_relval| Run3_2024 | HLT:@relval2025 | -| 145.408 | Run2024B ParkingDoubleMuonLowMass0 | run3_hlt_relval + run3_data_prompt_relval| Run3_2024 | HLT:@relval2025 | -| 145.500 | Run2024B BTagMu | run3_hlt_relval + run3_data_prompt_relval| Run3_2024 | HLT:@relval2025 | -| 145.604 | Run2024B JetMET0 | run3_hlt_relval + run3_data_prompt_relval| Run3_2024 | HLT:@relval2025 | -| 145.713 | Run2024B Tau | run3_hlt_relval + run3_data_prompt_relval| Run3_2024 | 
HLT:@relval2025 | +| 2024.0000001 | Run2024B ZeroBias | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2024 | +| 2024.0010001 | Run2024C JetMET0 | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2024 | +| 2024.0020001 | Run2024D EGamma0 | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2024 | +| 2024.0030001 | Run2024E DisplacedJet | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2024 | +| 2024.0040001 | Run2024F ParkingDoubleMuonLowMass0 | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2024 | +| 2024.0050001 | Run2024G BTagMu | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2024 | +| 2024.0060001 | Run2024H Muon0 | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2024 | +| 2024.0070001 | Run2024I Tau | run3_hlt_relval + run3_data_relval| Run3_2024 | HLT:@relval2024 | +| 2025 | | | | | +| 2025.0000001 | Run2025B ZeroBias | run3_hlt_relval + run3_data_prompt_relval| Run3_2025 | HLT:@relval2025 | +| 2025.0010001 | Run2025C JetMET0 | run3_hlt_relval + run3_data_prompt_relval| Run3_2025 | HLT:@relval2025 | And Heavy Ion workflows: diff --git a/Configuration/PyReleaseValidation/scripts/das-up-to-nevents.py b/Configuration/PyReleaseValidation/scripts/das-up-to-nevents.py index c4d7018a099f8..65c08143c65a6 100755 --- a/Configuration/PyReleaseValidation/scripts/das-up-to-nevents.py +++ b/Configuration/PyReleaseValidation/scripts/das-up-to-nevents.py @@ -37,7 +37,6 @@ def get_lumi_ranges(i): return result def das_do_command(cmd): - print( "Running DAS command: %s"%cmd) ##TODO: remove me out = subprocess.check_output(cmd, shell=True, executable="/bin/bash").decode('utf8') return out.split("\n") @@ -151,7 +150,7 @@ def no_intersection(): json_list = os.listdir(cert_path) if len(json_list) == 0: web_fallback == True - json_list = [c for c in json_list if "golden" in c.lower() and "era" not in c.lower()] + json_list = [c for c in json_list if "golden" in c.lower() and "era" not in c.lower() and
"ppref" not in c.lower()] json_list = [c for c in json_list if c.lower().startswith("cert_c") and c.endswith("json")] else: web_fallback = True @@ -159,14 +158,13 @@ def no_intersection(): if web_fallback: cert_url = base_cert_url + cert_type + "/" json_list = get_url_clean(cert_url).split("\n") - json_list = [c for c in json_list if "golden" in c.lower() and "era" not in c.lower() and "cert_c" in c.lower()] + json_list = [c for c in json_list if "golden" in c.lower() and "era" not in c.lower() and "cert_c" in c.lower() and "ppref" not in c.lower()] json_list = [[cc for cc in c.split(" ") if cc.lower().startswith("cert_c") and cc.endswith("json")][0] for c in json_list] # the larger the better, assuming file naming schema # Cert_X_RunStart_RunFinish_Type.json # TODO if args.run keep golden only with right range - - run_ranges = [int(c.split("_")[3]) - int(c.split("_")[2]) for c in json_list] + run_ranges = [int(c.split("_")[-2]) - int(c.split("_")[-3]) for c in json_list] latest_json = np.array(json_list[np.argmax(run_ranges)]).reshape(1,-1)[0].astype(str) best_json = str(latest_json[0]) if not web_fallback: diff --git a/Configuration/PyReleaseValidation/scripts/runTheMatrix.py b/Configuration/PyReleaseValidation/scripts/runTheMatrix.py index f1614d2365735..3e9bdc87de2bf 100755 --- a/Configuration/PyReleaseValidation/scripts/runTheMatrix.py +++ b/Configuration/PyReleaseValidation/scripts/runTheMatrix.py @@ -99,7 +99,7 @@ def runSelected(opt): 12846.0, # RelValZEE_13 2024 13034.0, # RelValTTbar_14TeV 2024 PU = Run3_Flat55To75_PoissonOOTPU 16834.0, # RelValTTbar_14TeV 2025 - 17034.0, # RelValTTbar_14TeV 2025 PU = Run3_Flat55To75_PoissonOOTPU + 17034.0, # RelValTTbar_14TeV 2025 PU = Run3_Flat55To75_PoissonOOTPU 14034.0, # RelValTTbar_14TeV Run3_2023_FastSim 14234.0, # RelValTTbar_14TeV Run3_2023_FastSim PU = Run3_Flat55To75_PoissonOOTPU 2500.3001, # RelValTTbar_14TeV NanoAOD from existing MINI @@ -110,24 +110,24 @@ def runSelected(opt): 139.001, # Run2021 MinimumBias 
Commissioning2021 # 2022 - 2022.106001, # Run2022C JetHT + 2022.0030001, # Run2022C JetHT # 2023 - 2023.211001, # Run2023D ZeroBias + 2023.0020001, # Run2023D JetMET0 # 2024 - 2024.014001, # Run2024B ZeroBias - 2024.204001, # Run2024C JetMet0 - 2024.202001, # Run2024D EGamma0 - 2024.301001, # Run2024E DisplacedJet - 2024.408001, # Run2024F ParkingDoubleMuonLowMass0 - 2024.500001, # Run2024G BTagMu - 2024.504001, # Run2024H JetMET0 - 2024.713001, # Run2024I Tau + 2024.0000001, # Run2024B ZeroBias + 2024.0010001, # Run2024C JetMET0 + 2024.0020001, # Run2024D EGamma0 + 2024.0030001, # Run2024E DisplacedJet + 2024.0040001, # Run2024F ParkingDoubleMuonLowMass0 + 2024.0050001, # Run2024G BTagMu + 2024.0060001, # Run2024H Muon0 + 2024.0070001, # Run2024I Tau # 2025 - 2025.014001, # Run2025B ZeroBias - 2025.104001, # Run2025C JetMet0 + 2025.0000001, # Run2025B ZeroBias + 2025.0010001, # Run2025C JetMET0 ], 'phase2' : [ From 5d2c1c6185865650c53d1ce5705db06ea83b8a60 Mon Sep 17 00:00:00 2001 From: Adriano Di Florio Date: Thu, 4 Sep 2025 23:16:12 +0200 Subject: [PATCH 3/4] Add HFlav for ParkingDoubleMu --- .../python/relval_data_highstats.py | 39 +++++++++++++------ .../python/relval_steps.py | 5 ++- 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/Configuration/PyReleaseValidation/python/relval_data_highstats.py b/Configuration/PyReleaseValidation/python/relval_data_highstats.py index 60e8881d6b621..1853795decd85 100644 --- a/Configuration/PyReleaseValidation/python/relval_data_highstats.py +++ b/Configuration/PyReleaseValidation/python/relval_data_highstats.py @@ -12,9 +12,21 @@ def run3NameMod(name): # ParkingDouble* PDs would end up with a too long name for the submission infrastructure return name.replace('ParkingDouble','Park2') +def run3HarvMod(pd): + ## ZeroBias, ScoutingPFMonitor and ParkingDoubleMuonLowMass + ## have their own HARVESTING setup + if 'ZeroBias' in pd: + return 'ZB_' + elif 'ScoutingPFMonitor' in pd: + return 'ScoutingPFMonitor_' + elif 
'ParkingDoubleMuonLowMass' in pd: + return 'HFLAV_' + else: + return '' + def run3RecoMod(pd): ## ZeroBias and ScoutingPFMonitor have - ## their own RECO and HARVESTING setup + ## their own RECO setup if 'ZeroBias' in pd: return 'ZB_' elif 'ScoutingPFMonitor' in pd: @@ -29,36 +41,39 @@ def run3HLTMod(pd): else: return '' -def addFixedEventsWfs(years, pds, eras, suffreco = None, suffhlt = None, namemod = None): +def addFixedEventsWfs(years, pds, eras, offset = 0, suffreco = None, suffhlt = None, suffharv = None, namemod = None): for y in years: for era in eras: for pd in pds: for e_key,evs in event_steps_dict.items(): - ## ZeroBias have their own HARVESTING - suff = 'ZB_' if 'ZeroBias' in pd else '' wf_number = float(y) + offset_pd * pds.index(pd) wf_number = wf_number + offset_era * eras.index(era) + wf_number = wf_number + offset wf_number = round(wf_number + offset_events * evs, 6) # Here we customise the steps depending on the PD name - recoharv = suffreco(pd) if suffreco is not None else '' - hlt = suffhlt(pd) if suffhlt is not None else '' + reco = suffreco(pd) if suffreco is not None else '' + harv = suffharv(pd) if suffharv is not None else '' + hlt = suffhlt(pd) if suffhlt is not None else '' + name = namemod(pd) if namemod is not None else pd - recosetup = 'RECONANORUN3_' + recoharv + 'reHLT_2025' - harvsetup = 'HARVESTRUN3_' + recoharv + y + recosetup = 'RECONANORUN3_' + reco + 'reHLT_2025' + harvsetup = 'HARVESTRUN3_' + harv + y hltsetup = 'HLTDR3_' + hlt + y - step_name = 'Run' + pd.replace('ParkingDouble','Park2') + y + era + '_' + e_key + step_name = 'Run' + name + y + era + '_' + e_key if namemod is not None: step_name = namemod(step_name) workflows[wf_number] = ['',[step_name, hltsetup, recosetup, harvsetup]] -run3FixedWfs = partial(addFixedEventsWfs,suffreco = run3RecoMod, suffhlt = run3HLTMod, namemod = run3NameMod) + return wf_number - float(y) #to concatenate the offset + +run3FixedWfs = partial(addFixedEventsWfs,suffreco = run3RecoMod, suffhlt 
= run3HLTMod, suffharv = run3HarvMod, namemod = run3NameMod) run3FixedWfs(['2025'],pds_2025,eras_2025) run3FixedWfs(['2024'],pds_2024,eras_2024) run3FixedWfs(['2023'],pds_2023,eras_2023) -run3FixedWfs(['2022'],pds_2022_2,eras_2022_2) -run3FixedWfs(['2022'],pds_2022_1,eras_2022_1) +offset_2022 = run3FixedWfs(['2022'],pds_2022_2,eras_2022_2) +run3FixedWfs(['2022'],pds_2022_1,eras_2022_1,offset = offset_2022) diff --git a/Configuration/PyReleaseValidation/python/relval_steps.py b/Configuration/PyReleaseValidation/python/relval_steps.py index 974050973cdcd..450bbb1bad046 100644 --- a/Configuration/PyReleaseValidation/python/relval_steps.py +++ b/Configuration/PyReleaseValidation/python/relval_steps.py @@ -661,7 +661,7 @@ offset_events = 0.0001 # less than 10 event setups (10k,50k,150k,250k,500k,1M) #### PDs to run -pds_2025 = ['BTagMu', 'DisplacedJet', 'EGamma0', 'HcalNZS', 'JetMET0', 'Muon0', 'MuonEG', 'NoBPTX', 'ParkingDoubleMuonLowMass0', 'ParkingHH', 'ParkingLLP', 'ParkingSingleMuon0', 'ParkingVBF0', 'Tau', 'ZeroBias','JetMET1','ScoutingPFMonitor'] +pds_2025 = ['BTagMu', 'DisplacedJet', 'EGamma0', 'HcalNZS', 'JetMET0', 'Muon0', 'MuonEG', 'NoBPTX', 'ParkingDoubleMuonLowMass0', 'ParkingHH', 'ParkingLLP0', 'ParkingSingleMuon0', 'ParkingVBF0', 'Tau', 'ZeroBias','JetMET1','ScoutingPFMonitor'] pds_2024 = ['BTagMu', 'DisplacedJet', 'EGamma0', 'HcalNZS', 'JetMET0', 'Muon0', 'MuonEG', 'NoBPTX', 'ParkingDoubleMuonLowMass0', 'ParkingHH', 'ParkingLLP', 'ParkingSingleMuon0', 'ParkingVBF0', 'Tau', 'ZeroBias','JetMET1'] pds_2023 = ['BTagMu', 'DisplacedJet', 'EGamma0', 'HcalNZS', 'JetMET0', 'Muon0', 'MuonEG', 'NoBPTX', 'ParkingDoubleElectronLowMass', 'ParkingDoubleMuonLowMass0', 'Tau', 'ZeroBias'] pds_2022_1 = ['BTagMu', 'DisplacedJet', 'DoubleMuon', 'SingleMuon', 'EGamma', 'HcalNZS', 'JetHT', 'MET', 'MinimumBias', 'MuonEG', 'NoBPTX', 'Tau', 'ZeroBias'] @@ -4171,13 +4171,16 @@ def gen2024HiMix(fragment,howMuch): steps['HARVESTRUN3_ZB_2023B']=merge([{'--era':'Run3', 
'-s':'HARVESTING:@rerecoZeroBiasFakeHLT+@miniAODDQM+@nanoAODDQM'},steps['HARVESTRUN3_2022']]) steps['HARVESTRUN3_ZB_2023']=merge([{'--era':'Run3_2023', '-s':'HARVESTING:@rerecoZeroBiasFakeHLT+@miniAODDQM+@nanoAODDQM'},steps['HARVESTRUN3_2023']]) steps['HARVESTRUN3_COS_2023']=merge([{'--scenario':'cosmics', '--era':'Run3_2023', '-s':'HARVESTING:dqmHarvesting'},steps['HARVESTRUN3_2022']]) +steps['HARVESTRUN3_HFLAV_2023']=merge([{'--era':'Run3_2023', '-s':'HARVESTING:@standardDQM+@miniAODDQM+@nanoAODDQM+@heavyFlavor'},steps['HARVESTDRUN3']]) # 2024 steps['HARVESTRUN3_ZB_2024']=merge([{'--era':'Run3_2024', '-s':'HARVESTING:@rerecoZeroBias+@miniAODDQM+@nanoAODDQM'},steps['HARVESTDRUN3']]) steps['HARVESTRUN3_2024']=merge([{'--era':'Run3_2024', '-s':'HARVESTING:@standardDQM+@miniAODDQM+@nanoAODDQM'},steps['HARVESTDRUN3']]) +steps['HARVESTRUN3_HFLAV_2024']=merge([{'--era':'Run3_2024', '-s':'HARVESTING:@standardDQM+@miniAODDQM+@nanoAODDQM+@heavyFlavor'},steps['HARVESTDRUN3']]) steps['HARVESTRUN3_ScoutingPFMonitor_2024']=merge([{'--era':'Run3_2024', '-s':'HARVESTING:@standardDQM+@miniAODDQM+@nanoAODDQM+@hltScouting'},steps['HARVESTDRUN3']]) # 2025 steps['HARVESTRUN3_ZB_2025']=merge([{'--era':'Run3_2025', '-s':'HARVESTING:@rerecoZeroBias+@miniAODDQM+@nanoAODDQM'},steps['HARVESTDRUN3']]) steps['HARVESTRUN3_2025']=merge([{'--era':'Run3_2025', '-s':'HARVESTING:@standardDQM+@miniAODDQM+@nanoAODDQM'},steps['HARVESTDRUN3']]) +steps['HARVESTRUN3_HFLAV_2025']=merge([{'--era':'Run3_2025', '-s':'HARVESTING:@standardDQM+@miniAODDQM+@nanoAODDQM+@heavyFlavor'},steps['HARVESTDRUN3']]) steps['HARVESTRUN3_ScoutingPFMonitor_2025']=merge([{'--era':'Run3_2025', '-s':'HARVESTING:@standardDQM+@miniAODDQM+@nanoAODDQM+@hltScouting'},steps['HARVESTDRUN3']]) # HI steps['HARVESTRUN3_HI2023A']=merge([{'--era':'Run3_pp_on_PbPb_approxSiStripClusters_2023', '-s':'HARVESTING:@standardDQM+@miniAODDQM'},steps['HARVESTRUN3_2022']]) From 1efc5f04802172060ec0e5656b305c6c57c2a1b7 Mon Sep 17 00:00:00 2001 From: 
Adriano Di Florio Date: Fri, 5 Sep 2025 11:58:30 +0200 Subject: [PATCH 4/4] Reducing the number of eras for 2025 --- .../PyReleaseValidation/python/relval_standard.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/Configuration/PyReleaseValidation/python/relval_standard.py b/Configuration/PyReleaseValidation/python/relval_standard.py index 094bf86910f7c..47ae2ef20d4f5 100644 --- a/Configuration/PyReleaseValidation/python/relval_standard.py +++ b/Configuration/PyReleaseValidation/python/relval_standard.py @@ -598,11 +598,15 @@ def addFixedEventsTestingWfs(years, pds, eras): step_name = 'Run' + pd.replace('ParkingDouble','Park2') + y + era + "_10k" workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_reHLT_' + y,'HARVESTRUN3_' + suff + y]] - -## 2024/2025 + +## 2025 +pds = ['ZeroBias', 'JetMET0', 'EGamma0'] +eras = ['B','C','D'] +addFixedEventsTestingWfs(['2025'], pds, eras) +## 2024 pds = ['ZeroBias', 'JetMET0', 'EGamma0', 'DisplacedJet', 'ParkingDoubleMuonLowMass0', 'BTagMu', 'Muon0', 'Tau'] eras = ['B','C','D','E','F','G','H','I'] -addFixedEventsTestingWfs(['2024','2025'], pds, eras) +addFixedEventsTestingWfs(['2024'], pds, eras) ## 2023 pds = ['ZeroBias', 'EGamma0', 'JetMET0']