Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 1 addition & 6 deletions Configuration/PyReleaseValidation/python/MatrixUtil.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,12 +133,7 @@ def das(self, das_options, dataset):
elif not self.skimEvents:
command = "dasgoclient %s --query '%s'" % (das_options, self.queries(dataset)[0])
elif self.skimEvents:
from os import getenv
if getenv("JENKINS_PREFIX") is not None:
# to be sure that whatever happens the files are only those at CERN
command = "das-up-to-nevents.py -d %s -e %d -pc -l lumi_ranges.txt"%(dataset,self.events)
else:
command = "das-up-to-nevents.py -d %s -e %d -l lumi_ranges.txt"%(dataset,self.events)
command = "das-up-to-nevents.py -d %s -e %d -l lumi_ranges.txt"%(dataset,self.events)
# Run filter on DAS output
if self.ib_blacklist:
command += " | grep -E -v "
Expand Down
177 changes: 70 additions & 107 deletions Configuration/PyReleaseValidation/python/relval_data_highstats.py
Original file line number Diff line number Diff line change
@@ -1,116 +1,79 @@
# import the definition of the steps and input files:
from Configuration.PyReleaseValidation.relval_steps import *
from functools import partial

# here only define the workflows as a combination of the steps defined above:
workflows = Matrix()

## Here we define fixed high stats data workflows
## not to be run as default. 10k, 50k, 150k, 250k, 500k or 1M events each

offset_era = 0.1 # less than 10 eras per year (hopefully!)
offset_pd = 0.001 # less than 100 pds per year
offset_events = 0.0001 # less than 10 event setups (10k,50k,150k,250k,500k,1M)

## 2025
base_wf = 2025.0
for e_n,era in enumerate(eras_2025):
for p_n,pd in enumerate(pds_2025):
for e_key,evs in event_steps_dict.items():
wf_number = base_wf
wf_number = wf_number + offset_era * e_n
wf_number = wf_number + offset_pd * p_n
wf_number = wf_number + offset_events * evs
wf_number = round(wf_number,6)

## ZeroBias has its own RECO and HARVESTING setup
## ScoutingPFMonitor has its own HLT, RECO and HARVESTING setup
recoharv = hlt = ''
if 'ZeroBias' in pd:
recoharv = 'ZB_'
elif 'ScoutingPFMonitor' in pd:
hlt = recoharv = 'ScoutingPFMonitor_'

recosetup = 'RECONANORUN3_' + recoharv + 'reHLT_2025'

y = str(int(base_wf))

## this is because ParkingDouble* PDs would end up with a too long name for the submission infrastructure
step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] + '_' + e_key

workflows[wf_number] = ['',[step_name,'HLTDR3_' + hlt + y,'RECONANORUN3_' + recoharv + 'reHLT_'+y,'HARVESTRUN3_' + recoharv + y]]

## 2024
base_wf = 2024.0
for e_n,era in enumerate(eras_2024):
for p_n,pd in enumerate(pds_2024):
for e_key,evs in event_steps_dict.items():
wf_number = base_wf
wf_number = wf_number + offset_era * e_n
wf_number = wf_number + offset_pd * p_n
wf_number = wf_number + offset_events * evs
wf_number = round(wf_number,6)

## Here we use JetMET1 PD to run the TeVJet skims
skim = 'TeVJet' if pd == 'JetMET1' else ''

## ZeroBias has its own RECO and HARVESTING setup
suff = 'ZB_' if 'ZeroBias' in pd else ''

# Running C,D,E with the offline GT.
# Could be removed once 2025 wfs are in and we'll test the online GT with them
recosetup = 'RECONANORUN3_' + suff + 'reHLT_2024'
recosetup = recosetup if era[-1] > 'E' else recosetup + '_Offline'

y = str(int(base_wf))

## this is because ParkingDouble* PDs would end up with a too long name for the submission infrastructure
step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] + skim + '_' + e_key

workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]]

## 2023
base_wf = 2023.0
for e_n,era in enumerate(eras_2023):
for p_n,pd in enumerate(pds_2023):
for e_key,evs in event_steps_dict.items():
wf_number = base_wf
wf_number = wf_number + offset_era * e_n
wf_number = wf_number + offset_pd * p_n
wf_number = wf_number + offset_events * evs
wf_number = round(wf_number,6)

## this is because ParkingDouble* PDs would end up with a too long name for the submission infrastructure
step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] + '_' + e_key

y = str(int(base_wf)) + 'B' if '2023B' in era else str(int(base_wf))
suff = 'ZB_' if 'ZeroBias' in step_name else ''
workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]]

## 2022
base_wf = 2022.0
for e_n,era in enumerate(eras_2022_1):
for p_n,pd in enumerate(pds_2022_1):
for e_key,evs in event_steps_dict.items():
wf_number = base_wf
wf_number = wf_number + offset_era * e_n
wf_number = wf_number + offset_pd * p_n
wf_number = wf_number + offset_events * evs
wf_number = round(wf_number,6)
step_name = 'Run' + pd + era.split('Run')[1] + '_' + e_key
y = str(int(base_wf))
suff = 'ZB_' if 'ZeroBias' in step_name else ''
workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]]

# PD names changed during 2022
for e_n,era in enumerate(eras_2022_2):
for p_n,pd in enumerate(pds_2022_2):
for e_key,evs in event_steps_dict.items():
wf_number = base_wf
wf_number = wf_number + offset_era * (e_n + len(eras_2022_1))
wf_number = wf_number + offset_pd * (p_n + len(pds_2022_1))
wf_number = wf_number + offset_events * evs
wf_number = round(wf_number,6)
step_name = 'Run' + pd + era.split('Run')[1] + '_' + e_key
y = str(int(base_wf))
suff = 'ZB_' if 'ZeroBias' in step_name else ''
workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]]
def run3NameMod(name):
# ParkingDouble* PDs would end up with a too long name for the submission infrastructure
return name.replace('ParkingDouble','Park2')

def run3HarvMod(pd):
## ZeroBias, ScoutingPFMonitor and ParkingDoubleMuonLowMass
## have their own HARVESTING setup
if 'ZeroBias' in pd:
return 'ZB_'
elif 'ScoutingPFMonitor' in pd:
return 'ScoutingPFMonitor_'
elif 'ParkingDoubleMuonLowMass' in pd:
return 'HFLAV_'
else:
return ''

def run3RecoMod(pd):
## ZeroBias and ScoutingPFMonitor have
## their own RECO setup
if 'ZeroBias' in pd:
return 'ZB_'
elif 'ScoutingPFMonitor' in pd:
return 'ScoutingPFMonitor_'
else:
return ''

def run3HLTMod(pd):
## ScoutingPFMonitor has its own HLT setup
if 'ScoutingPFMonitor' in pd:
return 'ScoutingPFMonitor_'
else:
return ''

def addFixedEventsWfs(years, pds, eras, offset = 0, suffreco = None, suffhlt = None, suffharv = None, namemod = None):

for y in years:
for era in eras:
for pd in pds:
for e_key,evs in event_steps_dict.items():

wf_number = float(y) + offset_pd * pds.index(pd)
wf_number = wf_number + offset_era * eras.index(era)
wf_number = wf_number + offset
wf_number = round(wf_number + offset_events * evs, 6)

# Here we customise the steps depending on the PD name
reco = suffreco(pd) if suffreco is not None else ''
harv = suffharv(pd) if suffharv is not None else ''
hlt = suffhlt(pd) if suffhlt is not None else ''
name = namemod(pd) if namemod is not None else ''

recosetup = 'RECONANORUN3_' + reco + 'reHLT_2025'
harvsetup = 'HARVESTRUN3_' + harv + y
hltsetup = 'HLTDR3_' + hlt + y

step_name = 'Run' + name + y + era + '_' + e_key
if namemod is not None:
step_name = namemod(step_name)

workflows[wf_number] = ['',[step_name, hltsetup, recosetup, harvsetup]]

return wf_number - float(y) #to concatenate the offset

run3FixedWfs = partial(addFixedEventsWfs,suffreco = run3RecoMod, suffhlt = run3HLTMod, suffharv = run3HarvMod, namemod = run3NameMod)
run3FixedWfs(['2025'],pds_2025,eras_2025)
run3FixedWfs(['2024'],pds_2024,eras_2024)
run3FixedWfs(['2023'],pds_2023,eras_2023)
offset_2022 = run3FixedWfs(['2022'],pds_2022_2,eras_2022_2)
run3FixedWfs(['2022'],pds_2022_1,eras_2022_1,offset = offset_2022)
74 changes: 43 additions & 31 deletions Configuration/PyReleaseValidation/python/relval_standard.py
Original file line number Diff line number Diff line change
Expand Up @@ -575,40 +575,52 @@
workflows[143.912] = ['',['RunUPC2024','RECODR3_2025_UPC_OXY','HARVESTDPROMPTR3']]
workflows[143.921] = ['',['RunUPC2024','RECODR3_2025_OXY_SKIMIONPHYSICS0','HARVESTDPROMPTR3']]

## Lumi mask fixed 2024 wfs
base_wf = 145.0
offset_era = 0.1 # less than 10 eras per year (hopefully)
offset_pd = 0.001 # less than 100 pds per year

for e_n,era in enumerate(era_mask_2024):
for p_n,pd in enumerate(pds_2024):

# JetMET1 PD is used to run the TeVJet skims
# we don't really need it here
# (also as is the numbering conflicts with
# the scouting wf below, so if we really want to
# extend the pds for standar relvals for 2024 data
# one needs to change the 145.415 below)
if pd == 'JetMET1':
continue

wf_number = round(base_wf + offset_era * e_n + offset_pd * p_n,3)
dataset = '/' + pd + '/' + era + '-v1/RAW'

## ZeroBias have their own HARVESTING
suff = 'ZB_' if 'ZeroBias' in step_name else ''

# Running C,D,E with the offline GT.
# Could be removed once 2025 wfs are in and we'll test the online GT with them
recosetup = 'RECONANORUN3_' + suff + 'reHLT_2024'
recosetup = recosetup if era[-1] > 'E' else recosetup + '_Offline'

step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1]
workflows[wf_number] = ['',[step_name,'HLTDR3_2024',recosetup,'HARVESTRUN3_' + suff + '2024']]

## special HLT scouting workflow (with hardcoded private input file from ScoutingPFMonitor skimmed to remove all events without scouting)
workflows[145.415] = ['',['HLTDR3_ScoutingPFMonitor_2024','RECONANORUN3_ScoutingPFMonitor_reHLT_2024','HARVESTRUN3_ScoutingPFMonitor_2024']]

######################################################################################################################################
######################################################################################################################################

## Run3 Fixed Events for Testing and IBs
## (with 1k events in input, cut to 100 at step2)

fixed_events_offset = 1e-7 # to have it unique

def addFixedEventsTestingWfs(years, pds, eras):

for y in years:
for era,pd in zip(eras, pds):

## ZeroBias have their own HARVESTING
suff = 'ZB_' if 'ZeroBias' in pd else ''

wf_number = round(float(y) + offset_pd * pds.index(pd) + fixed_events_offset, 7)
step_name = 'Run' + pd.replace('ParkingDouble','Park2') + y + era + "_10k"

workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_reHLT_' + y,'HARVESTRUN3_' + suff + y]]

## 2025
pds = ['ZeroBias', 'JetMET0', 'EGamma0']
eras = ['B','C','D']
addFixedEventsTestingWfs(['2025'], pds, eras)
## 2024
pds = ['ZeroBias', 'JetMET0', 'EGamma0', 'DisplacedJet', 'ParkingDoubleMuonLowMass0', 'BTagMu', 'Muon0', 'Tau']
eras = ['B','C','D','E','F','G','H','I']
addFixedEventsTestingWfs(['2024'], pds, eras)

## 2023
pds = ['ZeroBias', 'EGamma0', 'JetMET0']
eras = ['B','C','D']
addFixedEventsTestingWfs(['2023'], pds, eras)

## 2022
pds = ['ZeroBias', 'JetHT', 'Tau', 'BTagMu']
eras = ['B','C','D','E']
addFixedEventsTestingWfs(['2022'], pds, eras)

######################################################################################################################################
######################################################################################################################################

##################################################################
### run3 (2024) skims - Era F ###
workflows[146.101] = ['',['RunZeroBias2024F','HLTDR3_2024','SKIMZEROBIASRUN3_reHLT_2024','HARVESTRUN3_ZB_2024']]
Expand Down
Loading