33 commits
1d242bb  new configurations (Mar 11, 2019)
ec7c7aa  fixup fiducial volumes. Don't apply bnbcorrection weight twice in SBND. (Mar 11, 2019)
317a772  add in more configurations for weights handling (Mar 11, 2019)
4b5f595  fix bug in config (Mar 11, 2019)
197a884  use Event weight for weighting events. Add in option to scale all eve… (Mar 11, 2019)
a4f2ddb  use Event weight for weighting events. Add in option to scale all eve… (Mar 11, 2019)
94f75fb  Add in more configuration options to select interaction mode. Set tra… (Mar 11, 2019)
59e5a20  add in more histograms (Mar 11, 2019)
833d1bd  Add in accounting of individual event categories (Mar 11, 2019)
a8b07b6  More on event categories. Change slightly how signal and background i… (Mar 11, 2019)
daf907d  keep track of lepton start momentum in NuMuInteraction output (Mar 11, 2019)
5b35567  Merge branch 'feature/numu_update' into feature/numu_configurations (Mar 11, 2019)
c4b493a  config for selecting a region in SBND to the LHS of the beam (Mar 11, 2019)
892480d  fix stray dependence of Event on larsoft (mastbaum, Mar 31, 2019)
1b255a7  temporary structure for event weights (mastbaum, Apr 1, 2019)
d296df4  add initial (unswapped) neutrino flavor (mastbaum, Apr 1, 2019)
a996443  Switch to using weight information from sbncode event. Also get POT f… (Apr 1, 2019)
63f3f6c  fix random number generation in energy smearing (Apr 1, 2019)
65608bf  Merge branch 'develop' into feature/numu_update (Apr 1, 2019)
04ed5f9  Add in subrun handling to post processor class. (Apr 1, 2019)
5121ed3  Make backtracker and particle inventory services usable (Apr 1, 2019)
eac7100  Correctly calculate baseline of oscillated neutrinos. Correctly calcu… (Apr 19, 2019)
aa26815  Fix random number generation in energy smearing. Make length calculat… (Apr 19, 2019)
3f0837c  Update to config of fiducial volumes. Update configuration of coviara… (Apr 19, 2019)
3471dfe  merge in updates to weight format (Jun 10, 2019)
41abbad  add in option to use separate uni weights for CC and NC events (Jun 10, 2019)
415a045  Fix bugs in visible energy calculation. Add in a few other attempts a… (Jun 10, 2019)
891a407  try a different visible energy calculation (Jun 10, 2019)
507836d  Merge branch 'feature/numu_configurations' into feature/numu_update (Jun 10, 2019)
c91b0cc  tweak fiducial volumes (Jun 10, 2019)
3ea20bc  add in no background configs (Jun 10, 2019)
b79ef8d  add in config used for physics book analysis (Jun 10, 2019)
43daf94  add in example macro for processing output files (Jun 10, 2019)
1 change: 1 addition & 0 deletions sbnanalysis/ana/CMakeLists.txt
@@ -22,6 +22,7 @@ include_directories( $ENV{MESSAGEFACILITY_INC} )
link_directories(
$ENV{MESSAGEFACILITY_LIB}
$ENV{FHICLCPP_LIB}
$ENV{BOOST_LIB}
$ENV{CANVAS_LIB}
$ENV{CANVAS_ROOT_IO_LIB}
$ENV{CETLIB_EXCEPT_LIB}
98 changes: 75 additions & 23 deletions sbnanalysis/ana/SBNOsc/Chi2Sensitivity.cxx
@@ -154,22 +154,17 @@ void Chi2Sensitivity::Initialize(fhicl::ParameterSet* config) {
}
}

// Uniformly applied weights
if (pconfig.is_key_to_sequence("UniformWeights")) {
fUniformWeights = pconfig.get<std::vector<std::string> >("UniformWeights");
}

// start at 0th event sample
fSampleIndex = 0;
}

Chi2Sensitivity::EventSample::EventSample(const fhicl::ParameterSet& config) {
// scaling stuff
fName = config.get<std::string>("name", "");
fScaleFactor = config.get<double>("scalefactor", 0.);
fScalePOT = config.get<double>("ScalePOT", 0.);
fPOT = 0.;

// setup detector stuff
fDistance = config.get<double>("Distance", 0);
std::vector<double> xlim = config.get<std::vector<double> >("DetX");
std::vector<double> ylim = config.get<std::vector<double> >("DetY");
std::vector<double> zlim = config.get<std::vector<double> >("DetZ");
@@ -206,13 +201,39 @@ Chi2Sensitivity::EventSample::EventSample(const fhicl::ParameterSet& config) {
// Take distance along z-axis as limits
int numBinsPerMeter = config.get<int>("NumDistanceBinsPerMeter", 1);
double numBinsPerCM = numBinsPerMeter * 0.01; // unit conversion
unsigned n_bins = (unsigned) ((fZlim[1] - fZlim[0]) * numBinsPerCM) + 1; /* round up to be on the safe side */

// get beam center and baseline in cm
fBaseline = config.get<double>("Baseline");
fBeamCenterX = config.get<double>("BeamCenterX", 0.);
fBeamCenterY = config.get<double>("BeamCenterY", 0.);
fBeamFrontZ = config.get<double>("BeamFrontZ", 0.);

// get full range of distances across detector
double detector_distance = sqrt((fZlim[1] - fZlim[0]) * (fZlim[1] - fZlim[0]) \
+ (fXlim[1] - fXlim[0]) * (fXlim[1] - fXlim[0]) \
+ (fYlim[1] - fYlim[0]) * (fYlim[1] - fYlim[0]));
// get full range of beam
// taken approximately from eyeballing MCFlux information
double beam_width = sqrt( 125. * 125. /* x */ + 125. * 125. /* y */ + 5500. * 5500. /* z */);
unsigned n_bins = (unsigned) ((detector_distance + beam_width) * numBinsPerCM) + 1;
unsigned n_limits = n_bins + 1;
double dist_binwidth = 1./numBinsPerCM;

// minimum distance from decayed vertex to detector
// taken approximately from MCFlux info
double min_baseline = fBaseline - 5500.;

for (unsigned i = 0; i < n_limits; i++) {
fDistBins.push_back(fDistance + (i * dist_binwidth) / 100000. /* cm -> km */);
fDistBins.push_back((min_baseline + (i * dist_binwidth)) / 100000. /* cm -> km */);
}
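
For reference, a minimal standalone sketch of the distance-binning logic above. The detector extent and baseline here are invented placeholders; the real values come from the DetX/DetY/DetZ and Baseline configuration keys, while the 125/125/5500 cm beam extent is the eyeballed MCFlux figure used in the code:

#include <cmath>
#include <iostream>
#include <vector>

int main() {
  // hypothetical configuration values (cm)
  double baseline = 11000.;                      // "Baseline"
  double xlen = 400., ylen = 400., zlen = 500.;  // detector extent from DetX/Y/Z
  int bins_per_meter = 1;                        // "NumDistanceBinsPerMeter"

  double detector_distance = std::sqrt(xlen*xlen + ylen*ylen + zlen*zlen);
  double beam_width = std::sqrt(125.*125. + 125.*125. + 5500.*5500.);
  double bins_per_cm = bins_per_meter * 0.01;
  unsigned n_bins = (unsigned)((detector_distance + beam_width) * bins_per_cm) + 1;
  double binwidth = 1. / bins_per_cm;
  double min_baseline = baseline - 5500.;

  std::vector<double> edges;
  for (unsigned i = 0; i <= n_bins; i++)
    edges.push_back((min_baseline + i * binwidth) / 100000.);  // cm -> km
  std::cout << n_bins << " bins from " << edges.front() << " km to "
            << edges.back() << " km" << std::endl;
}
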

// scaling in reco energy bins
fEnergyBinScale = config.get<std::vector<double>>("energy_bin_scale", {});
if (fEnergyBinScale.size() == 0) {
fEnergyBinScale = std::vector<double>(fBins.size() - 1, 1.0);
}
else assert(fEnergyBinScale.size() == fBins.size() - 1);

// setup histograms
fBkgCounts = new TH1D((fName + " Background").c_str(), (fName + " Background").c_str(), fBins.size() - 1, &fBins[0]);
fSignalCounts = new TH3D((fName + " Signal").c_str(), (fName + " Signal").c_str(),
@@ -227,6 +248,11 @@
// onto the next sample
fSampleIndex ++;
}

void Chi2Sensitivity::ProcessSubRun(const SubRun *subrun) {
fCovariance.ProcessSubRun(subrun);
fEventSamples[fSampleIndex].fPOT += subrun->totgoodpot;
}

void Chi2Sensitivity::ProcessEvent(const Event *event) {
// have the covariance process the event
@@ -247,17 +273,15 @@
nuE = event->reco[n].reco_energy;
}

// Get distance travelled along z in km
// (To reproduce proposal)
double dist = fEventSamples[fSampleIndex].fDistance +
(event->truth[truth_ind].neutrino.position.Z() - fEventSamples[fSampleIndex].fZlim[0])/100000. /* cm -> km */;

// Get distance travelled (assuming nu started at (x, y, z) = (0, 0, min_det_zdim - det_dist))
//double dx = (event->truth[truth_ind].neutrino.position.X() - (fDetDims[sample.fDet][0][1] + fDetDims[sample.fDet][0][0])/2) / 100000 /* cm -> km */,
//dy = (event->truth[truth_ind].neutrino.position.Y() - (fDetDims[sample.fDet][1][1] + fDetDims[sample.fDet][1][0])/2) / 100000 /* cm -> km */,
//dz = (event->truth[truth_ind].neutrino.position.Z() - fDetDims[sample.fDet][2][0]) / 100000 /* cm -> km */;
//double dist = TMath::Sqrt( dx*dx + dy*dy + (fDetDists[sample.fDet] + dz)*(fDetDists[sample.fDet] + dz) );
// get distance travelled from decay vertex
// parent decay vertex
TVector3 p_decay_vtx = event->reco[n].truth.neutrino.parentDecayVtx;
TVector3 neutrino_vtx = event->reco[n].truth.neutrino.position;

double dz = fEventSamples[fSampleIndex].fBaseline - p_decay_vtx.Z() + neutrino_vtx.Z() - fEventSamples[fSampleIndex].fBeamFrontZ;
double dy = p_decay_vtx.Y() - neutrino_vtx.Y() + fEventSamples[fSampleIndex].fBeamCenterY;
double dx = p_decay_vtx.X() - neutrino_vtx.X() + fEventSamples[fSampleIndex].fBeamCenterX;
double dist = sqrt(dx*dx + dy*dy + dz*dz) / 100000. /* cm -> km */;
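
A self-contained sketch of this baseline computation, with invented coordinates, to make the frame-stitching explicit: the parent decay vertex is in beam coordinates and the interaction vertex in detector coordinates, joined via the Baseline, BeamCenterX/Y, and BeamFrontZ configuration values:

#include <cmath>
#include <iostream>

int main() {
  // invented detector-frame interaction vertex (cm)
  double vx = 50., vy = -20., vz = 250.;
  // invented beam-frame parent decay vertex (cm)
  double px = 10., py = 5., pz = 4000.;
  // hypothetical config: Baseline, BeamCenterX/Y, BeamFrontZ (cm)
  double baseline = 11000., beam_cx = 0., beam_cy = 0., beam_front_z = 0.;

  double dz = baseline - pz + vz - beam_front_z;
  double dy = py - vy + beam_cy;
  double dx = px - vx + beam_cx;
  double dist = std::sqrt(dx*dx + dy*dy + dz*dz) / 100000.;  // cm -> km
  std::cout << "baseline: " << dist << " km" << std::endl;
}
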

// check if energy is within bounds
if (nuE < fEventSamples[fSampleIndex].fBins[0] || nuE > fEventSamples[fSampleIndex].fBins[fEventSamples[fSampleIndex].fBins.size()-1]) {
@@ -278,12 +302,16 @@

// Apply selection (or rejection) efficiencies
int isCC = event->truth[truth_ind].neutrino.iscc;
double wgt = isCC*(fSelectionEfficiency) + (1-isCC)*(1 - fBackgroundRejection);
// double wgt = isCC*(fSelectionEfficiency) + (1-isCC)*(1 - fBackgroundRejection);
// and scale weight
wgt *= fEventSamples[fSampleIndex].fScaleFactor;
// wgt *= fEventSamples[fSampleIndex].fScaleFactor;
// apply uniform weights
for (auto const &key: fUniformWeights) {
wgt *= event->truth[truth_ind].weights.at(key)[0];
// for (auto const &key: fUniformWeights) {
// wgt *= event->truth[truth_ind].weights.at(key)[0];
// }
double wgt = event->reco[n].weight;
if (fEventSamples[fSampleIndex].fScalePOT > 0) {
wgt *= fEventSamples[fSampleIndex].fScalePOT / fEventSamples[fSampleIndex].fPOT;
}
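
The POT normalization above reduces to target POT over accumulated POT; a tiny sketch with invented numbers:

#include <iostream>

int main() {
  double accumulated_pot = 2.2e20;  // summed from subrun->totgoodpot
  double scale_pot = 6.6e20;        // hypothetical "ScalePOT" target exposure
  double wgt = 0.8;                 // per-event weight (event->reco[n].weight)
  if (scale_pot > 0) wgt *= scale_pot / accumulated_pot;
  std::cout << "scaled weight: " << wgt << std::endl;  // 0.8 * 3 = 2.4
}
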

// fill in histograms
@@ -299,6 +327,30 @@
}
}

// apply reco energy bin scaling
void Chi2Sensitivity::Scale() {
for (int sample = 0; sample < fEventSamples.size(); sample++) {
for (int i = 1; i < fEventSamples[sample].fSignalCounts->GetNbinsX()+1; i++) {
double scale = fEventSamples[sample].fEnergyBinScale[i-1];
for (int j = 1; j < fEventSamples[sample].fSignalCounts->GetNbinsY()+1; j++) {
for (int k = 1; k < fEventSamples[sample].fSignalCounts->GetNbinsZ()+1; k++) {
double content = fEventSamples[sample].fSignalCounts->GetBinContent(i, j, k);
//std::cout << "pre sample: " << sample << " bin content: " << fEventSamples[sample].fSignalCounts->GetBinContent(i);
fEventSamples[sample].fSignalCounts->SetBinContent(i, j, k, content * scale);
//std::cout << "post sample: " << sample << " bin content: " << fEventSamples[sample].fSignalCounts->GetBinContent(i);
}
}
}

for (int i = 1; i < fEventSamples[sample].fBkgCounts->GetNbinsX()+1; i++) {
double scale = fEventSamples[sample].fEnergyBinScale[i-1];
double content = fEventSamples[sample].fBkgCounts->GetBinContent(i);
fEventSamples[sample].fBkgCounts->SetBinContent(i, content * scale);
}
}
}
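
Scale() above applies a per-energy-bin multiplier across the signal and background histograms; stripped of ROOT, the operation is just elementwise scaling along the energy axis. A sketch with invented bin contents and scales:

#include <iostream>
#include <vector>

int main() {
  std::vector<double> counts = {12., 30., 7.};   // invented per-bin counts
  std::vector<double> scale  = {1.0, 0.9, 1.2};  // "energy_bin_scale"
  for (std::size_t i = 0; i < counts.size(); i++) counts[i] *= scale[i];
  for (double c : counts) std::cout << c << " ";  // prints: 12 27 8.4
  std::cout << std::endl;
}
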


void Chi2Sensitivity::GetChi2() {

//// Invert Error Matrix
19 changes: 15 additions & 4 deletions sbnanalysis/ana/SBNOsc/Chi2Sensitivity.h
@@ -27,11 +27,18 @@ class Chi2Sensitivity: public core::PostProcessorBase {
// Implement post-processor
void Initialize(fhicl::ParameterSet* config);
void ProcessEvent(const Event* event);
void ProcessSubRun(const SubRun *subrun);
void FileCleanup(TTree* eventTree);

void Finalize() { fCovariance.Finalize(); GetChi2(); GetContours(); Write(); }
void Finalize() {
for (int i = 0; i < fEventSamples.size(); i++) {
std::cout << "POT: " << fEventSamples[i].fPOT << " to: " << fEventSamples[i].fScalePOT << " factor: " << fEventSamples[i].fScalePOT / fEventSamples[i].fPOT << std::endl;
}

fCovariance.Finalize(); Scale(); GetChi2(); GetContours(); Write(); }

// API Functions
void Scale();
void GetChi2();
void GetContours();
void Write();
@@ -58,12 +65,17 @@

// Config
std::string fName;
double fDistance; //!< Distance in km
double fBaseline; //!< baseline (along z) in cm
double fBeamCenterX; //!< Center of beam in detector coordinates in x-dir in cm
double fBeamCenterY; //!< Center of beam in detector coordinates in y-dir in cm
double fBeamFrontZ; //!< Front face of detector along z-dir in cm
std::array<double, 2> fXlim; //!< Detector size in cm
std::array<double, 2> fYlim; //!< Detector size in cm
std::array<double, 2> fZlim; //!< Detector size in cm
double fScaleFactor; //!< Factor for POT (etc.) scaling
double fScalePOT; //!< Target POT to normalize the sample to
double fPOT; //!< Accumulated POT, summed from subruns
int fOscType; //!< Oscillation type: 0 == None, 1 == numu -> nue, 2 == numu -> numu
std::vector<double> fEnergyBinScale;

// Storage
TH1D *fBkgCounts; //!< Background count Histogram
@@ -83,7 +95,6 @@
int fNumDm2;
int fNumSin;
std::vector <double> fLogDm2Lims, fLogSinLims;
std::vector<std::string> fUniformWeights;

std::string fOutputFile;
// whether to save stuff
101 changes: 81 additions & 20 deletions sbnanalysis/ana/SBNOsc/Covariance.cxx
@@ -29,7 +29,13 @@ Covariance::EventSample::EventSample(const fhicl::ParameterSet &config, unsigned
// get configuration stuff
fBins = config.get<std::vector<double> >("binlims");
fName = config.get<std::string>("name", "");
fScaleFactor = config.get<double>("scalefactor", 1.0);
fScalePOT = config.get<double>("ScalePOT", -1);
fPOT = 0.;
fEnergyBinScale = config.get<std::vector<double>>("energy_bin_scale", {});
if (fEnergyBinScale.size() == 0) {
fEnergyBinScale = std::vector<double>(fBins.size() - 1, 1.0);
}
else assert(fEnergyBinScale.size() == fBins.size() - 1);

// setup histograms
std::string cv_title = fName + " Central Value";
@@ -49,18 +55,14 @@


// Gets scale factors (weights) for different universes
std::vector <double> GetUniWeights(const std::map <std::string, std::vector <double> > &weights, const std::vector<std::string> &keys, int n_unis) {

// Tentative format: universe u scale factor is the product of the u-th entries on each vector
// inside the map. For vectors with less than u entries, use the (u - vec_size)-th entry

std::vector <double> GetUniWeights(const std::map <std::string, std::vector <double> > &weights, const std::vector<std::string> &keys, int n_unis, int uni_offset) {
std::vector <double> uweights;

for (int u = 0; u < n_unis; u++) {
double weight = 1.;
for (auto const &key: keys) {
const std::vector<double>& this_weights = weights.at(key);
int wind = u % this_weights.size();
int wind = u + uni_offset;
weight *= this_weights.at(wind);
}
uweights.push_back(weight);
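
A standalone illustration of the universe-weight product with the new offset semantics; the weight map and knob names here are invented. Each universe weight is the product, over the configured keys, of the (u + offset)-th entry of that key's weight vector:

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // invented weight vectors keyed by hypothetical knob names
  std::map<std::string, std::vector<double>> weightmap = {
    {"genie_all", {0.90, 1.10, 1.00, 0.95}},
    {"flux_all",  {1.05, 0.98, 1.02, 1.00}}
  };
  std::vector<std::string> keys = {"genie_all", "flux_all"};
  int n_unis = 2, uni_offset = 1;  // read entries 1 and 2 of each vector
  for (int u = 0; u < n_unis; u++) {
    double weight = 1.;
    for (const auto& key : keys)
      weight *= weightmap.at(key).at(u + uni_offset);
    std::cout << "universe " << u << ": weight " << weight << std::endl;
  }
}
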
@@ -79,15 +81,36 @@
fhicl::ParameterSet pconfig = config->get<fhicl::ParameterSet>("Covariance");

fWeightKeys = pconfig.get<std::vector<std::vector<std::string>>>("WeightKey");
fWeightKeysCC = pconfig.get<std::vector<std::vector<std::string>>>("WeightKeyCC", {});
fWeightKeysNC = pconfig.get<std::vector<std::vector<std::string>>>("WeightKeyNC", {});
fNVariations = fWeightKeys.size();

// uniformly applied weights
if (pconfig.is_key_to_sequence("UniformWeights")) {
fUniformWeights = pconfig.get<std::vector<std::string> >("UniformWeights");
// all should have the same size
assert(fWeightKeysCC.size() == 0 || fWeightKeysCC.size() == fWeightKeys.size());
assert(fWeightKeysNC.size() == 0 || fWeightKeysNC.size() == fWeightKeys.size());

// merge weight keys into CC/NC keys
if (fWeightKeysCC.size() == 0) {
fWeightKeysCC = fWeightKeys;
}
else {
for (unsigned i = 0; i < fNVariations; i++) {
fWeightKeysCC[i].insert(fWeightKeysCC[i].end(), fWeightKeys[i].begin(), fWeightKeys[i].end());
}
}
if (fWeightKeysNC.size() == 0) {
fWeightKeysNC = fWeightKeys;
}
else {
for (unsigned i = 0; i < fNVariations; i++) {
fWeightKeysNC[i].insert(fWeightKeysNC[i].end(), fWeightKeys[i].begin(), fWeightKeys[i].end());
}
}
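
A sketch of the key-merging behavior above, with invented key names: if CC-specific (or, symmetrically, NC-specific) keys are configured, the shared WeightKey entries are appended to them per variation; otherwise the shared keys are used directly:

#include <iostream>
#include <string>
#include <vector>

int main() {
  // invented key names: shared keys apply everywhere, cc keys to CC only
  std::vector<std::vector<std::string>> shared = {{"genie_all"}, {"flux_all"}};
  std::vector<std::vector<std::string>> cc = {{"cc_qe"}, {"cc_res"}};

  if (cc.empty()) {
    cc = shared;  // no CC-specific keys: fall back to the shared list
  } else {
    for (std::size_t i = 0; i < shared.size(); i++)
      cc[i].insert(cc[i].end(), shared[i].begin(), shared[i].end());
  }

  for (const auto& keys : cc) {
    for (const auto& k : keys) std::cout << k << " ";
    std::cout << std::endl;  // prints: "cc_qe genie_all" / "cc_res flux_all"
  }
}
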

// number of universes to be used
fNumAltUnis = pconfig.get<int>("NumAltUnis", 0);
// and offset into the eventweight vector
fAltUniOffset = pconfig.get<int>("AltUniOffset", 0);

// Type of energy
fEnergyType = pconfig.get<std::string>("EnergyType", "Reco");
Expand All @@ -100,7 +123,7 @@ void Covariance::Initialize(fhicl::ParameterSet* config) {
// If an event has any weight larger than this value, it will be
// thrown away for the purposes of covariance construction
fWeightMax = pconfig.get<double>("WeightMax", -1.);

// get event samples
std::vector<fhicl::ParameterSet> configEventSamples = \
config->get<std::vector<fhicl::ParameterSet> >("EventSamples");
@@ -119,6 +142,10 @@
fSampleIndex = 0;
}

void Covariance::ProcessSubRun(const SubRun *subrun) {
fEventSamples[fSampleIndex].fPOT += subrun->totgoodpot;
}

void Covariance::ProcessEvent(const Event *event) {
// iterate over each interaction in the event
for (int n = 0; n < event->reco.size(); n++) {
@@ -137,20 +164,35 @@

// Apply selection (or rejection) efficiencies
int isCC = event->truth[truth_ind].neutrino.iscc;
double wgt = isCC*(fSelectionEfficiency) + (1-isCC)*(1 - fBackgroundRejection);
// double wgt = isCC*(fSelectionEfficiency) + (1-isCC)*(1 - fBackgroundRejection);
// apply scaling from fScaleFactor
wgt *= fEventSamples[fSampleIndex].fScaleFactor;
// wgt *= fEventSamples[fSampleIndex].fScaleFactor;
// apply uniform weights
for (auto const &key: fUniformWeights) {
wgt *= event->truth[truth_ind].weights.at(key)[0];
// for (auto const &key: fUniformWeights) {
// wgt *= event->truth[truth_ind].weightmap.at(key)[0];
// }
double wgt = event->reco[n].weight;
// apply POT scaling if configured
if (fEventSamples[fSampleIndex].fScalePOT > 0) {
wgt *= fEventSamples[fSampleIndex].fScalePOT / fEventSamples[fSampleIndex].fPOT;
}

// Get weights for each alternative universe
std::vector<std::vector <double>> uweights;
for (std::vector<std::string> &weight_keys: fWeightKeys) {
uweights.push_back(
GetUniWeights(event->truth[truth_ind].weights, weight_keys, fNumAltUnis)
);
// Get weights for each alternative universe
if (isCC) {
for (std::vector<std::string> &weight_keys: fWeightKeysCC) {
uweights.push_back(
GetUniWeights(event->truth[truth_ind].weightmap, weight_keys, fNumAltUnis, fAltUniOffset)
);
}
}
else {
for (std::vector<std::string> &weight_keys: fWeightKeysNC) {
uweights.push_back(
GetUniWeights(event->truth[truth_ind].weightmap, weight_keys, fNumAltUnis, fAltUniOffset)
);
}

}

// see if weight is too big
Expand Down Expand Up @@ -179,6 +221,25 @@ void Covariance::ProcessEvent(const Event *event) {
}
}

void Covariance::Scale() {
for (int sample_i = 0; sample_i < fEventSamples.size(); sample_i++) {
for (int i = 1; i < fEventSamples[sample_i].fCentralValue->GetNbinsX()+1; i++) {
//std::cout << "pre sample: " << sample_i << " bin content: " << fEventSamples[sample_i].fCentralValue->GetBinContent(i);
fEventSamples[sample_i].fCentralValue->SetBinContent(i,
fEventSamples[sample_i].fEnergyBinScale[i-1] * fEventSamples[sample_i].fCentralValue->GetBinContent(i));
//std::cout << "post sample: " << sample_i << " bin content: " << fEventSamples[sample_i].fCentralValue->GetBinContent(i);
}
for (int variation = 0; variation < fNVariations; variation++) {
for (int uni = 0; uni < fNumAltUnis; uni++) {
for (int i = 1; i < fEventSamples[sample_i].fUniverses[variation][uni]->GetNbinsX()+1; i++) {
double content = fEventSamples[sample_i].fUniverses[variation][uni]->GetBinContent(i);
fEventSamples[sample_i].fUniverses[variation][uni]->SetBinContent(i, content * fEventSamples[sample_i].fEnergyBinScale[i-1]);
}
}
}
}
}


void Covariance::FileCleanup(TTree *eventTree) {
// onto the next sample