DAS 3.0: Das Analysis System

Namespaces
BTagging
Chebyshev
DoubleCrystalBall
JetEnergy
Jets
JetVeto
MissingET
MN
Muon
Muons
Normalisation
Photon
Prefiring
PUprofile
PUstaub
Uncertainties
Unfolding
Classes

class AbstractEvent
struct AbstractPhysicsObject
struct ControlPlots
struct Di
class GenericSFApplier
struct GenEvent
struct GenJet
struct GenMuon
struct GenPhoton
struct Greta
struct GretaPolynomial
class GretaPolynomialReader
struct Helper
struct JMEmatching
struct MET
struct Parameters
class PhysicsObject
struct PileUp
struct PrimaryVertex
struct RecEvent
struct RecJet
struct RecMuon
struct RecPhoton
class Teddy
struct Thunberg
struct Trigger
struct TriggerLumi
struct Weight
Typedefs

using GenDijet = Di<GenJet, GenJet>
using GenDimuon = Di<GenMuon, GenMuon>
using GenZJet = Di<GenDimuon, GenJet>
using RecDijet = Di<RecJet, RecJet>
using RecDimuon = Di<RecMuon, RecMuon>
using RecZJet = Di<RecDimuon, RecJet>
typedef ROOT::Math::LorentzVector<ROOT::Math::PtEtaPhiM4D<float>> FourVector
typedef std::vector<Weight> Weights
Functions

void mergeNtuples (const vector<fs::path> &inputs, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})
std::vector<TString> MakeTitle (const std::vector<double> &edges, const char *v, bool lowEdge, bool highEdge, std::function<const char *(double)> format)
Darwin::Tools::Options Options (const char *, int = Darwin::Tools::none)
std::unique_ptr<TH1> getHistSafe (std::unique_ptr<TFile> &f, const std::string &name)
template<class Object> Weights & weightsRef (Object &obj)
double identity (double x)
template<const size_t d> TF1 * GetSmoothFit (TH1 *h, int ifm, int ifM, int nSigmaStop = 0, const char *options = "Q0SNRE", bool autoStop = false)
template<const size_t d> TF1 * GetSmoothFit (TH1 *h, double m, double M, int nSigmaStop = 0, const char *options = "Q0SNRE", bool autoStop = false)
bool pt_sort (const PhysicsObject &j1, const PhysicsObject &j2)
template<typename TTreePtr> bool branchExists (const TTreePtr &tree, TString brName)
template<typename T> std::vector<T *> GetObjects (TDirectory *dir)
TDirectory * GetDirectory (TDirectory *dir, const std::vector<const char *> &names)
void EventLoop (const vector<fs::path> &inputs, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})
void SimpleExec (const fs::path &input, const fs::path &output, const pt::ptree &config, const int steering)
vector<double> GetTriggerTurnons (const fs::path &f)
void getMETfraction (const vector<fs::path> &inputs, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})
std::map<int, TriggerLumi> GetLumiFromFiles (const std::filesystem::path &lumi_file, const std::filesystem::path &turnon_file)
map<int, TriggerLumi> GetLumiFromFiles (const fs::path &lumi_file, const fs::path &turnon_file)
bool operator== (const PhysicsObject &l, const PhysicsObject &r)
bool operator< (const PhysicsObject &l, const PhysicsObject &r)
bool operator> (const PhysicsObject &l, const PhysicsObject &r)
bool operator== (const Weight &w, const int v)
bool operator== (const Weight &w, const float v)
bool operator== (const Weight &w, const double v)
bool operator== (const Weight &l, const Weight &r)
double operator* (const Weight &w, const int v)
double operator* (const int v, const Weight &w)
double operator* (const Weight &w, const float v)
double operator* (const float v, const Weight &w)
double operator* (const Weight &w, const double v)
double operator* (const double v, const Weight &w)
Weight & operator*= (Weight &w, const int v)
Weight & operator/= (Weight &w, const int v)
Weight & operator*= (Weight &w, const float v)
Weight & operator/= (Weight &w, const float v)
Weight & operator*= (Weight &w, const double v)
Weight & operator/= (Weight &w, const double v)
double operator* (const Weight &w1, const Weight &w2)
Weights & operator*= (Weights &wgts, const int v)
Weights & operator/= (Weights &wgts, const int v)
Weights & operator*= (Weights &wgts, const float v)
Weights & operator/= (Weights &wgts, const float v)
Weights & operator*= (Weights &wgts, const double v)
Weights & operator/= (Weights &wgts, const double v)
void getHighScalePUeventIDs (const vector<fs::path> &inputs, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})
void getHighScalePUeventIDsHT (const vector<fs::path> &inputs, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})
pair<int, int> GetBinRange (TH1 *h)
void ScaleWidth (TH1 *h, TH2 *cov)
void getSmoothFits (const fs::path &input, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})
Eigen::VectorXd identity (const Eigen::VectorXd &x)
Variables

static const std::vector<double> pthat_edges {30,34,39,44,50,56,63,71,80,89,98,108,120,131,143,156,170,196,226,260,300,336,375,420,470,500,531,564,600,645,693,744,800,846,894,946,1000,1088,1183,1287,1400,1491,1587,1690,1800,1934,2078,2233,2400,2579,2771,2978,3200,3578,4000,4472,5000}
static const std::vector<double> pt_edges {9,12,15,18,21,24,28,32,37,43,49,56,64,74,84,97,114,133,153,174,196,220,245,272,300,330,362,395,430,468,507,548,592,638,686,737,790,846,905,967,1032,1101,1172,1248,1327,1410,1497,1588,1684,1784,1890,2000,2116,2238,2366,2500,2640,2787,2941,3103,3273,3450,3637,3832,4037}
static const std::vector<double> Mjj_edges {160, 200, 249, 306, 372, 449, 539, 641, 756, 887, 1029, 1187, 1361, 1556, 1769, 2008, 2273, 2572, 2915, 3306, 3754, 4244, 4805, 5374, 6094, 6908, 7861, 8929, 10050}
static const std::vector<double> pt_av_edges {147, 175, 207, 243, 284, 329, 380, 437, 499, 569, 646, 732, 827, 931, 1046, 1171, 1307, 1458, 1621, 1806, 2003, 2217, 2453, 2702}
static const std::vector<double> y_edges {0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0}
static const std::vector<double> n_edges {0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5}
static const int nYbins = y_edges.size()-1
static const int nPtBins = pt_edges.size()-1
static const int nMjjBins = Mjj_edges.size()-1
static const int nPtDijetBins = pt_av_edges.size()-1
static const int maxMult = n_edges.size()-1
static const std::vector<TString> yBins = MakeTitle(y_edges, "|y|", false, true, [](double v) { return Form("%.1f", v);} )
static const double ptmin = pt_edges.front()
static const double ptmax = pt_edges.back()
static const double ymin = y_edges.front()
static const double ymax = y_edges.back()
static const int nPUbins = 100
const std::string SysUp = "Up"
const std::string SysDown = "Down"
Typedef Documentation

typedef ROOT::Math::LorentzVector<ROOT::Math::PtEtaPhiM4D<float>> FourVector
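A minimal usage sketch of this type, ROOT's GenVector LorentzVector in (pt, eta, phi, m) coordinates; the kinematic values below are purely illustrative:

```cpp
#include <Math/Vector4D.h>

using FourVector = ROOT::Math::LorentzVector<ROOT::Math::PtEtaPhiM4D<float>>;

// illustrative kinematics: pt [GeV], eta, phi, mass [GeV]
FourVector j1(50.f, 1.2f, 0.3f, 8.f),
           j2(35.f, -0.7f, 2.8f, 6.f);
FourVector dijet = j1 + j2;          // four-vectors can be combined
float mjj = dijet.M();               // invariant mass of the pair
float yjj = dijet.Rapidity();        // rapidity of the pair
```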
Function Documentation

template<typename TTreePtr> bool DAS::branchExists (const TTreePtr &tree, TString brName)

Check if a branch exists: loop over the branches of `tree` and check whether `brName` is among them.
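A possible usage sketch, guarding access to a branch that may not be present in every n-tuple; the file, tree and branch names are placeholders:

```cpp
#include <iostream>
#include <memory>
#include <TFile.h>
#include <TTree.h>

void exampleBranchExists ()
{
    std::unique_ptr<TFile> f(TFile::Open("ntuple.root"));  // placeholder file name
    TTree * tree = f->Get<TTree>("events");                // placeholder tree name; owned by the file
    if (DAS::branchExists(tree, "genJets"))                // works with raw or smart pointers
        std::cout << "gen-level information available\n";
}
```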
void DAS::EventLoop (const vector<fs::path> &inputs, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})
pair<int, int> DAS::GetBinRange (TH1 *h)

TDirectory * DAS::GetDirectory (TDirectory *dir, const std::vector<const char *> &names)

Get a ((...)sub)subdirectory of `dir`.
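A possible usage sketch, descending into nested subdirectories in one call; the file and directory names are placeholders:

```cpp
#include <memory>
#include <TDirectory.h>
#include <TFile.h>

void exampleGetDirectory ()
{
    std::unique_ptr<TFile> f(TFile::Open("plots.root"));              // placeholder file name
    TDirectory * d = DAS::GetDirectory(f.get(), {"muons", "tightID"}); // placeholder directory names
    // equivalent to descending into "muons", then into its "tightID" subdirectory
    d->ls();
}
```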
void DAS::getHighScalePUeventIDs (const vector<fs::path> &inputs, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})

Make a 2-column list of MB event IDs with the max genpt (if any).

Unlike most commands, this one should be run directly on the output of the n-tupliser, and it only makes sense for MB samples without pileup (even though it would technically run on other inputs).

Parameters:
inputs: input ROOT files (n-tuples)
output: output ROOT file (n-tuple)
config: config handled with `Darwin::Tools::options`
steering: parameters obtained from explicit options
slice: number and index of slice
void DAS::getHighScalePUeventIDsHT (const vector<fs::path> &inputs, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})

Make a 2-column list of MB event IDs with the max genht (if any).

Unlike most commands, this one should be run directly on the output of the n-tupliser, and it only makes sense for MB samples without pileup (even though it would technically run on other inputs).

Parameters:
inputs: input ROOT files (n-tuples)
output: output ROOT file (n-tuple)
config: config handled with `Darwin::Tools::options`
steering: parameters obtained from explicit options
slice: number and index of slice
std::unique_ptr<TH1> DAS::getHistSafe (std::unique_ptr<TFile> &f, const std::string &name)

Gets a histogram from a TFile.

Throws Darwin::Exceptions::BadInput if the histogram doesn't exist.
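A possible usage sketch; the file and histogram names are placeholders:

```cpp
#include <memory>
#include <string>
#include <TFile.h>
#include <TH1.h>

void exampleGetHistSafe ()
{
    std::unique_ptr<TFile> f(TFile::Open("input.root"));        // placeholder file name
    // throws Darwin::Exceptions::BadInput instead of silently returning a null pointer
    std::unique_ptr<TH1> h = DAS::getHistSafe(f, "ptSpectrum"); // placeholder histogram name
    h->Print();
}
```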
map<int, TriggerLumi> DAS::GetLumiFromFiles (const fs::path &lumi_file, const fs::path &turnon_file)

GetLumiFromFiles returns a map whose keys are the trigger thresholds at HLT (e.g. 40, 60, etc.) and whose values hold the corresponding turn-on point and effective luminosity.

std::map<int, TriggerLumi> DAS::GetLumiFromFiles (const std::filesystem::path &lumi_file, const std::filesystem::path &turnon_file)

Parameters:
lumi_file: path to text file with effective luminosities
turnon_file: path to text file with turn-on points
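A possible usage sketch; the input file names are placeholders:

```cpp
#include <filesystem>
#include <iostream>

void exampleGetLumiFromFiles ()
{
    std::filesystem::path lumi_file = "lumi.txt",      // placeholder file names
                          turnon_file = "turnons.txt";
    auto triggers = DAS::GetLumiFromFiles(lumi_file, turnon_file);
    for (const auto& [threshold, trigger_lumi] : triggers)
        std::cout << "HLT threshold: " << threshold << " GeV\n";
    // each mapped TriggerLumi carries the turn-on point and the effective luminosity
}
```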
void DAS::getMETfraction (const vector<fs::path> &inputs, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})
template<typename T> std::vector<T *> DAS::GetObjects (TDirectory *dir)

Find all objects of type `T` directly in a TDirectory (i.e. not recursively).
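A possible usage sketch, collecting every TH1 sitting directly in a file; the file name is a placeholder:

```cpp
#include <iostream>
#include <memory>
#include <vector>
#include <TFile.h>
#include <TH1.h>

void exampleGetObjects ()
{
    std::unique_ptr<TFile> f(TFile::Open("plots.root"));       // placeholder file name
    std::vector<TH1*> hists = DAS::GetObjects<TH1>(f.get());   // subdirectories are not visited
    for (TH1 * h : hists)
        std::cout << h->GetName() << '\n';
}
```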
template<const size_t d> TF1 * DAS::GetSmoothFit (TH1 *h, double m, double M, int nSigmaStop = 0, const char *options = "Q0SNRE", bool autoStop = false)

Parameters:
h: histogram to fit (NB: assumed to be normalised to bin width)
m: min
M: max
nSigmaStop: stop automatically before maxdegree once chi2/ndf is compatible with 1 within nSigma
options: fit options
template<const size_t d> TF1 * DAS::GetSmoothFit (TH1 *h, int ifm, int ifM, int nSigmaStop = 0, const char *options = "Q0SNRE", bool autoStop = false)

Parameters:
h: histogram to fit (NB: assumed to be normalised to bin width)
ifm: min index
ifM: max index
nSigmaStop: stop automatically before maxdegree once chi2/ndf is compatible with 1 within nSigma
options: fit options
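A sketch of a possible call, using the min/max overload above on a toy spectrum. The histogram content is purely illustrative, and interpreting the template parameter as the maximal polynomial degree is an assumption suggested by the `nSigmaStop` description:

```cpp
#include <TF1.h>
#include <TH1D.h>

void exampleGetSmoothFit ()
{
    TF1 model("model", "1e3*exp(-0.005*x)", 100., 1000.);  // toy spectrum for illustration
    TH1D h("h", "toy spectrum", 50, 100., 1000.);
    h.FillRandom("model", 100000);
    h.Scale(1., "width");                                   // the fit assumes bin-width normalisation

    // assumption: <5> caps the degree; stop early once chi2/ndf is good within 2 sigma
    TF1 * fit = DAS::GetSmoothFit<5>(&h, 100., 1000., /*nSigmaStop =*/ 2, "Q0SNRE", /*autoStop =*/ true);
    fit->Print();
}
```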
void DAS::getSmoothFits (const fs::path &input, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})
vector<double> DAS::GetTriggerTurnons (const fs::path &f)

Load the two-column file with the HLT and corresponding offline PF thresholds.
Eigen::VectorXd DAS::identity (const Eigen::VectorXd &x)

Identity (Eigen): trivial function returning its argument, used as the default function in the constructor of Teddy.
std::vector<TString> DAS::MakeTitle (const std::vector<double> &edges, const char *v, bool lowEdge, bool highEdge, std::function<const char *(double)> format)

Make a vector of properly formatted titles from bin edges.
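For instance, the `yBins` titles listed among the variables above are built like this (reproduced here purely as a usage illustration):

```cpp
#include <functional>
#include <vector>
#include <TString.h>

const std::vector<TString> yBins =
    DAS::MakeTitle(DAS::y_edges, "|y|", /*lowEdge =*/ false, /*highEdge =*/ true,
                   [](double v) { return Form("%.1f", v); });
// one title per |y| bin, with edges formatted to one decimal place
```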
void DAS::mergeNtuples (const vector<fs::path> &inputs, const fs::path &output, const pt::ptree &config, const int steering, const DT::Slice slice = {1, 0})

Get the fractioned n-tuples after the n-tuplisation and merge them into a single file. Complementary information about the pile-up may be included. A minimal selection is applied, e.g. on the primary vertex (PV).

Parameters:
inputs: input ROOT files (n-tuples)
output: output ROOT file (n-tuple)
config: config handled with `Darwin::Tools::options`
steering: parameters obtained from explicit options
slice: number and index of slice
DT::Options DAS::Options (const char *description, int options = Darwin::Tools::none)

Constructs Darwin options with the correct commit information.
void DAS::ScaleWidth (TH1 *h, TH2 *cov)

Divide the histogram by the bin width and adapt the covariance matrix accordingly.
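The following stand-alone sketch illustrates the intended effect; dividing each covariance entry by the product of the two bin widths is an assumption spelled out for clarity, not a copy of the implementation:

```cpp
#include <TH1.h>
#include <TH2.h>

// Hypothetical stand-in illustrating what a bin-width scaling is expected to do.
void scaleWidthSketch (TH1 * h, TH2 * cov)
{
    const int n = h->GetNbinsX();
    for (int i = 1; i <= n; ++i)
        for (int j = 1; j <= n; ++j) {
            double w2 = h->GetBinWidth(i) * h->GetBinWidth(j); // product of the two bin widths
            cov->SetBinContent(i, j, cov->GetBinContent(i, j) / w2);
        }
    h->Scale(1., "width");   // divides bin contents and errors by the bin width
}
```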
void DAS::SimpleExec (const fs::path &input, const fs::path &output, const pt::ptree &config, const int steering)

Template for a function (TODO).
template<class Object> Weights & DAS::weightsRef (Object &obj)
Variable Documentation

const std::string DAS::SysDown = "Down"
Suffix used for "down" uncertainties. Follows the Combine convention.
const std::string DAS::SysUp = "Up"
Suffix used for "up" uncertainties. Follows the Combine convention.
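A sketch of how these suffixes are typically combined with the name of a systematic source; the base name "JES" is only illustrative:

```cpp
#include <string>

const std::string source = "JES";                 // illustrative systematic source
const std::string up   = source + DAS::SysUp;     // "JESUp"
const std::string down = source + DAS::SysDown;   // "JESDown"
```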