// Class: ReadCuts
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method         : Cuts::Cuts
TMVA Release   : 4.2.0         [262656]
ROOT Release   : 5.34/38       [336422]
Creator        : vassalli
Date           : Fri Aug 23 18:30:11 2019
Host           : Linux cvmfswrite02.sdcc.bnl.gov 3.10.0-693.11.6.el7.x86_64 #1 SMP Wed Jan 3 18:09:42 CST 2018 x86_64 x86_64 x86_64 GNU/Linux
Dir            : /direct/phenix+u/vassalli/sphenix/single/Training
Training events: 6750
Analysis type  : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
# Default:
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
VerbosityLevel: "Default" [Verbosity level]
VarTransform: "None" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
H: "False" [Print method-specific help message]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
FitMethod: "GA" [Minimisation Method (GA, SA, and MC are the primary methods to be used; the others have been introduced for testing purposes and are deprecated)]
EffMethod: "EffSel" [Selection Method]
CutRangeMin[0]: "-1.000000e+00" [Minimum of allowed cut range (set per variable)]
    CutRangeMin[1]: "-1.000000e+00"
    CutRangeMin[2]: "-1.000000e+00"
    CutRangeMin[3]: "-1.000000e+00"
    CutRangeMin[4]: "-1.000000e+00"
CutRangeMax[0]: "-1.000000e+00" [Maximum of allowed cut range (set per variable)]
    CutRangeMax[1]: "-1.000000e+00"
    CutRangeMax[2]: "-1.000000e+00"
    CutRangeMax[3]: "-1.000000e+00"
    CutRangeMax[4]: "-1.000000e+00"
VarProp[0]: "NotEnforced" [Categorisation of cuts]
    VarProp[1]: "NotEnforced"
    VarProp[2]: "NotEnforced"
    VarProp[3]: "NotEnforced"
    VarProp[4]: "NotEnforced"
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 5
abs(track_deta)               abs_track_deta_               abs(track_deta)               abs(track_deta)                                                 'F'    [3.57627868652e-07,1.97799444199]
abs(cluster_deta)             abs_cluster_deta_             abs(cluster_deta)             abs(cluster_deta)                                               'F'    [0,0.0315845087171]
abs(cluster_dphi)             abs_cluster_dphi_             abs(cluster_dphi)             abs(cluster_dphi)                                               'F'    [0,6.23594331741]
abs(track_dlayer)             abs_track_dlayer_             abs(track_dlayer)             abs(track_dlayer)                                               'I'    [0,15]
approach_dist                 approach_dist                 approach_dist                 approach_dist                                                   'F'    [1.41833572798e-06,73.1929855347]
NSpec 4
track_layer                   track_layer                   track_layer                   I                                                               'F'    [0,22]
track_pT                      track_pT                      track_pT                      F                                                               'F'    [2.01860404015,9702.08203125]
track_dca                     track_dca                     track_dca                     F                                                               'F'    [1.29460659082e-05,64.1267471313]
cluster_prob                  cluster_prob                  cluster_prob                  F                                                               'F'    [0,0.999049782753]


============================================================================ */
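
// Note (added for clarity, not part of the generated file): the five NVar
// entries above list, in order, the formulas whose values must be passed to
// GetMvaValue(); the four NSpec entries are spectator variables recorded for
// bookkeeping during training and are not classifier inputs. A hedged usage
// sketch is appended at the end of this file.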

#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif
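
// Illustrative sketch (not produced by MethodBase::MakeClass): any reader that
// implements the interface above can be driven generically through a reference
// to IClassifierReader. The helper name "EvaluateReader" is an assumption made
// for this example only.
inline double EvaluateReader( const IClassifierReader& reader,
                              const std::vector<double>& inputValues )
{
   // a dirty status means the constructor-time sanity checks failed,
   // so mirror GetMvaValue() below and return a neutral response of 0
   if (!reader.IsStatusClean()) return 0;
   return reader.GetMvaValue( inputValues );
}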

class ReadCuts : public IClassifierReader {

 public:

   // constructor
   ReadCuts( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadCuts" ),
        fNvars( 5 ),
        fIsNormalised( false )
   {
      // the training input variables
      const char* inputVars[] = { "abs(track_deta)", "abs(cluster_deta)", "abs(cluster_dphi)", "abs(track_dlayer)", "approach_dist" };

      // sanity checks
      if (theInputVars.size() <= 0) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = 0;
      fVmax[0] = 0;
      fVmin[1] = 0;
      fVmax[1] = 0;
      fVmin[2] = 0;
      fVmax[2] = 0;
      fVmin[3] = 0;
      fVmax[3] = 0;
      fVmin[4] = 0;
      fVmax[4] = 0;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'I';
      fType[4] = 'F';

      // initialize constants
      Initialize();

   }

   // destructor
   virtual ~ReadCuts() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const;

 private:

   // method-specific destructor
   void Clear();

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar()           const { return fNvars; }
   char   GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   const bool fIsNormalised;
   bool IsNormalised() const { return fIsNormalised; }
   double fVmin[5];
   double fVmax[5];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
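      // worked example (comment added for clarity): x = 2.5 on [0, 10]
      // maps to 2*(2.5 - 0)/(10 - 0) - 1 = -0.5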
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }

   // type of input variable: 'F' or 'I'
   char   fType[5];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)
   // not implemented for class: "ReadCuts"
};

inline double ReadCuts::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
      retval = 0;
   }
   else {
      if (IsNormalised()) {
         // normalise variables
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
         }
         retval = GetMvaValue__( iV );
      }
      else {
         retval = GetMvaValue__( inputValues );
      }
   }

   return retval;
}
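
// ---------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the generated file): a
// minimal driver for this standalone reader, assuming the remainder of the
// generated file (Initialize, GetMvaValue__, Clear) is present. The candidate
// values below are invented; compile with -DREADCUTS_USAGE_EXAMPLE to build it.
#ifdef READCUTS_USAGE_EXAMPLE
int main()
{
   // variable names must match the training variables, in the same order
   std::vector<std::string> inputVars;
   inputVars.push_back( "abs(track_deta)" );
   inputVars.push_back( "abs(cluster_deta)" );
   inputVars.push_back( "abs(cluster_dphi)" );
   inputVars.push_back( "abs(track_dlayer)" );
   inputVars.push_back( "approach_dist" );

   ReadCuts reader( inputVars );
   if (!reader.IsStatusClean()) return 1;

   // one candidate, with values supplied in the same order as the names above
   std::vector<double> inputValues;
   inputValues.push_back( 0.01 );  // abs(track_deta)
   inputValues.push_back( 0.002 ); // abs(cluster_deta)
   inputValues.push_back( 0.05 );  // abs(cluster_dphi)
   inputValues.push_back( 1 );     // abs(track_dlayer)
   inputValues.push_back( 0.3 );   // approach_dist

   std::cout << "ReadCuts response: " << reader.GetMvaValue( inputValues ) << std::endl;
   return 0;
}
#endif // READCUTS_USAGE_EXAMPLE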