sPHENIX code displayed by LXR

// Class: ReadLD
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method         : LD::LD
TMVA Release   : 4.2.0         [262656]
ROOT Release   : 5.34/38       [336422]
Creator        : vassalli
Date           : Wed Jan 23 17:51:47 2019
Host           : Linux cvmfswrite02.sdcc.bnl.gov 3.10.0-693.11.6.el7.x86_64 #1 SMP Wed Jan 3 18:09:42 CST 2018 x86_64 x86_64 x86_64 GNU/Linux
Dir            : /direct/phenix+u/vassalli/sphenix/single/Training
Training events: 2407
Analysis type  : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
# Default:
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
VerbosityLevel: "Default" [Verbosity level]
VarTransform: "None" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
H: "False" [Print method-specific help message]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 10
track_deta                    track_deta                    track_deta                    track_deta                                                      'F'    [1.05425715446e-06,1.83402311802]
track_dlayer                  track_dlayer                  track_dlayer                  track_dlayer                                                    'I'    [0,14]
track_layer                   track_layer                   track_layer                   track_layer                                                     'I'    [0,23]
track_pT                      track_pT                      track_pT                      track_pT                                                        'F'    [0.209833949804,34.1584281921]
approach_dist                 approach_dist                 approach_dist                 approach_dist                                                   'F'    [1.69032391568e-05,12.8133029938]
vtx_radius                    vtx_radius                    vtx_radius                    vtx_radius                                                      'F'    [0.00339345191605,20.9999389648]
vtxTrack_dist                 vtxTrack_dist                 vtxTrack_dist                 vtxTrack_dist                                                   'F'    [0.0258899498731,10.0588207245]
photon_m                      photon_m                      photon_m                      photon_m                                                        'F'    [1.044480443,713.936157227]
photon_pT                     photon_pT                     photon_pT                     photon_pT                                                       'F'    [0.061235960573,5008.76708984]
cluster_prob                  cluster_prob                  cluster_prob                  cluster_prob                                                    'F'    [0,0.999874174595]
NSpec 1
vtx_chi2                      vtx_chi2                      vtx_chi2                      F                                                               'F'    [0,3.33078734987e-36]


============================================================================ */
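
// Usage note (added for illustration; not part of the TMVA-generated file):
// the constructor expects the ten training variable names in exactly the
// order of the #VAR block above, and GetMvaValue() expects the corresponding
// values in the same order.  The spectator vtx_chi2 is not an input.  A
// minimal sketch:
//
//    std::vector<std::string> vars;
//    vars.push_back("track_deta");   // ... through "cluster_prob", in order
//    ReadLD reader( vars );
//
// A complete compilable sketch is given at the end of this file.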

#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif
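
// The following helper is an illustrative sketch added to this listing (it is
// not produced by TMVA).  It shows how any standalone reader can be evaluated
// through the abstract IClassifierReader interface; the name
// "EvaluateClassifier" is hypothetical.
inline double EvaluateClassifier( const IClassifierReader& reader,
                                  const std::vector<double>& inputValues )
{
   // refuse to evaluate a reader whose constructor-time checks failed
   if (!reader.IsStatusClean()) return 0;
   return reader.GetMvaValue( inputValues );
}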

class ReadLD : public IClassifierReader {

 public:

   // constructor
   ReadLD( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadLD" ),
        fNvars( 10 ),
        fIsNormalised( false )
   {
      // the training input variables
      const char* inputVars[] = { "track_deta", "track_dlayer", "track_layer", "track_pT", "approach_dist", "vtx_radius", "vtxTrack_dist", "photon_m", "photon_pT", "cluster_prob" };

      // sanity checks
      if (theInputVars.empty()) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = 0;
      fVmax[0] = 0;
      fVmin[1] = 0;
      fVmax[1] = 0;
      fVmin[2] = 0;
      fVmax[2] = 0;
      fVmin[3] = 0;
      fVmax[3] = 0;
      fVmin[4] = 0;
      fVmax[4] = 0;
      fVmin[5] = 0;
      fVmax[5] = 0;
      fVmin[6] = 0;
      fVmax[6] = 0;
      fVmin[7] = 0;
      fVmax[7] = 0;
      fVmin[8] = 0;
      fVmax[8] = 0;
      fVmin[9] = 0;
      fVmax[9] = 0;
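
      // Note (added): all fVmin/fVmax entries are zero because this method was
      // trained with VarTransform="None" and fIsNormalised is false, so
      // NormVariable() is never applied to the inputs.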

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'I';
      fType[2] = 'I';
      fType[3] = 'F';
      fType[4] = 'F';
      fType[5] = 'F';
      fType[6] = 'F';
      fType[7] = 'F';
      fType[8] = 'F';
      fType[9] = 'F';

      // initialize constants
      Initialize();

   }

   // destructor
   virtual ~ReadLD() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const;

 private:

   // method-specific destructor
   void Clear();

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar()           const { return fNvars; }
   char   GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   const bool fIsNormalised;
   bool IsNormalised() const { return fIsNormalised; }
   double fVmin[10];
   double fVmax[10];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }

   // type of input variable: 'F' or 'I'
   char   fType[10];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)
   std::vector<double> fLDCoefficients;
};

inline void ReadLD::Initialize()
{
   fLDCoefficients.push_back( -0.00860460026319 );
   fLDCoefficients.push_back( -0.291427546467 );
   fLDCoefficients.push_back( -0.0549111519284 );
   fLDCoefficients.push_back( 0.0819497205068 );
   fLDCoefficients.push_back( 0.0233954513391 );
   fLDCoefficients.push_back( -0.0104368591752 );
   fLDCoefficients.push_back( 0.0176627661619 );
   fLDCoefficients.push_back( -0.0637555472149 );
   fLDCoefficients.push_back( -0.00139779660884 );
   fLDCoefficients.push_back( 0.000155714488801 );
   fLDCoefficients.push_back( -0.12525007511 );

   // sanity check
   if (fLDCoefficients.size() != fNvars+1) {
      std::cout << "Problem in class \"" << fClassName << "\"::Initialize: mismatch in number of input values: "
                << fLDCoefficients.size() << " != " << fNvars+1 << std::endl;
      fStatusIsClean = false;
   }
}

inline double ReadLD::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   double retval = fLDCoefficients[0];
   for (size_t ivar = 1; ivar < fNvars+1; ivar++) {
      retval += fLDCoefficients[ivar]*inputValues[ivar-1];
   }

   return retval;
}
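
// Illustrative note (not part of the generated file): the LD response computed
// above is an affine function of the inputs,
//
//    y(x) = c_0 + sum_{i=1..10} c_i * x_{i-1}
//
// with c_0 the offset stored in fLDCoefficients[0].  Assuming that coefficient
// layout, the same value can be written with std::inner_product; the helper
// name below is hypothetical.
#include <numeric>

inline double ReadLD_LinearResponse( const std::vector<double>& coefficients,
                                     const std::vector<double>& inputValues )
{
   // coefficients = { c_0, c_1, ..., c_10 }; inputValues must hold at least
   // coefficients.size()-1 entries, in the training variable order
   return std::inner_product( coefficients.begin() + 1, coefficients.end(),
                              inputValues.begin(), coefficients.front() );
}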
0229 
0230 // Clean up
0231 inline void ReadLD::Clear() 
0232 {
0233    // clear coefficients
0234    fLDCoefficients.clear(); 
0235 }

inline double ReadLD::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
      retval = 0;
   }
   else {
      if (IsNormalised()) {
         // normalise variables
         std::vector<double> iV;
         iV.reserve(inputValues.size());
         int ivar = 0;
         for (std::vector<double>::const_iterator varIt = inputValues.begin();
              varIt != inputValues.end(); varIt++, ivar++) {
            iV.push_back(NormVariable( *varIt, fVmin[ivar], fVmax[ivar] ));
         }
         retval = GetMvaValue__( iV );
      }
      else {
         retval = GetMvaValue__( inputValues );
      }
   }

   return retval;
}
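
// A minimal end-to-end usage sketch (added for illustration; not part of the
// generated file).  The guard macro READLD_EXAMPLE_MAIN and the numerical
// input values are hypothetical placeholders, and C++11 list-initialisation is
// assumed; in a real analysis the ten inputs would come from the candidate
// being classified, in the order passed to the constructor.
#ifdef READLD_EXAMPLE_MAIN
int main()
{
   std::vector<std::string> vars = {
      "track_deta", "track_dlayer", "track_layer", "track_pT",
      "approach_dist", "vtx_radius", "vtxTrack_dist",
      "photon_m", "photon_pT", "cluster_prob" };
   ReadLD reader( vars );

   // one candidate's input values (placeholders, same order as "vars")
   std::vector<double> inputs =
      { 0.01, 1.0, 5.0, 1.2, 0.05, 3.4, 0.8, 50.0, 2.5, 0.9 };

   if (reader.IsStatusClean())
      std::cout << "LD response: " << reader.GetMvaValue( inputs ) << std::endl;

   return 0;
}
#endif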