// Class: ReadMLPnew
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method         : MLP::MLPnew
TMVA Release   : 4.2.1         [262657]
ROOT Release   : 6.22/02       [398850]
Creator        : cdean
Date           : Tue Apr 20 16:39:06 2021
Host           : Linux cvmfswrite02.sdcc.bnl.gov 3.10.0-957.12.2.el7.x86_64 #1 SMP Tue May 14 15:23:27 CDT 2019 x86_64 x86_64 x86_64 GNU/Linux
Dir            : /gpfs/mnt/gpfs02/sphenix/user/cdean/scripts/HF_trigger_ML/TMVA/Odd/wCalo
Training events: 31922
Analysis type  : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-
# Set by User:
NCycles: "400" [Number of training cycles]
HiddenLayers: "N+10" [Specification of hidden layer architecture]
NeuronType: "tanh" [Neuron activation function type]
EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
VarTransform: "N" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
H: "True" [Print method-specific help message]
TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
TestRate: "5" [Test for overtraining performed at each #th epoch]
UseRegulator: "False" [Use regulator to avoid over-training]
# Default:
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [The sampling weights of events in successful epochs (worse estimator than before) are multiplied by SamplingImportance; otherwise they are divided by it]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UpdateLimit: "10000" [Maximum times of regulator update]
CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 4
max(track_1_IP,track_2_IP)                  maxTrackDCA_3D       maxTrackDCA_3D       track-vertex 3D DCA, max   units   'F'   [0.00383873376995,4.99581956863]
max(abs(track_1_IP_xy),abs(track_2_IP_xy))  maxTrackDCA_2D       maxTrackDCA_2D       track-vertex 2D DCA, max   units   'F'   [9.33057162911e-05,4.8996257782]
track_1_track_2_DCA                         track_1_track_2_DCA  track_1_track_2_DCA  track-track 3D DCA         units   'F'   [1.57269468559e-07,0.0499997623265]
INTT_meanHits                               INTT_meanHits        INTT_meanHits        INTT avg. hits             units   'F'   [0,93]
NSpec 0


============================================================================ */
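
// Usage sketch (not part of the generated class; the input values dca3dMax,
// dca2dMax, pairDCA and inttMeanHits are hypothetical placeholders). The
// variable-name strings must match the #VAR block above exactly, and the
// input values must be supplied in the same order:
//
//    std::vector<std::string> vars = {
//       "max(track_1_IP,track_2_IP)",
//       "max(abs(track_1_IP_xy),abs(track_2_IP_xy))",
//       "track_1_track_2_DCA",
//       "INTT_meanHits" };
//    ReadMLPnew reader( vars );
//    std::vector<double> input = { dca3dMax, dca2dMax, pairDCA, inttMeanHits };
//    double mva = reader.GetMvaValue( input );   // sigmoid output in (0, 1)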

#include <array>
#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif

class ReadMLPnew : public IClassifierReader {

 public:

   // constructor
   ReadMLPnew( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLPnew" ),
        fNvars( 4 )
   {
      // the training input variables
      const char* inputVars[] = { "max(track_1_IP,track_2_IP)", "max(abs(track_1_IP_xy),abs(track_2_IP_xy))", "track_1_track_2_DCA", "INTT_meanHits" };

      // sanity checks
      if (theInputVars.empty()) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = -1;
      fVmax[0] = 1;
      fVmin[1] = -1;
      fVmax[1] = 1;
      fVmin[2] = -1;
      fVmax[2] = 1;
      fVmin[3] = -1;
      fVmax[3] = 1;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'F';

      // initialize constants
      Initialize();

      // initialize transformation
      InitTransform();
   }

   // destructor
   virtual ~ReadMLPnew() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const override;

 private:

   // method-specific destructor
   void Clear();

   // input variable transformation

   double fOff_1[3][4];
   double fScal_1[3][4];
   void InitTransform_1();
   void Transform_1( std::vector<double> & iv, int sigOrBgd ) const;
   void InitTransform();
   void Transform( std::vector<double> & iv, int sigOrBgd ) const;

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar()           const { return fNvars; }
   char   GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   double fVmin[4];
   double fVmax[4];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }
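
   // Note (reading aid, not TMVA-generated): in this reader the normalisation
   // is actually applied by Transform_1 via fOff_1/fScal_1; NormVariable,
   // fVmin and fVmax belong to the common template and are never called here.
   // As a worked example of the mapping, INTT_meanHits in [0,93] at x = 46.5
   // gives 2*(46.5-0)/(93-0) - 1 = 0.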

   // type of input variable: 'F' or 'I'
   char   fType[4];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)

   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   double fWeightMatrix0to1[15][5];   // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[1][15];   // weight matrix from layer 1 to 2

};

inline void ReadMLPnew::Initialize()
{
   // build network structure
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = -0.371270017742686;
   fWeightMatrix0to1[1][0] = 1.7706044423266;
   fWeightMatrix0to1[2][0] = 8.85034476272466;
   fWeightMatrix0to1[3][0] = 0.924328409008795;
   fWeightMatrix0to1[4][0] = -1.63470086717199;
   fWeightMatrix0to1[5][0] = -1.38327094346314;
   fWeightMatrix0to1[6][0] = 0.790241163419693;
   fWeightMatrix0to1[7][0] = 4.901736345446;
   fWeightMatrix0to1[8][0] = 0.018716727029684;
   fWeightMatrix0to1[9][0] = -0.948170278427112;
   fWeightMatrix0to1[10][0] = -0.312912878308065;
   fWeightMatrix0to1[11][0] = 1.04508957646327;
   fWeightMatrix0to1[12][0] = -1.28950287560597;
   fWeightMatrix0to1[13][0] = -1.77048436582237;
   fWeightMatrix0to1[0][1] = -1.69168540540321;
   fWeightMatrix0to1[1][1] = 0.714828096779357;
   fWeightMatrix0to1[2][1] = 4.66267105393972;
   fWeightMatrix0to1[3][1] = 0.959704657855793;
   fWeightMatrix0to1[4][1] = 1.43739718535443;
   fWeightMatrix0to1[5][1] = 1.34319766207685;
   fWeightMatrix0to1[6][1] = -1.73412006454761;
   fWeightMatrix0to1[7][1] = 0.103214066908003;
   fWeightMatrix0to1[8][1] = 0.592676290122851;
   fWeightMatrix0to1[9][1] = -0.114018136019056;
   fWeightMatrix0to1[10][1] = 0.194105805540034;
   fWeightMatrix0to1[11][1] = 0.624177892237752;
   fWeightMatrix0to1[12][1] = 1.71732503026765;
   fWeightMatrix0to1[13][1] = -2.09324808031756;
   fWeightMatrix0to1[0][2] = -2.30489691537058;
   fWeightMatrix0to1[1][2] = 0.0667223243786911;
   fWeightMatrix0to1[2][2] = -0.044951481188298;
   fWeightMatrix0to1[3][2] = 0.299313324362303;
   fWeightMatrix0to1[4][2] = -3.76254878333605;
   fWeightMatrix0to1[5][2] = -0.132620702387949;
   fWeightMatrix0to1[6][2] = 0.492688487613024;
   fWeightMatrix0to1[7][2] = -0.0872273658076029;
   fWeightMatrix0to1[8][2] = -0.20536823401227;
   fWeightMatrix0to1[9][2] = 1.59851054104555;
   fWeightMatrix0to1[10][2] = 0.211968624661105;
   fWeightMatrix0to1[11][2] = 0.811910894269588;
   fWeightMatrix0to1[12][2] = 0.447295320986063;
   fWeightMatrix0to1[13][2] = -0.0788413828958315;
   fWeightMatrix0to1[0][3] = 0.138462425960674;
   fWeightMatrix0to1[1][3] = 1.30817671579522;
   fWeightMatrix0to1[2][3] = 0.296894156804524;
   fWeightMatrix0to1[3][3] = -1.37924684322792;
   fWeightMatrix0to1[4][3] = -0.300113690750916;
   fWeightMatrix0to1[5][3] = -0.106768886795979;
   fWeightMatrix0to1[6][3] = 0.523243735326475;
   fWeightMatrix0to1[7][3] = 0.263751223946695;
   fWeightMatrix0to1[8][3] = 4.39310071154711;
   fWeightMatrix0to1[9][3] = 0.679174593843537;
   fWeightMatrix0to1[10][3] = -3.06099647360978;
   fWeightMatrix0to1[11][3] = 0.657959885057513;
   fWeightMatrix0to1[12][3] = -0.972902308462804;
   fWeightMatrix0to1[13][3] = -2.25841040863338;
   fWeightMatrix0to1[0][4] = -3.0329522856525;
   fWeightMatrix0to1[1][4] = -2.20972874165338;
   fWeightMatrix0to1[2][4] = 13.1479419758704;
   fWeightMatrix0to1[3][4] = 0.815659600799068;
   fWeightMatrix0to1[4][4] = -4.00407814730395;
   fWeightMatrix0to1[5][4] = -0.0676116494636533;
   fWeightMatrix0to1[6][4] = -0.975150440233607;
   fWeightMatrix0to1[7][4] = 4.23770153459808;
   fWeightMatrix0to1[8][4] = 4.77875155056132;
   fWeightMatrix0to1[9][4] = 1.01949141227115;
   fWeightMatrix0to1[10][4] = -2.06107769438511;
   fWeightMatrix0to1[11][4] = -2.54106696459123;
   fWeightMatrix0to1[12][4] = 0.0687823820410542;
   fWeightMatrix0to1[13][4] = -5.92121191119336;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -0.621509855707168;
   fWeightMatrix1to2[0][1] = 0.730121868385032;
   fWeightMatrix1to2[0][2] = 2.72002615212586;
   fWeightMatrix1to2[0][3] = -0.110318549183625;
   fWeightMatrix1to2[0][4] = 1.27445226847816;
   fWeightMatrix1to2[0][5] = 0.0565701746288308;
   fWeightMatrix1to2[0][6] = -0.63427021171353;
   fWeightMatrix1to2[0][7] = -0.509103185500456;
   fWeightMatrix1to2[0][8] = 1.47592612258331;
   fWeightMatrix1to2[0][9] = 0.23839821537841;
   fWeightMatrix1to2[0][10] = 0.54206934271779;
   fWeightMatrix1to2[0][11] = 2.09060833313788;
   fWeightMatrix1to2[0][12] = -0.212155780718553;
   fWeightMatrix1to2[0][13] = -0.678193913202105;
   fWeightMatrix1to2[0][14] = -2.94224644247805;
}
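
// Reading aid (not TMVA-generated): the weights above encode a 4-14-1 network
// (HiddenLayers = "N+10" with N = 4 input variables). Column 4 of
// fWeightMatrix0to1 holds the bias weights of the 14 hidden neurons; index 14
// of fWeightMatrix1to2 multiplies the constant bias node fWeights1.back() = 1
// set in GetMvaValue__.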

inline double ReadMLPnew::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != 4) {
      std::cout << "Input vector needs to be of size " << 4 << std::endl;
      return 0;
   }

   std::array<double, 15> fWeights1 {{}};
   std::array<double, 1> fWeights2 {{}};
   fWeights1.back() = 1.;

   // layer 0 to 1
   for (int o=0; o<14; o++) {
      std::array<double, 5> buffer; // no need to initialise
      for (int i=0; i<4; i++) {
         buffer[i] = fWeightMatrix0to1[o][i] * inputValues[i];
      } // loop over i
      buffer.back() = fWeightMatrix0to1[o][4];
      for (int i=0; i<5; i++) {
         fWeights1[o] += buffer[i];
      } // loop over i
   } // loop over o
   for (int o=0; o<14; o++) {
      fWeights1[o] = ActivationFnc(fWeights1[o]);
   } // loop over o
   // layer 1 to 2
   for (int o=0; o<1; o++) {
      std::array<double, 15> buffer; // no need to initialise
      for (int i=0; i<15; i++) {
         buffer[i] = fWeightMatrix1to2[o][i] * fWeights1[i];
      } // loop over i
      for (int i=0; i<15; i++) {
         fWeights2[o] += buffer[i];
      } // loop over i
   } // loop over o
   for (int o=0; o<1; o++) {
      fWeights2[o] = OutputActivationFnc(fWeights2[o]);
   } // loop over o

   return fWeights2[0];
}
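
// In formula form, the forward pass above computes
//    h_o = tanh( sum_{i=0..3} W01[o][i] * x_i + W01[o][4] ),   o = 0..13
//    y   = sigmoid( sum_{o=0..13} W12[0][o] * h_o + W12[0][14] )
// where W01 and W12 abbreviate fWeightMatrix0to1 and fWeightMatrix1to2.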

double ReadMLPnew::ActivationFnc(double x) const {
   // fast hyperbolic tan approximation
   if (x > 4.97) return 1;
   if (x < -4.97) return -1;
   float x2 = x * x;
   float a = x * (135135.0f + x2 * (17325.0f + x2 * (378.0f + x2)));
   float b = 135135.0f + x2 * (62370.0f + x2 * (3150.0f + x2 * 28.0f));
   return a / b;
}
double ReadMLPnew::OutputActivationFnc(double x) const {
   // sigmoid
   return 1.0/(1.0+std::exp(-x));
}
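
// Since the network was trained with EstimatorType = "CE" (cross-entropy,
// Bernoulli likelihood), the sigmoid output can be read as a signal
// probability: values near 1 are signal-like, values near 0 background-like.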

// Clean up
inline void ReadMLPnew::Clear()
{
}
inline double ReadMLPnew::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
   }
   else {
      std::vector<double> iV(inputValues);
      Transform( iV, -1 );
      retval = GetMvaValue__( iV );
   }

   return retval;
}

//_______________________________________________________________________
inline void ReadMLPnew::InitTransform_1()
{
   double fMin_1[3][4];
   double fMax_1[3][4];
   // Normalization transformation, initialisation
   fMin_1[0][0] = 0.0262883696705;
   fMax_1[0][0] = 4.92083930969;
   fScal_1[0][0] = 2.0/(fMax_1[0][0]-fMin_1[0][0]);
   fOff_1[0][0] = fMin_1[0][0]*fScal_1[0][0]+1.;
   fMin_1[1][0] = 0.00383873376995;
   fMax_1[1][0] = 4.99581956863;
   fScal_1[1][0] = 2.0/(fMax_1[1][0]-fMin_1[1][0]);
   fOff_1[1][0] = fMin_1[1][0]*fScal_1[1][0]+1.;
   fMin_1[2][0] = 0.00383873376995;
   fMax_1[2][0] = 4.99581956863;
   fScal_1[2][0] = 2.0/(fMax_1[2][0]-fMin_1[2][0]);
   fOff_1[2][0] = fMin_1[2][0]*fScal_1[2][0]+1.;
   fMin_1[0][1] = 0.00748753221706;
   fMax_1[0][1] = 4.8996257782;
   fScal_1[0][1] = 2.0/(fMax_1[0][1]-fMin_1[0][1]);
   fOff_1[0][1] = fMin_1[0][1]*fScal_1[0][1]+1.;
   fMin_1[1][1] = 9.33057162911e-05;
   fMax_1[1][1] = 4.55496644974;
   fScal_1[1][1] = 2.0/(fMax_1[1][1]-fMin_1[1][1]);
   fOff_1[1][1] = fMin_1[1][1]*fScal_1[1][1]+1.;
   fMin_1[2][1] = 9.33057162911e-05;
   fMax_1[2][1] = 4.8996257782;
   fScal_1[2][1] = 2.0/(fMax_1[2][1]-fMin_1[2][1]);
   fOff_1[2][1] = fMin_1[2][1]*fScal_1[2][1]+1.;
   fMin_1[0][2] = 3.39120670105e-05;
   fMax_1[0][2] = 0.0499259270728;
   fScal_1[0][2] = 2.0/(fMax_1[0][2]-fMin_1[0][2]);
   fOff_1[0][2] = fMin_1[0][2]*fScal_1[0][2]+1.;
   fMin_1[1][2] = 1.57269468559e-07;
   fMax_1[1][2] = 0.0499997623265;
   fScal_1[1][2] = 2.0/(fMax_1[1][2]-fMin_1[1][2]);
   fOff_1[1][2] = fMin_1[1][2]*fScal_1[1][2]+1.;
   fMin_1[2][2] = 1.57269468559e-07;
   fMax_1[2][2] = 0.0499997623265;
   fScal_1[2][2] = 2.0/(fMax_1[2][2]-fMin_1[2][2]);
   fOff_1[2][2] = fMin_1[2][2]*fScal_1[2][2]+1.;
   fMin_1[0][3] = 1;
   fMax_1[0][3] = 70;
   fScal_1[0][3] = 2.0/(fMax_1[0][3]-fMin_1[0][3]);
   fOff_1[0][3] = fMin_1[0][3]*fScal_1[0][3]+1.;
   fMin_1[1][3] = 0;
   fMax_1[1][3] = 93;
   fScal_1[1][3] = 2.0/(fMax_1[1][3]-fMin_1[1][3]);
   fOff_1[1][3] = fMin_1[1][3]*fScal_1[1][3]+1.;
   fMin_1[2][3] = 0;
   fMax_1[2][3] = 93;
   fScal_1[2][3] = 2.0/(fMax_1[2][3]-fMin_1[2][3]);
   fOff_1[2][3] = fMin_1[2][3]*fScal_1[2][3]+1.;
}
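
// Reading aid (not TMVA-generated): fMin_1/fMax_1 hold per-class variable
// ranges, with row 2 covering all classes combined; the row-2 ranges match
// the #VAR block at the top of this file. GetMvaValue calls Transform with
// sigOrBgd = -1, which Transform_1 clamps to 2, so the combined range is the
// one actually used when evaluating the classifier.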

//_______________________________________________________________________
inline void ReadMLPnew::Transform_1( std::vector<double>& iv, int cls) const
{
   // Normalization transformation
   if (cls < 0 || cls > 2) cls = 2;
   const int nVar = 4;

   // get indices of used variables

   // define the indices of the variables which are transformed by this transformation
   static std::vector<int> indicesGet;
   static std::vector<int> indicesPut;

   if ( indicesGet.empty() ) {
      indicesGet.reserve(fNvars);
      indicesGet.push_back( 0);
      indicesGet.push_back( 1);
      indicesGet.push_back( 2);
      indicesGet.push_back( 3);
   }
   if ( indicesPut.empty() ) {
      indicesPut.reserve(fNvars);
      indicesPut.push_back( 0);
      indicesPut.push_back( 1);
      indicesPut.push_back( 2);
      indicesPut.push_back( 3);
   }

   static std::vector<double> dv;
   dv.resize(nVar);
   for (int ivar=0; ivar<nVar; ivar++) dv[ivar] = iv[indicesGet.at(ivar)];
   for (int ivar=0; ivar<nVar; ivar++) {
      double offset = fOff_1[cls][ivar];
      double scale  = fScal_1[cls][ivar];
      iv[indicesPut.at(ivar)] = scale*dv[ivar]-offset;
   }
}

//_______________________________________________________________________
inline void ReadMLPnew::InitTransform()
{
   InitTransform_1();
}

//_______________________________________________________________________
inline void ReadMLPnew::Transform( std::vector<double>& iv, int sigOrBgd ) const
{
   Transform_1( iv, sigOrBgd );
}