From 39b060ef225b62d41ec4cf9c615cdbbc4e715c11 Mon Sep 17 00:00:00 2001 From: Rene Brun <Rene.Brun@cern.ch> Date: Fri, 17 Nov 2006 16:00:03 +0000 Subject: [PATCH] New version of TMVA fixing many coding conventions violations. New version of the tmva test suite. To execute it run the script TMVAnalysis.C git-svn-id: http://root.cern.ch/svn/root/trunk@16805 27541ba8-7e3a-0410-8455-c3a389f83636 --- tmva/Module.mk | 1 + tmva/inc/BinarySearchTree.h | 10 +- tmva/inc/BinarySearchTreeNode.h | 6 +- tmva/inc/BinaryTree.h | 6 +- tmva/inc/CrossEntropy.h | 4 +- tmva/inc/DataSet.h | 96 ++++---- tmva/inc/DecisionTree.h | 12 +- tmva/inc/DecisionTreeNode.h | 6 +- tmva/inc/Event.h | 6 +- tmva/inc/Factory.h | 12 +- tmva/inc/GeneticANN.h | 6 +- tmva/inc/GeneticBase.h | 4 +- tmva/inc/GeneticCuts.h | 6 +- tmva/inc/GeneticGenes.h | 4 +- tmva/inc/GeneticPopulation.h | 4 +- tmva/inc/GeneticRange.h | 4 +- tmva/inc/GiniIndex.h | 4 +- tmva/inc/IMethod.h | 10 +- tmva/inc/MethodBDT.h | 30 +-- tmva/inc/MethodBase.h | 60 ++--- tmva/inc/MethodBayesClassifier.h | 4 +- tmva/inc/MethodCFMlpANN.h | 8 +- tmva/inc/MethodCFMlpANN_Utils.h | 6 +- tmva/inc/MethodCFMlpANN_def.h | 6 +- tmva/inc/MethodCommittee.h | 10 +- tmva/inc/MethodCuts.h | 36 +-- tmva/inc/MethodFisher.h | 14 +- tmva/inc/MethodHMatrix.h | 10 +- tmva/inc/MethodLikelihood.h | 10 +- tmva/inc/MethodMLP.h | 34 +-- tmva/inc/MethodPDERS.h | 10 +- tmva/inc/MethodRuleFit.h | 6 +- tmva/inc/MethodSVM.h | 4 +- tmva/inc/MethodTMlpANN.h | 6 +- tmva/inc/MethodVariable.h | 6 +- tmva/inc/Methods.h | 6 +- tmva/inc/MisClassificationError.h | 4 +- tmva/inc/MsgLogger.h | 46 ++-- tmva/inc/Node.h | 6 +- tmva/inc/Option.h | 6 +- tmva/inc/PDF.h | 10 +- tmva/inc/Ranking.h | 8 +- tmva/inc/Reader.h | 8 +- tmva/inc/RootFinder.h | 6 +- tmva/inc/Rule.h | 4 +- tmva/inc/RuleEnsemble.h | 10 +- tmva/inc/RuleFit.h | 4 +- tmva/inc/RuleFitParams.h | 4 +- tmva/inc/SdivSqrtSplusB.h | 4 +- tmva/inc/SeparationBase.h | 4 +- tmva/inc/SimulatedAnnealingBase.h | 6 +- tmva/inc/SimulatedAnnealingCuts.h | 6 +- tmva/inc/TActivationChooser.h | 22 +- tmva/inc/TNeuron.h | 4 +- tmva/inc/TNeuronInputChooser.h | 10 +- tmva/inc/TSpline1.h | 6 +- tmva/inc/TSpline2.h | 6 +- tmva/inc/Timer.h | 6 +- tmva/inc/Tools.h | 6 +- tmva/inc/Types.h | 50 ++-- tmva/inc/VariableInfo.h | 26 +- tmva/inc/Volume.h | 6 +- tmva/src/BinarySearchTree.cxx | 10 +- tmva/src/BinarySearchTreeNode.cxx | 6 +- tmva/src/BinaryTree.cxx | 6 +- tmva/src/CrossEntropy.cxx | 4 +- tmva/src/DataSet.cxx | 208 +++++++++++----- tmva/src/DecisionTree.cxx | 48 ++-- tmva/src/DecisionTreeNode.cxx | 8 +- tmva/src/Event.cxx | 28 ++- tmva/src/Factory.cxx | 70 +++--- tmva/src/GeneticANN.cxx | 8 +- tmva/src/GeneticBase.cxx | 8 +- tmva/src/GeneticCuts.cxx | 8 +- tmva/src/GeneticGenes.cxx | 4 +- tmva/src/GeneticPopulation.cxx | 4 +- tmva/src/GeneticRange.cxx | 6 +- tmva/src/GiniIndex.cxx | 4 +- tmva/src/MethodANNBase.cxx | 30 ++- tmva/src/MethodBDT.cxx | 104 +++++--- tmva/src/MethodBase.cxx | 63 ++++- tmva/src/MethodBayesClassifier.cxx | 15 +- tmva/src/MethodCFMlpANN.cxx | 16 +- tmva/src/MethodCFMlpANN_Utils.cxx | 8 +- tmva/src/MethodCommittee.cxx | 74 +++--- tmva/src/MethodCuts.cxx | 77 ++++-- tmva/src/MethodFisher.cxx | 29 ++- tmva/src/MethodHMatrix.cxx | 43 +++- tmva/src/MethodLikelihood.cxx | 31 ++- tmva/src/MethodMLP.cxx | 39 ++- tmva/src/MethodPDERS.cxx | 198 ++++++++------- tmva/src/MethodRuleFit.cxx | 36 ++- tmva/src/MethodSVM.cxx | 13 +- tmva/src/MethodTMlpANN.cxx | 21 +- tmva/src/MethodVariable.cxx | 10 +- tmva/src/MisClassificationError.cxx | 4 +- 
tmva/src/MsgLogger.cxx | 39 +-- tmva/src/Node.cxx | 6 +- tmva/src/Option.cxx | 6 +- tmva/src/PDF.cxx | 8 +- tmva/src/Ranking.cxx | 29 ++- tmva/src/Reader.cxx | 46 ++-- tmva/src/RootFinder.cxx | 6 +- tmva/src/Rule.cxx | 77 +++--- tmva/src/RuleEnsemble.cxx | 88 +++++-- tmva/src/RuleFit.cxx | 40 ++- tmva/src/RuleFitParams.cxx | 56 +++-- tmva/src/SdivSqrtSplusB.cxx | 4 +- tmva/src/SeparationBase.cxx | 4 +- tmva/src/SimulatedAnnealingBase.cxx | 34 +-- tmva/src/SimulatedAnnealingCuts.cxx | 15 +- tmva/src/TNeuron.cxx | 4 +- tmva/src/TSpline1.cxx | 6 +- tmva/src/TSpline2.cxx | 6 +- tmva/src/Timer.cxx | 6 +- tmva/src/Tools.cxx | 25 +- tmva/src/Types.cxx | 37 +-- tmva/src/VariableInfo.cxx | 10 +- tmva/src/Volume.cxx | 8 +- tmva/test/BDT.C | 189 +++++++++++++++ tmva/test/TMVAGui.C | 109 +++++++++ tmva/test/TMVAlogon.C | 57 +++++ tmva/test/TMVAnalysis.C | 363 +++++++++++++--------------- tmva/test/TMVAnalysis.py | 257 ++++++++++++++++++++ tmva/test/TMVApplication.C | 176 ++++++++++++++ tmva/test/annconvergencetest.C | 67 +++++ tmva/test/compareanapp.C | 155 ++++++++++++ tmva/test/correlations.C | 37 +-- tmva/test/correlationscatters.C | 160 ++++++++++++ tmva/test/efficiencies.C | 134 +++++----- tmva/test/likelihoodrefs.C | 181 ++++++++++++++ tmva/test/line-small.png | Bin 0 -> 294 bytes tmva/test/mutransform.C | 137 +++++++++++ tmva/test/mvas.C | 87 ++++--- tmva/test/network.C | 313 ++++++++++++++++++++++++ tmva/test/plotall.C | 23 ++ tmva/test/sigmoid-small.png | Bin 0 -> 363 bytes tmva/test/tmvaglob.C | 267 ++++++++++++-------- tmva/test/variables.C | 298 ++++++++++++----------- 139 files changed, 4009 insertions(+), 1547 deletions(-) create mode 100644 tmva/test/BDT.C create mode 100644 tmva/test/TMVAGui.C create mode 100644 tmva/test/TMVAlogon.C create mode 100644 tmva/test/TMVAnalysis.py create mode 100644 tmva/test/TMVApplication.C create mode 100644 tmva/test/annconvergencetest.C create mode 100644 tmva/test/compareanapp.C create mode 100644 tmva/test/correlationscatters.C create mode 100644 tmva/test/likelihoodrefs.C create mode 100644 tmva/test/line-small.png create mode 100644 tmva/test/mutransform.C create mode 100644 tmva/test/network.C create mode 100644 tmva/test/plotall.C create mode 100644 tmva/test/sigmoid-small.png diff --git a/tmva/Module.mk b/tmva/Module.mk index 14b3fd7f8de..825dcc07ed6 100644 --- a/tmva/Module.mk +++ b/tmva/Module.mk @@ -3,6 +3,7 @@ # # Author: Fons Rademakers, 20/6/2005 + MODDIR := tmva MODDIRS := $(MODDIR)/src MODDIRI := $(MODDIR)/inc diff --git a/tmva/inc/BinarySearchTree.h b/tmva/inc/BinarySearchTree.h index bcd42733054..91f1495ec66 100644 --- a/tmva/inc/BinarySearchTree.h +++ b/tmva/inc/BinarySearchTree.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: BinarySearchTree.h,v 1.17 2006/11/02 08:03:18 helgevoss Exp $ +// @(#)root/tmva $Id: BinarySearchTree.h,v 1.19 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -104,8 +104,8 @@ namespace TMVA { // create the search tree from the events in a TTree // using the variables specified in "theVars" Int_t Fill( const DataSet& ds, TTree* theTree, Int_t theType = -1, - Types::PreprocessingMethod corr = Types::kNone, - Types::SBType type = Types::kSignal ); + Types::EPreprocessingMethod corr = Types::kNone, + Types::ESBType type = Types::kSignal ); // Create the search tree from the event collection // using ONLY the variables specified in "theVars" diff --git a/tmva/inc/BinarySearchTreeNode.h b/tmva/inc/BinarySearchTreeNode.h index 8a2ba84e5ec..e41d366b4ad 100644 --- a/tmva/inc/BinarySearchTreeNode.h +++ b/tmva/inc/BinarySearchTreeNode.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: BinarySearchTreeNode.h,v 1.4 2006/11/13 15:49:49 helgevoss Exp $ +// @(#)root/tmva $Id: BinarySearchTreeNode.h,v 1.5 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/BinaryTree.h b/tmva/inc/BinaryTree.h index fd71c7077d9..0795c54862e 100644 --- a/tmva/inc/BinaryTree.h +++ b/tmva/inc/BinaryTree.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: BinaryTree.h,v 1.24 2006/11/14 14:19:17 andreas.hoecker Exp $ +// @(#)root/tmva $Id: BinaryTree.h,v 1.25 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/CrossEntropy.h b/tmva/inc/CrossEntropy.h index 7b78f04171a..36280439304 100644 --- a/tmva/inc/CrossEntropy.h +++ b/tmva/inc/CrossEntropy.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: CrossEntropy.h,v 1.9 2006/11/06 00:10:16 helgevoss Exp $ +// @(#)root/tmva $Id: CrossEntropy.h,v 1.10 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -12,7 +12,7 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * diff --git a/tmva/inc/DataSet.h b/tmva/inc/DataSet.h index beace90ab4e..ea0393fdc35 100644 --- a/tmva/inc/DataSet.h +++ b/tmva/inc/DataSet.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: DataSet.h,v 1.39 2006/11/14 00:47:42 stelzer Exp $ +// @(#)root/tmva $Id: DataSet.h,v 1.41 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -82,8 +82,6 @@ namespace TMVA { DataSet(); virtual ~DataSet(); - enum TreeType { kTraining=0, kTesting, kMaxTreeType }; - const char* GetName() const { return "DataSet"; } // the tree data @@ -138,7 +136,7 @@ namespace TMVA { // plot variables // possible values for tree are 'training', 'multi' - void PlotVariables( TString tree, TString folderName, Types::PreprocessingMethod corr = Types::kNone ); + void PlotVariables( TString tree, TString folderName, Types::EPreprocessingMethod corr = Types::kNone ); // auxiliary functions to compute decorrelation void GetCorrelationMatrix( Bool_t isSignal, TMatrixDBase* mat ); @@ -151,68 +149,68 @@ namespace TMVA { // properties of the dataset // normalisation init - void CalcNorm(Types::PreprocessingMethod corr = Types::kNone); + void CalcNorm(Types::EPreprocessingMethod corr = Types::kNone); // normalisation accessors - Double_t GetRMS( Int_t ivar, Types::PreprocessingMethod corr = Types::kNone) const { + Double_t GetRMS( Int_t ivar, Types::EPreprocessingMethod corr = Types::kNone) const { return fVariables[ivar].GetRMS(corr); } - Double_t GetRMS( const TString& var, Types::PreprocessingMethod corr = Types::kNone) const { + Double_t GetRMS( const TString& var, Types::EPreprocessingMethod corr = Types::kNone) const { return GetRMS(FindVar(var), corr); } - Double_t GetMean( Int_t ivar, Types::PreprocessingMethod corr = Types::kNone) const { + Double_t GetMean( Int_t ivar, Types::EPreprocessingMethod corr = Types::kNone) const { return fVariables[ivar].GetMean(corr); } - Double_t GetMean( const TString& var, Types::PreprocessingMethod corr = Types::kNone) const { + Double_t GetMean( const TString& var, Types::EPreprocessingMethod corr = Types::kNone) const { return GetMean(FindVar(var), corr); } - Double_t GetXmin( Int_t ivar, Types::PreprocessingMethod corr = Types::kNone) const { + Double_t GetXmin( Int_t ivar, Types::EPreprocessingMethod corr = Types::kNone) const { return fVariables[ivar].GetMin(corr); } - Double_t GetXmax( Int_t ivar, Types::PreprocessingMethod corr = Types::kNone) const { + Double_t GetXmax( Int_t ivar, Types::EPreprocessingMethod corr = Types::kNone) const { return fVariables[ivar].GetMax(corr); } - Double_t GetXmin( const TString& var, Types::PreprocessingMethod corr = Types::kNone) const { + Double_t GetXmin( const TString& var, Types::EPreprocessingMethod corr = Types::kNone) const { return GetXmin(FindVar(var), corr); } - Double_t GetXmax( const TString& var, Types::PreprocessingMethod corr = Types::kNone) const { + Double_t GetXmax( const TString& var, Types::EPreprocessingMethod corr = Types::kNone) const { return GetXmax(FindVar(var), corr); } - void SetRMS ( const TString& var, Double_t x, Types::PreprocessingMethod corr = Types::kNone) { + void SetRMS ( const TString& var, Double_t x, Types::EPreprocessingMethod corr = Types::kNone) { SetRMS(FindVar(var), x, corr); } - void SetRMS( Int_t ivar, Double_t x, Types::PreprocessingMethod corr = Types::kNone) { + void SetRMS( Int_t ivar, Double_t x, Types::EPreprocessingMethod corr = Types::kNone) { fVariables[ivar].SetRMS(x, corr); } - void SetMean ( const TString& var, Double_t x, Types::PreprocessingMethod corr = Types::kNone) { + void SetMean ( const TString& var, Double_t x, Types::EPreprocessingMethod corr = Types::kNone) { 
SetMean(FindVar(var), x, corr); } - void SetMean( Int_t ivar, Double_t x, Types::PreprocessingMethod corr = Types::kNone) { + void SetMean( Int_t ivar, Double_t x, Types::EPreprocessingMethod corr = Types::kNone) { fVariables[ivar].SetMean(x, corr); } - void SetXmin( Int_t ivar, Double_t x, Types::PreprocessingMethod corr = Types::kNone) { + void SetXmin( Int_t ivar, Double_t x, Types::EPreprocessingMethod corr = Types::kNone) { fVariables[ivar].SetMin(x, corr); } - void SetXmax( Int_t ivar, Double_t x, Types::PreprocessingMethod corr = Types::kNone) { + void SetXmax( Int_t ivar, Double_t x, Types::EPreprocessingMethod corr = Types::kNone) { fVariables[ivar].SetMax(x, corr); } - void SetXmin( const TString& var, Double_t x, Types::PreprocessingMethod corr = Types::kNone) { + void SetXmin( const TString& var, Double_t x, Types::EPreprocessingMethod corr = Types::kNone) { SetXmin(FindVar(var), x, corr); } - void SetXmax( const TString& var, Double_t x, Types::PreprocessingMethod corr = Types::kNone) { + void SetXmax( const TString& var, Double_t x, Types::EPreprocessingMethod corr = Types::kNone) { SetXmax(FindVar(var), x, corr); } - void UpdateNorm ( Int_t ivar, Double_t x, Types::PreprocessingMethod corr = Types::kNone); + void UpdateNorm ( Int_t ivar, Double_t x, Types::EPreprocessingMethod corr = Types::kNone); // event reading - Bool_t ReadEvent(TTree* tr, UInt_t evidx, Types::PreprocessingMethod corr = Types::kNone, - Types::SBType type = Types::kSignal) const; - Bool_t ReadTrainingEvent( UInt_t evidx, Types::PreprocessingMethod corr = Types::kNone, - Types::SBType type = Types::kSignal) const { + Bool_t ReadEvent(TTree* tr, UInt_t evidx, Types::EPreprocessingMethod corr = Types::kNone, + Types::ESBType type = Types::kSignal) const; + Bool_t ReadTrainingEvent( UInt_t evidx, Types::EPreprocessingMethod corr = Types::kNone, + Types::ESBType type = Types::kSignal) const { return ReadEvent(GetTrainingTree(),evidx,corr,type); } - Bool_t ReadTestEvent( UInt_t evidx, Types::PreprocessingMethod corr = Types::kNone, - Types::SBType type = Types::kSignal) const { + Bool_t ReadTestEvent( UInt_t evidx, Types::EPreprocessingMethod corr = Types::kNone, + Types::ESBType type = Types::kSignal) const { return ReadEvent(GetTestTree(),evidx, corr,type); } @@ -229,23 +227,23 @@ namespace TMVA { const TMVA::Event& Event() const { return *fEvent; } // Warning, this requires an existing event object // decorrelation Matrix accessors - const TMatrixD* CorrelationMatrix (Types::SBType sigbgd) const { return fDecorrMatrix[sigbgd]; } - TPrincipal* PrincipalComponents(Types::SBType sigbgd) const { return fPrincipal[sigbgd]; } + const TMatrixD* CorrelationMatrix (Types::ESBType sigbgd) const { return fDecorrMatrix[sigbgd]; } + TPrincipal* PrincipalComponents(Types::ESBType sigbgd) const { return fPrincipal[sigbgd]; } // the weight void SetWeightExpression(const TString& expr) { fWeightExp = expr; } // some dataset stats - Int_t GetNEvtTrain() const { return fDataStats[kTraining][Types::kSBBoth]; } - Int_t GetNEvtSigTrain() const { return fDataStats[kTraining][Types::kSignal]; } - Int_t GetNEvtBkgdTrain() const { return fDataStats[kTraining][Types::kBackground]; } - Int_t GetNEvtTest() const { return fDataStats[kTesting][Types::kSBBoth]; } - Int_t GetNEvtSigTest() const { return fDataStats[kTesting][Types::kSignal]; } - Int_t GetNEvtBkgdTest() const { return fDataStats[kTesting][Types::kBackground]; } + Int_t GetNEvtTrain() const { return fDataStats[Types::kTraining][Types::kSBBoth]; } + Int_t GetNEvtSigTrain() 
const { return fDataStats[Types::kTraining][Types::kSignal]; } + Int_t GetNEvtBkgdTrain() const { return fDataStats[Types::kTraining][Types::kBackground]; } + Int_t GetNEvtTest() const { return fDataStats[Types::kTesting][Types::kSBBoth]; } + Int_t GetNEvtSigTest() const { return fDataStats[Types::kTesting][Types::kSignal]; } + Int_t GetNEvtBkgdTest() const { return fDataStats[Types::kTesting][Types::kBackground]; } // write and read functions - void WriteVarsToStream ( std::ostream& o, Types::PreprocessingMethod corr ) const; - void ReadVarsFromStream ( std::istream& istr, Types::PreprocessingMethod corr ); + void WriteVarsToStream ( std::ostream& o, Types::EPreprocessingMethod corr ) const; + void ReadVarsFromStream ( std::istream& istr, Types::EPreprocessingMethod corr ); void WriteCorrMatToStream ( std::ostream& o ) const; void ReadCorrMatFromStream( std::istream& istr ); @@ -254,29 +252,29 @@ namespace TMVA { void ResetCurrentTree() { fCurrentTree = 0; } // transformation for preprocessing - Bool_t ApplyTransformation(Types::PreprocessingMethod corr = Types::kNone, Bool_t useSignal = kTRUE) const; + Bool_t ApplyTransformation(Types::EPreprocessingMethod corr = Types::kNone, Bool_t useSignal = kTRUE) const; // preprocessing flag Bool_t DoPreprocessing() const { return fDoPreprocessing; } - Bool_t PreprocessingEnabled(Types::PreprocessingMethod corr) { return fPreprocessingEnabled[corr]; } + Bool_t PreprocessingEnabled(Types::EPreprocessingMethod corr) { return fPreprocessingEnabled[corr]; } void SetPreprocessing( Bool_t doit ) { fDoPreprocessing = doit; fPreprocessingEnabled[Types::kDecorrelated] = doit; } - void EnablePreprocess( Types::PreprocessingMethod corr ) { + void EnablePreprocess( Types::EPreprocessingMethod corr ) { fPreprocessingEnabled[corr] = kTRUE; } - Bool_t Preprocess(Types::PreprocessingMethod); + Bool_t Preprocess(Types::EPreprocessingMethod); private: // data manipulation helper functions // helper functions for writing decorrelated data - Bool_t PreparePreprocessing( Types::PreprocessingMethod corr, TTree* originalTree ); - Bool_t HasBeenPreprocessed( Types::PreprocessingMethod corr ) const { + Bool_t PreparePreprocessing( Types::EPreprocessingMethod corr, TTree* originalTree ); + Bool_t HasBeenPreprocessed( Types::EPreprocessingMethod corr ) const { return fFlagPreprocessed[corr]; } - void FlagAsPreprocessed( Types::PreprocessingMethod corr ) { + void FlagAsPreprocessed( Types::EPreprocessingMethod corr ) { fFlagPreprocessed[corr] = kTRUE; } @@ -295,7 +293,7 @@ namespace TMVA { // plot variables void PlotVariables( TTree* theTree, TString folderName = "input_variables", - Types::PreprocessingMethod corr = Types::kDecorrelated ); + Types::EPreprocessingMethod corr = Types::kDecorrelated ); // data members @@ -320,7 +318,7 @@ namespace TMVA { TTree* fMultiCutTestTree; //! tree used for testing of multicut method [correlated/decorrelated] // data stats - UInt_t fDataStats[kMaxTreeType][Types::kMaxSBType]; //! statistics of the dataset for training/test tree + UInt_t fDataStats[Types::kMaxTreeType][Types::kMaxSBType]; //! statistics of the dataset for training/test tree // TMatrixD* fCovarianceMatrix[2]; //! 
Covariance matrix [signal/background] diff --git a/tmva/inc/DecisionTree.h b/tmva/inc/DecisionTree.h index 23da170ba14..7e81c701589 100644 --- a/tmva/inc/DecisionTree.h +++ b/tmva/inc/DecisionTree.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: DecisionTree.h,v 1.33 2006/11/14 14:19:17 andreas.hoecker Exp $ +// @(#)root/tmva $Id: DecisionTree.h,v 1.35 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -107,8 +107,8 @@ namespace TMVA { void ClearTree(); // set pruning method - enum PruneMethod { kExpectedErrorPruning=0, kCostComplexityPruning, kMCC }; - void SetPruneMethod( PruneMethod m = kExpectedErrorPruning ) { fPruneMethod = m; } + enum EPruneMethod { kExpectedErrorPruning=0, kCostComplexityPruning, kMCC }; + void SetPruneMethod( EPruneMethod m = kExpectedErrorPruning ) { fPruneMethod = m; } // recursive pruning of the tree void PruneTree(); @@ -196,7 +196,7 @@ namespace TMVA { Bool_t fUseSearchTree; //cut scan done with binary trees or simple event loop. Double_t fPruneStrength; //a parameter to set the "amount" of pruning..needs to be adjusted - PruneMethod fPruneMethod; // method used for prunig + EPruneMethod fPruneMethod; // method used for prunig vector< Double_t > fVariableImportance; // the relative importance of the different variables diff --git a/tmva/inc/DecisionTreeNode.h b/tmva/inc/DecisionTreeNode.h index da731baf9e4..6a003fc5f38 100644 --- a/tmva/inc/DecisionTreeNode.h +++ b/tmva/inc/DecisionTreeNode.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: DecisionTreeNode.h,v 1.21 2006/11/13 15:49:49 helgevoss Exp $ +// @(#)root/tmva $Id: DecisionTreeNode.h,v 1.22 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/Event.h b/tmva/inc/Event.h index 6982eb15550..c77a6fcdb48 100644 --- a/tmva/inc/Event.h +++ b/tmva/inc/Event.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Event.h,v 1.21 2006/11/14 14:19:17 andreas.hoecker Exp $ +// @(#)root/tmva $Id: Event.h,v 1.22 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/Factory.h b/tmva/inc/Factory.h index cd12cbe7e36..f9cc6cfd611 100644 --- a/tmva/inc/Factory.h +++ b/tmva/inc/Factory.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Factory.h,v 1.25 2006/10/14 23:19:19 andreas.hoecker Exp $ +// @(#)root/tmva $Id: Factory.h,v 1.27 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -16,13 +16,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -136,9 +136,9 @@ namespace TMVA { void ProcessMultipleMVA(); Bool_t BookMethod( TString theMethodName, TString methodTitle, TString theOption = "" ); - Bool_t BookMethod( Types::MVA theMethod, TString methodTitle, TString theOption = "" ); - Bool_t BookMethod( TMVA::Types::MVA theMethod, TString methodTitle, TString methodOption, - TMVA::Types::MVA theCommittee, TString committeeOption = "" ); + Bool_t BookMethod( Types::EMVA theMethod, TString methodTitle, TString theOption = "" ); + Bool_t BookMethod( TMVA::Types::EMVA theMethod, TString methodTitle, TString methodOption, + TMVA::Types::EMVA theCommittee, TString committeeOption = "" ); // booking the method with a given weight file --> testing or application only // Bool_t BookMethod( IMethod *theMethod ); diff --git a/tmva/inc/GeneticANN.h b/tmva/inc/GeneticANN.h index 87462786303..83fa2b7003e 100644 --- a/tmva/inc/GeneticANN.h +++ b/tmva/inc/GeneticANN.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticANN.h,v 1.3 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticANN.h,v 1.4 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Matt Jachowski, Peter Speckmayer, Helge Voss, Kai Voss /********************************************************************************** @@ -14,13 +14,13 @@ * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Matt Jachowski <jachowski@stanford.edu> - Stanford University, USA * * Peter Speckmayer <speckmay@mail.cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/GeneticBase.h b/tmva/inc/GeneticBase.h index 2d933adbe74..b9379477d38 100644 --- a/tmva/inc/GeneticBase.h +++ b/tmva/inc/GeneticBase.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticBase.h,v 1.14 2006/10/15 23:32:38 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticBase.h,v 1.15 2006/11/16 22:51:58 helgevoss Exp $ // Author: Peter Speckmayer /********************************************************************************** @@ -16,7 +16,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/GeneticCuts.h b/tmva/inc/GeneticCuts.h index c62c248d73b..3ca8b30e329 100644 --- a/tmva/inc/GeneticCuts.h +++ b/tmva/inc/GeneticCuts.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticCuts.h,v 1.13 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticCuts.h,v 1.14 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Matt Jachowski, Peter Speckmayer, Helge Voss, Kai Voss /********************************************************************************** @@ -14,13 +14,13 @@ * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Matt Jachowski <jachowski@stanford.edu> - Stanford University, USA * * Peter Speckmayer <speckmay@mail.cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/GeneticGenes.h b/tmva/inc/GeneticGenes.h index 51ba164ce51..b9ab0449f07 100644 --- a/tmva/inc/GeneticGenes.h +++ b/tmva/inc/GeneticGenes.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticGenes.h,v 1.8 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticGenes.h,v 1.9 2006/11/16 22:51:58 helgevoss Exp $ // Author: Peter Speckmayer /********************************************************************************** @@ -16,7 +16,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/GeneticPopulation.h b/tmva/inc/GeneticPopulation.h index fa6f48af0a6..cbb3c749f9d 100644 --- a/tmva/inc/GeneticPopulation.h +++ b/tmva/inc/GeneticPopulation.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticPopulation.h,v 1.11 2006/10/15 23:32:38 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticPopulation.h,v 1.12 2006/11/16 22:51:58 helgevoss Exp $ // Author: Peter Speckmayer /********************************************************************************** @@ -16,7 +16,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/GeneticRange.h b/tmva/inc/GeneticRange.h index a5b11acb361..99a09043d91 100644 --- a/tmva/inc/GeneticRange.h +++ b/tmva/inc/GeneticRange.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticRange.h,v 1.7 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticRange.h,v 1.8 2006/11/16 22:51:58 helgevoss Exp $ // Author: Peter Speckmayer /********************************************************************************** @@ -16,7 +16,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/GiniIndex.h b/tmva/inc/GiniIndex.h index 5a7ad2ed0bb..255d0c19cab 100644 --- a/tmva/inc/GiniIndex.h +++ b/tmva/inc/GiniIndex.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GiniIndex.h,v 1.8 2006/11/06 00:10:17 helgevoss Exp $ +// @(#)root/tmva $Id: GiniIndex.h,v 1.9 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -23,7 +23,7 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * diff --git a/tmva/inc/IMethod.h b/tmva/inc/IMethod.h index 0244880081e..08eefaf2740 100644 --- a/tmva/inc/IMethod.h +++ b/tmva/inc/IMethod.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: IMethod.h,v 1.18 2006/11/02 15:44:49 andreas.hoecker Exp $ +// @(#)root/tmva $Id: IMethod.h,v 1.20 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -133,9 +133,9 @@ namespace TMVA { virtual const TString& GetJobName ( void ) const = 0; virtual const TString& GetMethodName ( void ) const = 0; virtual const TString& GetMethodTitle( void ) const = 0; - virtual const Types::MVA GetMethodType ( void ) const = 0; + virtual const Types::EMVA GetMethodType ( void ) const = 0; - virtual Types::PreprocessingMethod GetPreprocessingMethod() const = 0; + virtual Types::EPreprocessingMethod GetPreprocessingMethod() const = 0; virtual void SetJobName( TString jobName ) = 0; diff --git a/tmva/inc/MethodBDT.h b/tmva/inc/MethodBDT.h index 3dde936c4e3..ea580346290 100644 --- a/tmva/inc/MethodBDT.h +++ b/tmva/inc/MethodBDT.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodBDT.h,v 1.33 2006/11/14 18:08:25 helgevoss Exp $ +// @(#)root/tmva $Id: MethodBDT.h,v 1.36 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -188,17 +188,19 @@ namespace TMVA { void InitBDT( void ); //some histograms for monitoring - TH1F* fBoostWeightHist; // weights applied in boosting - TH2F* fErrFractHist; // error fraction vs tree number - TTree* fMonitorNtuple; // monitoring ntuple - Int_t fITree; // ntuple var: ith tree - Double_t fBoostWeight; // ntuple var: boost weight - Double_t fErrorFraction; // ntuple var: misclassification error fraction - Int_t fNnodes; // ntuple var: nNodes - Double_t fPruneStrength; // a parameter to set the "amount" of pruning..needs to be adjusted - TMVA::DecisionTree::PruneMethod fPruneMethod; // method used for prunig - TString fPruneMethodS; // prune method option String - Bool_t fAutomatic; // use user given prune strength or automatically determined one using a validation sample + TH1F* fBoostWeightHist; // weights applied in boosting + TH1F* fBoostWeightVsTree;// weights applied in boosting vs tree number + TH1F* fErrFractHist; // error fraction vs tree number + TH1I* fNodesBeforePruningVsTree; // nNodesBeforePruning vs tree number + TH1I* fNodesAfterPruningVsTree; // nNodesAfterPruning vs tree number + TTree* fMonitorNtuple; // monitoring ntuple + Int_t fITree; // ntuple var: ith tree + Double_t fBoostWeight; // ntuple var: boost weight + Double_t fErrorFraction; // ntuple var: misclassification error fraction + Double_t fPruneStrength; // a parameter to set the "amount" of pruning..needs to be adjusted + TMVA::DecisionTree::EPruneMethod fPruneMethod; // method used for prunig + TString fPruneMethodS; // prune method option String + Bool_t fAutomatic; // use user given prune strength or automatically determined one using a validation sample std::vector<Double_t> fVariableImportance; // the relative importance of the different variables diff --git a/tmva/inc/MethodBase.h b/tmva/inc/MethodBase.h index 58c6a5131e9..dc73e2044d2 100644 --- a/tmva/inc/MethodBase.h +++ b/tmva/inc/MethodBase.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodBase.h,v 1.55 2006/11/14 23:02:57 stelzer Exp $ +// @(#)root/tmva $Id: MethodBase.h,v 1.57 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -76,7 +76,7 @@ namespace TMVA { public: - enum WeightFileType { kROOT=0, kTEXT }; + enum EWeightFileType { kROOT=0, kTEXT }; // default constructur MethodBase( TString jobName, @@ -126,19 +126,19 @@ namespace TMVA { const TString& GetMethodName ( void ) const { return fMethodName; } const char* GetName ( void ) const { return GetMethodName().Data(); } const TString& GetMethodTitle( void ) const { return fMethodTitle; } - const Types::MVA GetMethodType ( void ) const { return fMethodType; } + const Types::EMVA GetMethodType ( void ) const { return fMethodType; } void SetJobName ( TString jobName ) { fJobName = jobName; } void SetMethodName ( TString methodName ) { fMethodName = methodName; } void SetMethodTitle( TString methodTitle ) { fMethodTitle = methodTitle; } - void SetMethodType ( Types::MVA methodType ) { fMethodType = methodType; } + void SetMethodType ( Types::EMVA methodType ) { fMethodType = methodType; } TString GetOptions ( void ) const { return fOptions; } TString GetWeightFileExtension( void ) const { return fFileExtension; } void SetWeightFileExtension( TString fileExtension ) { fFileExtension = fileExtension; } - void SetWeightFileType( WeightFileType w ) { fWeightFileType = w; } - WeightFileType GetWeightFileType() const { return fWeightFileType; } + void SetWeightFileType( EWeightFileType w ) { fWeightFileType = w; } + EWeightFileType GetWeightFileType() const { return fWeightFileType; } TString GetWeightFileDir( void ) const { return fFileDir; } @@ -182,14 +182,14 @@ namespace TMVA { virtual Double_t GetmuTransform ( TTree* ); // normalisation accessors - Double_t GetXmin( Int_t ivar, Types::PreprocessingMethod corr = Types::kNone ) const { return fXminNorm[(Int_t) corr][ivar]; } - Double_t GetXmax( Int_t ivar, Types::PreprocessingMethod corr = Types::kNone ) const { return fXmaxNorm[(Int_t) corr][ivar]; } - Double_t GetXmin( const TString& var, Types::PreprocessingMethod corr = Types::kNone ) const { return GetXmin(Data().FindVar(var), corr); } - Double_t GetXmax( const TString& var, Types::PreprocessingMethod corr = Types::kNone ) const { return GetXmax(Data().FindVar(var), corr); } - void SetXmin( Int_t ivar, Double_t x, Types::PreprocessingMethod corr = Types::kNone ) { fXminNorm[(Int_t) corr][ivar] = x; } - void SetXmax( Int_t ivar, Double_t x, Types::PreprocessingMethod corr = Types::kNone ) { fXmaxNorm[(Int_t) corr][ivar] = x; } - void SetXmin( const TString& var, Double_t x, Types::PreprocessingMethod corr = Types::kNone ) { SetXmin(Data().FindVar(var), x, corr); } - void SetXmax( const TString& var, Double_t x, Types::PreprocessingMethod corr = Types::kNone ) { SetXmax(Data().FindVar(var), x, corr); } + Double_t GetXmin( Int_t ivar, Types::EPreprocessingMethod corr = Types::kNone ) const { return fXminNorm[(Int_t) corr][ivar]; } + Double_t GetXmax( Int_t ivar, Types::EPreprocessingMethod corr = Types::kNone ) const { return fXmaxNorm[(Int_t) corr][ivar]; } + Double_t GetXmin( const TString& var, Types::EPreprocessingMethod corr = Types::kNone ) const { return GetXmin(Data().FindVar(var), corr); } + Double_t GetXmax( const TString& var, Types::EPreprocessingMethod corr = Types::kNone ) const { return GetXmax(Data().FindVar(var), corr); } + void SetXmin( Int_t ivar, Double_t x, Types::EPreprocessingMethod corr = Types::kNone ) { fXminNorm[(Int_t) corr][ivar] = x; } + void 
SetXmax( Int_t ivar, Double_t x, Types::EPreprocessingMethod corr = Types::kNone ) { fXmaxNorm[(Int_t) corr][ivar] = x; } + void SetXmin( const TString& var, Double_t x, Types::EPreprocessingMethod corr = Types::kNone ) { SetXmin(Data().FindVar(var), x, corr); } + void SetXmax( const TString& var, Double_t x, Types::EPreprocessingMethod corr = Types::kNone ) { SetXmax(Data().FindVar(var), x, corr); } // main normalization method is in Tools Double_t Norm ( Int_t ivar, Double_t x ) const; @@ -202,18 +202,18 @@ namespace TMVA { // write method-specific histograms to file void WriteEvaluationHistosToFile( TDirectory* targetDir ); - Types::PreprocessingMethod GetPreprocessingMethod() const { return fPreprocessingMethod; } - void SetPreprocessingMethod ( Types::PreprocessingMethod m ) { fPreprocessingMethod = m; } + Types::EPreprocessingMethod GetPreprocessingMethod() const { return fPreprocessingMethod; } + void SetPreprocessingMethod ( Types::EPreprocessingMethod m ) { fPreprocessingMethod = m; } Bool_t Verbose( void ) const { return fVerbose; } void SetVerbose( Bool_t v = kTRUE ) { fVerbose = v; } DataSet& Data() const { return fData; } - Bool_t ReadTrainingEvent( UInt_t ievt, Types::SBType type = Types::kMaxSBType ) { + Bool_t ReadTrainingEvent( UInt_t ievt, Types::ESBType type = Types::kMaxSBType ) { return Data().ReadTrainingEvent( ievt, GetPreprocessingMethod(), (type == Types::kMaxSBType) ? GetPreprocessingType() : type ); } - virtual Bool_t ReadTestEvent( UInt_t ievt, Types::SBType type = Types::kMaxSBType ) { + virtual Bool_t ReadTestEvent( UInt_t ievt, Types::ESBType type = Types::kMaxSBType ) { return Data().ReadTestEvent( ievt, GetPreprocessingMethod(), (type == Types::kMaxSBType) ? GetPreprocessingType() : type ); } @@ -233,8 +233,8 @@ namespace TMVA { protected: // used in efficiency computation - enum CutOrientation { kNegative = -1, kPositive = +1 }; - CutOrientation GetCutOrientation() const { return fCutOrientation; } + enum ECutOrientation { kNegative = -1, kPositive = +1 }; + ECutOrientation GetCutOrientation() const { return fCutOrientation; } // reset required for RootFinder void ResetThisBase( void ) { fgThisBase = this; } @@ -245,17 +245,17 @@ namespace TMVA { void SetSignalReferenceCut( Double_t cut ) { fSignalReferenceCut = cut; } // some basic statistical analysis - void Statistics( TMVA::Types::TreeType treeType, const TString& theVarName, + void Statistics( TMVA::Types::ETreeType treeType, const TString& theVarName, Double_t&, Double_t&, Double_t&, Double_t&, Double_t&, Double_t&, Bool_t norm = kFALSE ); - Types::SBType GetPreprocessingType() const { return fPreprocessingType; } - void SetPreprocessingType( Types::SBType t ) { fPreprocessingType = t; } + Types::ESBType GetPreprocessingType() const { return fPreprocessingType; } + void SetPreprocessingType( Types::ESBType t ) { fPreprocessingType = t; } private: Double_t fSignalReferenceCut; // minimum requirement on the MVA output to declare an event signal-like - Types::SBType fPreprocessingType; // this is the event type (sig or bgd) assumed for preprocessing + Types::ESBType fPreprocessingType; // this is the event type (sig or bgd) assumed for preprocessing private: @@ -291,7 +291,7 @@ namespace TMVA { TString fJobName; // name of job -> user defined, appears in weight files TString fMethodName; // name of the method (set in derived class) - Types::MVA fMethodType; // type of method (set in derived class) + Types::EMVA fMethodType; // type of method (set in derived class) TString fMethodTitle; // user-defined 
title for method (used for weight-file names) TString fTestvar; // variable used in evaluation, etc (mostly the MVA) TString fTestvarPrefix; // 'MVA_' prefix of MVA variable @@ -309,7 +309,7 @@ namespace TMVA { TString fFileDir; // unix sub-directory for weight files (default: "weights") TString fWeightFile; // weight file name - WeightFileType fWeightFileType; // The type of weight file {kROOT,kTEXT} + EWeightFileType fWeightFileType; // The type of weight file {kROOT,kTEXT} protected: @@ -362,7 +362,7 @@ namespace TMVA { Double_t fXmax; // maximum (signal and background) Bool_t fUseDecorr; // Use decorrelated Variables (kept for backward compatibility) - Types::PreprocessingMethod fPreprocessingMethod; // Decorrelation, PCA, etc. + Types::EPreprocessingMethod fPreprocessingMethod; // Decorrelation, PCA, etc. TString fPreprocessingString; // labels preprocessing method TString fPreprocessingTypeString; // labels preprocessing type @@ -378,7 +378,7 @@ namespace TMVA { Int_t fNbinsH; // number of bins in evaluation histograms // orientation of cut: depends on signal and background mean values - CutOrientation fCutOrientation; // +1 if Sig>Bkg, -1 otherwise + ECutOrientation fCutOrientation; // +1 if Sig>Bkg, -1 otherwise // for root finder TSpline1* fSplRefS; // helper splines for RootFinder (signal) diff --git a/tmva/inc/MethodBayesClassifier.h b/tmva/inc/MethodBayesClassifier.h index 64456f6a9c9..0ee55d16f86 100644 --- a/tmva/inc/MethodBayesClassifier.h +++ b/tmva/inc/MethodBayesClassifier.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodBayesClassifier.h,v 1.2 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodBayesClassifier.h,v 1.3 2006/11/16 22:51:58 helgevoss Exp $ // Author: Abhishek Narain /********************************************************************************** @@ -17,7 +17,7 @@ * University of Houston, * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/MethodCFMlpANN.h b/tmva/inc/MethodCFMlpANN.h index 7ea8ea05fcb..a1e521a87a8 100644 --- a/tmva/inc/MethodCFMlpANN.h +++ b/tmva/inc/MethodCFMlpANN.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodCFMlpANN.h,v 1.20 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodCFMlpANN.h,v 1.21 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -57,13 +57,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -71,7 +71,7 @@ * (http://tmva.sourceforge.net/LICENSE) * * * * File and Version Information: * - * $Id: MethodCFMlpANN.h,v 1.20 2006/11/02 15:44:50 andreas.hoecker Exp $ + * $Id: MethodCFMlpANN.h,v 1.21 2006/11/16 22:51:58 helgevoss Exp $ **********************************************************************************/ #ifndef ROOT_TMVA_MethodCFMlpANN diff --git a/tmva/inc/MethodCFMlpANN_Utils.h b/tmva/inc/MethodCFMlpANN_Utils.h index 9c9d10ca224..2e02aa9971c 100644 --- a/tmva/inc/MethodCFMlpANN_Utils.h +++ b/tmva/inc/MethodCFMlpANN_Utils.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodCFMlpANN_Utils.h,v 1.13 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodCFMlpANN_Utils.h,v 1.14 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -18,13 +18,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/MethodCFMlpANN_def.h b/tmva/inc/MethodCFMlpANN_def.h index f9db848bcff..880ae1e77a8 100644 --- a/tmva/inc/MethodCFMlpANN_def.h +++ b/tmva/inc/MethodCFMlpANN_def.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodCFMlpANN_def.h,v 1.5 2006/08/30 22:19:58 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodCFMlpANN_def.h,v 1.6 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/MethodCommittee.h b/tmva/inc/MethodCommittee.h index 9c2cb54b848..216fec55bdf 100644 --- a/tmva/inc/MethodCommittee.h +++ b/tmva/inc/MethodCommittee.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodCommittee.h,v 1.5 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodCommittee.h,v 1.7 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -57,7 +57,7 @@ namespace TMVA { TString committeeTitle, DataSet& theData, TString committeeOptions, - Types::MVA method, + Types::EMVA method, TString methodOptions, TDirectory* theTargetDir = 0 ); @@ -122,7 +122,7 @@ namespace TMVA { TString fBoostType; // string specifying the boost type // options for the MVA method - Types::MVA fMemberType; // the MVA method to be boosted + Types::EMVA fMemberType; // the MVA method to be boosted TString fMemberOption; // the options for that method Bool_t fUseMemberDecision; // use binary information from IsSignal diff --git a/tmva/inc/MethodCuts.h b/tmva/inc/MethodCuts.h index b35e6bb71d5..38960edf799 100644 --- a/tmva/inc/MethodCuts.h +++ b/tmva/inc/MethodCuts.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodCuts.h,v 1.34 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodCuts.h,v 1.36 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Matt Jachowski, Peter Speckmayer, Helge Voss, Kai Voss /********************************************************************************** @@ -16,13 +16,13 @@ * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Matt Jachowski <jachowski@stanford.edu> - Stanford University, USA * * Peter Speckmayer <speckmay@mail.cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -123,34 +123,34 @@ namespace TMVA { private: // determines type of data to be optimised - enum ConstrainType { kConstrainEffS = 0, - kConstrainEffB } fConstrainType; + enum EConstrainType { kConstrainEffS = 0, + kConstrainEffB } fConstrainType; // optimisation method - enum FitMethodType { kUseMonteCarlo = 0, - kUseGeneticAlgorithm, - kUseSimulatedAnnealing }; + enum EFitMethodType { kUseMonteCarlo = 0, + kUseGeneticAlgorithm, + kUseSimulatedAnnealing }; // efficiency calculation method // - kUseEventSelection: computes efficiencies from given data sample // - kUsePDFs : creates smoothed PDFs from data samples, and // uses this to compute efficiencies - enum EffMethod { kUseEventSelection = 0, - kUsePDFs }; + enum EEffMethod { kUseEventSelection = 0, + kUsePDFs }; // improve the Monte Carlo by providing some additional information - enum FitParameters { kNotEnforced = 0, - kForceMin, - kForceMax, - kForceSmart, - kForceVerySmart }; + enum EFitParameters { kNotEnforced = 0, + kForceMin, + kForceMax, + kForceSmart, + kForceVerySmart }; // general TString fFitMethodS; // chosen fit method (string) - FitMethodType fFitMethod; // chosen fit method + EFitMethodType fFitMethod; // chosen fit method TString fEffMethodS; // chosen efficiency calculation method (string) - EffMethod fEffMethod; // chosen efficiency calculation method - vector<FitParameters>* fFitParams; // vector for series of fit methods + EEffMethod fEffMethod; // chosen efficiency calculation method + vector<EFitParameters>* fFitParams; // vector for series of fit methods Double_t fTestSignalEff; // used to test optimized signal efficiency Double_t fEffSMin; // used to test optimized signal efficiency Double_t fEffSMax; // used to test optimized signal efficiency diff --git a/tmva/inc/MethodFisher.h b/tmva/inc/MethodFisher.h index f6ce6aca5f5..4772543b1d0 100644 --- a/tmva/inc/MethodFisher.h +++ b/tmva/inc/MethodFisher.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodFisher.h,v 1.19 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodFisher.h,v 1.21 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Xavier Prudent, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -17,13 +17,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -31,7 +31,7 @@ * (http://tmva.sourceforge.net/LICENSE) * * * * File and Version Information: * - * $Id: MethodFisher.h,v 1.19 2006/11/02 15:44:50 andreas.hoecker Exp $ + * $Id: MethodFisher.h,v 1.21 2006/11/17 14:59:24 stelzer Exp $ **********************************************************************************/ #ifndef ROOT_TMVA_MethodFisher @@ -90,8 +90,8 @@ namespace TMVA { // calculate the MVA value virtual Double_t GetMvaValue(); - enum FisherMethod { kFisher, kMahalanobis }; - virtual FisherMethod GetFisherMethod( void ) { return fFisherMethod; } + enum EFisherMethod { kFisher, kMahalanobis }; + virtual EFisherMethod GetFisherMethod( void ) { return fFisherMethod; } // ranking of input variables const Ranking* CreateRanking(); @@ -146,7 +146,7 @@ namespace TMVA { Double_t fF0; // method to be used (Fisher or Mahalanobis) - FisherMethod fFisherMethod; + EFisherMethod fFisherMethod; // default initialisation called by all constructors void InitFisher( void ); diff --git a/tmva/inc/MethodHMatrix.h b/tmva/inc/MethodHMatrix.h index 5acb385b416..52f31b8f769 100644 --- a/tmva/inc/MethodHMatrix.h +++ b/tmva/inc/MethodHMatrix.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodHMatrix.h,v 1.18 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodHMatrix.h,v 1.20 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Xavier Prudent, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -18,13 +18,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -95,8 +95,8 @@ namespace TMVA { virtual void ProcessOptions(); // returns chi2 estimator for given type (signal or background) - Double_t GetChi2( Event *e, Types::SBType ) const; - Double_t GetChi2( Types::SBType ) const; + Double_t GetChi2( Event *e, Types::ESBType ) const; + Double_t GetChi2( Types::ESBType ) const; // compute correlation matrices void ComputeCovariance( Bool_t, TMatrixD* ); diff --git a/tmva/inc/MethodLikelihood.h b/tmva/inc/MethodLikelihood.h index fdee69cfd71..26a2e55d70f 100644 --- a/tmva/inc/MethodLikelihood.h +++ b/tmva/inc/MethodLikelihood.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodLikelihood.h,v 1.21 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodLikelihood.h,v 1.23 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -18,13 +18,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. 
of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -96,7 +96,7 @@ namespace TMVA { const Ranking* CreateRanking() { return 0; } // overload test event reading - virtual Bool_t ReadTestEvent(UInt_t ievt, Types::SBType type = Types::kSignal) { + virtual Bool_t ReadTestEvent(UInt_t ievt, Types::ESBType type = Types::kSignal) { return Data().ReadTestEvent( ievt, Types::kNone, type ); } @@ -113,7 +113,7 @@ namespace TMVA { Int_t fAverageEvtPerBin; // average events per bin; used to calculate fNbins // type of Splines used to smooth PDFs - PDF::SmoothMethod fSmoothMethod; + PDF::ESmoothMethod fSmoothMethod; // global weight file -- (needed !) TFile* fFin; diff --git a/tmva/inc/MethodMLP.h b/tmva/inc/MethodMLP.h index 197092d5e2f..1299b703926 100644 --- a/tmva/inc/MethodMLP.h +++ b/tmva/inc/MethodMLP.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodMLP.h,v 1.21 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodMLP.h,v 1.22 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Matt Jachowski /********************************************************************************** @@ -81,8 +81,8 @@ namespace TMVA { // for GA Double_t ComputeEstimator(const std::vector<Double_t>& parameters); - enum TrainingMethod { kBP=0, kGA }; - enum BPTrainingMode { kSequential=0, kBatch }; + enum ETrainingMethod { kBP=0, kGA }; + enum EBPTrainingMode { kSequential=0, kBatch }; private: @@ -96,7 +96,7 @@ namespace TMVA { void InitializeLearningRates(); // although this is only needed by backprop // used as a measure of success in all minimization techniques - Double_t CalculateEstimator( Types::TreeType treeType = Types::kTrain ); + Double_t CalculateEstimator( Types::ETreeType treeType = Types::kTraining ); // backpropagation functions void BackPropagationMinimize( Int_t nEpochs ); @@ -125,23 +125,23 @@ namespace TMVA { #endif // general - TrainingMethod fTrainingMethod; // method of training, BP or GA - TString fTrainMethodS; // training method option param + ETrainingMethod fTrainingMethod; // method of training, BP or GA + TString fTrainMethodS; // training method option param // backpropagation variables - Double_t fLearnRate; // learning rate for synapse weight adjustments - Double_t fDecayRate; // decay rate for above learning rate - BPTrainingMode fBPMode; // backprop learning mode (sequential or batch) - TString fBpModeS; // backprop learning mode option string (sequential or batch) - Int_t fBatchSize; // batch size, only matters if in batch learning mode - Int_t fTestRate; // test for overtraining performed at each #th epochs + Double_t fLearnRate; // learning rate for synapse weight adjustments + Double_t fDecayRate; // decay rate for above learning rate + EBPTrainingMode fBPMode; // backprop learning mode (sequential or batch) + TString fBpModeS; // backprop learning mode option string (sequential or batch) + Int_t fBatchSize; // batch size, only matters if in batch learning mode + Int_t fTestRate; // test for overtraining performed at each #th epochs // genetic algorithm variables - Int_t fGA_nsteps; // GA settings: number of steps - Int_t fGA_preCalc; // GA settings: number of pre-calc steps - Int_t fGA_SC_steps; // GA settings: SC_steps - Int_t fGA_SC_offsteps; // GA settings: SC_offsteps - Double_t fGA_SC_factor; // GA settings: SC_factor + Int_t fGA_nsteps; 
// GA settings: number of steps + Int_t fGA_preCalc; // GA settings: number of pre-calc steps + Int_t fGA_SC_steps; // GA settings: SC_steps + Int_t fGA_SC_offsteps; // GA settings: SC_offsteps + Double_t fGA_SC_factor; // GA settings: SC_factor #ifdef MethodMLP_UseMinuit__ // minuit variables -- commented out because they rely on a static pointer diff --git a/tmva/inc/MethodPDERS.h b/tmva/inc/MethodPDERS.h index db7075d78c9..09a885ef849 100644 --- a/tmva/inc/MethodPDERS.h +++ b/tmva/inc/MethodPDERS.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodPDERS.h,v 1.21 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodPDERS.h,v 1.23 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Yair Mahalalel, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -21,13 +21,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Yair Mahalalel <Yair.Mahalalel@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -132,7 +132,7 @@ namespace TMVA { TString fVolumeRange; // option volume range TString fKernelString; // option kernel estimator - enum VolumeRangeMode { + enum EVolumeRangeMode { kUnsupported = 0, kMinMax, kRMS, @@ -140,7 +140,7 @@ namespace TMVA { kUnscaled } fVRangeMode; - enum KernelEstimator { + enum EKernelEstimator { kBox = 0, kSphere, kTeepee, diff --git a/tmva/inc/MethodRuleFit.h b/tmva/inc/MethodRuleFit.h index f879a51885f..c1be599c74a 100644 --- a/tmva/inc/MethodRuleFit.h +++ b/tmva/inc/MethodRuleFit.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodRuleFit.h,v 1.24 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodRuleFit.h,v 1.25 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Fredrik Tegenfeldt, Helge Voss, Kai Voss /********************************************************************************** @@ -19,13 +19,13 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * * (http://tmva.sourceforge.net/LICENSE) * - * $Id: MethodRuleFit.h,v 1.24 2006/11/02 15:44:50 andreas.hoecker Exp $ + * $Id: MethodRuleFit.h,v 1.25 2006/11/16 22:51:58 helgevoss Exp $ **********************************************************************************/ #ifndef ROOT_TMVA_MethodRuleFit diff --git a/tmva/inc/MethodSVM.h b/tmva/inc/MethodSVM.h index de68da37dea..3f2b73499a0 100644 --- a/tmva/inc/MethodSVM.h +++ b/tmva/inc/MethodSVM.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodSVM.h,v 1.11 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodSVM.h,v 1.12 2006/11/16 22:51:58 helgevoss Exp $ // Author: Marcin .... /********************************************************************************** @@ -16,7 +16,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/MethodTMlpANN.h b/tmva/inc/MethodTMlpANN.h index 9d11bd36861..ece6c887a5f 100644 --- a/tmva/inc/MethodTMlpANN.h +++ b/tmva/inc/MethodTMlpANN.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodTMlpANN.h,v 1.19 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodTMlpANN.h,v 1.20 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -16,13 +16,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/MethodVariable.h b/tmva/inc/MethodVariable.h index d57c73b415d..ef63a604e4e 100644 --- a/tmva/inc/MethodVariable.h +++ b/tmva/inc/MethodVariable.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodVariable.h,v 1.15 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodVariable.h,v 1.16 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -14,13 +14,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/Methods.h b/tmva/inc/Methods.h index 0f744f3c6e4..1234f692c13 100644 --- a/tmva/inc/Methods.h +++ b/tmva/inc/Methods.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Methods.h,v 1.3 2006/10/26 19:55:40 andreas.hoecker Exp $ +// @(#)root/tmva $Id: Methods.h,v 1.4 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/MisClassificationError.h b/tmva/inc/MisClassificationError.h index 7f305190983..d68f8dd735a 100644 --- a/tmva/inc/MisClassificationError.h +++ b/tmva/inc/MisClassificationError.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MisClassificationError.h,v 1.9 2006/11/06 00:10:17 helgevoss Exp $ +// @(#)root/tmva $Id: MisClassificationError.h,v 1.10 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -15,7 +15,7 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * diff --git a/tmva/inc/MsgLogger.h b/tmva/inc/MsgLogger.h index c4477d94d81..63fc5a0a21b 100644 --- a/tmva/inc/MsgLogger.h +++ b/tmva/inc/MsgLogger.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MsgLogger.h,v 1.6 2006/10/17 21:22:29 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MsgLogger.h,v 1.8 2006/11/16 22:51:59 helgevoss Exp $ // Author: Attila Krasznahorkay /********************************************************************************** @@ -15,7 +15,7 @@ * * * Copyright (c) 2005: * * CERN, Switzerland, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -48,7 +48,7 @@ namespace TMVA { // define outside of class to facilite access - enum MsgType { + enum EMsgType { kVERBOSE = 1, kDEBUG = 2, kINFO = 3, @@ -62,16 +62,16 @@ namespace TMVA { public: - MsgLogger( const TObject* source, MsgType minType = kINFO ); - MsgLogger( const std::string& source, MsgType minType = kINFO ); - MsgLogger( MsgType minType = kINFO ); + MsgLogger( const TObject* source, EMsgType minType = kINFO ); + MsgLogger( const std::string& source, EMsgType minType = kINFO ); + MsgLogger( EMsgType minType = kINFO ); MsgLogger( const MsgLogger& parent ); ~MsgLogger(); // Accessors void SetSource ( const std::string& source ) { fStrSource = source; } - MsgType GetMinType() const { return fMinType; } - void SetMinType( MsgType minType ) { fMinType = minType; } + EMsgType GetMinType() const { return fMinType; } + void SetMinType( EMsgType minType ) { fMinType = minType; } UInt_t GetMaxSourceSize() const { return (UInt_t)fMaxSourceSize; } std::string GetPrintedSource() const; std::string GetFormattedSource() const; @@ -80,7 +80,7 @@ namespace TMVA { MsgLogger& operator= ( const MsgLogger& parent ); // Stream modifier(s) - static MsgLogger& endmsg( MsgLogger& logger ); + static MsgLogger& Endmsg( MsgLogger& logger ); // Accept stream modifiers MsgLogger& operator<< ( MsgLogger& ( *_f )( MsgLogger& ) ); @@ -88,7 +88,7 @@ namespace TMVA { MsgLogger& operator<< ( std::ios& ( *_f )( std::ios& ) ); // Accept message type specification - MsgLogger& operator<< ( MsgType type ); + MsgLogger& operator<< ( EMsgType type ); // For all the "conventional" inputs template <class T> MsgLogger& operator<< ( T arg ) { @@ -101,18 +101,18 @@ namespace TMVA { // private utility routines void Send(); void InitMaps(); - void 
WriteMsg( MsgType type, const std::string& line ) const; + void WriteMsg( EMsgType type, const std::string& line ) const; - const TObject* fObjSource; // the source TObject (used for name) - std::string fStrSource; // alternative string source - const std::string fPrefix; // the prefix of the source name - const std::string fSuffix; // suffix following source name - MsgType fActiveType; // active type - const std::string::size_type fMaxSourceSize; // maximum length of source name + const TObject* fObjSource; // the source TObject (used for name) + std::string fStrSource; // alternative string source + const std::string fPrefix; // the prefix of the source name + const std::string fSuffix; // suffix following source name + EMsgType fActiveType; // active type + const std::string::size_type fMaxSourceSize; // maximum length of source name - std::map<MsgType, std::string> fTypeMap; // matches output types with strings - std::map<MsgType, std::string> fColorMap; // matches output types with terminal colors - MsgType fMinType; // minimum type for output + std::map<EMsgType, std::string> fTypeMap; // matches output types with strings + std::map<EMsgType, std::string> fColorMap; // matches output types with terminal colors + EMsgType fMinType; // minimum type for output ClassDef(MsgLogger,0) // ostringstream derivative to redirect and format logging output ; @@ -135,7 +135,7 @@ namespace TMVA { return *this; } - inline MsgLogger& MsgLogger::operator<< ( MsgType type ) + inline MsgLogger& MsgLogger::operator<< ( EMsgType type ) { fActiveType = type; return *this; @@ -144,8 +144,8 @@ namespace TMVA { // Although the proper definition of "Endl" as a function pointer // would be nicer C++-wise, it introduces some "unused variable" // warnings so let's use the #define definition after all... - // static MsgLogger& ( *Endl )( MsgLogger& ) = &MsgLogger::endmsg; -#define Endl MsgLogger::endmsg + // static MsgLogger& ( *Endl )( MsgLogger& ) = &MsgLogger::Endmsg; +#define Endl MsgLogger::Endmsg } diff --git a/tmva/inc/Node.h b/tmva/inc/Node.h index 59abacb9cdc..e559c19c737 100644 --- a/tmva/inc/Node.h +++ b/tmva/inc/Node.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Node.h,v 1.19 2006/11/13 15:49:49 helgevoss Exp $ +// @(#)root/tmva $Id: Node.h,v 1.20 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/Option.h b/tmva/inc/Option.h index aa2ab79946e..c1d513e76be 100644 --- a/tmva/inc/Option.h +++ b/tmva/inc/Option.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Option.h,v 1.17 2006/11/14 23:02:57 stelzer Exp $ +// @(#)root/tmva $Id: Option.h,v 1.18 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/PDF.h b/tmva/inc/PDF.h index ffc2844a650..8962a8a8b64 100644 --- a/tmva/inc/PDF.h +++ b/tmva/inc/PDF.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: PDF.h,v 1.18 2006/10/15 12:06:32 andreas.hoecker Exp $ +// @(#)root/tmva $Id: PDF.h,v 1.20 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -52,10 +52,10 @@ namespace TMVA { public: - enum SmoothMethod { kSpline0, kSpline1, kSpline2, kSpline3, kSpline5 }; + enum ESmoothMethod { kSpline0, kSpline1, kSpline2, kSpline3, kSpline5 }; PDF( const TH1* theHist, - PDF::SmoothMethod method = kSpline2, + ESmoothMethod method = kSpline2, Int_t nsmooth = 0 ); virtual ~PDF( void ); diff --git a/tmva/inc/Ranking.h b/tmva/inc/Ranking.h index ab920200e77..6df05e19f37 100644 --- a/tmva/inc/Ranking.h +++ b/tmva/inc/Ranking.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Ranking.h,v 1.8 2006/10/15 23:32:38 andreas.hoecker Exp $ +// @(#)root/tmva $Id: Ranking.h,v 1.9 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,18 +13,18 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * * (http://tmva.sourceforge.net/LICENSE) * * * * File and Version Information: * - * $Id: Ranking.h,v 1.8 2006/10/15 23:32:38 andreas.hoecker Exp $ + * $Id: Ranking.h,v 1.9 2006/11/16 22:51:59 helgevoss Exp $ **********************************************************************************/ #ifndef ROOT_TMVA_Ranking diff --git a/tmva/inc/Reader.h b/tmva/inc/Reader.h index 286875abcb4..85b71903334 100644 --- a/tmva/inc/Reader.h +++ b/tmva/inc/Reader.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Reader.h,v 1.14 2006/11/13 23:43:34 stelzer Exp $ +// @(#)root/tmva $Id: Reader.h,v 1.16 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -14,13 +14,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -98,7 +98,7 @@ namespace TMVA { private: // this booking method is internal - IMethod* BookMVA( Types::MVA method, TString weightfile ); + IMethod* BookMVA( Types::EMVA method, TString weightfile ); DataSet * fDataSet; // the data set diff --git a/tmva/inc/RootFinder.h b/tmva/inc/RootFinder.h index 42bf50f6615..e7960a84f37 100644 --- a/tmva/inc/RootFinder.h +++ b/tmva/inc/RootFinder.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: RootFinder.h,v 1.12 2006/10/15 22:34:22 andreas.hoecker Exp $ +// @(#)root/tmva $Id: RootFinder.h,v 1.13 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -14,13 +14,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/Rule.h b/tmva/inc/Rule.h index f37070d6974..0bab346c7a1 100644 --- a/tmva/inc/Rule.h +++ b/tmva/inc/Rule.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Rule.h,v 1.22 2006/10/23 01:51:39 stelzer Exp $ +// @(#)root/tmva $Id: Rule.h,v 1.23 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss /********************************************************************************** @@ -21,7 +21,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * Iowa State U. * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * diff --git a/tmva/inc/RuleEnsemble.h b/tmva/inc/RuleEnsemble.h index 418a40e0a6b..949dc29aa8e 100644 --- a/tmva/inc/RuleEnsemble.h +++ b/tmva/inc/RuleEnsemble.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: RuleEnsemble.h,v 1.24 2006/10/23 01:51:39 stelzer Exp $ +// @(#)root/tmva $Id: RuleEnsemble.h,v 1.26 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss /********************************************************************************** @@ -19,7 +19,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * Iowa State U. 
* - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -60,7 +60,7 @@ namespace TMVA { public: - enum LearningModel { kFull, kRules, kLinear }; + enum ELearningModel { kFull, kRules, kLinear }; // main constructor RuleEnsemble( RuleFit* rf ); @@ -196,7 +196,7 @@ namespace TMVA { inline const Bool_t DoLinear() const { return (fLearningModel==kFull) || (fLearningModel==kLinear); } inline const Bool_t DoRules() const { return (fLearningModel==kFull) || (fLearningModel==kRules); } const Bool_t DoFull() const { return (fLearningModel==kFull); } - const LearningModel GetLearningModel() const { return fLearningModel; } + const ELearningModel GetLearningModel() const { return fLearningModel; } const Double_t GetImportanceCut() const { return fImportanceCut; } const Double_t GetOffset() const { return fOffset; } const UInt_t GetNRules() const { return fRules.size(); } @@ -262,7 +262,7 @@ namespace TMVA { // evaluate linear terms used to fill fEventLinearVal Double_t EvalLinEventRaw( UInt_t vind, const Event &e ); - LearningModel fLearningModel; // can be full (rules+linear), rules, linear + ELearningModel fLearningModel; // can be full (rules+linear), rules, linear Double_t fImportanceCut; // minimum importance accepted Double_t fOffset; // offset in discriminator function std::vector< Rule* > fRules; // vector of rules diff --git a/tmva/inc/RuleFit.h b/tmva/inc/RuleFit.h index b0b126d75f6..930ae540554 100644 --- a/tmva/inc/RuleFit.h +++ b/tmva/inc/RuleFit.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: RuleFit.h,v 1.24 2006/10/17 07:44:57 tegen Exp $ +// @(#)root/tmva $Id: RuleFit.h,v 1.25 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss /********************************************************************************** @@ -17,7 +17,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * Iowa State U. * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * diff --git a/tmva/inc/RuleFitParams.h b/tmva/inc/RuleFitParams.h index fbf4f2c8ed5..fd70b5c8e95 100644 --- a/tmva/inc/RuleFitParams.h +++ b/tmva/inc/RuleFitParams.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: RuleFitParams.h,v 1.19 2006/11/14 14:23:32 andreas.hoecker Exp $ +// @(#)root/tmva $Id: RuleFitParams.h,v 1.20 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss /********************************************************************************** @@ -23,7 +23,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * Iowa State U. 
* - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * diff --git a/tmva/inc/SdivSqrtSplusB.h b/tmva/inc/SdivSqrtSplusB.h index 70eae54e71b..92c2ceacf12 100644 --- a/tmva/inc/SdivSqrtSplusB.h +++ b/tmva/inc/SdivSqrtSplusB.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: SdivSqrtSplusB.h,v 1.9 2006/11/06 00:10:17 helgevoss Exp $ +// @(#)root/tmva $Id: SdivSqrtSplusB.h,v 1.10 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,7 +13,7 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * diff --git a/tmva/inc/SeparationBase.h b/tmva/inc/SeparationBase.h index 58b52b0a1d8..59650c4fc09 100644 --- a/tmva/inc/SeparationBase.h +++ b/tmva/inc/SeparationBase.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: SeparationBase.h,v 1.11 2006/11/06 00:10:17 helgevoss Exp $ +// @(#)root/tmva $Id: SeparationBase.h,v 1.12 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -30,7 +30,7 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * diff --git a/tmva/inc/SimulatedAnnealingBase.h b/tmva/inc/SimulatedAnnealingBase.h index 4ba0263be73..23381da02e2 100644 --- a/tmva/inc/SimulatedAnnealingBase.h +++ b/tmva/inc/SimulatedAnnealingBase.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: SimulatedAnnealingBase.h,v 1.6 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: SimulatedAnnealingBase.h,v 1.7 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/SimulatedAnnealingCuts.h b/tmva/inc/SimulatedAnnealingCuts.h index 7792ac8551e..d549e075817 100644 --- a/tmva/inc/SimulatedAnnealingCuts.h +++ b/tmva/inc/SimulatedAnnealingCuts.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: SimulatedAnnealingCuts.h,v 1.5 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: SimulatedAnnealingCuts.h,v 1.6 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/TActivationChooser.h b/tmva/inc/TActivationChooser.h index 5f4dd288a03..eedc2351fde 100644 --- a/tmva/inc/TActivationChooser.h +++ b/tmva/inc/TActivationChooser.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: TActivationChooser.h,v 1.8 2006/10/17 21:22:30 andreas.hoecker Exp $ +// @(#)root/tmva $Id: TActivationChooser.h,v 1.10 2006/11/17 14:59:24 stelzer Exp $ // Author: Matt Jachowski /********************************************************************************** @@ -64,6 +64,8 @@ namespace TMVA { TActivationChooser() : fLogger( "TActivationChooser" ) { + // defaut constructor + fLINEAR = "linear"; fSIGMOID = "sigmoid"; fTANH = "tanh"; @@ -71,14 +73,17 @@ namespace TMVA { } virtual ~TActivationChooser() {} - enum ActivationType { kLinear = 0, - kSigmoid, - kTanh, - kRadial + enum EActivationType { kLinear = 0, + kSigmoid, + kTanh, + kRadial }; - TActivation* CreateActivation(const ActivationType type) const + TActivation* CreateActivation(const EActivationType type) const { + // instantiate the correct activation object according to the + // type choosen (given as the enumeration type) + switch (type) { case kLinear: return new TActivationIdentity(); case kSigmoid: return new TActivationSigmoid(); @@ -93,6 +98,9 @@ namespace TMVA { TActivation* CreateActivation(const TString type) const { + // instantiate the correct activation object according to the + // type choosen (given by a TString) + if (type == fLINEAR) return CreateActivation(kLinear); else if (type == fSIGMOID) return CreateActivation(kSigmoid); else if (type == fTANH) return CreateActivation(kTanh); @@ -105,6 +113,8 @@ namespace TMVA { vector<TString>* GetAllActivationNames() const { + // retuns the names of all know activation functions + vector<TString>* names = new vector<TString>(); names->push_back(fLINEAR); names->push_back(fSIGMOID); diff --git a/tmva/inc/TNeuron.h b/tmva/inc/TNeuron.h index 06f1244058d..a05af06bfb9 100644 --- a/tmva/inc/TNeuron.h +++ b/tmva/inc/TNeuron.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: TNeuron.h,v 1.16 2006/10/15 12:06:33 andreas.hoecker Exp $ +// @(#)root/tmva $Id: TNeuron.h,v 1.17 2006/11/16 19:42:44 stelzer Exp $ // Author: Matt Jachowski 
/********************************************************************************** @@ -126,7 +126,7 @@ namespace TMVA { void InitNeuron(); void DeleteLinksArray( TObjArray*& links ); void PrintLinks ( TObjArray* links ); - void PrintMessage ( MsgType, TString message ); + void PrintMessage ( EMsgType, TString message ); // inlined helper functions Int_t NumLinks(TObjArray* links) { diff --git a/tmva/inc/TNeuronInputChooser.h b/tmva/inc/TNeuronInputChooser.h index 9483cf77d86..f6d505a3a28 100644 --- a/tmva/inc/TNeuronInputChooser.h +++ b/tmva/inc/TNeuronInputChooser.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: TNeuronInputChooser.h,v 1.4 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: TNeuronInputChooser.h,v 1.5 2006/11/17 14:59:24 stelzer Exp $ // Author: Matt Jachowski /********************************************************************************** @@ -65,12 +65,12 @@ namespace TMVA { } virtual ~TNeuronInputChooser() {} - enum NeuronInputType { kSum = 0, - kSqSum, - kAbsSum + enum ENeuronInputType { kSum = 0, + kSqSum, + kAbsSum }; - TNeuronInput* CreateNeuronInput(const NeuronInputType type) const + TNeuronInput* CreateNeuronInput(const ENeuronInputType type) const { switch (type) { case kSum: return new TNeuronInputSum(); diff --git a/tmva/inc/TSpline1.h b/tmva/inc/TSpline1.h index 7e8dee4b73d..9797e7dc62e 100644 --- a/tmva/inc/TSpline1.h +++ b/tmva/inc/TSpline1.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: TSpline1.h,v 1.9 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: TSpline1.h,v 1.10 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/TSpline2.h b/tmva/inc/TSpline2.h index 3b0425d4f7b..0db07416c8b 100644 --- a/tmva/inc/TSpline2.h +++ b/tmva/inc/TSpline2.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: TSpline2.h,v 1.10 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: TSpline2.h,v 1.11 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/Timer.h b/tmva/inc/Timer.h index 554a4c14cf6..1e249966092 100644 --- a/tmva/inc/Timer.h +++ b/tmva/inc/Timer.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Timer.h,v 1.9 2006/10/14 00:57:42 andreas.hoecker Exp $ +// @(#)root/tmva $Id: Timer.h,v 1.10 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/Tools.h b/tmva/inc/Tools.h index 77ba572aec7..33bbcaf0996 100644 --- a/tmva/inc/Tools.h +++ b/tmva/inc/Tools.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Tools.h,v 1.26 2006/10/15 22:34:22 andreas.hoecker Exp $ +// @(#)root/tmva $Id: Tools.h,v 1.27 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/inc/Types.h b/tmva/inc/Types.h index a82a70a3a75..87f6250958e 100644 --- a/tmva/inc/Types.h +++ b/tmva/inc/Types.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Types.h,v 1.25 2006/11/13 23:43:34 stelzer Exp $ +// @(#)root/tmva $Id: Types.h,v 1.28 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -54,32 +54,32 @@ namespace TMVA { public: // available MVA methods in TMVA - enum MVA { - Variable = 0, - Cuts , - Likelihood , - PDERS , - HMatrix , - Fisher , - CFMlpANN , - TMlpANN , - BDT , - RuleFit , - SVM , - MLP , - BayesClassifier, - Committee , + enum EMVA { + kVariable = 0, + kCuts , + kLikelihood , + kPDERS , + kHMatrix , + kFisher , + kCFMlpANN , + kTMlpANN , + kBDT , + kRuleFit , + kSVM , + kMLP , + kBayesClassifier, + kCommittee , kMaxMethod }; - enum PreprocessingMethod { + enum EPreprocessingMethod { kNone = 0, kDecorrelated, kPCA, kMaxPreprocessingMethod }; - enum SBType { + enum ESBType { kSignal = 0, kBackground, kSBBoth, @@ -87,18 +87,18 @@ namespace TMVA { kTrueType }; - enum TreeType { kTrain = 1, kTest }; + enum ETreeType { kTraining = 0, kTesting, kMaxTreeType }; public: static Types& Instance() { return fgTypesPtr ? *fgTypesPtr : *(fgTypesPtr = new Types()); } ~Types() {} - const MVA GetMethodType( const TString& method ) const { - std::map<TString, MVA>::const_iterator it = fStr2type.find( method ); + const EMVA GetMethodType( const TString& method ) const { + std::map<TString, EMVA>::const_iterator it = fStr2type.find( method ); if (it == fStr2type.end()) { fLogger << kFATAL << "unknown method in map: " << method << Endl; - return Variable; // Inserted to get rid of GCC warning... + return kVariable; // Inserted to get rid of GCC warning... } else return it->second; } @@ -111,7 +111,7 @@ namespace TMVA { private: - std::map<TString, MVA> fStr2type; // types-to-text map + std::map<TString, EMVA> fStr2type; // types-to-text map mutable MsgLogger fLogger; // message logger }; diff --git a/tmva/inc/VariableInfo.h b/tmva/inc/VariableInfo.h index b723104391a..5cc51ddc45d 100644 --- a/tmva/inc/VariableInfo.h +++ b/tmva/inc/VariableInfo.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: VariableInfo.h,v 1.13 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: VariableInfo.h,v 1.15 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2006: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -60,22 +60,22 @@ namespace TMVA { const TString& GetInternalVarName() const { return fInternalVarName; } char VarType() const { return fVarType; } char VarTypeOriginal() const { return fVarTypeOriginal; } - Double_t GetMin (Types::PreprocessingMethod corr = Types::kNone) const { return fXminNorm[(Int_t) corr]; } - Double_t GetMax (Types::PreprocessingMethod corr = Types::kNone) const { return fXmaxNorm[(Int_t) corr]; } - Double_t GetMean(Types::PreprocessingMethod corr = Types::kNone) const { return fXmeanNorm[(Int_t) corr]; } - Double_t GetRMS (Types::PreprocessingMethod corr = Types::kNone) const { return fXrmsNorm[(Int_t) corr]; } + Double_t GetMin (Types::EPreprocessingMethod corr = Types::kNone) const { return fXminNorm[(Int_t) corr]; } + Double_t GetMax (Types::EPreprocessingMethod corr = Types::kNone) const { return fXmaxNorm[(Int_t) corr]; } + Double_t GetMean(Types::EPreprocessingMethod corr = Types::kNone) const { return fXmeanNorm[(Int_t) corr]; } + Double_t GetRMS (Types::EPreprocessingMethod corr = Types::kNone) const { return fXrmsNorm[(Int_t) corr]; } void SetExpression(const TString& s) { fExpression = s; } void SetInternalVarName(const TString& s) { fInternalVarName = s; } void SetVarType(char c) { fVarType = c; } - void SetMin (Double_t v, Types::PreprocessingMethod corr = Types::kNone) { fXminNorm[(Int_t) corr] = v; } - void SetMax (Double_t v, Types::PreprocessingMethod corr = Types::kNone) { fXmaxNorm[(Int_t) corr] = v; } - void SetMean(Double_t v, Types::PreprocessingMethod corr = Types::kNone) { fXmeanNorm[(Int_t) corr] = v; } - void SetRMS (Double_t v, Types::PreprocessingMethod corr = Types::kNone) { fXrmsNorm[(Int_t) corr] = v; } + void SetMin (Double_t v, Types::EPreprocessingMethod corr = Types::kNone) { fXminNorm[(Int_t) corr] = v; } + void SetMax (Double_t v, Types::EPreprocessingMethod corr = Types::kNone) { fXmaxNorm[(Int_t) corr] = v; } + void SetMean(Double_t v, Types::EPreprocessingMethod corr = Types::kNone) { fXmeanNorm[(Int_t) corr] = v; } + void SetRMS (Double_t v, Types::EPreprocessingMethod corr = Types::kNone) { fXrmsNorm[(Int_t) corr] = v; } void SetExternalLink(void* p) { fExternalData = p; } - void WriteToStream(std::ostream& o, Types::PreprocessingMethod corr) const; - void ReadFromStream(std::istream& istr, Types::PreprocessingMethod corr); + void WriteToStream(std::ostream& o, Types::EPreprocessingMethod corr) const; + void ReadFromStream(std::istream& istr, Types::EPreprocessingMethod corr); void* GetExternalLink() const { return fExternalData; } // assignment operator (does not copy external link) diff --git a/tmva/inc/Volume.h b/tmva/inc/Volume.h index f9bdaf15295..b0130bcc95d 100644 --- a/tmva/inc/Volume.h +++ b/tmva/inc/Volume.h @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Volume.h,v 1.10 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: Volume.h,v 1.11 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai 
Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/BinarySearchTree.cxx b/tmva/src/BinarySearchTree.cxx index d88eef7c404..908fa5fac6d 100644 --- a/tmva/src/BinarySearchTree.cxx +++ b/tmva/src/BinarySearchTree.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: BinarySearchTree.cxx,v 1.22 2006/11/06 00:10:12 helgevoss Exp $ +// @(#)root/tmva $Id: BinarySearchTree.cxx,v 1.24 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -27,7 +27,7 @@ * (http://tmva.sourceforge.net/LICENSE) * * * * File and Version Information: * - * $Id: BinarySearchTree.cxx,v 1.22 2006/11/06 00:10:12 helgevoss Exp $ + * $Id: BinarySearchTree.cxx,v 1.24 2006/11/17 14:59:23 stelzer Exp $ **********************************************************************************/ //_______________________________________________________________________ @@ -198,7 +198,7 @@ Double_t TMVA::BinarySearchTree::GetSumOfWeights( void ) const //_______________________________________________________________________ Int_t TMVA::BinarySearchTree::Fill( const DataSet& ds, TTree* theTree, Int_t theType, - Types::PreprocessingMethod corr, Types::SBType type ) + Types::EPreprocessingMethod corr, Types::ESBType type ) { // create the search tree from the events in the DataSet Int_t nevents=0; diff --git a/tmva/src/BinarySearchTreeNode.cxx b/tmva/src/BinarySearchTreeNode.cxx index b2b03cee4ef..1216084ad7f 100644 --- a/tmva/src/BinarySearchTreeNode.cxx +++ b/tmva/src/BinarySearchTreeNode.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: BinarySearchTreeNode.cxx,v 1.4 2006/11/13 15:49:49 helgevoss Exp $ +// @(#)root/tmva $Id: BinarySearchTreeNode.cxx,v 1.5 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * CopyRight (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/BinaryTree.cxx b/tmva/src/BinaryTree.cxx index 82c3d4aff01..82715204586 100644 --- a/tmva/src/BinaryTree.cxx +++ b/tmva/src/BinaryTree.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: BinaryTree.cxx,v 1.23 2006/11/13 15:49:49 helgevoss Exp $ +// @(#)root/tmva $Id: BinaryTree.cxx,v 1.24 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/CrossEntropy.cxx b/tmva/src/CrossEntropy.cxx index 42d1325cab2..97e8418198c 100644 --- a/tmva/src/CrossEntropy.cxx +++ b/tmva/src/CrossEntropy.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: CrossEntropy.cxx,v 1.10 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: CrossEntropy.cxx,v 1.11 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -14,7 +14,7 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. 
of Victoria, Canada * * * * Copyright (c) 2005: * diff --git a/tmva/src/DataSet.cxx b/tmva/src/DataSet.cxx index 2b437f287c8..ef65307cc67 100644 --- a/tmva/src/DataSet.cxx +++ b/tmva/src/DataSet.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: DataSet.cxx,v 1.78 2006/11/14 23:02:57 stelzer Exp $ +// @(#)root/tmva $Id: DataSet.cxx,v 1.82 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,11 +13,11 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -56,14 +56,16 @@ TMVA::DataSet::DataSet() fWeightFormula( 0 ), fLogger( GetName(), kINFO ) { - fDecorrMatrix[0] = fDecorrMatrix[1] = 0; - fPrincipal[0] = fPrincipal[1] = 0; + // constructor + + fDecorrMatrix[0] = fDecorrMatrix[1] = 0; + fPrincipal[0] = fPrincipal[1] = 0; fPreprocessingEnabled[Types::kNone] = kTRUE; fPreprocessingEnabled[Types::kDecorrelated] = fPreprocessingEnabled[Types::kPCA] = kFALSE; for (Int_t corr=0; corr!=Types::kMaxPreprocessingMethod; corr++) fFlagPreprocessed[corr] = kFALSE; - for (Int_t dim1=0; dim1!=kMaxTreeType; dim1++) { + for (Int_t dim1=0; dim1!=Types::kMaxTreeType; dim1++) { for (Int_t dim2=0; dim2!=Types::kMaxSBType; dim2++) { fDataStats[dim1][dim2]=0; } @@ -76,8 +78,11 @@ TMVA::DataSet::~DataSet() } //_______________________________________________________________________ -Bool_t TMVA::DataSet::ReadEvent(TTree* tr, UInt_t evidx, Types::PreprocessingMethod corr, Types::SBType type) const +Bool_t TMVA::DataSet::ReadEvent(TTree* tr, UInt_t evidx, Types::EPreprocessingMethod corr, Types::ESBType type) const { + // read event from a tree into memory + // after the reading the event transformation is called + if (tr == 0) fLogger << kFATAL << "<ReadEvent> zero Tree Pointer encountered" << Endl; Bool_t needRead = kFALSE; @@ -105,8 +110,9 @@ Bool_t TMVA::DataSet::ReadEvent(TTree* tr, UInt_t evidx, Types::PreprocessingMet return ApplyTransformation( corr, (type == Types::kTrueType) ? 
Event().IsSignal() : (type == Types::kSignal) ); } -Bool_t TMVA::DataSet::ApplyTransformation( Types::PreprocessingMethod corr, Bool_t useSignal ) const +Bool_t TMVA::DataSet::ApplyTransformation( Types::EPreprocessingMethod corr, Bool_t useSignal ) const { + // applies the transformation (none, decorellation, PCA) to the event data switch (corr) { @@ -148,6 +154,9 @@ Bool_t TMVA::DataSet::ApplyTransformation( Types::PreprocessingMethod corr, Bool void TMVA::DataSet::ResetBranchAndEventAddresses( TTree* tree ) { + // resets all branch adresses of the tree given as parameter + // to the event memory + if (tree != 0) { tree->ResetBranchAddresses(); if (fEvent != 0) fEvent->SetBranchAddresses( tree ); @@ -158,26 +167,37 @@ void TMVA::DataSet::ResetBranchAndEventAddresses( TTree* tree ) //_______________________________________________________________________ void TMVA::DataSet::AddSignalTree(TTree* tr, double weight) { + // add a signal tree to the dataset to be used as input + // multiple trees is not used at the moment, use chains instead fSignalTrees.push_back(TreeInfo(tr,weight)); } //_______________________________________________________________________ void TMVA::DataSet::AddBackgroundTree(TTree* tr, double weight) { + // add a background tree to the dataset to be used as input + // multiple trees is not used at the moment, use chains instead fBackgroundTrees.push_back(TreeInfo(tr,weight)); } //_______________________________________________________________________ void TMVA::DataSet::AddVariable(const TString& expression, char varType, void* external) { + // add a variable (can be a complex expression) to the set of variables used in + // the MV analysis fVariables.push_back(VariableInfo(expression, fVariables.size()+1, varType, external)); fVariableStrings.push_back(expression); } //_______________________________________________________________________ // corr==0 - correlated, corr==1 - decorrelated, corr==2 - PCAed -void TMVA::DataSet::CalcNorm(Types::PreprocessingMethod corr) +void TMVA::DataSet::CalcNorm(Types::EPreprocessingMethod corr) { + // method to calculate minimum, maximum, mean, and RMS for all + // variables used in the MVA for the given data preprocessing + // method (none, decorrelate, PCA) + + // preprocessing method has to be enabled if(!PreprocessingEnabled(corr)) return; if (GetTrainingTree()==0) return; @@ -193,11 +213,11 @@ void TMVA::DataSet::CalcNorm(Types::PreprocessingMethod corr) TVectorD x0( nvar ); x0 *= 0; for (UInt_t ievt=0; ievt<nevts; ievt++) { - ReadTrainingEvent(ievt,(Types::PreprocessingMethod)corr); + ReadTrainingEvent(ievt,(Types::EPreprocessingMethod)corr); for (UInt_t ivar=0; ivar<nvar; ivar++) { Double_t x = Event().GetVal(ivar); - UpdateNorm( ivar, x, (Types::PreprocessingMethod)corr); + UpdateNorm( ivar, x, (Types::EPreprocessingMethod)corr); x2(ivar) += x*x; x0(ivar) += x; } @@ -221,6 +241,7 @@ void TMVA::DataSet::CalcNorm(Types::PreprocessingMethod corr) //_______________________________________________________________________ Int_t TMVA::DataSet::FindVar(const TString& var) const { + // find variable by name for (UInt_t ivar=0; ivar<GetNVariables(); ivar++) if (var == GetInternalVarName(ivar)) return ivar; fLogger << kFATAL << "<FindVar> variable \'" << var << "\' not found" << Endl; @@ -228,8 +249,9 @@ Int_t TMVA::DataSet::FindVar(const TString& var) const } //_______________________________________________________________________ -void TMVA::DataSet::UpdateNorm ( Int_t ivar, Double_t x, Types::PreprocessingMethod corr) +void 
TMVA::DataSet::UpdateNorm ( Int_t ivar, Double_t x, Types::EPreprocessingMethod corr) { + // update min and max of a given variable and a given preprocessing method if (x < GetXmin( ivar, corr )) SetXmin( ivar, x, corr ); if (x > GetXmax( ivar, corr )) SetXmax( ivar, x, corr ); } @@ -237,6 +259,17 @@ void TMVA::DataSet::UpdateNorm ( Int_t ivar, Double_t x, Types::PreprocessingMe //_______________________________________________________________________ void TMVA::DataSet::PrepareForTrainingAndTesting( Int_t Ntrain, Int_t Ntest, TString TreeName ) { + // The internally used training and testing trees are prepaired in + // this method + // First the variables (expressions) of interest are copied from + // the given signal and background trees/chains into the local + // trees (training and testing), according to the specified numbers + // of training and testing events + // Second DataSet::CalcNorm is called to determine min, max, mean, + // and rms for all variables + // Optionally (if specified as option) the decorrelation and PCA + // preparation is executed + fLogger << kINFO << "prepare training and Test samples" << Endl; fLogger << kINFO << "" << NSignalTrees() << " signal trees with total number of events : " << flush; @@ -275,7 +308,9 @@ void TMVA::DataSet::PrepareForTrainingAndTesting( Int_t Ntrain, Int_t Ntest, TSt if (Ntrain > 0 && Ntest == 0) { array[0] = Ntrain; - nsig_train = TMath::MinElement(3,array); + // nsig_train = TMath::MinElement(3,array); + // to be backward compatible with ROOT 4.02 + nsig_train = TMath::Min(array[0],TMath::Min(array[1],array[2])); nbkg_train = nsig_train; nsig_test_min = nsig_train; nbkg_test_min = nsig_train; @@ -291,7 +326,9 @@ void TMVA::DataSet::PrepareForTrainingAndTesting( Int_t Ntrain, Int_t Ntest, TSt } else if (Ntrain > 0 && Ntest > 0) { array[0] = Ntrain; - nsig_train = TMath::MinElement(3,array); + // nsig_train = TMath::MinElement(3,array); + // to be backward compatible with ROOT 4.02 + nsig_train = TMath::Min(array[0],TMath::Min(array[1],array[2])); nbkg_train = nsig_train; nsig_test_min = nsig_train; nbkg_test_min = nsig_train; @@ -492,22 +529,22 @@ void TMVA::DataSet::PrepareForTrainingAndTesting( Int_t Ntrain, Int_t Ntest, TSt ac++; if ( ac <= n_train[sb]) { trainingTree->Fill(); - fDataStats[kTraining][Types::kSBBoth]++; - fDataStats[kTraining][sb]++; + fDataStats[Types::kTraining][Types::kSBBoth]++; + fDataStats[Types::kTraining][sb]++; } if ((ac > n_test_min[sb])&& (ac <= n_test[sb])) { testTree->Fill(); - fDataStats[kTesting][Types::kSBBoth]++; - fDataStats[kTesting][sb]++; + fDataStats[Types::kTesting][Types::kSBBoth]++; + fDataStats[Types::kTesting][sb]++; } } } tr[sb]->ResetBranchAddresses(); // some output - fLogger << kINFO << "collected " << fDataStats[kTraining][sb] << " " << kindS[sb] + fLogger << kINFO << "collected " << fDataStats[Types::kTraining][sb] << " " << kindS[sb] << " events for the training sample" << Endl; - fLogger << kINFO << "collected " << fDataStats[kTesting][sb] << " " << kindS[sb] + fLogger << kINFO << "collected " << fDataStats[Types::kTesting][sb] << " " << kindS[sb] << " events for the test sample" << Endl; } @@ -560,8 +597,16 @@ void TMVA::DataSet::PrepareForTrainingAndTesting( Int_t Ntrain, Int_t Ntest, TSt } //_______________________________________________________________________ -Bool_t TMVA::DataSet::Preprocess( Types::PreprocessingMethod corr ) +Bool_t TMVA::DataSet::Preprocess( Types::EPreprocessingMethod corr ) { + // the dataset can be preprocessed with one of three methods + // - None: 
trivial identity + // - Decorellation: transformation that decorellates the input variables + // - PCA: Principal component analysis + // first the transformations are calculated, then the minimum, + // maximum, mean, and rms for the transformed set of variables are + // calculated (in DataSet::CalcNorm) + if( corr == Types::kNone ) return kTRUE; if( ! PreprocessingEnabled(corr) ) return kFALSE; @@ -608,6 +653,12 @@ Bool_t TMVA::DataSet::Preprocess( Types::PreprocessingMethod corr ) //_______________________________________________________________________ void TMVA::DataSet::ChangeToNewTree( TTree* tr ) { + // While the data gets copied into the local training and testing + // trees, the input tree can change (for intance when changing from + // signal to background tree, or using TChains as input) The + // TTreeFormulas, that hold the input expressions need to be + // reassociated with the new tree, which is done here + vector<TTreeFormula*>::const_iterator varFIt = fInputVarFormulas.begin(); for (;varFIt!=fInputVarFormulas.end();varFIt++) delete *varFIt; fInputVarFormulas.clear(); @@ -637,8 +688,11 @@ void TMVA::DataSet::ChangeToNewTree( TTree* tr ) } //_______________________________________________________________________ -void TMVA::DataSet::PlotVariables( TString tree, TString folderName, Types::PreprocessingMethod corr ) +void TMVA::DataSet::PlotVariables( TString tree, TString folderName, Types::EPreprocessingMethod corr ) { + // wrapper around PlotVariable(TTree*) that allows to call + // PlotVariables with a name + if(!PreprocessingEnabled(corr)) return; tree.ToLower(); if (tree.BeginsWith("train")) { @@ -650,8 +704,12 @@ void TMVA::DataSet::PlotVariables( TString tree, TString folderName, Types::Prep } //_______________________________________________________________________ -void TMVA::DataSet::PlotVariables( TTree* theTree, TString folderName, Types::PreprocessingMethod corr ) +void TMVA::DataSet::PlotVariables( TTree* theTree, TString folderName, Types::EPreprocessingMethod corr ) { + // create histograms from the input variables + // - histograms for all input variables + // - scatter plots for all pairs of input variables + if(!PreprocessingEnabled(corr)) return; // if decorrelation has not been achieved, the decorrelation tree may be empty @@ -682,7 +740,7 @@ void TMVA::DataSet::PlotVariables( TTree* theTree, TString folderName, Types::Pr TVectorD rmsS( nvar ), meanS( nvar ); TVectorD rmsB( nvar ), meanB( nvar ); - UInt_t nevts = theTree->GetEntries(); + UInt_t nevts = (UInt_t)theTree->GetEntries(); UInt_t nS = 0, nB = 0; for (UInt_t ievt=0; ievt<nevts; ievt++) { ReadTrainingEvent( ievt, corr, Types::kTrueType ); @@ -996,8 +1054,12 @@ Double_t TMVA::DataSet::GetSeparation( TH1* S, TH1* B ) const } //_______________________________________________________________________ -Bool_t TMVA::DataSet::PreparePreprocessing( Types::PreprocessingMethod corr, TTree* originalTree ) +Bool_t TMVA::DataSet::PreparePreprocessing( Types::EPreprocessingMethod corr, TTree* originalTree ) { + // For the preprocessing methods kDecorrelate and kPCA the + // transformation matrices are calculated. For the PCA the class + // TPrincipal is used. 
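For orientation, the TPrincipal workflow referred to in the comment above follows a fixed pattern: one row per event is accumulated, the principal components are computed once, and an input vector can then be projected onto the principal axes. The sketch below is a minimal standalone illustration of that pattern, not the TMVA code itself; the function name pca_sketch and the toy variables are invented for the example.

// Minimal sketch of the TPrincipal workflow (illustrative only, not TMVA code).
#include "TPrincipal.h"
#include "TRandom3.h"

void pca_sketch()
{
   const Int_t nvar = 3;
   TPrincipal principal(nvar, "");        // "": no normalisation, no storage of input rows

   TRandom3 rnd(1);
   Double_t row[nvar];
   for (Int_t ievt = 0; ievt < 1000; ievt++) {
      row[0] = rnd.Gaus(0, 1);
      row[1] = 0.5*row[0] + rnd.Gaus(0, 0.2);   // deliberately correlated with row[0]
      row[2] = rnd.Gaus(0, 2);
      principal.AddRow(row);                    // accumulate covariance information event by event
   }
   principal.MakePrincipals();                  // compute eigenvectors/eigenvalues once

   Double_t x[nvar] = { 1.0, 0.4, -2.0 };
   Double_t p[nvar];
   principal.X2P(x, p);                         // project one event onto the principal axes
}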
+ if( originalTree == 0 ) return kFALSE; // creates a deep copy of a tree with all of the values decorrelated @@ -1179,51 +1241,57 @@ void TMVA::DataSet::GetSQRMats( TMatrixD*& sqS, TMatrixD*& sqB, vector<TString>* //_______________________________________________________________________ void TMVA::DataSet::CalculatePrincipalComponents (TTree* originalTree, TPrincipal *&sigPrincipal, TPrincipal *&bgdPrincipal, vector<TString>* theVars ) { - if (sigPrincipal != NULL) { delete sigPrincipal; sigPrincipal = 0; } - if (bgdPrincipal != NULL) { delete bgdPrincipal; sigPrincipal = 0; } - - Int_t nvar = (int)theVars->size(); - sigPrincipal = new TPrincipal (nvar, ""); // Not normalizing and not storing input data, for performance reasons. Should perhaps restore normalization. - bgdPrincipal = new TPrincipal (nvar, ""); - - // Should we shove this into TMVA::Tools? - - TObjArrayIter branchIter( originalTree->GetListOfBranches(), kIterForward ); - TBranch* branch = NULL; - Long64_t ievt, entries = originalTree->GetEntries(); - Float_t * fvec = new Float_t [nvar]; - Double_t * dvec = new Double_t [nvar]; - Int_t type, jvar=-1; - Float_t weight, boostweight; - - while ((branch = (TBranch*)branchIter.Next()) != 0) { - if ((TString)branch->GetName() == "type") - originalTree->SetBranchAddress( branch->GetName(), &type ); - else if ((TString)branch->GetName() == "weight") - originalTree->SetBranchAddress( branch->GetName(), &weight ); - else if ((TString)branch->GetName() == "boostweight") - originalTree->SetBranchAddress( branch->GetName(), &boostweight ); - else - originalTree->SetBranchAddress( branch->GetName(), &fvec[++jvar] ); - } - - for (ievt=0; ievt<entries; ievt++) { - originalTree->GetEntry( ievt ); - TPrincipal *princ = type == Types::kSignal ? sigPrincipal : bgdPrincipal; - for (Int_t i = 0; i < nvar; i++) - dvec [i] = (Double_t) fvec [i]; - princ->AddRow (dvec); - } - - sigPrincipal->MakePrincipals(); - bgdPrincipal->MakePrincipals(); - - delete fvec; delete dvec; + // calculate the principal components for the signal and the background data + // it uses the MakePrincipal method of ROOT's TPrincipal class + + if (sigPrincipal != NULL) { delete sigPrincipal; sigPrincipal = 0; } + if (bgdPrincipal != NULL) { delete bgdPrincipal; sigPrincipal = 0; } + + Int_t nvar = (int)theVars->size(); + sigPrincipal = new TPrincipal (nvar, ""); // Not normalizing and not storing input data, for performance reasons. Should perhaps restore normalization. + bgdPrincipal = new TPrincipal (nvar, ""); + + // Should we shove this into TMVA::Tools? + + TObjArrayIter branchIter( originalTree->GetListOfBranches(), kIterForward ); + TBranch* branch = NULL; + Long64_t ievt, entries = originalTree->GetEntries(); + Float_t * fvec = new Float_t [nvar]; + Double_t * dvec = new Double_t [nvar]; + Int_t type, jvar=-1; + Float_t weight, boostweight; + + while ((branch = (TBranch*)branchIter.Next()) != 0) { + if ((TString)branch->GetName() == "type") + originalTree->SetBranchAddress( branch->GetName(), &type ); + else if ((TString)branch->GetName() == "weight") + originalTree->SetBranchAddress( branch->GetName(), &weight ); + else if ((TString)branch->GetName() == "boostweight") + originalTree->SetBranchAddress( branch->GetName(), &boostweight ); + else + originalTree->SetBranchAddress( branch->GetName(), &fvec[++jvar] ); + } + + for (ievt=0; ievt<entries; ievt++) { + originalTree->GetEntry( ievt ); + TPrincipal *princ = type == Types::kSignal ? 
sigPrincipal : bgdPrincipal; + for (Int_t i = 0; i < nvar; i++) + dvec [i] = (Double_t) fvec [i]; + princ->AddRow (dvec); + } + + sigPrincipal->MakePrincipals(); + bgdPrincipal->MakePrincipals(); + + delete fvec; delete dvec; } //_______________________________________________________________________ -void TMVA::DataSet::WriteVarsToStream(std::ostream& o, Types::PreprocessingMethod corr) const +void TMVA::DataSet::WriteVarsToStream(std::ostream& o, Types::EPreprocessingMethod corr) const { + // write the list of variables (name, min, max) for a given data + // transformation method to the stream + o << "NVar " << GetNVariables() << endl; std::vector<VariableInfo>::const_iterator varIt = fVariables.begin(); for (;varIt!=fVariables.end(); varIt++) varIt->WriteToStream(o,corr); @@ -1232,6 +1300,7 @@ void TMVA::DataSet::WriteVarsToStream(std::ostream& o, Types::PreprocessingMetho //_______________________________________________________________________ void TMVA::DataSet::WriteCorrMatToStream(std::ostream& o) const { + // write the decorrelation matrix to the stream for (Int_t matType=0; matType<2; matType++) { o << "# correlation matrix " << endl; TMatrixD* mat = fDecorrMatrix[matType]; @@ -1247,8 +1316,11 @@ void TMVA::DataSet::WriteCorrMatToStream(std::ostream& o) const } //_______________________________________________________________________ -void TMVA::DataSet::ReadVarsFromStream(std::istream& istr, Types::PreprocessingMethod corr) +void TMVA::DataSet::ReadVarsFromStream(std::istream& istr, Types::EPreprocessingMethod corr) { + // Read the variables (name, min, max) for a given data + // transformation method from the stream. + TString dummy; Int_t readNVar; istr >> dummy >> readNVar; @@ -1293,6 +1365,8 @@ void TMVA::DataSet::ReadVarsFromStream(std::istream& istr, Types::PreprocessingM //_______________________________________________________________________ void TMVA::DataSet::ReadCorrMatFromStream(std::istream& istr ) { + // Read the decorellation matrix from an input stream + char buf[512]; istr.getline(buf,512); TString strvar, dummy; diff --git a/tmva/src/DecisionTree.cxx b/tmva/src/DecisionTree.cxx index 4eba30e6cfb..9892652601a 100644 --- a/tmva/src/DecisionTree.cxx +++ b/tmva/src/DecisionTree.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: DecisionTree.cxx,v 1.45 2006/11/14 15:21:00 stelzer Exp $ +// @(#)root/tmva $Id: DecisionTree.cxx,v 1.48 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -27,7 +27,7 @@ * (http://mva.sourceforge.net/license.txt) * * * * File and Version Information: * - * $Id: DecisionTree.cxx,v 1.45 2006/11/14 15:21:00 stelzer Exp $ + * $Id: DecisionTree.cxx,v 1.48 2006/11/16 22:51:58 helgevoss Exp $ **********************************************************************************/ //_______________________________________________________________________ @@ -172,7 +172,8 @@ TMVA::DecisionTree::~DecisionTree( void ) } //_______________________________________________________________________ -void TMVA::DecisionTree::SetParentTreeInNodes( DecisionTreeNode *n){ +void TMVA::DecisionTree::SetParentTreeInNodes( DecisionTreeNode *n) +{ // descend a tree to find all its leaf nodes, fill max depth reached in the // tree at the same time. @@ -354,7 +355,8 @@ void TMVA::DecisionTree::FillEvent( TMVA::Event & event, //_______________________________________________________________________ -void TMVA::DecisionTree::ClearTree(){ +void TMVA::DecisionTree::ClearTree() +{ // clear the tree nodes (their S/N, Nevents etc), just keep the structure of the tree if (this->GetRoot()!=NULL) @@ -364,7 +366,8 @@ void TMVA::DecisionTree::ClearTree(){ //_______________________________________________________________________ -void TMVA::DecisionTree::PruneTree(){ +void TMVA::DecisionTree::PruneTree() +{ // prune (get rid of internal nodes) the Decision tree to avoid overtraining // serveral different pruning methods can be applied as selected by the // variable "fPruneMethod". Currently however only the Expected Error Pruning @@ -387,7 +390,8 @@ void TMVA::DecisionTree::PruneTree(){ //_______________________________________________________________________ -void TMVA::DecisionTree::PruneTreeEEP(DecisionTreeNode *node){ +void TMVA::DecisionTree::PruneTreeEEP(DecisionTreeNode *node) +{ // recursive prunig of nodes using the Expected Error Pruning (EEP) // if internal node, then prune DecisionTreeNode *l = (DecisionTreeNode*)node->GetLeft(); @@ -404,7 +408,8 @@ void TMVA::DecisionTree::PruneTreeEEP(DecisionTreeNode *node){ } //_______________________________________________________________________ -void TMVA::DecisionTree::PruneTreeCC(){ +void TMVA::DecisionTree::PruneTreeCC() +{ // prunig of nodes using the Cost Complexity criteria. The Pruning is performed // until a minimum in the cost complexity CC(alpha) is reached. // CC(alpha) = alpha*NLeafs + sum_over_leafs[ N*Quality(leaf) ] @@ -429,7 +434,8 @@ void TMVA::DecisionTree::PruneTreeCC(){ //_______________________________________________________________________ -void TMVA::DecisionTree::PruneTreeMCC(){ +void TMVA::DecisionTree::PruneTreeMCC() +{ // Similar to the CostCoplexity pruning, only here I calculate immediately // the "prunestrength" (= alpha, the regularisation parameter in the CostComplexity) // for which the respective subtree below a node would be pruned. 
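To make the cost-complexity criterion quoted above concrete, the sketch below evaluates CC(alpha) = alpha*NLeaves + sum over leaves of N*Quality(leaf) for a toy list of leaves. It is illustrative only: the Leaf struct and CostComplexity function are invented for the example, whereas the real code works on DecisionTreeNode objects; a larger alpha penalises the number of leaves more strongly and therefore favours pruning.

// Illustrative only: CC(alpha) = alpha*NLeaves + sum_leaves[ N*Quality(leaf) ]
// for a toy list of leaves ("Leaf" and its fields are invented for this sketch).
#include <vector>

struct Leaf { double nEvents; double quality; };   // quality: e.g. the leaf's misclassification rate

double CostComplexity(const std::vector<Leaf>& leaves, double alpha)
{
   double sum = 0;
   for (size_t i = 0; i < leaves.size(); i++)
      sum += leaves[i].nEvents * leaves[i].quality;
   return alpha * leaves.size() + sum;
}

// Example: alpha = 2 and three leaves with 100/50/50 events and qualities 0.10/0.20/0.05
// give CC = 2*3 + (10 + 10 + 2.5) = 28.5.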
Then I continue @@ -768,39 +774,39 @@ Double_t TMVA::DecisionTree::GetNodeError(DecisionTreeNode *node) // this node) is (1-f) // now f has a statistical error according to the binomial distribution // hence the error on f can be estimated (same error as the binomial error - // for efficency calculations ( sigma = sqrt(eff(1-eff)/N) ) + // for efficency calculations ( sigma = sqrt(eff(1-eff)/nEvts ) ) - Double_t errorRate=0; + Double_t errorRate = 0; - Double_t N=node->GetNEvents(); + Double_t nEvts = node->GetNEvents(); //fraction of correctly classified events by this node: Double_t f=0; if (node->GetSoverSB() > 0.5) f = node->GetSoverSB(); else f = (1-node->GetSoverSB()); - Double_t df = sqrt(f*(1-f)/N); + Double_t df = sqrt(f*(1-f)/nEvts ); errorRate = std::min(1.,(1 - (f-fPruneStrength*df) )); // ------------------------------------------------------------------- // Minimum Error Pruning (MEP) accordig to Niblett/Bratko //# of correctly classified events by this node: - //Double_t n=f*N; + //Double_t n=f*nEvts ; //Double_t p_apriori = 0.5, m=100; - //errorRate = (N - n + (1-p_apriori) * m ) / (N + m); + //errorRate = (nEvts - n + (1-p_apriori) * m ) / (nEvts + m); // Pessimistic error Pruing (proposed by Quinlan (error estimat with continuity approximation) //# of correctly classified events by this node: - //Double_t n=f*N; - //errorRate = (N - n + 0.5) / N; + //Double_t n=f*nEvts ; + //errorRate = (nEvts - n + 0.5) / nEvts ; //const Double Z=.65; //# of correctly classified events by this node: - //Double_t n=f*N; - //errorRate = (f + Z*Z/(2*N) + Z*sqrt(f/N - f*f/N + Z*Z/4/N/N) ) / (1 + Z*Z/N); - //errorRate = (n + Z*Z/2 + Z*sqrt(n - n*n/N + Z*Z/4) )/ (N + Z*Z); + //Double_t n=f*nEvts ; + //errorRate = (f + Z*Z/(2*nEvts ) + Z*sqrt(f/nEvts - f*f/nEvts + Z*Z/4/nEvts /nEvts ) ) / (1 + Z*Z/nEvts ); + //errorRate = (n + Z*Z/2 + Z*sqrt(n - n*n/nEvts + Z*Z/4) )/ (nEvts + Z*Z); //errorRate = 1 - errorRate; // ------------------------------------------------------------------- diff --git a/tmva/src/DecisionTreeNode.cxx b/tmva/src/DecisionTreeNode.cxx index da5c667d7ca..aafd2a591a7 100644 --- a/tmva/src/DecisionTreeNode.cxx +++ b/tmva/src/DecisionTreeNode.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: DecisionTreeNode.cxx,v 1.23 2006/11/14 15:03:46 stelzer Exp $ +// @(#)root/tmva $Id: DecisionTreeNode.cxx,v 1.25 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * CopyRight (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -144,7 +144,7 @@ Bool_t TMVA::DecisionTreeNode::GoesLeft(const TMVA::Event & e) const Double_t TMVA::DecisionTreeNode::GetSoverSB( void ) const { // return the S/(S+B) for the node - return this->GetNSigEvents() / ( this->GetNSigEvents() + this->GetNBkgEvents()); + return this->GetNSigEvents() / ( this->GetNSigEvents() + this->GetNBkgEvents()); } //_______________________________________________________________________ diff --git a/tmva/src/Event.cxx b/tmva/src/Event.cxx index e0e69ecbd6c..cc125e18b58 100644 --- a/tmva/src/Event.cxx +++ b/tmva/src/Event.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Event.cxx,v 1.26 2006/11/13 20:02:23 helgevoss Exp $ +// @(#)root/tmva $Id: Event.cxx,v 1.29 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -86,6 +86,9 @@ TMVA::Event::Event(const Event& event) void TMVA::Event::InitPointers(bool AllowExternalLink) { + // sets the links of fVarPtr to the internal arrays that hold the + // integer and float variables + fVarPtrI = new Int_t[fCountI]; fVarPtrF = new Float_t[fCountF]; @@ -97,14 +100,14 @@ void TMVA::Event::InitPointers(bool AllowExternalLink) // set the void pointer (which are used to access the data) to the proper field // if external field is given if (AllowExternalLink&& var.GetExternalLink()!=0) { - fVarPtr[ivar] = var.GetExternalLink(); - // or if its type is I(int) or F(float) - } - else if (var.VarType()=='F') { + fVarPtr[ivar] = var.GetExternalLink(); + // or if its type is I(int) or F(float) + } + else if (var.VarType()=='F') { // set the void pointer to the float field fVarPtr[ivar] = fVarPtrF+ivarF++; } - else if (var.VarType()=='I') { + else if (var.VarType()=='I') { // set the void pointer to the int field fVarPtr[ivar] = fVarPtrI+ivarI++; } @@ -115,6 +118,9 @@ void TMVA::Event::InitPointers(bool AllowExternalLink) //____________________________________________________________ void TMVA::Event::SetBranchAddresses(TTree *tr) { + // sets the branch addresses of the associated + // tree to the local memory as given by fVarPtr + fBranches.clear(); Int_t ivar(0); TBranch * br(0); @@ -144,6 +150,8 @@ void TMVA::Event::CopyVarValues( const Event& other ) //____________________________________________________________ void TMVA::Event::SetVal(UInt_t ivar, Float_t val) { + // set variable ivar to val + if (ivar>=GetNVars()) fLogger << kFATAL << "<SetVal> cannot set value for variable index " << ivar << ", exceeds max index " << GetNVars()-1 << Endl; @@ -154,12 +162,16 @@ void TMVA::Event::SetVal(UInt_t ivar, Float_t val) //____________________________________________________________ Float_t TMVA::Event::GetValueNormalized(Int_t ivar) const { + // returns the value of variable ivar, 
normalized to [-1,1] + return Tools::NormVariable(GetVal(ivar),fVariables[ivar].GetMin(),fVariables[ivar].GetMax()); } //____________________________________________________________ void TMVA::Event::Print(std::ostream& o) const { + // print method + o << fVariables.size() << " vars: "; for(UInt_t ivar=0; ivar<fVariables.size(); ivar++) o << std::setw(10) << GetVal(ivar); diff --git a/tmva/src/Factory.cxx b/tmva/src/Factory.cxx index fbf6ad91d38..2f666ebe9ba 100644 --- a/tmva/src/Factory.cxx +++ b/tmva/src/Factory.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Factory.cxx,v 1.80 2006/11/15 00:20:32 stelzer Exp $ +// @(#)root/tmva $Id: Factory.cxx,v 1.85 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -74,15 +74,16 @@ const int MinNoTrainingEvents = 10; const int MinNoTestEvents = 1; const long int basketsize = 1280000; -const TString BCwhite__f ( "\033[1;37m" ); -const TString BCred__f ( "\033[31m" ); -const TString BCblue__f ( "\033[34m" ); -const TString BCblue__b ( "\033[44m" ); -const TString BCred__b ( "\033[1;41m" ); -const TString EC__ ( "\033[0m" ); -const TString BClblue__b ( "\033[1;44m" ); -const TString BC_yellow = "\033[1;33m"; -const TString BC_lgreen = "\033[1;32m"; +const TString BCwhite__f = "\033[1;37m"; +const TString BCred__f = "\033[31m"; +const TString BCblue__f = "\033[34m"; +const TString BCblue__b = "\033[44m"; +const TString BCred__b = "\033[1;41m"; +const TString EC__ = "\033[0m"; +const TString BClblue__b = "\033[1;44m"; +const TString BC_yellow = "\033[1;33m"; +const TString BC_lgreen = "\033[1;32m"; +const TString BC_green = "\033[32m"; using namespace std; @@ -517,7 +518,7 @@ void TMVA::Factory::PrepareTrainingAndTestTree( TCut cut, Int_t Ntrain, Int_t Nt // the samples. // ------------------------------------------------------------------------ // - fLogger << kINFO << BC_lgreen << "preparing trees for training and testing..." << EC__ << Endl; + fLogger << kINFO << "preparing trees for training and testing..." 
<< Endl; if(fMultipleMVAs) Data().SetMultiCut(cut); else Data().SetCut(cut); @@ -566,7 +567,7 @@ Bool_t TMVA::Factory::BookMethod( TString theMethodName, TString methodTitle, TS } //_______________________________________________________________________ -Bool_t TMVA::Factory::BookMethod( TMVA::Types::MVA theMethod, TString methodTitle, TString theOption ) +Bool_t TMVA::Factory::BookMethod( TMVA::Types::EMVA theMethod, TString methodTitle, TString theOption ) { // books MVA method; the option configuration string is custom for each MVA // the TString field "theNameAppendix" serves to define (and distringuish) @@ -577,31 +578,31 @@ Bool_t TMVA::Factory::BookMethod( TMVA::Types::MVA theMethod, TString methodTitl // initialize methods switch(theMethod) { - case TMVA::Types::Cuts: + case TMVA::Types::kCuts: method = new TMVA::MethodCuts ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::Fisher: + case TMVA::Types::kFisher: method = new TMVA::MethodFisher ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::MLP: + case TMVA::Types::kMLP: method = new TMVA::MethodMLP ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::TMlpANN: + case TMVA::Types::kTMlpANN: method = new TMVA::MethodTMlpANN ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::CFMlpANN: + case TMVA::Types::kCFMlpANN: method = new TMVA::MethodCFMlpANN ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::Likelihood: + case TMVA::Types::kLikelihood: method = new TMVA::MethodLikelihood ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::Variable: + case TMVA::Types::kVariable: method = new TMVA::MethodVariable ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::HMatrix: + case TMVA::Types::kHMatrix: method = new TMVA::MethodHMatrix ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::PDERS: + case TMVA::Types::kPDERS: method = new TMVA::MethodPDERS ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::BDT: + case TMVA::Types::kBDT: method = new TMVA::MethodBDT ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::SVM: + case TMVA::Types::kSVM: method = new TMVA::MethodSVM ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::RuleFit: + case TMVA::Types::kRuleFit: method = new TMVA::MethodRuleFit ( fJobName, methodTitle, Data(), theOption ); break; - case TMVA::Types::BayesClassifier: + case TMVA::Types::kBayesClassifier: method = new TMVA::MethodBayesClassifier( fJobName, methodTitle, Data(), theOption ); break; default: fLogger << kFATAL << "method: " << theMethod << " does not exist" << Endl; @@ -613,8 +614,8 @@ Bool_t TMVA::Factory::BookMethod( TMVA::Types::MVA theMethod, TString methodTitl } //_______________________________________________________________________ -Bool_t TMVA::Factory::BookMethod( TMVA::Types::MVA theMethod, TString methodTitle, TString methodOption, - TMVA::Types::MVA theCommittee, TString committeeOption ) +Bool_t TMVA::Factory::BookMethod( TMVA::Types::EMVA theMethod, TString methodTitle, TString methodOption, + TMVA::Types::EMVA theCommittee, TString committeeOption ) { // books MVA method; the option configuration string is custom for each MVA // the TString field "theNameAppendix" serves to define (and distringuish) @@ -625,7 +626,7 @@ Bool_t TMVA::Factory::BookMethod( TMVA::Types::MVA theMethod, TString methodTitl // initialize methods switch(theMethod) { - case TMVA::Types::Committee: + 
case TMVA::Types::kCommittee: method = new TMVA::MethodCommittee( fJobName, methodTitle, Data(), methodOption, theCommittee, committeeOption ); break; default: fLogger << kFATAL << "method: " << theMethod << " does not exist" << Endl; @@ -653,7 +654,7 @@ TMVA::IMethod* TMVA::Factory::GetMVA( TString method ) void TMVA::Factory::TrainAllMethods( void ) { // iterates over all MVAs that have been booked, and calls their training methods - fLogger << kINFO << BC_lgreen << "training all methods..." << EC__ << Endl; + fLogger << kINFO << "training all methods..." << Endl; // if multiple MVAs if (fMultipleMVAs && !fMultipleStoredOptions ) { @@ -699,7 +700,7 @@ void TMVA::Factory::TrainAllMethods( void ) void TMVA::Factory::TestAllMethods( void ) { // iterates over all MVAs that have been booked, and calls their testing methods - fLogger << kINFO << BC_lgreen << "testing all methods..." << EC__ << Endl; + fLogger << kINFO << "testing all methods..." << Endl; // if multiple MVAs if (fMultipleMVAs && !fMultipleStoredOptions ) { @@ -730,7 +731,7 @@ void TMVA::Factory::TestAllMethods( void ) void TMVA::Factory::EvaluateAllVariables( TString options ) { // iterates over all MVA input varables and evaluates them - fLogger << kINFO << BC_lgreen << "evaluating all variables..." << EC__ << Endl; + fLogger << kINFO << "evaluating all variables..." << Endl; // if multiple MVAs if (fMultipleMVAs && !fMultipleStoredOptions ) { @@ -757,9 +758,10 @@ void TMVA::Factory::EvaluateAllVariables( TString options ) //_______________________________________________________________________ void TMVA::Factory::EvaluateAllMethods( void ) { - fLogger << kINFO << BC_lgreen << "evaluating all methods..." << EC__ << Endl; // iterates over all MVAs that have been booked, and calls their evaluation methods + fLogger << kINFO << "evaluating all methods..." << Endl; + // if multiple MVAs if (fMultipleMVAs && !fMultipleStoredOptions ) { fLogger << kINFO << "EvaluateAllMethods will be called for multiple MVAs " << Endl; diff --git a/tmva/src/GeneticANN.cxx b/tmva/src/GeneticANN.cxx index 8f90b14e3bc..00b1d7dce22 100644 --- a/tmva/src/GeneticANN.cxx +++ b/tmva/src/GeneticANN.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticANN.cxx,v 1.3 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticANN.cxx,v 1.5 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Matt Jachowski, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Matt Jachowski <jachowski@stanford.edu> - Stanford University, USA * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -44,7 +44,7 @@ TMVA::GeneticANN::GeneticANN( Int_t size, std::vector<LowHigh_t*> ranges, TMVA:: { // constructor fMethodMLP = methodMLP; -} +} //_______________________________________________________________________ Double_t TMVA::GeneticANN::FitnessFunction( const std::vector<Double_t>& parameters ) diff --git a/tmva/src/GeneticBase.cxx b/tmva/src/GeneticBase.cxx index b2a1a92ed91..b713c800269 100644 --- a/tmva/src/GeneticBase.cxx +++ b/tmva/src/GeneticBase.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticBase.cxx,v 1.17 2006/10/17 21:22:29 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticBase.cxx,v 1.19 2006/11/16 22:51:58 helgevoss Exp $ // Author: Peter Speckmayer /********************************************************************************** @@ -16,7 +16,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -278,5 +278,7 @@ Bool_t TMVA::GeneticBase::HasConverged( Int_t steps, Double_t improvement ) //_______________________________________________________________________ void TMVA::GeneticBase::Finalize() -{} +{ + // nothing so far... +} diff --git a/tmva/src/GeneticCuts.cxx b/tmva/src/GeneticCuts.cxx index a6b1b579dee..a9d5852b4a5 100644 --- a/tmva/src/GeneticCuts.cxx +++ b/tmva/src/GeneticCuts.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticCuts.cxx,v 1.15 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticCuts.cxx,v 1.17 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Matt Jachowski, Peter Speckmayer, Helge Voss, Kai Voss /********************************************************************************** @@ -14,13 +14,13 @@ * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Matt Jachowski <jachowski@stanford.edu> - Stanford University, USA * * Peter Speckmayer <speckmay@mail.cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -47,7 +47,7 @@ TMVA::GeneticCuts::GeneticCuts( Int_t size, std::vector<LowHigh_t*> ranges, { // constructor fMethodCuts = methodCuts; -} +} //_______________________________________________________________________ Double_t TMVA::GeneticCuts::FitnessFunction( const std::vector<Double_t>& parameters ) diff --git a/tmva/src/GeneticGenes.cxx b/tmva/src/GeneticGenes.cxx index d92d30af95f..6cff6dbd73d 100644 --- a/tmva/src/GeneticGenes.cxx +++ b/tmva/src/GeneticGenes.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticGenes.cxx,v 1.12 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticGenes.cxx,v 1.13 2006/11/16 22:51:58 helgevoss Exp $ // Author: Peter Speckmayer /********************************************************************************** @@ -16,7 +16,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/GeneticPopulation.cxx b/tmva/src/GeneticPopulation.cxx index 7b559c1aa0d..dacf2ca7040 100644 --- a/tmva/src/GeneticPopulation.cxx +++ b/tmva/src/GeneticPopulation.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticPopulation.cxx,v 1.17 2006/10/17 21:22:29 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticPopulation.cxx,v 1.18 2006/11/16 22:51:58 helgevoss Exp $ // Author: Peter Speckmayer /********************************************************************************** @@ -16,7 +16,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/GeneticRange.cxx b/tmva/src/GeneticRange.cxx index e4a06eb820e..2d4d9ba6dc4 100644 --- a/tmva/src/GeneticRange.cxx +++ b/tmva/src/GeneticRange.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GeneticRange.cxx,v 1.11 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GeneticRange.cxx,v 1.12 2006/11/16 22:51:58 helgevoss Exp $ // Author: Peter Speckmayer /********************************************************************************** @@ -16,7 +16,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -24,7 +24,7 @@ * (http://tmva.sourceforge.net/LICENSE) * * * * File and Version Information: * - * $Id: GeneticRange.cxx,v 1.11 2006/10/10 17:43:51 andreas.hoecker Exp $ + * $Id: GeneticRange.cxx,v 1.12 2006/11/16 22:51:58 helgevoss Exp $ **********************************************************************************/ //_______________________________________________________________________ diff --git a/tmva/src/GiniIndex.cxx b/tmva/src/GiniIndex.cxx index e02d42bb19e..eda0ffe37f0 100644 --- a/tmva/src/GiniIndex.cxx +++ b/tmva/src/GiniIndex.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: GiniIndex.cxx,v 1.10 2006/10/10 17:43:51 andreas.hoecker Exp $ +// @(#)root/tmva $Id: GiniIndex.cxx,v 1.11 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -22,7 +22,7 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. 
of Victoria, Canada * * * * Copyright (c) 2005: * diff --git a/tmva/src/MethodANNBase.cxx b/tmva/src/MethodANNBase.cxx index 52442f15c90..1a8240c4242 100644 --- a/tmva/src/MethodANNBase.cxx +++ b/tmva/src/MethodANNBase.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodANNBase.cxx,v 1.47 2006/11/14 23:02:57 stelzer Exp $ +// @(#)root/tmva $Id: MethodANNBase.cxx,v 1.49 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Matt Jachowski /********************************************************************************** @@ -74,12 +74,12 @@ TMVA::MethodANNBase::MethodANNBase( TString jobName, TString methodTitle, DataSe TString theOption, TDirectory* theTargetDir ) : TMVA::MethodBase( jobName, methodTitle, theData, theOption, theTargetDir ) { - /* Note: Right now it is an option to choose the neuron input function, - but only the input function "sum" leads to weight convergence -- - otherwise the weights go to nan and lead to an ABORT. - */ - // standard constructor + // Note: Right now it is an option to choose the neuron input function, + // but only the input function "sum" leads to weight convergence -- + // otherwise the weights go to nan and lead to an ABORT. + + InitANNBase(); DeclareOptions(); @@ -90,15 +90,26 @@ TMVA::MethodANNBase::MethodANNBase( DataSet & theData, TString theWeightFile, TDirectory* theTargetDir ) : TMVA::MethodBase( theData, theWeightFile, theTargetDir ) { - // weight file constructor + // construct the Method from the weight file InitANNBase(); DeclareOptions(); } +//______________________________________________________________________________ void TMVA::MethodANNBase::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // here the options valid for ALL MVA methods are declared. 
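As a usage pointer, the option keys declared just below (NCycles, Normalize, HiddenLayers, NeuronType) are typically combined into a single booking string. The sketch below assumes the usual colon-separated "key=value" option syntax and a previously created TMVA::Factory object; the helper name bookExampleMLP, the pointer name factory, and the chosen values are examples only.

// Illustrative booking of an MLP using the option keys declared below; assumes a
// Factory created elsewhere and the colon-separated option syntax. Include the
// TMVA Factory and Types headers as appropriate for your ROOT version.
void bookExampleMLP( TMVA::Factory* factory )
{
   factory->BookMethod( TMVA::Types::kMLP, "MLP",
                        "NCycles=200:HiddenLayers=N-1,N-2:NeuronType=sigmoid" );
}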
+ // know options: NCycles=xx :the number of training cycles + // Normalize=kTRUE,kFALSe :if normalised in put variables should be used + // HiddenLayser="N-1,N-2" :the specification of the hidden layers + // NeuronType=sigmoid,tanh,radial,linar : the type of activation function + // used at the neuronn + // + + DeclareOptionRef(fNcycles=3000,"NCycles","Number of training cycles"); DeclareOptionRef(fNormalize=kTRUE, "Normalize", "Normalize input variables"); DeclareOptionRef(fLayerSpec="N-1,N-2","HiddenLayers","Specification of the hidden layers"); @@ -121,8 +132,11 @@ void TMVA::MethodANNBase::DeclareOptions() delete names; } +//______________________________________________________________________________ void TMVA::MethodANNBase::ProcessOptions() { + // decode the options in the option string + MethodBase::ProcessOptions(); vector<Int_t>* layout = ParseLayoutString(fLayerSpec); @@ -585,7 +599,7 @@ const TMVA::Ranking* TMVA::MethodANNBase::CreateRanking() // figure out average value of variable i Double_t meanS, meanB, rmsS, rmsB, xmin, xmax; - Statistics( TMVA::Types::kTrain, varName, + Statistics( TMVA::Types::kTraining, varName, meanS, meanB, rmsS, rmsB, xmin, xmax ); avgVal = (meanS + meanB) / 2.0; // change this into a real weighted average diff --git a/tmva/src/MethodBDT.cxx b/tmva/src/MethodBDT.cxx index 03496cdafdf..6a79f86e218 100644 --- a/tmva/src/MethodBDT.cxx +++ b/tmva/src/MethodBDT.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodBDT.cxx,v 1.58 2006/11/14 23:02:57 stelzer Exp $ +// @(#)root/tmva $Id: MethodBDT.cxx,v 1.64 2006/11/17 00:21:35 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -113,25 +113,27 @@ TMVA::MethodBDT::MethodBDT( TString jobName, TString methodTitle, DataSet& theDa // the standard constructor for the "boosted decision trees" // // MethodBDT (Boosted Decision Trees) options: - // nTrees: number of trees in the forest to be created - // BoostType: the boosting type for the trees in the forest (AdaBoost e.t.c..) + // know options: + // nTrees=Int_t: number of trees in the forest to be created + // BoostType= the boosting type for the trees in the forest (AdaBoost e.t.c..) 
+ // known: AdaBoost + // Bagging // SeparationType the separation criterion applied in the node splitting + // known: GiniIndex + // MisClassificationError + // CrossEntropy + // SDivSqrtSPlusB // nEventsMin: the minimum number of events in a node (leaf criteria, stop splitting) - // nCuts: the number of steps in the optimisation of the cut for a node + // nCuts: the number of steps in the optimisation of the cut for a node // UseYesNoLeaf decide if the classification is done simply by the node type, or the S/B // (from the training) in the leaf node // UseWeightedTrees use average classification from the trees, or have the individual trees // trees in the forest weighted (e.g. log(boostweight) from AdaBoost - // PruneMethod The Pruning method: Expected Error or Cost Complexity"); + // PruneMethod The Pruning method: + // known: ExpectedError + // CostComplexity + // CostComplexity2 // PruneStrength a parameter to adjust the amount of pruning. Should be large enouth such that overtraining is avoided"); - // - // known SeparationTypes are: - // - MisClassificationError - // - GiniIndex - // - CrossEntropy - // known BoostTypes are: - // - AdaBoost - // - Bagging InitBDT(); // sets default values DeclareOptions(); @@ -153,13 +155,17 @@ TMVA::MethodBDT::MethodBDT( TString jobName, TString methodTitle, DataSet& theDa //book monitoring histograms (currently for AdaBost, only) fBoostWeightHist = new TH1F("fBoostWeight","Ada Boost weights",100,1,100); - fErrFractHist = new TH2F("fErrFractHist","error fraction vs tree number", - fNTrees,0,fNTrees,50,0,0.5); + fBoostWeightVsTree = new TH1F("fBoostWeightVsTree","Ada Boost weights",fNTrees,0,fNTrees); + + fErrFractHist = new TH1F("fErrFractHist","error fraction vs tree number",fNTrees,0,fNTrees); + + fNodesBeforePruningVsTree = new TH1I("fNodesBeforePruning","nodes before pruning",fNTrees,0,fNTrees); + fNodesAfterPruningVsTree = new TH1I("fNodesAfterPruning","nodes after pruning",fNTrees,0,fNTrees); + fMonitorNtuple= new TTree("fMonitorNtuple","BDT variables"); fMonitorNtuple->Branch("iTree",&fITree,"iTree/I"); fMonitorNtuple->Branch("boostWeight",&fBoostWeight,"boostWeight/D"); fMonitorNtuple->Branch("errorFraction",&fErrorFraction,"errorFraction/D"); - fMonitorNtuple->Branch("nNodes",&fNnodes,"nNodes/I"); } //_______________________________________________________________________ @@ -180,6 +186,29 @@ TMVA::MethodBDT::MethodBDT( DataSet& theData, //_______________________________________________________________________ void TMVA::MethodBDT::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // know options: + // nTrees=Int_t: number of trees in the forest to be created + // BoostType= the boosting type for the trees in the forest (AdaBoost e.t.c..) + // known: AdaBoost + // Bagging + // SeparationType the separation criterion applied in the node splitting + // known: GiniIndex + // MisClassificationError + // CrossEntropy + // SDivSqrtSPlusB + // nEventsMin: the minimum number of events in a node (leaf criteria, stop splitting) + // nCuts: the number of steps in the optimisation of the cut for a node + // UseYesNoLeaf decide if the classification is done simply by the node type, or the S/B + // (from the training) in the leaf node + // UseWeightedTrees use average classification from the trees, or have the individual trees + // trees in the forest weighted (e.g. 
log(boostweight) from AdaBoost + // PruneMethod The Pruning method: + // known: ExpectedError + // CostComplexity + // CostComplexity2 + // PruneStrength a parameter to adjust the amount of pruning. Should be large enouth such that overtraining is avoided"); + DeclareOptionRef(fNTrees, "NTrees", "number of trees in the forest"); DeclareOptionRef(fBoostType, "BoostType", "boosting type for the trees in the forest"); AddPreDefVal(TString("AdaBoost")); @@ -203,6 +232,8 @@ void TMVA::MethodBDT::DeclareOptions() //_______________________________________________________________________ void TMVA::MethodBDT::ProcessOptions() { + // the option string is decoded, for available options see "DeclareOptions" + MethodBase::ProcessOptions(); fSepTypeS.ToLower(); @@ -234,12 +265,12 @@ void TMVA::MethodBDT::InitBDT( void ) { // common initialisation with defaults for the BDT-Method SetMethodName( "BDT" ); - SetMethodType( TMVA::Types::BDT ); + SetMethodType( TMVA::Types::kBDT ); SetTestvarName(); fNTrees = 200; fBoostType = "AdaBoost"; - fNodeMinEvents = 5; + fNodeMinEvents = 10; fNCuts = 20; fPruneMethod = TMVA::DecisionTree::kMCC; fPruneStrength = 5; // means automatic determination of the prune strength using a validation sample @@ -294,6 +325,9 @@ void TMVA::MethodBDT::Train( void ) fLogger << kINFO << "will train "<< fNTrees << " Decision Trees ... patience please" << Endl; TMVA::Timer timer( fNTrees, GetName() ); + Int_t nNodesBeforePruningCount = 0; + Int_t nNodesAfterPruningCount = 0; + Int_t nNodesBeforePruning = 0; Int_t nNodesAfterPruning = 0; @@ -312,12 +346,12 @@ void TMVA::MethodBDT::Train( void ) // // std::vector<Event*> sample; // for (std::vector<Event*>::iterator iev=fEventSample.begin(); - // iev != fEventSample.end(); iev++){ - // if ((*iev)->GetWeight() > 0.1) sample.push_back(*iev); + // iev != fEventSample.end(); iev++){ + // if ((*iev)->GetWeight() > 0.1) sample.push_back(*iev); // } - // fNnodes = fForest.back()->BuildTree(sample); + // nNodesBeforePruning = fForest.back()->BuildTree(sample); - fNnodes = fForest.back()->BuildTree(fEventSample); + nNodesBeforePruning = fForest.back()->BuildTree(fEventSample); if (itree==1 && fgDebugLevel==1){ //plot Cost Complexity versus #Nodes for increasing pruning strengths @@ -377,7 +411,8 @@ void TMVA::MethodBDT::Train( void ) } - nNodesBeforePruning +=fNnodes; + nNodesBeforePruningCount +=nNodesBeforePruning; + fNodesBeforePruningVsTree->SetBinContent(itree+1,nNodesBeforePruning); fBoostWeights.push_back( this->Boost(fEventSample, fForest.back(), itree) ); fITree = itree; @@ -403,15 +438,16 @@ void TMVA::MethodBDT::Train( void ) fForest[itree]->SetPruneStrength(fPruneStrength); fForest[itree]->PruneTree(); } - fNnodes = fForest[itree]->GetNNodes(); - nNodesAfterPruning +=fNnodes; + nNodesAfterPruning = fForest[itree]->GetNNodes(); + nNodesAfterPruningCount += nNodesAfterPruning; + fNodesAfterPruningVsTree->SetBinContent(itree+1,nNodesAfterPruning); alpha->SetBinContent(itree+1,fPruneStrength); } alpha->Write(); fLogger << kINFO << "<Train> average number of nodes before/after pruning : " - << nNodesBeforePruning/fNTrees << " / " - << nNodesAfterPruning/fNTrees + << nNodesBeforePruningCount/fNTrees << " / " + << nNodesAfterPruningCount/fNTrees << Endl; // get elapsed time @@ -474,7 +510,7 @@ Double_t TMVA::MethodBDT::PruneTree( TMVA::DecisionTree *dt, Int_t itree) << " if that stitill didn't work, TRY IT BY HAND" << " currently Prunestrenght= " << alpha << " stepsize " << fDeltaPruneStrength << " " << Endl; - troubleCount = 0; // try again + 
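As an aside for users, the BDT option keys documented above can likewise be combined into one booking string. The sketch below uses the default-like values that appear in InitBDT (NTrees=200, nEventsMin=10, nCuts=20, PruneStrength=5) purely as an illustration; it assumes the colon-separated option syntax and a previously created TMVA::Factory object named factory, and the helper name bookExampleBDT is invented.

// Illustrative BDT booking with the option keys documented above; assumes a Factory
// created elsewhere and the colon-separated option syntax. Include the TMVA Factory
// and Types headers as appropriate for your ROOT version.
void bookExampleBDT( TMVA::Factory* factory )
{
   factory->BookMethod( TMVA::Types::kBDT, "BDT",
                        "NTrees=200:BoostType=AdaBoost:SeparationType=GiniIndex:"
                        "nEventsMin=10:nCuts=20:PruneMethod=CostComplexity:PruneStrength=5" );
}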
troubleCount = 0; // try again fPruneStrength = 3; // if it was for the first time.. }else{ forceStop=kTRUE; @@ -626,7 +662,10 @@ Double_t TMVA::MethodBDT::AdaBoost( vector<TMVA::Event*> eventSample, TMVA::Deci } fBoostWeightHist->Fill(boostWeight); - fErrFractHist->Fill(fForest.size(),err); + + fBoostWeightVsTree->SetBinContent(fForest.size(),boostWeight); + + fErrFractHist->SetBinContent(fForest.size(),err); fBoostWeight = boostWeight; fErrorFraction = err; @@ -738,7 +777,10 @@ void TMVA::MethodBDT::WriteMonitoringHistosToFile( void ) const BaseDir()->cd(); fBoostWeightHist->Write(); + fBoostWeightVsTree->Write(); fErrFractHist->Write(); + fNodesBeforePruningVsTree->Write(); + fNodesAfterPruningVsTree->Write(); fMonitorNtuple->Write(); // (*fForest.begin())->DrawTree("ExampleTree")->Write(); diff --git a/tmva/src/MethodBase.cxx b/tmva/src/MethodBase.cxx index 5f228c1068f..31c521e6a68 100644 --- a/tmva/src/MethodBase.cxx +++ b/tmva/src/MethodBase.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodBase.cxx,v 1.86 2006/11/15 00:20:32 stelzer Exp $ +// @(#)root/tmva $Id: MethodBase.cxx,v 1.92 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -27,7 +27,7 @@ * (http://tmva.sourceforge.net/LICENSE) * * * * File and Version Information: * - * $Id: MethodBase.cxx,v 1.86 2006/11/15 00:20:32 stelzer Exp $ + * $Id: MethodBase.cxx,v 1.92 2006/11/17 14:59:23 stelzer Exp $ **********************************************************************************/ //_______________________________________________________________________ @@ -385,6 +385,17 @@ void TMVA::MethodBase::Init() //_______________________________________________________________________ void TMVA::MethodBase::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // here the options valid for ALL MVA methods are declared. + // know options: Preprocess=None,Decorrelated,PCA to use decorrelated variables + // instead of the original ones + // PreprocessType=Signal,Background which decorrelation matrix to use + // in the method. 
Only the Likelihood + // Method can make proper use of independent + // transformations of signal and background + // V for Verbose output (!V) for non verbos + // H for Help + DeclareOptionRef(fUseDecorr, "D", "use-decorrelated-variables flag (for backward compatibility)"); DeclareOptionRef(fPreprocessingString="None", "Preprocess", "Variable Decorrelation Method"); @@ -403,6 +414,8 @@ void TMVA::MethodBase::DeclareOptions() //_______________________________________________________________________ void TMVA::MethodBase::ProcessOptions() { + // the option string is decoded, for availabel options see "DeclareOptions" + if (fPreprocessingString == "None") fPreprocessingMethod = Types::kNone; else if (fPreprocessingString == "Decorrelate" ) fPreprocessingMethod = Types::kDecorrelated; else if (fPreprocessingString == "PCA" ) fPreprocessingMethod = Types::kPCA; @@ -425,7 +438,7 @@ void TMVA::MethodBase::ProcessOptions() if( GetPreprocessingMethod() == Types::kDecorrelated ) { - Types::PreprocessingMethod c = Types::kDecorrelated; + Types::EPreprocessingMethod c = Types::kDecorrelated; Data().EnablePreprocess(Types::kDecorrelated); if( Data().Preprocess(Types::kDecorrelated) ) { // local copy of the variable ranges @@ -438,7 +451,7 @@ void TMVA::MethodBase::ProcessOptions() } if( GetPreprocessingMethod() == Types::kPCA ) { - Types::PreprocessingMethod c = Types::kPCA; + Types::EPreprocessingMethod c = Types::kPCA; Data().EnablePreprocess(Types::kPCA); if( Data().Preprocess(Types::kPCA) ) { // local copy of the variable ranges @@ -467,6 +480,9 @@ void TMVA::MethodBase::TrainMethod() //_______________________________________________________________________ void TMVA::MethodBase::WriteStateToStream(std::ostream& o) const { + // general method used in writing the header of the weight files where + // the used variables, preprocessing type etc. 
is specified + o << "#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-" << endl << endl; o << "Method : " << GetMethodName() << endl; o << "Creator: " << gSystem->GetUserInfo()->fUser << endl; @@ -547,8 +563,11 @@ void TMVA::MethodBase::ReadStateFromFile() } } +//_______________________________________________________________________ void TMVA::MethodBase::ReadStateFromStream( std::istream& fin ) { + // read the header from the weight files of the different MVA methods + char buf[512]; // first read the method name @@ -595,7 +614,7 @@ void TMVA::MethodBase::ReadStateFromStream( std::istream& fin ) if (0 != fXmaxNorm[corr]) delete fXmaxNorm[corr]; fXminNorm[corr] = new Double_t[Data().GetNVariables()]; fXmaxNorm[corr] = new Double_t[Data().GetNVariables()]; - Types::PreprocessingMethod c = (Types::PreprocessingMethod) corr; + Types::EPreprocessingMethod c = (Types::EPreprocessingMethod) corr; for(UInt_t ivar=0; ivar<Data().GetNVariables(); ivar++) { SetXmin(ivar, Data().GetXmin(ivar, c), c); SetXmax(ivar, Data().GetXmax(ivar, c), c); @@ -614,6 +633,7 @@ void TMVA::MethodBase::ReadStateFromStream( std::istream& fin ) //_______________________________________________________________________ Double_t TMVA::MethodBase::GetEventValNormalized(Int_t ivar) const { + // return the normalized event variable (normalized to interval [0,1] return Tools::NormVariable( Data().Event().GetVal(ivar), GetXmin(ivar, GetPreprocessingMethod()), GetXmax(ivar, GetPreprocessingMethod())); @@ -622,6 +642,9 @@ Double_t TMVA::MethodBase::GetEventValNormalized(Int_t ivar) const //_______________________________________________________________________ TDirectory * TMVA::MethodBase::BaseDir( void ) const { + // returns the ROOT directory where info/histograms etc of the + // corresponding MVA method are stored + if (fBaseDir != 0) return fBaseDir; TDirectory* dir = 0; @@ -961,6 +984,7 @@ Double_t TMVA::MethodBase::GetEfficiency( TString theString, TTree *theTree ) return 0.5*(effS + effS_); // the mean between bin above and bin below } +//_______________________________________________________________________ Double_t TMVA::MethodBase::GetTrainingEfficiency( TString theString) { // fill background efficiency (resp. rejection) versus signal efficiency plots @@ -1018,7 +1042,7 @@ Double_t TMVA::MethodBase::GetTrainingEfficiency( TString theString) ReadTrainingEvent(ievt); TH1* theHist = (Data().Event().IsSignal() ? fTrainEffS : fTrainEffB); - + Double_t theVal = this->GetMvaValue(); for (Int_t bin=1; bin<=fNbinsH; bin++) @@ -1285,14 +1309,19 @@ Double_t TMVA::MethodBase::GetmuTransform( TTree *theTree ) return intS; // return average mu-transform for signal } -void TMVA::MethodBase::Statistics( TMVA::Types::TreeType treeType, const TString& theVarName, +//_______________________________________________________________________ +void TMVA::MethodBase::Statistics( TMVA::Types::ETreeType treeType, const TString& theVarName, Double_t& meanS, Double_t& meanB, Double_t& rmsS, Double_t& rmsB, Double_t& xmin, Double_t& xmax, Bool_t norm ) { - Long64_t entries = ( (treeType == TMVA::Types::kTest ) ? Data().GetNEvtTest() : - (treeType == TMVA::Types::kTrain) ? Data().GetNEvtTrain() : -1 ); + // calculates rms,mean, xmin, xmax of the event variable + // this can be either done for the variables as they are or for + // normalised variables (in the range of 0-1) if "norm" is set to kTRUE + + Long64_t entries = ( (treeType == TMVA::Types::kTesting ) ? Data().GetNEvtTest() : + (treeType == TMVA::Types::kTraining) ? 
Data().GetNEvtTrain() : -1 ); // sanity check if (entries <=0) @@ -1312,8 +1341,10 @@ void TMVA::MethodBase::Statistics( TMVA::Types::TreeType treeType, const TString // loop over all training events for (Int_t i = 0; i < entries; i++) { - if (treeType == TMVA::Types::kTest ) ReadTestEvent(i); - else ReadTrainingEvent(i); + if (treeType == TMVA::Types::kTesting ) + ReadTestEvent(i); + else + ReadTrainingEvent(i); Double_t theVar = (norm) ? GetEventValNormalized(varIndex) : GetEventVal(varIndex); @@ -1395,6 +1426,8 @@ Double_t TMVA::MethodBase::GetEffForRoot( Double_t theCut ) //______________________________________________________________________ void TMVA::MethodBase::PrintOptions() const { + // prints out the options set in the options string and the defaults + fLogger << kINFO << "the following options are set:" << Endl; TListIter optIt( & ListOfOptions() ); fLogger << kINFO << "by User:" << Endl; @@ -1413,6 +1446,8 @@ void TMVA::MethodBase::PrintOptions() const //______________________________________________________________________ void TMVA::MethodBase::WriteOptionsToStream(ostream& o) const { + // write options to output stream (e.g. in writing the MVA weight files + TListIter optIt( & ListOfOptions() ); o << "# Set by User:" << endl; while (OptionBase * opt = (OptionBase *) optIt()) if (opt->IsSet()) { opt->Print(o); o << endl; } @@ -1425,6 +1460,8 @@ void TMVA::MethodBase::WriteOptionsToStream(ostream& o) const //______________________________________________________________________ void TMVA::MethodBase::ReadOptionsFromStream(istream& istr) { + // read option back from the weight file + fOptions = ""; char buf[512]; istr.getline(buf,512); diff --git a/tmva/src/MethodBayesClassifier.cxx b/tmva/src/MethodBayesClassifier.cxx index fecc2a97735..bf2f5ad89ac 100644 --- a/tmva/src/MethodBayesClassifier.cxx +++ b/tmva/src/MethodBayesClassifier.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodBayesClassifier.cxx,v 1.2 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodBayesClassifier.cxx,v 1.5 2006/11/17 00:21:35 stelzer Exp $ // Author: Marcin .... /********************************************************************************** @@ -17,7 +17,7 @@ * University of Houston, * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -60,15 +60,20 @@ void TMVA::MethodBayesClassifier::InitBayesClassifier( void ) { // default initialisation SetMethodName( "BayesClassifier" ); - SetMethodType( TMVA::Types::BayesClassifier ); + SetMethodType( TMVA::Types::kBayesClassifier ); SetTestvarName(); } +//_______________________________________________________________________ void TMVA::MethodBayesClassifier::DeclareOptions() -{} +{ + // define the options (their key words) that can be set in the option string +} +//_______________________________________________________________________ void TMVA::MethodBayesClassifier::ProcessOptions() { + // the option string is decoded, for availabel options see "DeclareOptions" MethodBase::ProcessOptions(); } @@ -90,12 +95,14 @@ void TMVA::MethodBayesClassifier::Train( void ) //_______________________________________________________________________ void TMVA::MethodBayesClassifier::WriteWeightsToStream( ostream & o ) const { + // write the weight from the training to a file (stream) o << "whatever" << endl; } //_______________________________________________________________________ void TMVA::MethodBayesClassifier::ReadWeightsFromStream( istream & istr ) { + // read back the training results from a file (stream) if (istr.eof()); } diff --git a/tmva/src/MethodCFMlpANN.cxx b/tmva/src/MethodCFMlpANN.cxx index 847f9754aa6..625f29bf6f8 100644 --- a/tmva/src/MethodCFMlpANN.cxx +++ b/tmva/src/MethodCFMlpANN.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodCFMlpANN.cxx,v 1.32 2006/11/14 23:02:57 stelzer Exp $ +// @(#)root/tmva $Id: MethodCFMlpANN.cxx,v 1.35 2006/11/17 00:21:35 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -168,6 +168,10 @@ TMVA::MethodCFMlpANN::MethodCFMlpANN( DataSet & theData, //_______________________________________________________________________ void TMVA::MethodCFMlpANN::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // know options: NCycles=xx :the number of training cycles + // HiddenLayser="N-1,N-2" :the specification of the hidden layers + DeclareOptionRef(fNcycles=3000,"NCycles","Number of training cycles"); DeclareOptionRef(fLayerSpec="N-1,N-2","HiddenLayers","Specification of the hidden layers"); } @@ -175,6 +179,8 @@ void TMVA::MethodCFMlpANN::DeclareOptions() //_______________________________________________________________________ void TMVA::MethodCFMlpANN::ProcessOptions() { + // decode the options in the option string + MethodBase::ProcessOptions(); fNodes = new Int_t[100]; // input layer @@ -210,7 +216,7 @@ void TMVA::MethodCFMlpANN::InitCFMlpANN( void ) { // default initialisation called by all constructors SetMethodName( "CFMlpANN" ); - SetMethodType( TMVA::Types::CFMlpANN ); + SetMethodType( TMVA::Types::kCFMlpANN ); SetTestvarName(); // initialize all pointers @@ -381,7 +387,7 @@ void TMVA::MethodCFMlpANN::WriteWeightsToStream( std::ostream & o ) const //_______________________________________________________________________ void TMVA::MethodCFMlpANN::ReadWeightsFromStream( istream & istr ) { - + // read back the weight from the training from file (stream) TString var; // read number of variables and classes diff --git a/tmva/src/MethodCFMlpANN_Utils.cxx b/tmva/src/MethodCFMlpANN_Utils.cxx index 9130aeb4eb8..222c3b1b99c 100644 --- a/tmva/src/MethodCFMlpANN_Utils.cxx +++ b/tmva/src/MethodCFMlpANN_Utils.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodCFMlpANN_Utils.cxx,v 1.14 2006/11/14 23:02:57 stelzer Exp $ +// @(#)root/tmva $Id: MethodCFMlpANN_Utils.cxx,v 1.15 2006/11/16 22:51:58 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -31,13 +31,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -45,7 +45,7 @@ * (http://tmva.sourceforge.net/LICENSE) * * * * File and Version Information: * - * $Id: MethodCFMlpANN_Utils.cxx,v 1.14 2006/11/14 23:02:57 stelzer Exp $ + * $Id: MethodCFMlpANN_Utils.cxx,v 1.15 2006/11/16 22:51:58 helgevoss Exp $ **********************************************************************************/ //_______________________________________________________________________ diff --git a/tmva/src/MethodCommittee.cxx b/tmva/src/MethodCommittee.cxx index 821c06ba4ca..18b5df39b93 100644 --- a/tmva/src/MethodCommittee.cxx +++ b/tmva/src/MethodCommittee.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodCommittee.cxx,v 1.10 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodCommittee.cxx,v 1.14 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -63,12 +63,13 @@ ClassImp(TMVA::MethodCommittee) //_______________________________________________________________________ TMVA::MethodCommittee::MethodCommittee( TString jobName, TString committeeTitle, DataSet& theData, TString committeeOptions, - Types::MVA method, TString methodOptions, + Types::EMVA method, TString methodOptions, TDirectory* theTargetDir ) : TMVA::MethodBase( jobName, committeeTitle, theData, committeeOptions, theTargetDir ), fMemberType( method ), fMemberOption( methodOptions ) { + // constructor InitCommittee(); // sets default values DeclareOptions(); @@ -105,6 +106,16 @@ TMVA::MethodCommittee::MethodCommittee( DataSet& theData, //_______________________________________________________________________ void TMVA::MethodCommittee::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // know options: + // NMembers <string> number of members in the committee + // UseMemberDecision <bool> use signal information from event (otherwise assume signal) + // UseWeightedMembers <bool> use weighted trees or simple average in classification from the forest + // + // BoostType <string> boosting type + // available values are: AdaBoost <default> + // Bagging + DeclareOptionRef(fNMembers, "NMembers", "number of members in the committee"); DeclareOptionRef(fUseMemberDecision=kFALSE, "UseMemberDecision", "use binary information from IsSignal"); DeclareOptionRef(fUseWeightedMembers=kTRUE, "UseWeightedMembers", "use weighted trees or simple average in classification from the forest"); @@ -117,6 +128,7 @@ void TMVA::MethodCommittee::DeclareOptions() //_______________________________________________________________________ void TMVA::MethodCommittee::ProcessOptions() { + // process user options MethodBase::ProcessOptions(); } @@ -125,7 +137,7 @@ void TMVA::MethodCommittee::InitCommittee( void ) { // common 
initialisation with defaults for the Committee-Method SetMethodName( "Committee" ); - SetMethodType( TMVA::Types::Committee ); + SetMethodType( TMVA::Types::kCommittee ); SetTestvarName(); fNMembers = 100; @@ -179,29 +191,29 @@ void TMVA::MethodCommittee::Train( void ) // initialize methods switch(fMemberType) { - case TMVA::Types::Cuts: + case TMVA::Types::kCuts: method = new TMVA::MethodCuts ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::Fisher: + case TMVA::Types::kFisher: method = new TMVA::MethodFisher ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::MLP: + case TMVA::Types::kMLP: method = new TMVA::MethodMLP ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::TMlpANN: + case TMVA::Types::kTMlpANN: method = new TMVA::MethodTMlpANN ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::CFMlpANN: + case TMVA::Types::kCFMlpANN: method = new TMVA::MethodCFMlpANN ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::Likelihood: + case TMVA::Types::kLikelihood: method = new TMVA::MethodLikelihood ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::HMatrix: + case TMVA::Types::kHMatrix: method = new TMVA::MethodHMatrix ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::PDERS: + case TMVA::Types::kPDERS: method = new TMVA::MethodPDERS ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::BDT: + case TMVA::Types::kBDT: method = new TMVA::MethodBDT ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::SVM: + case TMVA::Types::kSVM: method = new TMVA::MethodSVM ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::RuleFit: + case TMVA::Types::kRuleFit: method = new TMVA::MethodRuleFit ( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; - case TMVA::Types::BayesClassifier: + case TMVA::Types::kBayesClassifier: method = new TMVA::MethodBayesClassifier( GetJobName(), GetMethodTitle(), Data(), fMemberOption ); break; default: fLogger << kFATAL << "method: " << fMemberType << " does not exist" << Endl; @@ -366,6 +378,7 @@ Double_t TMVA::MethodCommittee::Bagging( UInt_t imember ) //_______________________________________________________________________ void TMVA::MethodCommittee::WriteWeightsToStream( ostream& o ) const { + // write the state of the method to an output stream for (UInt_t imember=0; imember<GetCommittee().size(); imember++) { o << endl; o << "------------------------------ new member: " << imember << " ---------------" << endl; @@ -377,6 +390,8 @@ void TMVA::MethodCommittee::WriteWeightsToStream( ostream& o ) const //_______________________________________________________________________ void TMVA::MethodCommittee::ReadWeightsFromStream( istream& istr ) { + // read the state of the method from an input stream + // explicitly destroy objects in vector std::vector<IMethod*>::iterator member = GetCommittee().begin(); for (; member != GetCommittee().end(); member++) delete *member; @@ -403,29 +418,29 @@ void TMVA::MethodCommittee::ReadWeightsFromStream( istream& istr ) // initialize methods switch(fMemberType) { - case TMVA::Types::Cuts: + case TMVA::Types::kCuts: method = new TMVA::MethodCuts ( Data(), "" ); break; - case TMVA::Types::Fisher: + case TMVA::Types::kFisher: method = new TMVA::MethodFisher ( Data(), "" ); break; - case 
TMVA::Types::MLP: + case TMVA::Types::kMLP: method = new TMVA::MethodMLP ( Data(), "" ); break; - case TMVA::Types::TMlpANN: + case TMVA::Types::kTMlpANN: method = new TMVA::MethodTMlpANN ( Data(), "" ); break; - case TMVA::Types::CFMlpANN: + case TMVA::Types::kCFMlpANN: method = new TMVA::MethodCFMlpANN ( Data(), "" ); break; - case TMVA::Types::Likelihood: + case TMVA::Types::kLikelihood: method = new TMVA::MethodLikelihood ( Data(), "" ); break; - case TMVA::Types::HMatrix: + case TMVA::Types::kHMatrix: method = new TMVA::MethodHMatrix ( Data(), "" ); break; - case TMVA::Types::PDERS: + case TMVA::Types::kPDERS: method = new TMVA::MethodPDERS ( Data(), "" ); break; - case TMVA::Types::BDT: + case TMVA::Types::kBDT: method = new TMVA::MethodBDT ( Data(), "" ); break; - case TMVA::Types::SVM: + case TMVA::Types::kSVM: method = new TMVA::MethodSVM ( Data(), "" ); break; - case TMVA::Types::RuleFit: + case TMVA::Types::kRuleFit: method = new TMVA::MethodRuleFit ( Data(), "" ); break; - case TMVA::Types::BayesClassifier: + case TMVA::Types::kBayesClassifier: method = new TMVA::MethodBayesClassifier( Data(), "" ); break; default: fLogger << kFATAL << "<ReadWeightsFromStream> fatal error: method: " @@ -510,6 +525,7 @@ vector< Double_t > TMVA::MethodCommittee::GetVariableImportance() //_______________________________________________________________________ Double_t TMVA::MethodCommittee::GetVariableImportance(UInt_t ivar) { + // return the variable importance vector<Double_t> relativeImportance = this->GetVariableImportance(); if (ivar < (UInt_t)relativeImportance.size()) return relativeImportance[ivar]; else fLogger << kFATAL << "<GetVariableImportance> ivar = " << ivar << " is out of range " << Endl; diff --git a/tmva/src/MethodCuts.cxx b/tmva/src/MethodCuts.cxx index 33d9eea77c1..47707819508 100644 --- a/tmva/src/MethodCuts.cxx +++ b/tmva/src/MethodCuts.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodCuts.cxx,v 1.67 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodCuts.cxx,v 1.73 2006/11/17 14:59:23 stelzer Exp $ // Author: Andreas Hoecker, Matt Jachowski, Peter Speckmayer, Helge Voss, Kai Voss /********************************************************************************** @@ -15,13 +15,13 @@ * Matt Jachowski <jachowski@stanford.edu> - Stanford University, USA * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * * Peter Speckmayer <speckmay@mail.cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -192,7 +192,7 @@ void TMVA::MethodCuts::InitCuts( void ) { // default initialisation called by all constructors SetMethodName( "Cuts" ); - SetMethodType( TMVA::Types::Cuts ); + SetMethodType( TMVA::Types::kCuts ); SetTestvarName(); fConstrainType = kConstrainEffS; @@ -221,7 +221,7 @@ void TMVA::MethodCuts::InitCuts( void ) fXmax = new vector<Double_t>( GetNvar() ); // get the variable specific options, first initialize default - fFitParams = new vector<FitParameters>( GetNvar() ); + fFitParams = new vector<EFitParameters>( GetNvar() ); for (Int_t ivar=0; ivar<GetNvar(); ivar++) (*fFitParams)[ivar] = kNotEnforced; fRandom = new TRandom( 0 ); // set seed @@ -280,6 +280,47 @@ TMVA::MethodCuts::~MethodCuts( void ) //_______________________________________________________________________ void TMVA::MethodCuts::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // know options: + // Method <string> Minimization method + // available values are: MC Monte Carlo <default> + // GA Genetic Algorithm + // SA Simulated annealing + // + // EffMethod <string> Efficiency selection method + // available values are: EffSel <default> + // EffPDF + // + // MC_NRandCuts <int> Number of random cuts to estimate the efficiency for the MC method + // MC_AllVarProp <string> Property of all variables for the MC method + // available values are: AllNotEnforced <default> + // AllFMax + // AllFMin + // AllFSmart + // AllFVerySmart + // MC_Var1Prop <string> Property of variable 1 for the MC method (taking precedence over the + // globale setting. The same values as for the global option are available. 
Variables 1..10 can be + // set this way + // + // + // GA_nsteps <int> Number of steps for the genetic algorithm + // GA_cycles <int> Number of generations for the genetic algorithm + // GA_popSize <int> Size of the population for the genetic algorithm + // GA_SC_steps <int> Number of steps for the genetic algorithm + // GA_SC_offsteps <int> for the genetic algorithm + // GA_SC_factor <float> for the genetic algorithm + // + // + // SA_MaxCalls <int> maximum number of calls for simulated annealing + // SA_TemperatureGradient <float> temperature gradient for simulated annealing + // SA_UseAdaptiveTemperature <bool> use of adaptive temperature for simulated annealing + // SA_InitialTemperature <float> initial temperature for simulated annealing + // SA_MinTemperature <float> minimum temperature for simulated annealing + // SA_Eps <int> number of epochs for simulated annealing + // SA_NFunLoops <int> number of loops for simulated annealing + // SA_NEps <int> number of epochs for simulated annealing + + DeclareOptionRef(fFitMethodS="MC", "Method", "Minimization Method"); AddPreDefVal(TString("GA")); AddPreDefVal(TString("SA")); @@ -339,12 +380,13 @@ void TMVA::MethodCuts::DeclareOptions() DeclareOptionRef(fSA_MinTemperature, "SA_MinTemperature", ""); DeclareOptionRef(fSA_Eps, "SA_Eps", ""); DeclareOptionRef(fSA_NFunLoops, "SA_NFunLoops", ""); - DeclareOptionRef(fSA_NEps, "SA_NEps", ""); + DeclareOptionRef(fSA_NEps, "SA_NEps", ""); } //_______________________________________________________________________ void TMVA::MethodCuts::ProcessOptions() { + // process user options MethodBase::ProcessOptions(); if (fFitMethodS == "MC" ) fFitMethod = kUseMonteCarlo; @@ -377,7 +419,7 @@ void TMVA::MethodCuts::ProcessOptions() if (fAllVars!="AllNotEnforced") { // options are specified - FitParameters theFitP = kNotEnforced; + EFitParameters theFitP = kNotEnforced; if (fAllVars == "AllNotEnforced") theFitP = kNotEnforced; else if (fAllVars == "AllFMax" ) theFitP = kForceMax; else if (fAllVars == "AllFMin" ) theFitP = kForceMin; @@ -395,7 +437,7 @@ void TMVA::MethodCuts::ProcessOptions() int maxVar = GetNvar()<=10?GetNvar():10; for (Int_t ivar=0; ivar<maxVar; ivar++) { - FitParameters theFitP = kNotEnforced; + EFitParameters theFitP = kNotEnforced; if (fAllVarsI[ivar] == "" || fAllVarsI[ivar] == "NotEnforced") theFitP = kNotEnforced; else if (fAllVarsI[ivar] == "FMax" ) theFitP = kForceMax; else if (fAllVarsI[ivar] == "FMin" ) theFitP = kForceMin; @@ -429,9 +471,9 @@ void TMVA::MethodCuts::ProcessOptions() } // decorrelate option will be last option, if it is specified - if (GetPreprocessingMethod() == Types::kDecorrelated) + if (GetPreprocessingMethod() == Types::kDecorrelated) fLogger << kINFO << "use decorrelated variable set" << Endl; - else if (GetPreprocessingMethod() == Types::kPCA) + else if (GetPreprocessingMethod() == Types::kPCA) fLogger << kINFO << "use principal component preprocessing" << Endl; } @@ -496,7 +538,7 @@ void TMVA::MethodCuts::Train( void ) const TString& varname = Data().GetInternalVarName(ivar); - Statistics( TMVA::Types::kTrain, varname, + Statistics( TMVA::Types::kTraining, varname, (*fMeanS)[ivar], (*fMeanB)[ivar], (*fRmsS)[ivar], (*fRmsB)[ivar], (*fXmin)[ivar], (*fXmax)[ivar] ); @@ -536,7 +578,7 @@ void TMVA::MethodCuts::Train( void ) // generate random cuts for (Int_t ivar=0; ivar<GetNvar(); ivar++) { - FitParameters fitParam = (*fFitParams)[ivar]; + EFitParameters fitParam = (*fFitParams)[ivar]; if (fitParam == kForceSmart) { if ((*fMeanS)[ivar] > (*fMeanB)[ivar]) fitParam 
= kForceMax; @@ -643,13 +685,13 @@ void TMVA::MethodCuts::Train( void ) timer1.DrawProgressBar( cycle ); // ---- perform series of fits to achieve best convergence - + // "m_ga_spread" times the number of variables TMVA::GeneticCuts ga( fGA_popSize, ranges, this ); - + ga.CalculateFitness(); ga.GetGeneticPopulation().TrimPopulation(); - + do { ga.Init(); ga.CalculateFitness(); @@ -728,9 +770,9 @@ void TMVA::MethodCuts::Train( void ) if (fBinaryTreeB != 0) { delete fBinaryTreeB; fBinaryTreeB = 0; } } -void TMVA::MethodCuts::Test( TTree* theTestTree ) +void TMVA::MethodCuts::Test( TTree* ) { - if (theTestTree == 0); // dummy call + // not used } //_______________________________________________________________________ @@ -998,6 +1040,7 @@ void TMVA::MethodCuts::WriteWeightsToStream( ostream & o ) const //_______________________________________________________________________ void TMVA::MethodCuts::ReadWeightsFromStream( istream& istr ) { + // read the cuts from stream TString dummy; UInt_t dummyInt; diff --git a/tmva/src/MethodFisher.cxx b/tmva/src/MethodFisher.cxx index 127e14843df..3f061910b52 100644 --- a/tmva/src/MethodFisher.cxx +++ b/tmva/src/MethodFisher.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodFisher.cxx,v 1.53 2006/11/14 15:22:54 helgevoss Exp $ +// @(#)root/tmva $Id: MethodFisher.cxx,v 1.58 2006/11/17 00:21:35 stelzer Exp $ // Author: Andreas Hoecker, Xavier Prudent, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -17,13 +17,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -171,7 +171,7 @@ void TMVA::MethodFisher::InitFisher( void ) { // default initialisation called by all constructors SetMethodName( "Fisher" ); - SetMethodType( TMVA::Types::Fisher ); + SetMethodType( TMVA::Types::kFisher ); SetTestvarName(); fMeanMatx = 0; @@ -201,6 +201,7 @@ void TMVA::MethodFisher::DeclareOptions() void TMVA::MethodFisher::ProcessOptions() { + // process user options MethodBase::ProcessOptions(); if (fTheMethod == "Fisher" ) fFisherMethod = kFisher; @@ -410,11 +411,19 @@ void TMVA::MethodFisher::GetFisherCoeff( void ) } TMatrixD invCov( *theMat ); - if ( TMath::Abs(invCov.Determinant()) < 10E-14 ) { - fLogger << kFATAL << "<GetFisherCoeff> matrix is singular," - << " did you use the variables that are linear combinations ???" - << GetFisherMethod() << Endl; - } + if ( TMath::Abs(invCov.Determinant()) < 10E-24 ) { + fLogger << kWARNING << "<GetFisherCoeff> matrix is almost singular with deterninant=" + << TMath::Abs(invCov.Determinant()) + << " did you use the variables that are linear combinations or highly correlated ???" + << Endl; + } + if ( TMath::Abs(invCov.Determinant()) < 10E-120 ) { + fLogger << kFATAL << "<GetFisherCoeff> matrix is singular with determinant=" + << TMath::Abs(invCov.Determinant()) + << " did you use the variables that are linear combinations ???" 
+ << Endl; + } + invCov.Invert(); // apply rescaling factor @@ -440,7 +449,7 @@ void TMVA::MethodFisher::GetFisherCoeff( void ) fF0 = 0.0; for(ivar=0; ivar<GetNvar(); ivar++){ fF0 += (*fFisherCoeff)[ivar]*((*fMeanMatx)(ivar, 0) + (*fMeanMatx)(ivar, 1)); - } + } fF0 /= -2.0; } diff --git a/tmva/src/MethodHMatrix.cxx b/tmva/src/MethodHMatrix.cxx index 36ff36a978a..ace282ddcdb 100644 --- a/tmva/src/MethodHMatrix.cxx +++ b/tmva/src/MethodHMatrix.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodHMatrix.cxx,v 1.31 2006/11/14 16:00:56 helgevoss Exp $ +// @(#)root/tmva $Id: MethodHMatrix.cxx,v 1.36 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Xavier Prudent, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -90,7 +90,7 @@ void TMVA::MethodHMatrix::InitHMatrix( void ) { // default initialisation called by all constructors SetMethodName( "HMatrix" ); - SetMethodType( TMVA::Types::HMatrix ); + SetMethodType( TMVA::Types::kHMatrix ); SetTestvarName(); fNormaliseInputVars = kTRUE; @@ -122,6 +122,7 @@ void TMVA::MethodHMatrix::DeclareOptions() void TMVA::MethodHMatrix::ProcessOptions() { + // process user options MethodBase::ProcessOptions(); } @@ -137,7 +138,7 @@ void TMVA::MethodHMatrix::Train( void ) Double_t meanS, meanB, rmsS, rmsB, xmin, xmax; for (Int_t ivar=0; ivar<GetNvar(); ivar++) { - Statistics( TMVA::Types::kTrain, (*fInputVars)[ivar], + Statistics( TMVA::Types::kTraining, (*fInputVars)[ivar], meanS, meanB, rmsS, rmsB, xmin, xmax, fNormaliseInputVars ); (*fVecMeanS)(ivar) = meanS; @@ -148,17 +149,33 @@ void TMVA::MethodHMatrix::Train( void ) this->ComputeCovariance( kTRUE, fInvHMatrixS ); this->ComputeCovariance( kFALSE, fInvHMatrixB ); - if ( TMath::Abs(fInvHMatrixS->Determinant()) < 10E-14 ) { - fLogger << kFATAL << "<Train> matrix is singular," - << " did you use the variables that are linear combinations ???" + if ( TMath::Abs(fInvHMatrixS->Determinant()) < 10E-24 ) { + fLogger << kWARNING << "<Train> H-matrix S is almost singular with deterinant= " + << TMath::Abs(fInvHMatrixS->Determinant()) + << " did you use the variables that are linear combinations or highly correlated ???" << Endl; } - if ( TMath::Abs(fInvHMatrixB->Determinant()) < 10E-14 ) { - fLogger << kFATAL << "<Train> matrix is singular," - << " did you use the variables that are linear combinations ???" + if ( TMath::Abs(fInvHMatrixB->Determinant()) < 10E-24 ) { + fLogger << kWARNING << "<Train> H-matrix B is almost singular with deterinant= " + << TMath::Abs(fInvHMatrixB->Determinant()) + << " did you use the variables that are linear combinations or highly correlated ???" << Endl; } + if ( TMath::Abs(fInvHMatrixS->Determinant()) < 10E-120 ) { + fLogger << kFATAL << "<Train> H-matrix S is singular with deterinant= " + << TMath::Abs(fInvHMatrixS->Determinant()) + << " did you use the variables that are linear combinations ???" 
+ << Endl; + } + if ( TMath::Abs(fInvHMatrixB->Determinant()) < 10E-120 ) { + fLogger << kFATAL << "<Train> H-matrix B is singular with deterinant= " + << TMath::Abs(fInvHMatrixB->Determinant()) + << " did you use the variables that are linear combinations ???" + << Endl; + } + + // invert matrix @@ -229,7 +246,7 @@ Double_t TMVA::MethodHMatrix::GetMvaValue() } //_______________________________________________________________________ -Double_t TMVA::MethodHMatrix::GetChi2( TMVA::Event *e, Types::SBType type ) const +Double_t TMVA::MethodHMatrix::GetChi2( TMVA::Event *e, Types::ESBType type ) const { // compute chi2-estimator for event according to type (signal/background) @@ -261,7 +278,7 @@ Double_t TMVA::MethodHMatrix::GetChi2( TMVA::Event *e, Types::SBType type ) con } //_______________________________________________________________________ -Double_t TMVA::MethodHMatrix::GetChi2( Types::SBType type ) const +Double_t TMVA::MethodHMatrix::GetChi2( Types::ESBType type ) const { // compute chi2-estimator for event according to type (signal/background) diff --git a/tmva/src/MethodLikelihood.cxx b/tmva/src/MethodLikelihood.cxx index e12d3845e94..4e1787ca94f 100644 --- a/tmva/src/MethodLikelihood.cxx +++ b/tmva/src/MethodLikelihood.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodLikelihood.cxx,v 1.51 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodLikelihood.cxx,v 1.55 2006/11/17 00:21:35 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -186,7 +186,7 @@ void TMVA::MethodLikelihood::InitLik( void ) fPDFBgd = NULL; SetMethodName( "Likelihood" ); - SetMethodType( TMVA::Types::Likelihood ); + SetMethodType( TMVA::Types::kLikelihood ); SetTestvarName(); fEpsilon = 1e-5; @@ -207,6 +207,17 @@ void TMVA::MethodLikelihood::InitLik( void ) //_______________________________________________________________________ void TMVA::MethodLikelihood::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // know options: + // Spline <int> spline used to interpolate reference histograms + // available values are: 0, 1, 2 <default>, 3, 5 + // + // NSmooth <int> how often the input histos are smoothed + // NAvEvtPerBin <int> minimum average number of events per PDF bin (less trigger warning) + // TransformOutput <bool> transform (often strongly peaked) likelihood output through sigmoid inversion + + + DeclareOptionRef(fSpline=2,"Spline","spline used to interpolate reference histograms"); AddPreDefVal(0); // take histogram AddPreDefVal(1); // linear interpolation between bins @@ -226,6 +237,7 @@ void TMVA::MethodLikelihood::DeclareOptions() //_______________________________________________________________________ void TMVA::MethodLikelihood::ProcessOptions() { + // process user options MethodBase::ProcessOptions(); if (fSpline == 0) fSmoothMethod = TMVA::PDF::kSpline0; @@ -239,9 +251,9 @@ void TMVA::MethodLikelihood::ProcessOptions() } // decorrelate option will be last option, if it is specified - if (GetPreprocessingMethod() == Types::kDecorrelated) + if (GetPreprocessingMethod() == Types::kDecorrelated) fLogger << kINFO << "use decorrelated variable set" << Endl; - else if (GetPreprocessingMethod() == Types::kPCA) + else if (GetPreprocessingMethod() == Types::kPCA) fLogger << kINFO << "use principal component preprocessing" << Endl; } @@ -563,8 +575,11 @@ void TMVA::MethodLikelihood::ReadWeightsFromStream( istream& istr ) // find corresponding variable index and cache it (to spead up likelihood evaluation) for (Int_t ivar=0; ivar<GetNvar(); ivar++) { if (hname.Contains( (*fInputVars)[ivar] )) { - if (hname.Contains("_sig_")) (*fIndexSig)[ivar] = fSigPDFHist->GetEntries()-1; - else if (hname.Contains("_bgd_")) (*fIndexBgd)[ivar] = fBgdPDFHist->GetEntries()-1; +// if (hname.Contains("_sig_")) (*fIndexSig)[ivar] = fSigPDFHist->GetEntries()-1; +// else if (hname.Contains("_bgd_")) (*fIndexBgd)[ivar] = fBgdPDFHist->GetEntries()-1; +// to be backward compatible to ROOT 4.02 + if (hname.Contains("_sig_")) (*fIndexSig)[ivar] = fSigPDFHist->GetSize()-1; + else if (hname.Contains("_bgd_")) (*fIndexBgd)[ivar] = fBgdPDFHist->GetSize()-1; } } } diff --git a/tmva/src/MethodMLP.cxx b/tmva/src/MethodMLP.cxx index 82eaeb72776..292d633982d 100644 --- a/tmva/src/MethodMLP.cxx +++ b/tmva/src/MethodMLP.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodMLP.cxx,v 1.29 2006/11/14 23:02:57 stelzer Exp $ +// @(#)root/tmva $Id: MethodMLP.cxx,v 1.32 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Matt Jachowski /********************************************************************************** @@ -113,13 +113,29 @@ void TMVA::MethodMLP::InitMLP() { // default initializations SetMethodName( "MLP" ); - SetMethodType( TMVA::Types::MLP ); + SetMethodType( TMVA::Types::kMLP ); SetTestvarName(); } 
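// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: the renames running through
// these hunks (Types::MLP -> Types::kMLP, Types::TreeType -> Types::ETreeType,
// kTest/kTrain -> kTesting/kTraining, ...) follow the ROOT coding convention
// that enum types carry an 'E' prefix and enumerators a 'k' prefix.  The
// EDemoTree enum below is a made-up stand-in used only to show that pattern;
// only the identifiers quoted from the diff are real TMVA names.
#include <iostream>

namespace Demo {
   // pre-patch style :  enum TreeType  { Train,     Test     };
   // post-patch style:  enum ETreeType { kTraining, kTesting };
   enum EDemoTree { kTraining, kTesting };

   const char* Describe( EDemoTree t )
   {
      // callers switch on the k-prefixed enumerators, as MethodBase::Statistics
      // and MethodMLP::CalculateEstimator do after this patch
      return (t == kTraining) ? "training sample" : "test sample";
   }
}

int main()
{
   std::cout << Demo::Describe( Demo::kTraining ) << std::endl;
   std::cout << Demo::Describe( Demo::kTesting  ) << std::endl;
   return 0;
}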
//_______________________________________________________________________ void TMVA::MethodMLP::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // know options: + // TrainingMethod <string> Training method + // available values are: BP Back-Propagation <default> + // GA Genetic Algorithm (takes a LONG time) + // + // LearningRate <float> NN learning rate parameter + // DecayRate <float> Decay rate for learning parameter + // TestRate <int> Test for overtraining performed at each #th epochs + // + // BPMode <string> Back-propagation learning mode + // available values are: sequential <default> + // batch + // + // BatchSize <int> Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events + DeclareOptionRef(fTrainMethodS="BP", "TrainingMethod", "Train with Back-Propagation (BP) or Genetic Algorithm (GA) (takes a LONG time)"); AddPreDefVal(TString("BP")); @@ -141,6 +157,7 @@ void TMVA::MethodMLP::DeclareOptions() //_______________________________________________________________________ void TMVA::MethodMLP::ProcessOptions() { + // process user options MethodANNBase::ProcessOptions(); if (fTrainMethodS == "BP") fTrainingMethod = kBP; @@ -153,7 +170,6 @@ void TMVA::MethodMLP::ProcessOptions() //______________________________________________________________________________ void TMVA::MethodMLP::InitializeLearningRates() { - // initialize learning rates of synapses, used only by backpropagation TSynapse *synapse; Int_t numSynapses = fSynapses->GetEntriesFast(); @@ -164,12 +180,12 @@ void TMVA::MethodMLP::InitializeLearningRates() } //______________________________________________________________________________ -Double_t TMVA::MethodMLP::CalculateEstimator( TMVA::Types::TreeType treeType ) +Double_t TMVA::MethodMLP::CalculateEstimator( TMVA::Types::ETreeType treeType ) { // calculate the estimator that training is attempting to minimize - Int_t nEvents = ( (treeType == TMVA::Types::kTest ) ? Data().GetNEvtTest() : - (treeType == TMVA::Types::kTrain) ? Data().GetNEvtTrain() : -1 ); + Int_t nEvents = ( (treeType == TMVA::Types::kTesting ) ? Data().GetNEvtTest() : + (treeType == TMVA::Types::kTraining) ? 
Data().GetNEvtTrain() : -1 ); // sanity check if (nEvents <=0) @@ -180,8 +196,10 @@ Double_t TMVA::MethodMLP::CalculateEstimator( TMVA::Types::TreeType treeType ) // loop over all training events for (Int_t i = 0; i < nEvents; i++) { - if (treeType == TMVA::Types::kTest ) ReadTestEvent(i); - else ReadTrainingEvent(i); + if (treeType == TMVA::Types::kTesting ) + ReadTestEvent(i); + else + ReadTrainingEvent(i); Double_t desired = GetDesiredOutput(); ForceNetworkInputs(); @@ -236,8 +254,8 @@ void TMVA::MethodMLP::BackPropagationMinimize(Int_t nEpochs) // monitor convergence of training and control sample if ((i+1)%fTestRate == 0) { - Double_t trainE = CalculateEstimator( TMVA::Types::kTrain ); // estimator for training sample - Double_t testE = CalculateEstimator( TMVA::Types::kTest ); // estimator for test samplea + Double_t trainE = CalculateEstimator( TMVA::Types::kTraining ); // estimator for training sample + Double_t testE = CalculateEstimator( TMVA::Types::kTesting ); // estimator for test samplea fEstimatorHistTrain->Fill( i+1, trainE ); fEstimatorHistTest ->Fill( i+1, testE ); } @@ -579,6 +597,7 @@ void TMVA::MethodMLP::AdjustSynapseWeights() //______________________________________________________________________________ void TMVA::MethodMLP::MinuitMinimize() { + // minimize using Minuit fNumberOfWeights = fSynapses->GetEntriesFast(); TFitter* tfitter = new TFitter( fNumberOfWeights ); diff --git a/tmva/src/MethodPDERS.cxx b/tmva/src/MethodPDERS.cxx index 398e8cb54e0..863a628d2c4 100644 --- a/tmva/src/MethodPDERS.cxx +++ b/tmva/src/MethodPDERS.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodPDERS.cxx,v 1.39 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodPDERS.cxx,v 1.43 2006/11/17 00:21:35 stelzer Exp $ // Author: Andreas Hoecker, Yair Mahalalel, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Yair Mahalalel <Yair.Mahalalel@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -123,7 +123,7 @@ void TMVA::MethodPDERS::InitPDERS( void ) { // default initialisation routine called by all constructors SetMethodName( "PDERS" ); - SetMethodType( TMVA::Types::PDERS ); + SetMethodType( TMVA::Types::kPDERS ); SetTestvarName(); fBinaryTreeS = fBinaryTreeB = NULL; @@ -160,6 +160,36 @@ TMVA::MethodPDERS::~MethodPDERS( void ) //_______________________________________________________________________ void TMVA::MethodPDERS::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // know options: + // VolumeRangeMode <string> Method to determine volume range + // available values are: MinMax <default> + // Unscaled + // RMS + // Adaptive + // + // KernelEstimator <string> Kernel estimation function + // available values are: Box <default> + // Sphere + // Teepee + // Gauss + // Sinc3 + // Sinc5 + // Sinc7 + // Sinc9 + // Sinc11 + // Lanczos2 + // Lanczos3 + // Lanczos5 + // Lanczos8 + // + // DeltaFrac <float> Ratio of #EventsMin/#EventsMax for MinMax and RMS volume range + // NEventsMin <int> Minimum number of events for adaptive volume range + // NEventsMax <int> Maximum number of events for adaptive volume range + // MaxVIterations <int> Maximum number of iterations for adaptive volume range + // InitialScale <float> Initial scale for adaptive volume range + // GaussSigma <float> Width with respect to the volume size of Gaussian kernel estimator + DeclareOptionRef(fVolumeRange="MinMax", "VolumeRangeMode", "Method to determine volume range"); AddPreDefVal(TString("Unscaled")); AddPreDefVal(TString("MinMax")); @@ -181,7 +211,7 @@ void TMVA::MethodPDERS::DeclareOptions() AddPreDefVal(TString("Lanczos5")); AddPreDefVal(TString("Lanczos8")); - DeclareOptionRef(fDeltaFrac , "DeltaFrac", "nEventsMin/Max for minmax and rms vaolume range"); + DeclareOptionRef(fDeltaFrac , "DeltaFrac", "nEventsMin/Max for minmax and rms volume range"); DeclareOptionRef(fNEventsMin , "NEventsMin", "nEventsMin for adaptive volume range"); DeclareOptionRef(fNEventsMax , "NEventsMax", "nEventsMax for adaptive volume range"); DeclareOptionRef(fMaxVIterations, "MaxVIterations", "MaxVIterations for adaptive volume range"); @@ -192,36 +222,38 @@ void TMVA::MethodPDERS::DeclareOptions() //_______________________________________________________________________ void TMVA::MethodPDERS::ProcessOptions() { + // process the options specified by the user + MethodBase::ProcessOptions(); fVRangeMode = TMVA::MethodPDERS::kUnsupported; - if (fVolumeRange == "MinMax" ) fVRangeMode = TMVA::MethodPDERS::kMinMax; - else if (fVolumeRange == "RMS" ) fVRangeMode = TMVA::MethodPDERS::kRMS; - else if (fVolumeRange == "Adaptive" ) fVRangeMode = TMVA::MethodPDERS::kAdaptive; - else if (fVolumeRange == "Unscaled" ) fVRangeMode = TMVA::MethodPDERS::kUnscaled; + if (fVolumeRange == "MinMax" ) fVRangeMode = TMVA::MethodPDERS::kMinMax; + else if (fVolumeRange == "RMS" ) fVRangeMode = TMVA::MethodPDERS::kRMS; + else if (fVolumeRange == "Adaptive" ) fVRangeMode = TMVA::MethodPDERS::kAdaptive; + else if (fVolumeRange == "Unscaled" ) fVRangeMode = TMVA::MethodPDERS::kUnscaled; else { fLogger << kFATAL << "VolumeRangeMode parameter '" << fVolumeRange << "' unknown" << Endl; } - if (fKernelString == "Box" ) fKernelEstimator = TMVA::MethodPDERS::kBox; - else if 
(fKernelString == "Sphere" ) fKernelEstimator = TMVA::MethodPDERS::kSphere; - else if (fKernelString == "Teepee" ) fKernelEstimator = TMVA::MethodPDERS::kTeepee; - else if (fKernelString == "Gauss" ) fKernelEstimator = TMVA::MethodPDERS::kGauss; - else if (fKernelString == "Sinc3" ) fKernelEstimator = TMVA::MethodPDERS::kSinc3; - else if (fKernelString == "Sinc5" ) fKernelEstimator = TMVA::MethodPDERS::kSinc5; - else if (fKernelString == "Sinc7" ) fKernelEstimator = TMVA::MethodPDERS::kSinc7; - else if (fKernelString == "Sinc9" ) fKernelEstimator = TMVA::MethodPDERS::kSinc9; - else if (fKernelString == "Sinc11" ) fKernelEstimator = TMVA::MethodPDERS::kSinc11; - else if (fKernelString == "Lanczos2" ) fKernelEstimator = TMVA::MethodPDERS::kLanczos2; - else if (fKernelString == "Lanczos3" ) fKernelEstimator = TMVA::MethodPDERS::kLanczos3; - else if (fKernelString == "Lanczos5" ) fKernelEstimator = TMVA::MethodPDERS::kLanczos5; - else if (fKernelString == "Lanczos8" ) fKernelEstimator = TMVA::MethodPDERS::kLanczos8; + if (fKernelString == "Box" ) fKernelEstimator = TMVA::MethodPDERS::kBox; + else if (fKernelString == "Sphere" ) fKernelEstimator = TMVA::MethodPDERS::kSphere; + else if (fKernelString == "Teepee" ) fKernelEstimator = TMVA::MethodPDERS::kTeepee; + else if (fKernelString == "Gauss" ) fKernelEstimator = TMVA::MethodPDERS::kGauss; + else if (fKernelString == "Sinc3" ) fKernelEstimator = TMVA::MethodPDERS::kSinc3; + else if (fKernelString == "Sinc5" ) fKernelEstimator = TMVA::MethodPDERS::kSinc5; + else if (fKernelString == "Sinc7" ) fKernelEstimator = TMVA::MethodPDERS::kSinc7; + else if (fKernelString == "Sinc9" ) fKernelEstimator = TMVA::MethodPDERS::kSinc9; + else if (fKernelString == "Sinc11" ) fKernelEstimator = TMVA::MethodPDERS::kSinc11; + else if (fKernelString == "Lanczos2" ) fKernelEstimator = TMVA::MethodPDERS::kLanczos2; + else if (fKernelString == "Lanczos3" ) fKernelEstimator = TMVA::MethodPDERS::kLanczos3; + else if (fKernelString == "Lanczos5" ) fKernelEstimator = TMVA::MethodPDERS::kLanczos5; + else if (fKernelString == "Lanczos8" ) fKernelEstimator = TMVA::MethodPDERS::kLanczos8; else { fLogger << kFATAL << "KernelEstimator parameter '" << fKernelString << "' unknown" << Endl; } - // TODO: Add parameter validation + // TODO: Add parameter validation fLogger << kVERBOSE << "interpreted option string: vRangeMethod: '" << (const char*)((fVRangeMode == kMinMax) ? 
"MinMax" : @@ -438,7 +470,7 @@ Float_t TMVA::MethodPDERS::RScalc( const TMVA::Event& e ) delete lb; delete ub; } - else if (fVRangeMode == kAdaptive) { // adaptive volume + else if (fVRangeMode == kAdaptive) { // adaptive volume // ----------------------------------------------------------------------- @@ -554,7 +586,7 @@ Float_t TMVA::MethodPDERS::RScalc( const TMVA::Event& e ) if (countS < 1e-20) return 0.0; Float_t r = countB*fScaleB/(countS*fScaleS); - return 1.0/(r + 1.0); // TODO: propagate errors from here + return 1.0/(r + 1.0); // TODO: propagate errors from here } //_______________________________________________________________________ @@ -577,7 +609,7 @@ Double_t TMVA::MethodPDERS::KernelEstimate( const TMVA::Event & event, // always working within the hyperelipsoid, except for when we don't // note that rejection ratio goes to 1 as nvar goes to infinity - if (normalized_distance > 1 && fKernelEstimator != kBox) continue; + if (normalized_distance > 1 && fKernelEstimator != kBox) continue; pdfSum += ApplyKernelFunction (normalized_distance) * (*iev)->GetWeight(); } @@ -587,6 +619,8 @@ Double_t TMVA::MethodPDERS::KernelEstimate( const TMVA::Event & event, //_______________________________________________________________________ Double_t TMVA::MethodPDERS::ApplyKernelFunction (Double_t normalized_distance) { + // from the normalized euclidean distance calculate the distance + // for a certain kernel switch (fKernelEstimator) { case kBox: case kSphere: @@ -605,8 +639,8 @@ Double_t TMVA::MethodPDERS::ApplyKernelFunction (Double_t normalized_distance) case kSinc11: { Double_t side_crossings = 2 + ((int) fKernelEstimator) - ((int) kSinc3); return NormSinc (side_crossings * normalized_distance); - } - break; + } + break; case kLanczos2: return LanczosFilter (2, normalized_distance); break; @@ -626,25 +660,25 @@ Double_t TMVA::MethodPDERS::ApplyKernelFunction (Double_t normalized_distance) return 0; } - + //_______________________________________________________________________ Double_t TMVA::MethodPDERS::KernelNormalization (Double_t pdf) { - // Calculating the normalization factor only once (might need a reset at some point. Can the method be restarted with different params?) + // Calculating the normalization factor only once (might need a reset at some point. Can the method be restarted with different params?) - static Double_t ret = 1.; // Caching jammed to disable function. It's not really useful afterall, badly implemented and untested :-) + static Double_t ret = 1.; // Caching jammed to disable function. It's not really useful afterall, badly implemented and untested :-) - if (ret != 0.) - return ret*pdf; + if (ret != 0.) + return ret*pdf; - // We first normalize by the volume of the hypersphere. - switch (fKernelEstimator) { + // We first normalize by the volume of the hypersphere. + switch (fKernelEstimator) { case kBox: case kSphere: ret = 1.; break; case kTeepee: - ret = (GetNvar() * (GetNvar() + 1) * TMath::Gamma (((Double_t) GetNvar()) / 2.)) / + ret = (GetNvar() * (GetNvar() + 1) * TMath::Gamma (((Double_t) GetNvar()) / 2.)) / ( TMath::Power (2., (Double_t) GetNvar() + 1) * TMath::Power (TMath::Pi(), ((Double_t) GetNvar()) / 2.)); break; case kGauss: @@ -665,63 +699,65 @@ Double_t TMVA::MethodPDERS::KernelNormalization (Double_t pdf) break; default: fLogger << kFATAL << "Kernel estimation function unsupported. 
Enumerator is " << fKernelEstimator << Endl; - } - // Normalizing by the full volume - ret *= ( TMath::Power (2., GetNvar()) * TMath::Gamma (1 + (((Double_t) GetNvar()) / 2.)) ) / + } + // Normalizing by the full volume + ret *= ( TMath::Power (2., GetNvar()) * TMath::Gamma (1 + (((Double_t) GetNvar()) / 2.)) ) / TMath::Power (TMath::Pi(), ((Double_t) GetNvar()) / 2.); - return ret*pdf; + return ret*pdf; } //_______________________________________________________________________ -Double_t TMVA::MethodPDERS::GetNormalizedDistance ( const TMVA::Event &base_event, +Double_t TMVA::MethodPDERS::GetNormalizedDistance ( const TMVA::Event &base_event, const TMVA::Event &sample_event, Double_t *dim_normalization) { - // We use Euclidian metric here. Might not be best or most efficient. - Double_t ret=0; - for (Int_t ivar=0; ivar<GetNvar(); ivar++) { - Double_t dist = dim_normalization[ivar] * (sample_event.GetVal(ivar) - base_event.GetVal(ivar)); - ret += dist*dist; - } - return TMath::Sqrt (ret); + // We use Euclidian metric here. Might not be best or most efficient. + Double_t ret=0; + for (Int_t ivar=0; ivar<GetNvar(); ivar++) { + Double_t dist = dim_normalization[ivar] * (sample_event.GetVal(ivar) - base_event.GetVal(ivar)); + ret += dist*dist; + } + return TMath::Sqrt (ret); } //_______________________________________________________________________ -Double_t TMVA::MethodPDERS::NormSinc (Double_t x) { - - if (x < 10e-10 && x > -10e-10) { - return 1; // Poor man's l'Hopital - } - - Double_t pix = TMath::Pi() * x; - Double_t sinc = TMath::Sin(pix) / pix; - Double_t ret; - - if (GetNvar() % 2) - ret = TMath::Power (sinc, GetNvar()); - else - ret = TMath::Abs (sinc) * TMath::Power (sinc, GetNvar() - 1); - - return ret; +Double_t TMVA::MethodPDERS::NormSinc (Double_t x) +{ + // NormSinc + if (x < 10e-10 && x > -10e-10) { + return 1; // Poor man's l'Hopital + } + + Double_t pix = TMath::Pi() * x; + Double_t sinc = TMath::Sin(pix) / pix; + Double_t ret; + + if (GetNvar() % 2) + ret = TMath::Power (sinc, GetNvar()); + else + ret = TMath::Abs (sinc) * TMath::Power (sinc, GetNvar() - 1); + + return ret; } //_______________________________________________________________________ -Double_t TMVA::MethodPDERS::LanczosFilter (Int_t level, Double_t x) { - - if (x < 10e-10 && x > -10e-10) { - return 1; // Poor man's l'Hopital - } - - Double_t pix = TMath::Pi() * x; - Double_t pixtimesn = pix * ((Double_t) level); - Double_t lanczos = (TMath::Sin(pix) / pix) * (TMath::Sin(pixtimesn) / pixtimesn); - Double_t ret; - - if (GetNvar() % 2) - ret = TMath::Power (lanczos, GetNvar()); - else - ret = TMath::Abs (lanczos) * TMath::Power (lanczos, GetNvar() - 1); - - return ret; +Double_t TMVA::MethodPDERS::LanczosFilter (Int_t level, Double_t x) +{ + // Lanczos Filter + if (x < 10e-10 && x > -10e-10) { + return 1; // Poor man's l'Hopital + } + + Double_t pix = TMath::Pi() * x; + Double_t pixtimesn = pix * ((Double_t) level); + Double_t lanczos = (TMath::Sin(pix) / pix) * (TMath::Sin(pixtimesn) / pixtimesn); + Double_t ret; + + if (GetNvar() % 2) + ret = TMath::Power (lanczos, GetNvar()); + else + ret = TMath::Abs (lanczos) * TMath::Power (lanczos, GetNvar() - 1); + + return ret; } //_______________________________________________________________________ diff --git a/tmva/src/MethodRuleFit.cxx b/tmva/src/MethodRuleFit.cxx index 8d707269a05..fd634730eda 100644 --- a/tmva/src/MethodRuleFit.cxx +++ b/tmva/src/MethodRuleFit.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodRuleFit.cxx,v 1.40 2006/11/02 15:44:50 andreas.hoecker 
Exp $ +// @(#)root/tmva $Id: MethodRuleFit.cxx,v 1.43 2006/11/17 00:21:35 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss /********************************************************************************** @@ -16,7 +16,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * Iowa State U. * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -87,6 +87,30 @@ TMVA::MethodRuleFit::~MethodRuleFit( void ) //_______________________________________________________________________ void TMVA::MethodRuleFit::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // know options: + // GDTau <float> gradient-directed path: fit threshhold + // GDStep <float> gradient-directed path: step size + // GDNSteps <float> gradient-directed path: number of steps + // GDErrNsigma <float> threshold for error-rate + // MinImp <float> minimum rule importance accepted + // nEventsMin <float> minimum number of events in a leaf node + // nTrees <float> number of trees in forest. + // SampleFraction <float> fraction of events used to train each tree + // nCuts <float> number of steps during node cut optimisation + // RuleMaxDist <float> max distance allowed between equal rules + // + // SeparationType <string> separation criterion for node splitting + // available values are: GiniIndex <default> + // MisClassificationError + // CrossEntropy + // SDivSqrtSPlusB + // + // Model <string> model to be used + // available values are: ModRuleLinear <default> + // ModRule + // ModLinear + DeclareOptionRef(fGDTau=0.0, "GDTau", "gradient-directed path: fit threshhold"); DeclareOptionRef(fGDPathStep=0.01, "GDStep", "gradient-directed path: step size"); DeclareOptionRef(fGDNPathSteps=100, "GDNSteps", "gradient-directed path: number of steps"); @@ -113,6 +137,7 @@ void TMVA::MethodRuleFit::DeclareOptions() //_______________________________________________________________________ void TMVA::MethodRuleFit::ProcessOptions() { + // process the options specified by the user MethodBase::ProcessOptions(); if (fSepTypeS == "misclassificationerror") fSepType = new TMVA::MisClassificationError(); @@ -135,6 +160,7 @@ void TMVA::MethodRuleFit::ProcessOptions() //_______________________________________________________________________ void TMVA::MethodRuleFit::InitMonitorNtuple() { + // initialize the monitoring ntuple fMonitorNtuple= new TTree("MonitorNtuple_RuleFit","RuleFit variables"); fMonitorNtuple->Branch("importance",&fNTImportance,"importance/D"); fMonitorNtuple->Branch("support",&fNTSupport,"support/D"); @@ -154,7 +180,7 @@ void TMVA::MethodRuleFit::InitRuleFit( void ) { // default initialisation SetMethodName( "RuleFit" ); - SetMethodType( TMVA::Types::RuleFit ); + SetMethodType( TMVA::Types::kRuleFit ); SetTestvarName(); } @@ -191,6 +217,7 @@ void TMVA::MethodRuleFit::InitEventSample( void ) //_______________________________________________________________________ void TMVA::MethodRuleFit::BuildTree( TMVA::DecisionTree *dt, std::vector< TMVA::Event *> & el ) { + // build the decision tree if (dt==0) return; dt->BuildTree(el); } @@ -298,13 +325,14 @@ const TMVA::Ranking* TMVA::MethodRuleFit::CreateRanking() //_______________________________________________________________________ void TMVA::MethodRuleFit::WriteWeightsToStream( ostream & o ) const { + // write the rules to an ostream 
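// ---------------------------------------------------------------------------
// Illustrative sketch, not code from the patch: the WriteWeightsToStream /
// ReadWeightsFromStream pairs documented throughout these hunks share one
// symmetric contract -- whatever Train() streams out must be parsed back in
// the same order when the weight file is read again.  "DemoWeights" and its
// fCoeff vector are made-up stand-ins for a method's trained state.
#include <iostream>
#include <sstream>
#include <vector>

struct DemoWeights {
   std::vector<double> fCoeff;   // e.g. coefficients learned during training

   void WriteWeightsToStream( std::ostream& o ) const
   {
      // write the number of coefficients first, then one value per line
      o << fCoeff.size() << std::endl;
      for (size_t i = 0; i < fCoeff.size(); i++) o << fCoeff[i] << std::endl;
   }

   void ReadWeightsFromStream( std::istream& istr )
   {
      // read back exactly what the writer produced, in the same order
      size_t n = 0;
      istr >> n;
      fCoeff.assign( n, 0.0 );
      for (size_t i = 0; i < n; i++) istr >> fCoeff[i];
   }
};

int main()
{
   DemoWeights trained;                    // pretend this came out of Train()
   trained.fCoeff.push_back(  0.50 );
   trained.fCoeff.push_back( -1.25 );

   std::stringstream weightFile;           // stands in for the weight file
   trained.WriteWeightsToStream( weightFile );

   DemoWeights restored;                   // what a later application run does
   restored.ReadWeightsFromStream( weightFile );
   std::cout << "restored " << restored.fCoeff.size()
             << " coefficients, first = " << restored.fCoeff[0] << std::endl;
   return 0;
}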
fRuleFit.GetRuleEnsemble().PrintRaw(o); } //_______________________________________________________________________ void TMVA::MethodRuleFit::ReadWeightsFromStream( istream & istr ) { - // read rules from stream + // read rules from an istream fRuleFit.GetRuleEnsemblePtr()->ReadRaw(istr); } diff --git a/tmva/src/MethodSVM.cxx b/tmva/src/MethodSVM.cxx index 2c8055ea8d7..7d713ef68a0 100644 --- a/tmva/src/MethodSVM.cxx +++ b/tmva/src/MethodSVM.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodSVM.cxx,v 1.16 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodSVM.cxx,v 1.19 2006/11/17 00:21:35 stelzer Exp $ // Author: Marcin .... /********************************************************************************** @@ -17,7 +17,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -60,15 +60,18 @@ void TMVA::MethodSVM::InitSVM( void ) { // default initialisation SetMethodName( "SVM" ); - SetMethodType( TMVA::Types::SVM ); + SetMethodType( TMVA::Types::kSVM ); SetTestvarName(); } void TMVA::MethodSVM::DeclareOptions() -{} +{ + // declare options available for this method +} void TMVA::MethodSVM::ProcessOptions() { + // evaluate options MethodBase::ProcessOptions(); } @@ -90,12 +93,14 @@ void TMVA::MethodSVM::Train( void ) //_______________________________________________________________________ void TMVA::MethodSVM::WriteWeightsToStream( ostream & o ) const { + // Write configuration to output stream o << "whatever" << endl; } //_______________________________________________________________________ void TMVA::MethodSVM::ReadWeightsFromStream( istream & istr ) { + // Read configuration from input stream if (istr.eof()); } diff --git a/tmva/src/MethodTMlpANN.cxx b/tmva/src/MethodTMlpANN.cxx index 4b556100180..298c2fb8ab8 100644 --- a/tmva/src/MethodTMlpANN.cxx +++ b/tmva/src/MethodTMlpANN.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodTMlpANN.cxx,v 1.31 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodTMlpANN.cxx,v 1.34 2006/11/17 00:21:35 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** * Project: TMVA - a Root-integrated toolkit for multivariate data analysis * @@ -12,13 +12,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -100,7 +100,7 @@ void TMVA::MethodTMlpANN::InitTMlpANN( void ) { // default initialisations SetMethodName( "TMlpANN" ); - SetMethodType( TMVA::Types::TMlpANN ); + SetMethodType( TMVA::Types::kTMlpANN ); SetTestvarName(); } @@ -157,6 +157,16 @@ void TMVA::MethodTMlpANN::CreateMLPOptions( TString layerSpec ) //_______________________________________________________________________ void TMVA::MethodTMlpANN::DeclareOptions() { + // define the options (their key words) that can be set in the option string + // know options: + // NCycles <integer> Number of training cycles (too many cycles could overtrain the network) + // HiddenLayers <string> Layout of the hidden layers (nodes per layer) + // * specifiactions for each hidden layer are separated by commata + // * for each layer the number of nodes can be either absolut (simply a number) + // or relative to the number of input nodes to the neural net (N) + // * there is always a single node in the output layer + // example: a net with 6 input nodes and "Hiddenlayers=N-1,N-2" has 6,5,4,1 nodes in the + // layers 1,2,3,4, repectively DeclareOptionRef(fNcycles=3000,"NCycles","Number of training cycles"); DeclareOptionRef(fLayerSpec="N-1,N-2","HiddenLayers","Specification of the hidden layers"); } @@ -164,6 +174,8 @@ void TMVA::MethodTMlpANN::DeclareOptions() //_______________________________________________________________________ void TMVA::MethodTMlpANN::ProcessOptions() { + // builds the neural network as specified by the user + CreateMLPOptions(fLayerSpec); // Here we create a dummy tree necessary to create @@ -188,6 +200,7 @@ void TMVA::MethodTMlpANN::ProcessOptions() //_______________________________________________________________________ Double_t TMVA::MethodTMlpANN::GetMvaValue() { + // calculate the value of the neural net for the current event static Double_t* d = new Double_t[Data().GetNVariables()]; for(UInt_t ivar = 0; ivar<Data().GetNVariables(); ivar++) { d[ivar] = (Double_t)Data().Event().GetVal(ivar); diff --git a/tmva/src/MethodVariable.cxx b/tmva/src/MethodVariable.cxx index 67012064749..25378e68c34 100644 --- a/tmva/src/MethodVariable.cxx +++ b/tmva/src/MethodVariable.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MethodVariable.cxx,v 1.21 2006/11/02 15:44:50 andreas.hoecker Exp $ +// @(#)root/tmva $Id: MethodVariable.cxx,v 1.24 2006/11/17 00:21:35 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
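The HiddenLayers specification documented above is resolved relative to the number of input variables N. A hypothetical standalone sketch of that resolution (not the actual CreateMLPOptions() code; function and variable names are made up):

   #include <cstdlib>
   #include <sstream>
   #include <string>
   #include <vector>

   // Resolve a spec like "N-1,N-2" into absolute node counts per hidden layer,
   // e.g. nvar = 6 gives {5, 4}; plain numbers are taken as absolute counts.
   std::vector<int> ResolveHiddenLayers( const std::string& spec, int nvar )
   {
      std::vector<int> nodes;
      std::stringstream ss( spec );
      std::string token;
      while (std::getline( ss, token, ',' )) {
         if (!token.empty() && token[0] == 'N')
            nodes.push_back( nvar + std::atoi( token.substr(1).c_str() ) ); // "N-1" -> nvar-1
         else
            nodes.push_back( std::atoi( token.c_str() ) );                  // absolute count
      }
      return nodes;
   }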
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -50,7 +50,7 @@ TMVA::MethodVariable::MethodVariable( TString jobName, TString methodTitle, Data // so, remove this part SetMethodName( "Variable" ); - SetMethodType( TMVA::Types::Variable ); + SetMethodType( TMVA::Types::kVariable ); SetTestvarPrefix( "" ); SetTestvarName(); @@ -93,11 +93,13 @@ Double_t TMVA::MethodVariable::GetMvaValue() //_______________________________________________________________________ void TMVA::MethodVariable::WriteWeightsToStream( ostream & o ) const { + // Write configuration to output stream o << ""; } //_______________________________________________________________________ void TMVA::MethodVariable::ReadWeightsFromStream( istream & istr ) { + // Read configuration from input stream if (istr.eof()); } diff --git a/tmva/src/MisClassificationError.cxx b/tmva/src/MisClassificationError.cxx index a07bdbee008..5747d6e41d5 100644 --- a/tmva/src/MisClassificationError.cxx +++ b/tmva/src/MisClassificationError.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MisClassificationError.cxx,v 1.11 2006/11/06 00:10:17 helgevoss Exp $ +// @(#)root/tmva $Id: MisClassificationError.cxx,v 1.12 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,7 +13,7 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. 
of Victoria, Canada * * * * Copyright (c) 2005: * diff --git a/tmva/src/MsgLogger.cxx b/tmva/src/MsgLogger.cxx index 7a5fa49ba95..29fa62fda72 100644 --- a/tmva/src/MsgLogger.cxx +++ b/tmva/src/MsgLogger.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: MsgLogger.cxx,v 1.6 2006/11/13 23:43:34 stelzer Exp $ +// @(#)root/tmva $Id: MsgLogger.cxx,v 1.10 2006/11/16 22:51:59 helgevoss Exp $ // Author: Attila Krasznahorkay /********************************************************************************** @@ -15,7 +15,7 @@ * * * Copyright (c) 2005: * * CERN, Switzerland, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -48,7 +48,7 @@ static const string PREFIX = "--- "; // this is the hardcoded suffix static const string SUFFIX = ": "; -TMVA::MsgLogger::MsgLogger( const TObject* source, MsgType minType ) +TMVA::MsgLogger::MsgLogger( const TObject* source, EMsgType minType ) : fObjSource( source ), fStrSource( "" ), fPrefix( PREFIX ), @@ -61,7 +61,7 @@ TMVA::MsgLogger::MsgLogger( const TObject* source, MsgType minType ) InitMaps(); } -TMVA::MsgLogger::MsgLogger( const string& source, MsgType minType ) +TMVA::MsgLogger::MsgLogger( const string& source, EMsgType minType ) : fObjSource( 0 ), fStrSource( source ), fPrefix( PREFIX ), @@ -74,7 +74,7 @@ TMVA::MsgLogger::MsgLogger( const string& source, MsgType minType ) InitMaps(); } -TMVA::MsgLogger::MsgLogger( MsgType minType ) +TMVA::MsgLogger::MsgLogger( EMsgType minType ) : fObjSource( 0 ), fStrSource( "Unknown" ), fPrefix( PREFIX ), @@ -96,19 +96,25 @@ TMVA::MsgLogger::MsgLogger( const MsgLogger& parent ) : fSuffix( SUFFIX ), fMaxSourceSize( MAXIMUM_SOURCE_NAME_LENGTH ) { + // copy constructor InitMaps(); *this = parent; } TMVA::MsgLogger::~MsgLogger() -{} +{ + // destructor +} TMVA::MsgLogger& TMVA::MsgLogger::operator= ( const MsgLogger& parent ) { - fObjSource = parent.fObjSource; - fStrSource = parent.fStrSource; - fActiveType = parent.fActiveType; - fMinType = parent.fMinType; + // assingment operator + if( &parent != this) { + fObjSource = parent.fObjSource; + fStrSource = parent.fStrSource; + fActiveType = parent.fActiveType; + fMinType = parent.fMinType; + } return *this; } @@ -169,15 +175,19 @@ void TMVA::MsgLogger::Send() return; } -void TMVA::MsgLogger::WriteMsg( MsgType type, const std::string& line ) const +void TMVA::MsgLogger::WriteMsg( EMsgType type, const std::string& line ) const { + // putting the output string, the message type, and the color + // switcher together into a single string + if (type < fMinType) return; - map<MsgType, std::string>::const_iterator stype; + map<EMsgType, std::string>::const_iterator stype; if ((stype = fTypeMap.find( type )) == fTypeMap.end()) return; #ifdef USE_COLORED_CONSOLE // no text for INFO if (type == kINFO) - cout << fColorMap.find( type )->second << fPrefix << line << "\033[0m" << endl; + // no color for info + cout << fPrefix << line << endl; else cout << fColorMap.find( type )->second << fPrefix << "<" << stype->second << "> " << line << "\033[0m" << endl; #else @@ -191,7 +201,7 @@ void TMVA::MsgLogger::WriteMsg( MsgType type, const std::string& line ) const if (type == kFATAL) { cout << "***> abort program execution" << endl; exit(1); } } -TMVA::MsgLogger& TMVA::MsgLogger::endmsg( MsgLogger& logger ) +TMVA::MsgLogger& TMVA::MsgLogger::Endmsg( MsgLogger& logger ) { // end line logger.Send(); @@ -200,6 +210,7 @@ TMVA::MsgLogger& TMVA::MsgLogger::endmsg( MsgLogger& logger ) 
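The rewritten assignment operator above shows one of the two conventions applied throughout this commit: guard against self-assignment before copying any members. The same idiom in a generic, self-contained form (placeholder class and member, not TMVA code):

   // self-assignment-safe copy assignment, mirroring MsgLogger::operator=()
   class Sketch {
   public:
      Sketch& operator=( const Sketch& rhs )
      {
         if (&rhs != this) {      // skip the copy when assigning to itself
            fValue = rhs.fValue;
         }
         return *this;
      }
   private:
      double fValue;
   };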
void TMVA::MsgLogger::InitMaps() { + // fill maps that assign a string and a color to echo message level fTypeMap[kVERBOSE] = "VERBOSE"; fTypeMap[kDEBUG] = "DEBUG"; fTypeMap[kINFO] = "INFO"; diff --git a/tmva/src/Node.cxx b/tmva/src/Node.cxx index e448d99da4d..b52b92c4b82 100644 --- a/tmva/src/Node.cxx +++ b/tmva/src/Node.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Node.cxx,v 1.20 2006/11/13 15:49:49 helgevoss Exp $ +// @(#)root/tmva $Id: Node.cxx,v 1.21 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * CopyRight (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/Option.cxx b/tmva/src/Option.cxx index 927fc273c4d..17d2aecdc74 100644 --- a/tmva/src/Option.cxx +++ b/tmva/src/Option.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Option.cxx,v 1.14 2006/11/14 13:56:12 stelzer Exp $ +// @(#)root/tmva $Id: Option.cxx,v 1.15 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/PDF.cxx b/tmva/src/PDF.cxx index 0f29b9c5c7d..bfbd43d6ae7 100644 --- a/tmva/src/PDF.cxx +++ b/tmva/src/PDF.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: PDF.cxx,v 1.20 2006/10/15 12:06:32 andreas.hoecker Exp $ +// @(#)root/tmva $Id: PDF.cxx,v 1.22 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -46,7 +46,7 @@ ClassImp(TMVA::PDF) ; //_______________________________________________________________________ -TMVA::PDF::PDF( const TH1 *hist, TMVA::PDF::SmoothMethod method, Int_t nsmooth ) +TMVA::PDF::PDF( const TH1 *hist, TMVA::PDF::ESmoothMethod method, Int_t nsmooth ) : fUseHistogram( kFALSE ), fNsmooth ( nsmooth ), fSpline ( 0 ), diff --git a/tmva/src/Ranking.cxx b/tmva/src/Ranking.cxx index 80337bb8e35..498327cb26a 100644 --- a/tmva/src/Ranking.cxx +++ b/tmva/src/Ranking.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Ranking.cxx,v 1.10 2006/10/17 14:02:14 krasznaa Exp $ +// @(#)root/tmva $Id: Ranking.cxx,v 1.12 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,11 +13,11 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -39,6 +39,7 @@ ClassImp(TMVA::Ranking) TMVA::Ranking::Ranking() : fLogger( "", kINFO ) { + // default constructor fRanking.clear(); } @@ -47,24 +48,26 @@ TMVA::Ranking::Ranking( const TString& context, const TString& rankingDiscrimina fRankingDiscriminatorName( rankingDiscriminatorName ), fLogger( context.Data(), kINFO ) { + // constructor fRanking.clear(); } TMVA::Ranking::~Ranking() { + // destructor fRanking.clear(); } void TMVA::Ranking::AddRank( Rank& rank ) { - fRanking.push_back( rank ); - + // Add a new rank // sort according to rank value (descending) // Who the hell knows why this does not compile on windos.. write the sorting // reversing myself... 
(means sorting in "descending" order) // --> std::sort ( fRanking.begin(), fRanking.end() ); // --> std::reverse( fRanking.begin(), fRanking.end() ); - + fRanking.push_back( rank ); + UInt_t sizeofarray=fRanking.size(); Rank temp(fRanking[0]); for (unsigned int i=0; i<sizeofarray; i++) { @@ -108,18 +111,24 @@ TMVA::Rank::Rank( TString variable, Double_t rankValue ) : fVariable( variable ), fRankValue( rankValue ), fRank( -1 ) -{} +{ + // constructor +} TMVA::Rank::~Rank() -{} +{ + // destructor +} -Bool_t TMVA::Rank::operator < ( const Rank& other ) const +Bool_t TMVA::Rank::operator< ( const Rank& other ) const { + // comparison operator < if (fRankValue < other.fRankValue) return true; else return false; } -Bool_t TMVA::Rank::operator > ( const Rank& other ) const +Bool_t TMVA::Rank::operator> ( const Rank& other ) const { + // comparison operator > if (fRankValue > other.fRankValue) return true; else return false; } diff --git a/tmva/src/Reader.cxx b/tmva/src/Reader.cxx index a46c8378566..761be48164b 100644 --- a/tmva/src/Reader.cxx +++ b/tmva/src/Reader.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Reader.cxx,v 1.31 2006/11/13 23:43:34 stelzer Exp $ +// @(#)root/tmva $Id: Reader.cxx,v 1.35 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -14,13 +14,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -182,12 +182,14 @@ void TMVA::Reader::Init( void ) //_______________________________________________________________________ void TMVA::Reader::AddVariable( const TString& expression, float* datalink) { + // Add a float variable or expression to the reader Data().AddVariable(expression, 'F', (void*)datalink); } //_______________________________________________________________________ void TMVA::Reader::AddVariable( const TString& expression, int* datalink) { + // Add a integer variable or expression to the reader Data().AddVariable(expression, 'I', (void*)datalink); } @@ -221,53 +223,53 @@ TMVA::IMethod* TMVA::Reader::BookMVA( TString methodName, TString weightfile ) //_______________________________________________________________________ -TMVA::IMethod* TMVA::Reader::BookMVA( TMVA::Types::MVA methodType, TString weightfile ) +TMVA::IMethod* TMVA::Reader::BookMVA( TMVA::Types::EMVA methodType, TString weightfile ) { - IMethod* method = 0; // books MVA method from weightfile + IMethod* method = 0; switch (methodType) { - case (TMVA::Types::Cuts): + case (TMVA::Types::kCuts): method = new TMVA::MethodCuts( Data(), weightfile ); break; - case (TMVA::Types::Likelihood): + case (TMVA::Types::kLikelihood): method = new TMVA::MethodLikelihood( Data(), weightfile ); break; - case (TMVA::Types::PDERS): + case (TMVA::Types::kPDERS): method = new TMVA::MethodPDERS( Data(), weightfile ); break; - case (TMVA::Types::HMatrix): + case (TMVA::Types::kHMatrix): method = new TMVA::MethodHMatrix( Data(), weightfile ); break; - case (TMVA::Types::Fisher): + case (TMVA::Types::kFisher): method = new TMVA::MethodFisher( Data(), weightfile ); break; - case (TMVA::Types::CFMlpANN): + case (TMVA::Types::kCFMlpANN): method = new TMVA::MethodCFMlpANN( Data(), weightfile ); break; - case (TMVA::Types::TMlpANN): + case (TMVA::Types::kTMlpANN): method = new TMVA::MethodTMlpANN( Data(), weightfile ); break; - case (TMVA::Types::BDT): + case (TMVA::Types::kBDT): method = new TMVA::MethodBDT( Data(), weightfile ); break; - case (TMVA::Types::MLP): + case (TMVA::Types::kMLP): method = new TMVA::MethodMLP( Data(), weightfile ); break; - case (TMVA::Types::RuleFit): + case (TMVA::Types::kRuleFit): method = new TMVA::MethodRuleFit( Data(), weightfile ); break; - case (TMVA::Types::BayesClassifier): + case (TMVA::Types::kBayesClassifier): method = new TMVA::MethodBayesClassifier( Data(), weightfile ); break; @@ -287,6 +289,9 @@ TMVA::IMethod* TMVA::Reader::BookMVA( TMVA::Types::MVA methodType, TString weigh //_______________________________________________________________________ Double_t TMVA::Reader::EvaluateMVA( const std::vector<Float_t>& inputVec, TString methodName, Double_t aux ) { + // Evaluate a vector<float> of input data for a given method + // The parameter aux is obligatory for the cuts method where it represents the efficiency cutoff + for (UInt_t ivar=0; ivar<inputVec.size(); ivar++) Data().Event().SetVal( ivar, inputVec[ivar] ); return EvaluateMVA( methodName, aux ); @@ -295,6 +300,9 @@ Double_t TMVA::Reader::EvaluateMVA( const std::vector<Float_t>& inputVec, TStrin //_______________________________________________________________________ Double_t TMVA::Reader::EvaluateMVA( const std::vector<Double_t>& inputVec, TString methodName, Double_t aux ) { + // Evaluate a vector<double> of input data for a given method + // The 
parameter aux is obligatory for the cuts method where it represents the efficiency cutoff + for (UInt_t ivar=0; ivar<inputVec.size(); ivar++) Data().Event().SetVal( ivar, (Float_t)inputVec[ivar] ); return EvaluateMVA( methodName, aux ); @@ -303,9 +311,9 @@ Double_t TMVA::Reader::EvaluateMVA( const std::vector<Double_t>& inputVec, TStri //_______________________________________________________________________ Double_t TMVA::Reader::EvaluateMVA( TString methodName, Double_t aux ) { + // evaluates MVA for given set of input variables IMethod* method = 0; - // evaluates MVA for given set of input variables std::map<TString, IMethod*>::iterator it = fMethodMap.find( methodName ); if (it == fMethodMap.end()) { for (it = fMethodMap.begin(); it!=fMethodMap.end(); it++) fLogger << "M" << it->first << Endl; @@ -327,11 +335,11 @@ Double_t TMVA::Reader::EvaluateMVA( IMethod* method, Double_t aux ) // NOTE: in likelihood the preprocessing transformations are inserted by hand in GetMvaValue() // (to distinguish signal and background transformations), and hence should not be applied here - if (method->GetMethodType() != Types::Likelihood) + if (method->GetMethodType() != Types::kLikelihood) Data().ApplyTransformation( method->GetPreprocessingMethod(), kTRUE ); // the aux value is only needed for MethodCuts: it sets the required signal efficiency - if (method->GetMethodType() == TMVA::Types::Cuts) + if (method->GetMethodType() == TMVA::Types::kCuts) ((TMVA::MethodCuts*)method)->SetTestSignalEfficiency( aux ); Double_t mvaVal = method->GetMvaValue(); diff --git a/tmva/src/RootFinder.cxx b/tmva/src/RootFinder.cxx index 0d3d9705b50..e2af19eb707 100644 --- a/tmva/src/RootFinder.cxx +++ b/tmva/src/RootFinder.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: RootFinder.cxx,v 1.11 2006/10/15 22:34:22 andreas.hoecker Exp $ +// @(#)root/tmva $Id: RootFinder.cxx,v 1.12 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/Rule.cxx b/tmva/src/Rule.cxx index 1188cfbea33..abdedb36149 100644 --- a/tmva/src/Rule.cxx +++ b/tmva/src/Rule.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Rule.cxx,v 1.30 2006/10/29 23:40:57 helgevoss Exp $ +// @(#)root/tmva $Id: Rule.cxx,v 1.32 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss /********************************************************************************** @@ -20,7 +20,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * Iowa State U. 
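For context, the Reader methods touched above (AddVariable, BookMVA, EvaluateMVA) are driven from an application macro such as TMVApplication.C. A hypothetical usage sketch (variable and weight-file names are examples only, and the exact Reader constructor signature may differ from what is shown):

   TMVA::Reader* reader = new TMVA::Reader();
   Float_t var1 = 0, var2 = 0;
   reader->AddVariable( "var1", &var1 );
   reader->AddVariable( "var2", &var2 );
   reader->BookMVA( "MyMethod", "weights/TMVAnalysis_Fisher.weights.txt" );
   // ... fill var1 and var2 for the current event, then:
   Double_t mva = reader->EvaluateMVA( "MyMethod", 0 );  // 2nd argument only matters for Cuts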
* - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -56,7 +56,14 @@ TMVA::Rule::Rule( RuleEnsemble *re, const std::vector< const Node * >& nodes, const std::vector< Int_t > & cutdirs ) - : fLogger( "Rule" ) + : fNorm ( 1.0 ) + , fSupport ( 0.0 ) + , fSigma ( 0.0 ) + , fCoefficient ( 0.0 ) + , fImportance ( 0.0 ) + , fImportanceRef ( 1.0 ) + , fRuleEnsemble ( re ) + , fLogger( "Rule" ) { // the main constructor for a Rule @@ -65,15 +72,8 @@ TMVA::Rule::Rule( RuleEnsemble *re, // nodes - a vector of TMVA::Node; from these all possible rules will be created // // - fRuleEnsemble = re; SetNodes( nodes ); SetCutDirs( cutdirs ); - fNorm = 1.0; - fCoefficient = 0.0; - fSupport = 0.0; - fSigma = 0.0; - fImportance = 0.0; - fImportanceRef = 1.0; } //_______________________________________________________________________ @@ -274,18 +274,18 @@ Double_t TMVA::Rule::RuleDist( const Rule& other, Bool_t useCutValue ) const //_______________________________________________________________________ void TMVA::Rule::GetEffectiveRule( std::vector<Int_t>& nodeind ) const -// -// Returns a vector of node indecis which correspond to the effective rule. -// E.g, the rule: -// v1<0.1 -// v1<0.05 -// v4>0.12 -// -// is effectively the same as: -// v1<0.05 -// v4>0.12 -// { + // + // Returns a vector of node indecis which correspond to the effective rule. + // E.g, the rule: + // v1<0.1 + // v1<0.05 + // v4>0.12 + // + // is effectively the same as: + // v1<0.05 + // v4>0.12 + // nodeind.clear(); UInt_t nnodes = fNodes.size(); if (nnodes==2) { // just one cut, return all nodes @@ -370,18 +370,22 @@ Bool_t TMVA::Rule::IsSimpleRule() const //_______________________________________________________________________ Bool_t TMVA::Rule::operator==( const TMVA::Rule& other ) const { + // comparison operator == + return this->Equal( other, kTRUE, 1e-3 ); } //_______________________________________________________________________ Bool_t TMVA::Rule::operator<( const TMVA::Rule& other ) const { + // comparison operator < return (fImportance < other.GetImportance()); } //_______________________________________________________________________ ostream& TMVA::operator<< ( ostream& os, const TMVA::Rule& rule ) { + // ostream operator rule.Print( os ); return os; } @@ -389,28 +393,34 @@ ostream& TMVA::operator<< ( ostream& os, const TMVA::Rule& rule ) //_______________________________________________________________________ const TString & TMVA::Rule::GetVarName( Int_t i ) const { + // returns the name of a rule + return fRuleEnsemble->GetMethodRuleFit()->GetInputExp(i); } //_______________________________________________________________________ void TMVA::Rule::Copy( const Rule& other ) { - SetRuleEnsemble( other.GetRuleEnsemble() ); - SetNodes( other.GetNodes() ); - fSSB = other.GetSSB(); - fSSBNeve = other.GetSSBNeve(); - SetCutDirs( other.GetCutDirs() ); - SetCoefficient(other.GetCoefficient()); - SetSupport( other.GetSupport() ); - SetSigma( other.GetSigma() ); - SetNorm( other.GetNorm() ); - CalcImportance(); - SetImportanceRef( other.GetImportanceRef() ); + // copy function + if(this != &other) { + SetRuleEnsemble( other.GetRuleEnsemble() ); + SetNodes( other.GetNodes() ); + fSSB = other.GetSSB(); + fSSBNeve = other.GetSSBNeve(); + SetCutDirs( other.GetCutDirs() ); + SetCoefficient(other.GetCoefficient()); + SetSupport( other.GetSupport() ); + SetSigma( 
other.GetSigma() ); + SetNorm( other.GetNorm() ); + CalcImportance(); + SetImportanceRef( other.GetImportanceRef() ); + } } //_______________________________________________________________________ void TMVA::Rule::Print( ostream& os ) const { + // print function Int_t ind; Int_t sel,ntype,nnodes; Double_t data, ssbval; @@ -449,6 +459,7 @@ void TMVA::Rule::Print( ostream& os ) const //_______________________________________________________________________ void TMVA::Rule::PrintRaw( ostream& os ) const { + // extensive print function const TMVA::DecisionTreeNode *node; std::vector<Int_t> nodes; GetEffectiveRule( nodes ); @@ -484,6 +495,8 @@ void TMVA::Rule::PrintRaw( ostream& os ) const //_______________________________________________________________________ void TMVA::Rule::ReadRaw( istream& istr ) { + // read function (format is the same as written by PrintRaw) + TString dummy; TMVA::DecisionTreeNode *node; std::vector<Int_t> nodes; diff --git a/tmva/src/RuleEnsemble.cxx b/tmva/src/RuleEnsemble.cxx index 20647acc17f..7b5bea09e6c 100644 --- a/tmva/src/RuleEnsemble.cxx +++ b/tmva/src/RuleEnsemble.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: RuleEnsemble.cxx,v 1.30 2006/11/14 15:39:36 helgevoss Exp $ +// @(#)root/tmva $Id: RuleEnsemble.cxx,v 1.33 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss /********************************************************************************** @@ -19,7 +19,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * Iowa State U. * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -38,12 +38,12 @@ //_______________________________________________________________________ TMVA::RuleEnsemble::RuleEnsemble( RuleFit *rf ) - : fLogger( "RuleEnsemble" ) + : fLearningModel ( kFull ) + , fAverageRuleSigma ( 0.4 ) // default value - used if only linear model is chosen + , fMaxRuleDist ( 1e-3 ) // closest allowed 'distance' between two rules + , fLogger( "RuleEnsemble" ) { // constructor - fAverageRuleSigma = 0.4; // default value - used if only linear model is chosen - fLearningModel = kFull; - fMaxRuleDist = 1e-3; // closest allowed 'distance' between two rules Initialize( rf ); } @@ -51,7 +51,7 @@ TMVA::RuleEnsemble::RuleEnsemble( RuleFit *rf ) TMVA::RuleEnsemble::RuleEnsemble( const RuleEnsemble& other ) : fLogger( "RuleEnsemble" ) { - // constructor + // copy constructor Copy( other ); } @@ -65,6 +65,7 @@ TMVA::RuleEnsemble::RuleEnsemble() //_______________________________________________________________________ TMVA::RuleEnsemble::~RuleEnsemble() { + // destructor for ( std::vector< TMVA::Rule *>::iterator itrRule = fRules.begin(); itrRule != fRules.end(); itrRule++ ) { delete *itrRule; } @@ -74,6 +75,8 @@ TMVA::RuleEnsemble::~RuleEnsemble() //_______________________________________________________________________ void TMVA::RuleEnsemble::Initialize( RuleFit *rf ) { + // Initializes all member variables with default values + fAverageRuleSigma = 0.4; // default value - used if only linear model is chosen fRuleFit = rf; UInt_t nvars = GetMethodRuleFit()->GetNvar(); @@ -98,6 +101,7 @@ const TMVA::MethodRuleFit* TMVA::RuleEnsemble::GetMethodRuleFit() const //_______________________________________________________________________ void TMVA::RuleEnsemble::MakeModel() { + // create model if (DoRules()) MakeRules( fRuleFit->GetForest() ); if (DoLinear()) 
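The Rule and RuleEnsemble constructors above show the second recurring convention fix: data members are initialised in the constructor initializer list, in declaration order, rather than assigned inside the body. A generic sketch, borrowing member names from Rule for illustration:

   class RuleSketch {
   public:
      RuleSketch()
         : fNorm        ( 1.0 )   // was: fNorm = 1.0; etc. in the constructor body
         , fSupport     ( 0.0 )
         , fCoefficient ( 0.0 )
      {}
   private:
      double fNorm;
      double fSupport;
      double fCoefficient;
   };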
@@ -125,6 +129,8 @@ Double_t TMVA::RuleEnsemble::CoefficientRadius() //_______________________________________________________________________ void TMVA::RuleEnsemble::ResetCoefficients() { + // reset all rule coefficients + fOffset = 0.0; UInt_t nrules = fRules.size(); for (UInt_t i=0; i<nrules; i++) { @@ -135,6 +141,8 @@ void TMVA::RuleEnsemble::ResetCoefficients() //_______________________________________________________________________ void TMVA::RuleEnsemble::SetCoefficients( const std::vector< Double_t > & v ) { + // set all rule coefficients + UInt_t nrules = fRules.size(); if (v.size()!=nrules) { fLogger << kFATAL << "<SetCoefficients> - BUG TRAP - input vector worng size! It is = " << v.size() @@ -148,6 +156,8 @@ void TMVA::RuleEnsemble::SetCoefficients( const std::vector< Double_t > & v ) //_______________________________________________________________________ void TMVA::RuleEnsemble::GetCoefficients( std::vector< Double_t > & v ) { + // Retrieve all rule coefficients + UInt_t nrules = fRules.size(); v.resize(nrules); if (nrules==0) return; @@ -160,41 +170,50 @@ void TMVA::RuleEnsemble::GetCoefficients( std::vector< Double_t > & v ) //_______________________________________________________________________ const std::vector<const TMVA::Event *> *TMVA::RuleEnsemble::GetTrainingEvents() const { + // get list of training events from the rule fitter + return &(fRuleFit->GetTrainingEvents()); } //_______________________________________________________________________ const std::vector< Int_t > *TMVA::RuleEnsemble::GetSubsampleEvents() const { + // get list of events for the subsamples from the rule fitter return &(fRuleFit->GetSubsampleEvents()); } //_______________________________________________________________________ void TMVA::RuleEnsemble::GetSubsampleEvents(UInt_t sub, UInt_t & ibeg, UInt_t & iend) const { + // get list of events for the subsample sub from the rule fitter fRuleFit->GetSubsampleEvents(sub,ibeg,iend); } //_______________________________________________________________________ const UInt_t TMVA::RuleEnsemble::GetNSubsamples() const { + // get the number of subsamples from the rule fitter return fRuleFit->GetNSubsamples(); } //_______________________________________________________________________ const TMVA::Event * TMVA::RuleEnsemble::GetTrainingEvent(UInt_t i) const { + // get the training event from the rule fitter return fRuleFit->GetTrainingEvent(i); } //_______________________________________________________________________ const TMVA::Event * TMVA::RuleEnsemble::GetTrainingEvent(UInt_t i, UInt_t isub) const { + // get one training event for one subsample from the rule fitter return fRuleFit->GetTrainingEvent(i,isub); } //_______________________________________________________________________ void TMVA::RuleEnsemble::SetRulesNCuts() { + // set the number of nodes to the cut array + std::vector<Int_t> nodes; fRulesNCuts.clear(); for (UInt_t i=0; i<fRules.size(); i++) { @@ -206,6 +225,8 @@ void TMVA::RuleEnsemble::SetRulesNCuts() //_______________________________________________________________________ void TMVA::RuleEnsemble::RemoveSimpleRules() { + // remove all simple rules + fLogger << kINFO << "removing simple rules" << Endl; UInt_t nrulesIn = fRules.size(); std::vector<bool> removeMe( nrulesIn,false ); @@ -234,6 +255,8 @@ void TMVA::RuleEnsemble::RemoveSimpleRules() //_______________________________________________________________________ void TMVA::RuleEnsemble::RemoveSimilarRules() { + // remove rules that behave similar + fLogger << kINFO << "removing 
similar rules; distance = " << fMaxRuleDist << Endl; UInt_t nrulesIn = fRules.size(); @@ -287,6 +310,8 @@ void TMVA::RuleEnsemble::RemoveSimilarRules() //_______________________________________________________________________ void TMVA::RuleEnsemble::CleanupRules() { + // cleanup rules + UInt_t nrules = fRules.size(); if (nrules==0) return; fLogger << kINFO << "removing rules with relative importance < " << fImportanceCut << Endl; @@ -312,6 +337,8 @@ void TMVA::RuleEnsemble::CleanupRules() //_______________________________________________________________________ void TMVA::RuleEnsemble::CleanupLinear() { + // cleanup linear model + UInt_t nlin = fLinNorm.size(); if (nlin==0) return; fLogger << kINFO << "removing linear terms with relative importance < " << fImportanceCut << Endl; @@ -326,6 +353,8 @@ void TMVA::RuleEnsemble::CleanupLinear() //_______________________________________________________________________ void TMVA::RuleEnsemble::CalcRuleSupport() { + // calculate the support for all rules + Double_t seve, s,t,stot,ttot; Double_t ssig, sbkg; Int_t indrule=0; @@ -371,6 +400,8 @@ void TMVA::RuleEnsemble::CalcRuleSupport() //_______________________________________________________________________ void TMVA::RuleEnsemble::CalcImportance() { + // calculate the importance of each rule + Double_t maxRuleImp = CalcRuleImportance(); Double_t maxLinImp = CalcLinImportance(); Double_t maxImp = (maxRuleImp>maxLinImp ? maxRuleImp : maxLinImp); @@ -384,6 +415,8 @@ void TMVA::RuleEnsemble::CalcImportance() //_______________________________________________________________________ Double_t TMVA::RuleEnsemble::CalcRuleImportance() { + // calculate importance of each rule + Double_t maxImp=-1.0; Double_t imp; Int_t nrules = fRules.size(); @@ -402,6 +435,8 @@ Double_t TMVA::RuleEnsemble::CalcRuleImportance() //_______________________________________________________________________ Double_t TMVA::RuleEnsemble::CalcLinImportance() { + // calculate the linear importance for each rule + Double_t maxImp=-1.0; UInt_t nvars = fLinCoefficients.size(); fLinImportance.resize(nvars,0.0); @@ -661,7 +696,8 @@ Double_t TMVA::RuleEnsemble::FStar() const //_____________________________________________________________________ Double_t TMVA::RuleEnsemble::EvalEvent() const { - // + // evaluate current event + Int_t nrules = fRules.size(); Double_t rval=fOffset; Double_t linear=0; @@ -686,6 +722,7 @@ Double_t TMVA::RuleEnsemble::EvalEvent() const //_____________________________________________________________________ Double_t TMVA::RuleEnsemble::EvalEvent(const TMVA::Event & e) { + // evaluate event e SetEvent(e); UpdateEventVal(); return EvalEvent(); @@ -694,6 +731,8 @@ Double_t TMVA::RuleEnsemble::EvalEvent(const TMVA::Event & e) //_______________________________________________________________________ Double_t TMVA::RuleEnsemble::EvalLinEventRaw( UInt_t vind, const TMVA::Event & e) { + // evaluate the event linearly (not normalized) + Double_t val = e.GetVal(vind); Double_t rval = TMath::Min( fLinDP[vind], TMath::Max( fLinDM[vind], val ) ); return rval; @@ -702,6 +741,7 @@ Double_t TMVA::RuleEnsemble::EvalLinEventRaw( UInt_t vind, const TMVA::Event & e //_______________________________________________________________________ Double_t TMVA::RuleEnsemble::EvalLinEvent( UInt_t vind, Bool_t norm ) const { + // evaluate the event linearly normalized Double_t rval=0; rval = fEventLinearVal[vind]; if (norm) rval*=fLinNorm[vind]; @@ -711,6 +751,8 @@ Double_t TMVA::RuleEnsemble::EvalLinEvent( UInt_t vind, Bool_t norm ) const 
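EvalLinEventRaw() above clamps each input value to the interval [fLinDM, fLinDP] before it enters the linear part of the model. A standalone equivalent, with lo and hi standing in for fLinDM and fLinDP (requires <algorithm>):

   // clamp val into [lo, hi]; mirrors TMath::Min( fLinDP, TMath::Max( fLinDM, val ) )
   double EvalLinRawSketch( double val, double lo, double hi )
   {
      return std::min( hi, std::max( lo, val ) );
   }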
//_______________________________________________________________________ Double_t TMVA::RuleEnsemble::EvalLinEvent() const { + // evaluate event linearly + Double_t rval=0; for (UInt_t v=0; v<fLinTermOK.size(); v++) { if (fLinTermOK[v]) @@ -722,6 +764,8 @@ Double_t TMVA::RuleEnsemble::EvalLinEvent() const //_______________________________________________________________________ Double_t TMVA::RuleEnsemble::EvalLinEvent( const TMVA::Event& e ) { + // evaluate event linearly + SetEvent(e); UpdateEventVal(); return EvalLinEvent(); @@ -730,6 +774,8 @@ Double_t TMVA::RuleEnsemble::EvalLinEvent( const TMVA::Event& e ) //_______________________________________________________________________ void TMVA::RuleEnsemble::RuleStatistics() { + // calculate various statistics for this rule + const UInt_t nrules = fRules.size(); const std::vector<const TMVA::Event *> *events = GetTrainingEvents(); const TMVA::Event *eveData; @@ -802,6 +848,8 @@ void TMVA::RuleEnsemble::RuleStatistics() //_______________________________________________________________________ void TMVA::RuleEnsemble::Print( ostream& os ) const { + // print function + fLogger << kINFO << "===============================" << Endl; fLogger << kINFO << " RuleEnsemble " << Endl; fLogger << kINFO << "===============================" << Endl; @@ -961,18 +1009,22 @@ void TMVA::RuleEnsemble::ReadRaw( istream & istr ) //_______________________________________________________________________ void TMVA::RuleEnsemble::Copy( const RuleEnsemble & other ) { - fRuleFit = other.GetRuleFit(); - fMaxRuleDist = other.GetMaxRuleDist(); - fOffset = other.GetOffset(); - fRules = other.GetRulesConst(); - fImportanceCut = other.GetImportanceCut(); - fVarImportance = other.GetVarImportance(); - fLearningModel = other.GetLearningModel(); + // copy function + if(this != &other) { + fRuleFit = other.GetRuleFit(); + fMaxRuleDist = other.GetMaxRuleDist(); + fOffset = other.GetOffset(); + fRules = other.GetRulesConst(); + fImportanceCut = other.GetImportanceCut(); + fVarImportance = other.GetVarImportance(); + fLearningModel = other.GetLearningModel(); + } } //_______________________________________________________________________ Int_t TMVA::RuleEnsemble::CalcNRules( const TMVA::DecisionTree *dtree ) { + // calculate the number of rules if (dtree==0) return 0; TMVA::Node *node = dtree->GetRoot(); Int_t nendnodes = 0; @@ -983,6 +1035,8 @@ Int_t TMVA::RuleEnsemble::CalcNRules( const TMVA::DecisionTree *dtree ) //_______________________________________________________________________ void TMVA::RuleEnsemble::FindNEndNodes( const TMVA::Node *node, Int_t & nendnodes ) { + // find the number of leaf nodes + if (node==0) return; if (dynamic_cast<const TMVA::DecisionTreeNode*>(node)->GetSelector()<0) { ++nendnodes; @@ -997,6 +1051,7 @@ void TMVA::RuleEnsemble::FindNEndNodes( const TMVA::Node *node, Int_t & nendnode //_______________________________________________________________________ void TMVA::RuleEnsemble::MakeRulesFromTree( const TMVA::DecisionTree *dtree ) { + // create rules from the decsision tree structure TMVA::Node *node = dtree->GetRoot(); AddRule( node ); } @@ -1004,6 +1059,8 @@ void TMVA::RuleEnsemble::MakeRulesFromTree( const TMVA::DecisionTree *dtree ) //_______________________________________________________________________ void TMVA::RuleEnsemble::AddRule( const TMVA::Node *node ) { + // add a new rule to the tree + if (node==0) return; if (node->GetParent()==0) { // it's a root node, don't make a rule AddRule( node->GetRight() ); @@ -1081,6 +1138,7 @@ 
TMVA::Rule *TMVA::RuleEnsemble::MakeTheRule( const TMVA::Node *node ) //_______________________________________________________________________ ostream& TMVA::operator<< ( ostream& os, const TMVA::RuleEnsemble & rules ) { + // ostream operator rules.Print( os ); return os; } diff --git a/tmva/src/RuleFit.cxx b/tmva/src/RuleFit.cxx index eebd656d8c0..0e939007a9e 100644 --- a/tmva/src/RuleFit.cxx +++ b/tmva/src/RuleFit.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: RuleFit.cxx,v 1.26 2006/10/17 21:22:30 andreas.hoecker Exp $ +// @(#)root/tmva $Id: RuleFit.cxx,v 1.28 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss /********************************************************************************** @@ -21,7 +21,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * Iowa State U. * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -44,17 +44,22 @@ TMVA::RuleFit::RuleFit( const TMVA::MethodRuleFit *rfbase, Double_t samplefrac ) : fLogger( "RuleFit" ) { + // constructor Initialise( rfbase, forest, trainingEvents, samplefrac ); } //_______________________________________________________________________ TMVA::RuleFit::RuleFit() : fLogger( "RuleFit" ) -{} +{ + // default constructor +} //_______________________________________________________________________ TMVA::RuleFit::~RuleFit() -{} +{ + // destructor +} //_______________________________________________________________________ void TMVA::RuleFit::Initialise( const TMVA::MethodRuleFit *rfbase, @@ -62,6 +67,7 @@ void TMVA::RuleFit::Initialise( const TMVA::MethodRuleFit *rfbase, const std::vector< TMVA::Event *> & events, Double_t sampfrac ) { + // initialize the parameters of the RuleFit method fMethodRuleFit = rfbase; std::vector< TMVA::DecisionTree *>::const_iterator itrDtree=forest.begin(); for (; itrDtree!=forest.end(); ++itrDtree ) fForest.push_back( *itrDtree ); @@ -84,19 +90,23 @@ void TMVA::RuleFit::Initialise( const TMVA::MethodRuleFit *rfbase, //_______________________________________________________________________ void TMVA::RuleFit::Copy( const TMVA::RuleFit& other ) { - fMethodRuleFit = other.GetMethodRuleFit(); - fTrainingEvents = other.GetTrainingEvents(); - fSubsampleEvents = other.GetSubsampleEvents(); + // copy method + if(this != &other) { + fMethodRuleFit = other.GetMethodRuleFit(); + fTrainingEvents = other.GetTrainingEvents(); + fSubsampleEvents = other.GetSubsampleEvents(); - fForest = other.GetForest(); - fRuleEnsemble = other.GetRuleEnsemble(); + fForest = other.GetForest(); + fRuleEnsemble = other.GetRuleEnsemble(); + } } //_______________________________________________________________________ void TMVA::RuleFit::ForestStatistics() -// summary of statistics of all trees -// * end-nodes: average and spread { + // summary of statistics of all trees + // * end-nodes: average and spread + UInt_t ntrees = fForest.size(); Double_t nt = Double_t(ntrees); const TMVA::DecisionTree *tree; @@ -128,6 +138,8 @@ void TMVA::RuleFit::FitCoefficients() //_______________________________________________________________________ void TMVA::RuleFit::CalcImportance() { + // calculates the importance of each rule + fLogger << kINFO << "calculating importance" << Endl; fRuleEnsemble.CalcImportance(); fRuleEnsemble.CleanupRules(); @@ -140,12 +152,16 @@ void TMVA::RuleFit::CalcImportance() 
//_______________________________________________________________________ Double_t TMVA::RuleFit::EvalEvent( const TMVA::Event& e ) { + // evaluate single event + return fRuleEnsemble.EvalEvent( e ); } //_______________________________________________________________________ void TMVA::RuleFit::SetTrainingEvents( const std::vector<TMVA::Event *>& el, Double_t sampfrac ) { + // set the training events randomly + UInt_t neve = el.size(); if (neve==0) fLogger << kWARNING << "an empty sample of training events was given" << Endl; @@ -174,6 +190,8 @@ void TMVA::RuleFit::SetTrainingEvents( const std::vector<TMVA::Event *>& el, Dou //_______________________________________________________________________ void TMVA::RuleFit::GetSubsampleEvents(Int_t sub, UInt_t& ibeg, UInt_t& iend) const { + // get the events for subsample sub + Int_t nsub = GetNSubsamples(); if (nsub==0) { fLogger << kFATAL << "<GetSubsampleEvents> - wrong size, not properly initialised! BUG!!!" << Endl; diff --git a/tmva/src/RuleFitParams.cxx b/tmva/src/RuleFitParams.cxx index 5c4c8fa46e7..9253afa4bd3 100644 --- a/tmva/src/RuleFitParams.cxx +++ b/tmva/src/RuleFitParams.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: RuleFitParams.cxx,v 1.29 2006/11/14 15:39:36 helgevoss Exp $ +// @(#)root/tmva $Id: RuleFitParams.cxx,v 1.32 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Fredrik Tegenfeldt, Helge Voss /********************************************************************************** @@ -17,7 +17,7 @@ * Copyright (c) 2005: * * CERN, Switzerland, * * Iowa State U. * - * MPI-KP Heidelberg, Germany * + * MPI-K Heidelberg, Germany * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted according to the terms listed in LICENSE * @@ -39,29 +39,29 @@ //_______________________________________________________________________ TMVA::RuleFitParams::RuleFitParams() - : fLogger( "RuleFitParams" ) + : fRuleFit ( 0 ) + , fRuleEnsemble ( 0 ) + , fPathIdx1 ( 0 ) + , fPathIdx2 ( 0 ) + , fPerfIdx1 ( 0 ) + , fPerfIdx2 ( 0 ) + , fGDTau ( 0.0 ) + , fGDPathStep ( 0.01 ) + , fGDNPathSteps ( 100 ) + , fGDErrNsigma ( 1.0 ) + , fGDNtuple ( 0 ) + , fNTOffset ( 0 ) + , fNTCoeff ( 0 ) + , fNTLinCoeff ( 0 ) + , fLogger( "RuleFitParams" ) { - fRuleFit = 0; - fRuleEnsemble = 0; - fGDTau = 0.0; - fGDPathStep = 0.01; - fGDNPathSteps = 100; - fGDNtuple = 0; - fGDErrNsigma = 1.0; - fNTLinCoeff = 0; - fNTCoeff = 0; - fNTOffset = 0; - - fPathIdx1 = 0; - fPathIdx2 = 0; - fPerfIdx1 = 0; - fPerfIdx2 = 0; - + // constructor Init(); } //_______________________________________________________________________ TMVA::RuleFitParams::~RuleFitParams() { + // destructor if (fNTCoeff) { delete fNTCoeff; fNTCoeff = 0; } if (fNTLinCoeff) { delete fNTLinCoeff;fNTLinCoeff = 0; } } @@ -69,7 +69,7 @@ TMVA::RuleFitParams::~RuleFitParams() //_______________________________________________________________________ void TMVA::RuleFitParams::Init() { - // + // Initializes all parameters using the RuleEnsemble and the training tree if (fRuleFit==0) return; fRuleEnsemble = fRuleFit->GetRuleEnsemblePtr(); UInt_t nrules = fRuleEnsemble->GetNRules(); @@ -102,6 +102,8 @@ void TMVA::RuleFitParams::Init() //_______________________________________________________________________ void TMVA::RuleFitParams::InitNtuple() { + // initializes the ntuple + const UInt_t nrules = fRuleEnsemble->GetNRules(); const UInt_t nlin = fRuleEnsemble->GetLinNorm().size(); // @@ -132,24 +134,28 @@ void TMVA::RuleFitParams::InitNtuple() 
//_______________________________________________________________________ const std::vector< Int_t > *TMVA::RuleFitParams::GetSubsampleEvents() const { + // accessor to the subsamples return &(fRuleFit->GetSubsampleEvents()); } //_______________________________________________________________________ void TMVA::RuleFitParams::GetSubsampleEvents(UInt_t sub, UInt_t & ibeg, UInt_t & iend) const { + // calls the Subsample Events fRuleFit->GetSubsampleEvents(sub,ibeg,iend); } //_______________________________________________________________________ const UInt_t TMVA::RuleFitParams::GetNSubsamples() const { + // get the number of subsamples return fRuleFit->GetNSubsamples(); } //_______________________________________________________________________ const TMVA::Event *TMVA::RuleFitParams::GetTrainingEvent(UInt_t i, UInt_t isub) const { + // accesses a training event return fRuleFit->GetTrainingEvent(i,isub); } @@ -158,8 +164,8 @@ Double_t TMVA::RuleFitParams::LossFunction( const TMVA::Event& e ) const { // Implementation of squared-error ramp loss function (eq 39,40 in ref 1) // This is used for binary Classifications where y = {+1,-1} for (sig,bkg) - Double_t H = max( -1.0, min(1.0,fRuleEnsemble->EvalEvent( e )) ); - Double_t diff = (e.IsSignal()?1:-1) - H; + Double_t h = max( -1.0, min(1.0,fRuleEnsemble->EvalEvent( e )) ); + Double_t diff = (e.IsSignal()?1:-1) - h; // return diff*diff; } @@ -167,6 +173,7 @@ Double_t TMVA::RuleFitParams::LossFunction( const TMVA::Event& e ) const //_______________________________________________________________________ Double_t TMVA::RuleFitParams::Risk(UInt_t ibeg, UInt_t iend) const { + // risk asessment UInt_t neve = iend-ibeg+1; if (neve<1) { fLogger << kWARNING << "makeGradientVector() - invalid start/end indices!" << Endl; @@ -452,6 +459,8 @@ void TMVA::RuleFitParams::MakeGDPath() //_______________________________________________________________________ void TMVA::RuleFitParams::FillCoefficients() { + // helper function to store the rule coefficients in local arrays + const UInt_t nrules = fRuleEnsemble->GetNRules(); const UInt_t nlin = fRuleEnsemble->GetLinNorm().size(); // @@ -468,7 +477,6 @@ void TMVA::RuleFitParams::FillCoefficients() //_______________________________________________________________________ void TMVA::RuleFitParams::CalcFStar(UInt_t ibeg, UInt_t iend) { - // // Estimates F* (optimum scoring function) for all events for the given sets. // The result is used in ErrorRateReg(). // @@ -828,6 +836,8 @@ void TMVA::RuleFitParams::UpdateCoefficients() //_______________________________________________________________________ Double_t TMVA::RuleFitParams::CalcAverageResponse(UInt_t ibeg, UInt_t iend) { + // calulate the average response + UInt_t neve = iend-ibeg+1; if (neve<1) { fLogger << kFATAL << "<CalcAverageResponse> invalid start/end indices!" 
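LossFunction() above implements the squared-error ramp loss quoted from the reference (eq. 39-40): the ensemble score is first clipped to [-1, +1] and then compared with the target y = +1 for signal and -1 for background. A standalone sketch (requires <algorithm>):

   // L(y, F) = (y - h)^2 with h = max( -1, min( 1, F ) )
   double RampLossSketch( bool isSignal, double score )
   {
      const double h    = std::max( -1.0, std::min( 1.0, score ) );
      const double diff = (isSignal ? 1.0 : -1.0) - h;
      return diff*diff;
   }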
<< Endl; diff --git a/tmva/src/SdivSqrtSplusB.cxx b/tmva/src/SdivSqrtSplusB.cxx index 315558b2942..7b609e24b02 100644 --- a/tmva/src/SdivSqrtSplusB.cxx +++ b/tmva/src/SdivSqrtSplusB.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: SdivSqrtSplusB.cxx,v 1.8 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: SdivSqrtSplusB.cxx,v 1.9 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -14,7 +14,7 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * diff --git a/tmva/src/SeparationBase.cxx b/tmva/src/SeparationBase.cxx index 6fc317063b1..65064f90e2b 100644 --- a/tmva/src/SeparationBase.cxx +++ b/tmva/src/SeparationBase.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: SeparationBase.cxx,v 1.11 2006/11/06 00:10:17 helgevoss Exp $ +// @(#)root/tmva $Id: SeparationBase.cxx,v 1.12 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -28,7 +28,7 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * diff --git a/tmva/src/SimulatedAnnealingBase.cxx b/tmva/src/SimulatedAnnealingBase.cxx index afa16215d50..69fa327cb40 100644 --- a/tmva/src/SimulatedAnnealingBase.cxx +++ b/tmva/src/SimulatedAnnealingBase.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: SimulatedAnnealingBase.cxx,v 1.8 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: SimulatedAnnealingBase.cxx,v 1.10 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -39,23 +39,24 @@ ClassImp(TMVA::SimulatedAnnealingBase) ; TMVA::SimulatedAnnealingBase::SimulatedAnnealingBase( std::vector<LowHigh_t*>& ranges ) - : fRanges( ranges ) + : fRandom ( new TRandom() ) + , fRanges ( ranges ) + , fMaxCalls ( 500000 ) + , fTemperatureGradient ( 0.3 ) + , fUseAdaptiveTemperature( kFALSE ) + , fInitialTemperature ( 1000 ) + , fMinTemperature ( 0 ) + , fEps ( 1e-04 ) + , fNFunLoops ( 25 ) + , fNEps ( 4 ) // needs to be at leas 2 ! 
{ - fRandom = new TRandom(); - - // set default options - fMaxCalls = 500000; - fTemperatureGradient = 0.3; - fUseAdaptiveTemperature = kFALSE; - fInitialTemperature = 1000; - fMinTemperature = 0; - fEps = 1e-04; - fNFunLoops = 25; - fNEps = 4; // needs to be at leas 2 ! + // constructor } TMVA::SimulatedAnnealingBase::~SimulatedAnnealingBase() -{} +{ + // destructor +} Double_t TMVA::SimulatedAnnealingBase::Minimize( std::vector<Double_t>& parameters ) { @@ -242,5 +243,6 @@ Double_t TMVA::SimulatedAnnealingBase::Minimize( std::vector<Double_t>& paramete Double_t TMVA::SimulatedAnnealingBase::GetPerturbationProbability( Double_t E, Double_t Eref, Double_t temperature ) { + // calculates the probability that a perturbation occured return (temperature > 0) ? TMath::Exp( (E - Eref)/temperature ) : 0; } diff --git a/tmva/src/SimulatedAnnealingCuts.cxx b/tmva/src/SimulatedAnnealingCuts.cxx index 623819de9e9..78563913cad 100644 --- a/tmva/src/SimulatedAnnealingCuts.cxx +++ b/tmva/src/SimulatedAnnealingCuts.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: SimulatedAnnealingCuts.cxx,v 1.5 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: SimulatedAnnealingCuts.cxx,v 1.7 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
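GetPerturbationProbability() above returns the Boltzmann-type factor exp((E - Eref)/T) for positive temperature and zero otherwise. The same expression as a standalone function (names are placeholders; requires <cmath>):

   // probability factor used by the simulated-annealing step above
   double PerturbationProbabilitySketch( double e, double eref, double temperature )
   {
      return (temperature > 0) ? std::exp( (e - eref) / temperature ) : 0.0;
   }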
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -41,13 +41,18 @@ ClassImp(TMVA::SimulatedAnnealingCuts) TMVA::SimulatedAnnealingCuts::SimulatedAnnealingCuts( std::vector<LowHigh_t*>& ranges ) : SimulatedAnnealingBase( ranges ) -{} +{ + // constructor +} TMVA::SimulatedAnnealingCuts::~SimulatedAnnealingCuts() -{} +{ + // destructor +} Double_t TMVA::SimulatedAnnealingCuts::MinimizeFunction( const std::vector<Double_t>& parameters ) { // minimize function interface for Simulated Annealing fitter for cut optimisation + return TMVA::MethodCuts::ThisCuts()->ComputeEstimator( parameters ); } diff --git a/tmva/src/TNeuron.cxx b/tmva/src/TNeuron.cxx index ed5fe1d9f36..c2fcf7f19f0 100644 --- a/tmva/src/TNeuron.cxx +++ b/tmva/src/TNeuron.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: TNeuron.cxx,v 1.18 2006/10/15 12:06:33 andreas.hoecker Exp $ +// @(#)root/tmva $Id: TNeuron.cxx,v 1.19 2006/11/16 19:42:44 stelzer Exp $ // Author: Matt Jachowski /********************************************************************************** @@ -326,7 +326,7 @@ void TMVA::TNeuron::PrintActivationEqn() } //______________________________________________________________________________ -void TMVA::TNeuron::PrintMessage( MsgType type, TString message) +void TMVA::TNeuron::PrintMessage( EMsgType type, TString message) { // print message, for debugging fLogger << type << message << Endl; diff --git a/tmva/src/TSpline1.cxx b/tmva/src/TSpline1.cxx index a77c7f92c4f..184ec8bef11 100644 --- a/tmva/src/TSpline1.cxx +++ b/tmva/src/TSpline1.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: TSpline1.cxx,v 1.10 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: TSpline1.cxx,v 1.11 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/TSpline2.cxx b/tmva/src/TSpline2.cxx index a0167424863..f517305cbf2 100644 --- a/tmva/src/TSpline2.cxx +++ b/tmva/src/TSpline2.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: TSpline2.cxx,v 1.10 2006/10/10 17:43:52 andreas.hoecker Exp $ +// @(#)root/tmva $Id: TSpline2.cxx,v 1.11 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/Timer.cxx b/tmva/src/Timer.cxx index 7c25018fd2a..288ca66750e 100644 --- a/tmva/src/Timer.cxx +++ b/tmva/src/Timer.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Timer.cxx,v 1.13 2006/10/15 22:34:22 andreas.hoecker Exp $ +// @(#)root/tmva $Id: Timer.cxx,v 1.14 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * diff --git a/tmva/src/Tools.cxx b/tmva/src/Tools.cxx index 8233e5541d0..ab70ba6dd85 100644 --- a/tmva/src/Tools.cxx +++ b/tmva/src/Tools.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Tools.cxx,v 1.52 2006/11/12 15:45:42 stelzer Exp $ +// @(#)root/tmva $Id: Tools.cxx,v 1.55 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -56,6 +56,8 @@ namespace TMVA { TMVA::MsgLogger& TMVA::Tools::Logger() { + // static access to a common MsgLogger + return Tools_Logger ? 
*Tools_Logger : *(Tools_Logger = new MsgLogger( Tools_NAME_ )); } @@ -452,6 +454,8 @@ int TMVA::Tools::GetIndexMinElement(vector<Double_t> &v) // check if regular expression Bool_t TMVA::Tools::ContainsRegularExpression( const TString& s ) { + // helper function to search for "!%^&()'<>?= " in a string + Bool_t regular = kFALSE; for (Int_t i = 0; i < TMVA::Tools::__regexp__.Length(); i++) if (s.Contains( TMVA::Tools::__regexp__[i] )) { regular = kTRUE; break; } @@ -462,6 +466,9 @@ Bool_t TMVA::Tools::ContainsRegularExpression( const TString& s ) // replace regular expressions TString TMVA::Tools::ReplaceRegularExpressions( const TString& s, TString r ) { + // helper function to remove all occurences "!%^&()'<>?= " from a string + // and replace all ::,*,/,+,- with _M_,_T_,_D_,_P_,_M_ respectively + TString snew = s; for (Int_t i = 0; i < TMVA::Tools::__regexp__.Length(); i++) snew.ReplaceAll( TMVA::Tools::__regexp__[i], r ); @@ -487,15 +494,15 @@ void TMVA::Tools::FormattedOutput( const TMatrixD& M, const std::vector<TString> // get length of each variable, and maximum length UInt_t minL = 7; UInt_t maxL = minL; - std::vector<UInt_t> L; + std::vector<UInt_t> vLengths; for (UInt_t ivar=0; ivar<nvar; ivar++) { - L.push_back(TMath::Max( (UInt_t)V[ivar].Length(), minL )); - maxL = TMath::Max( L.back(), maxL ); + vLengths.push_back(TMath::Max( (UInt_t)V[ivar].Length(), minL )); + maxL = TMath::Max( vLengths.back(), maxL ); } // count column length UInt_t clen = maxL+1; - for (UInt_t icol=0; icol<nvar; icol++) clen += L[icol]+1; + for (UInt_t icol=0; icol<nvar; icol++) clen += vLengths[icol]+1; // bar line for (UInt_t i=0; i<clen; i++) logger << "-"; @@ -503,14 +510,14 @@ void TMVA::Tools::FormattedOutput( const TMatrixD& M, const std::vector<TString> // title bar logger << setw(maxL+1) << " "; - for (UInt_t icol=0; icol<nvar; icol++) logger << setw(L[icol]+1) << V[icol]; + for (UInt_t icol=0; icol<nvar; icol++) logger << setw(vLengths[icol]+1) << V[icol]; logger << Endl; // the numbers for (UInt_t irow=0; irow<nvar; irow++) { logger << setw(maxL) << V[irow] << ":"; for (UInt_t icol=0; icol<nvar; icol++) { - logger << setw(L[icol]+1) << Form( "%+1.3f", M(irow,icol) ); + logger << setw(vLengths[icol]+1) << Form( "%+1.3f", M(irow,icol) ); } logger << Endl; } diff --git a/tmva/src/Types.cxx b/tmva/src/Types.cxx index 6c792b391b3..b4f01f0c9d5 100644 --- a/tmva/src/Types.cxx +++ b/tmva/src/Types.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Types.cxx,v 1.8 2006/10/26 19:55:40 andreas.hoecker Exp $ +// @(#)root/tmva $Id: Types.cxx,v 1.11 2006/11/17 00:21:35 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -33,18 +33,21 @@ TMVA::Types* TMVA::Types::fgTypesPtr = 0; TMVA::Types::Types() : fLogger( "Types" ) { - fStr2type["Variable"] = Types::Variable; - fStr2type["Cuts"] = Types::Cuts; - fStr2type["Likelihood"] = Types::Likelihood; - fStr2type["PDERS"] = Types::PDERS; - fStr2type["HMatrix"] = Types::HMatrix; - fStr2type["Fisher"] = Types::Fisher; - fStr2type["CFMlpANN"] = Types::CFMlpANN; - fStr2type["TMlpANN"] = Types::TMlpANN; - fStr2type["BDT"] = Types::BDT; - fStr2type["RuleFit"] = Types::RuleFit; - fStr2type["SVM"] = Types::SVM; - fStr2type["MLP"] = Types::MLP; - fStr2type["BayesClassifier"] = Types::BayesClassifier; - fStr2type["Committee"] = Types::Committee; + // constructor + // fill map that links method names and enums + + fStr2type["Variable"] = Types::kVariable; + fStr2type["Cuts"] = Types::kCuts; + fStr2type["Likelihood"] = Types::kLikelihood; + fStr2type["PDERS"] = Types::kPDERS; + fStr2type["HMatrix"] = Types::kHMatrix; + fStr2type["Fisher"] = Types::kFisher; + fStr2type["CFMlpANN"] = Types::kCFMlpANN; + fStr2type["TMlpANN"] = Types::kTMlpANN; + fStr2type["BDT"] = Types::kBDT; + fStr2type["RuleFit"] = Types::kRuleFit; + fStr2type["SVM"] = Types::kSVM; + fStr2type["MLP"] = Types::kMLP; + fStr2type["BayesClassifier"] = Types::kBayesClassifier; + fStr2type["Committee"] = Types::kCommittee; } diff --git a/tmva/src/VariableInfo.cxx b/tmva/src/VariableInfo.cxx index 0c227e2cfe4..203696041b4 100644 --- a/tmva/src/VariableInfo.cxx +++ b/tmva/src/VariableInfo.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: VariableInfo.cxx,v 1.12 2006/10/18 01:24:53 armske Exp $ +// @(#)root/tmva $Id: VariableInfo.cxx,v 1.14 2006/11/17 14:59:24 stelzer Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss /********************************************************************************** @@ -13,12 +13,12 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. 
of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -79,7 +79,7 @@ TMVA::VariableInfo& TMVA::VariableInfo::operator=(const TMVA::VariableInfo& rhs) } //_______________________________________________________________________ -void TMVA::VariableInfo::WriteToStream(std::ostream& o, Types::PreprocessingMethod corr) const +void TMVA::VariableInfo::WriteToStream(std::ostream& o, Types::EPreprocessingMethod corr) const { // write VariableInfo to stream UInt_t nc = TMath::Max( 30, TMath::Max( GetExpression().Length()+1, GetInternalVarName().Length()+1 ) ); @@ -91,7 +91,7 @@ void TMVA::VariableInfo::WriteToStream(std::ostream& o, Types::PreprocessingMeth } //_______________________________________________________________________ -void TMVA::VariableInfo::ReadFromStream(std::istream& istr, Types::PreprocessingMethod corr) +void TMVA::VariableInfo::ReadFromStream(std::istream& istr, Types::EPreprocessingMethod corr) { // write VariableInfo to stream TString exp, varname, vartype, minmax, minstr, maxstr; diff --git a/tmva/src/Volume.cxx b/tmva/src/Volume.cxx index 1878c17e93b..7380aca5584 100644 --- a/tmva/src/Volume.cxx +++ b/tmva/src/Volume.cxx @@ -1,4 +1,4 @@ -// @(#)root/tmva $Id: Volume.cxx,v 1.9 2006/10/15 22:34:22 andreas.hoecker Exp $ +// @(#)root/tmva $Id: Volume.cxx,v 1.10 2006/11/16 22:51:59 helgevoss Exp $ // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss /********************************************************************************** @@ -13,13 +13,13 @@ * Authors (alphabetical): * * Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland * * Xavier Prudent <prudent@lapp.in2p3.fr> - LAPP, France * - * Helge Voss <Helge.Voss@cern.ch> - MPI-KP Heidelberg, Germany * + * Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany * * Kai Voss <Kai.Voss@cern.ch> - U. of Victoria, Canada * * * * Copyright (c) 2005: * * CERN, Switzerland, * * U. of Victoria, Canada, * - * MPI-KP Heidelberg, Germany, * + * MPI-K Heidelberg, Germany , * * LAPP, Annecy, France * * * * Redistribution and use in source and binary forms, with or without * @@ -27,7 +27,7 @@ * (http://tmva.sourceforge.net/LICENSE) * * * * File and Version Information: * - * $Id: Volume.cxx,v 1.9 2006/10/15 22:34:22 andreas.hoecker Exp $ + * $Id: Volume.cxx,v 1.10 2006/11/16 22:51:59 helgevoss Exp $ **********************************************************************************/ #include "TMVA/Volume.h" diff --git a/tmva/test/BDT.C b/tmva/test/BDT.C new file mode 100644 index 00000000000..d057f73333b --- /dev/null +++ b/tmva/test/BDT.C @@ -0,0 +1,189 @@ +#include "tmvaglob.C" + +// this macro displays a decision tree read in from the weight file + + +// input: - No. of tree +// - the weight file from which the tree is read +void BDT(Int_t itree=1, TString fname= "weights/MVAnalysis_BDT.weights.txt") { + draw_tree(itree,fname); +} + + +//_______________________________________________________________________ +void draw_tree(Int_t itree, TString fname= "weights/MVAnalysis_BDT.weights.txt"){ + // draws the decision tree read from the weight file on a canvas
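// Typical use of the BDT() entry point defined above, from the ROOT prompt and from the
// directory that holds tmvaglob.C and the weights/ subdirectory (illustration only, not
// part of the macro; this is also what the "Decision Tree (#1)" button of TMVAGui.C runs):
//
//    root [0] .x BDT.C        // draws decision tree no. 1 from the default weight file
//    root [1] .x BDT.C(3)     // same, but for tree no. 3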
+ + TString *vars; + TMVA::DecisionTree *d = read_tree(vars,itree,fname); + + Int_t depth = d->GetDepth(); + Double_t xmax= 2*depth + 0.5; + Double_t xmin= -xmax; + Double_t ystep = 1./(depth+1); + + char buffer[100]; + sprintf (buffer, "Decision Tree No.: %d",itree); + TCanvas *c1=new TCanvas("c1",buffer,0,0,1000,600); + c1->Draw(); + + draw_node( (TMVA::DecisionTreeNode*)d->GetRoot(), 0.5, 1.-0.5*ystep, 0.25, ystep ,vars); + + // make the legend + Double_t yup=0.99; + Double_t ydown=yup-ystep/2.5; + Double_t dy= ystep/2.5 * 0.2; + + + TPaveText *whichTree = new TPaveText(0.85,ydown,0.98,yup, "NDC"); + whichTree->SetBorderSize(1); + whichTree->SetFillStyle(1); + whichTree->SetFillColor(5); + whichTree->AddText(buffer); + whichTree->Draw(); + + TPaveText *intermediate = new TPaveText(0.02,ydown,0.15,yup, "NDC"); + intermediate->SetBorderSize(1); + intermediate->SetFillStyle(1); + intermediate->SetFillColor(3); + intermediate->AddText("Intermediate Nodes"); + intermediate->Draw(); + + + + ydown = ydown - ystep/2.5 -dy; + yup = yup - ystep/2.5 -dy; + TPaveText *signalleaf = new TPaveText(0.02,ydown ,0.15,yup, "NDC"); + signalleaf->SetBorderSize(1); + signalleaf->SetFillStyle(1); + signalleaf->SetFillColor(4); + signalleaf->SetTextColor(10); + signalleaf->AddText("Signal Leaf Nodes"); + signalleaf->Draw(); + + ydown = ydown - ystep/2.5 -dy; + yup = yup - ystep/2.5 -dy; + TPaveText *backgroundleaf = new TPaveText(0.02,ydown,0.15,yup, "NDC"); + backgroundleaf->SetBorderSize(1); + backgroundleaf->SetFillStyle(1); + backgroundleaf->SetFillColor(2); + backgroundleaf->AddText("Background Leaf Nodes"); + backgroundleaf->Draw(); + + + +} + + + +//_______________________________________________________________________ +void draw_node( TMVA::DecisionTreeNode *n, + Double_t x, Double_t y, + Double_t xscale, Double_t yscale, TString * vars) { + // recursively puts an entries in the histogram for the node and its daughters + // + + if (n->GetLeft() != NULL){ + TLine *a1 = new TLine(x-xscale/2,y,x-xscale,y-yscale/2); + a1->SetLineWidth(2); + a1->Draw(); + draw_node((TMVA::DecisionTreeNode*) n->GetLeft(), x-xscale, y-yscale, xscale/2, yscale, vars); + } + if (n->GetRight() != NULL){ + TLine *a1 = new TLine(x+xscale/2,y,x+xscale,y-yscale/2); + a1->SetLineWidth(2); + a1->Draw(); + draw_node((TMVA::DecisionTreeNode*) n->GetRight(), x+xscale, y-yscale, xscale/2, yscale, vars ); + } + + + TPaveText *t = new TPaveText(x-xscale/2,y-yscale/2,x+xscale/2,y+yscale/2, "NDC"); + + t->SetBorderSize(1); + + t->SetFillStyle(1); + if (n->GetNodeType() == 1) { t->SetFillColor(4); t->SetTextColor(10); } + else if (n->GetNodeType() == -1) t->SetFillColor(2); + else if (n->GetNodeType() == 0) t->SetFillColor(3); + + char buffer[25]; + sprintf(buffer,"N=%d",n->GetNEvents()); + t->AddText(buffer); + sprintf(buffer,"S/(S+B)=%4.3f",n->GetSoverSB()); + t->AddText(buffer); + + if (n->GetNodeType() == 0){ + t->AddText(TString(vars[n->GetSelector()]+">"+=::Form("%5.3g",n->GetCutValue()))); + } + +// sprintf(buffer,"seq=%d",n->GetSequence()); +// t->AddText(buffer); +// sprintf(buffer,"depth=%d",n->GetDepth()); +// t->AddText(buffer); + sprintf(buffer,"type=%d",n->GetNodeType()); + t->AddText(buffer); + + + if (n->GetNodeType() == 1) t->SetFillColor(4); + else if (n->GetNodeType() == -1) t->SetFillColor(2); + else if (n->GetNodeType() == 0) t->SetFillColor(3); + + + + t->Draw(); + + return; +} + + +TMVA::DecisionTree* read_tree(TString * &vars, Int_t itree=1, TString fname= "weights/MVAnalysis_BDT.weights.txt") +{ + cout << "reading 
Tree " << itree << " from weight file: " << fname << endl; + ifstream fin( fname ); + if (!fin.good( )) { // file not found --> Error + cout << "Error opening " << fname << endl; + exit(1); + } + + Int_t idummy; + Float_t fdummy; + TString dummy = ""; + + // file header with name + while (!dummy.Contains("#VAR")) fin >> dummy; + fin >> dummy >> dummy >> dummy; // the rest of header line + + // number of variables + Int_t nVars; + fin >> dummy >> nVars; + // at this point, we should have idummy == nVars + // cout << "rread nVars = " << nVars <<endl; + + + // variable mins and maxes + vars = new TString[nVars]; + for (Int_t i = 0; i < nVars; i++) fin >> vars[i] >> dummy >> dummy >> dummy; + + char buffer[20]; + char line[256]; + sprintf(buffer,"T %d",itree); + + while (!dummy.Contains(buffer)) { + fin.getline(line,256); + dummy = TString(line); + } + + TMVA::DecisionTreeNode *n = new TMVA::DecisionTreeNode(); + char pos="s"; + UInt_t depth =0; + n->ReadRec(fin,pos,depth); + TMVA::DecisionTree *d = new TMVA::DecisionTree(n); + + // d->Print(cout); + + + fin.close(); + + return d; +} + diff --git a/tmva/test/TMVAGui.C b/tmva/test/TMVAGui.C new file mode 100644 index 00000000000..5c02ef0bdb2 --- /dev/null +++ b/tmva/test/TMVAGui.C @@ -0,0 +1,109 @@ +#include <iostream> + +#include "TControlBar.h" + +void TMVAGui( const char* fName = "TMVA.root" ) +{ + // Use this script in order to run the various individual macros + // that plot the output of TMVA (e.g. running TMVAnalysis.C), + // stored in the file "TMVA.root" + // for further documentation, look in the individual macros + + + cout << "--- Open TMVAGui for input file: " << fName << endl; + + // gROOT->Reset(); + // gStyle->SetScreenFactor(2); // if you have a large screen, select 1,2 or 1.4 + + // create the control bar + TControlBar * cbar = new TControlBar( "vertical", "Plotting Scripts", 0, 0 ); + + const char* buttonType = "button"; + + // configure buttons + cbar->AddButton( "Input Variables", + Form(".x variables.C(\"%s\",0)",fName), + "Plots all input variables (macro variables.C)", + buttonType ); + + cbar->AddButton( "Decorrelated Variables", + Form(".x variables.C(\"%s\",1)",fName), + "Plots all decorrelated input variables (macro variables.C)", + buttonType ); + + cbar->AddButton( "PCA-transformed Variables", + Form(".x variables.C(\"%s\",2)",fName), + "Plots all PCA-transformed input variables (macro variables.C)", + buttonType ); + + cbar->AddButton( "Variable Correlations (scatter profiles)", + Form(".x correlationscatters.C\(\"%s\",0)",fName), + "Plots signal and background correlation profiles between all input variables (macro correlationscatters.C)", + buttonType ); + + cbar->AddButton( " Decorrelated-Variable Correlations (scatter profiles) ", + Form(".x correlationscatters.C\(\"%s\",1)",fName), + "Plots signal and background correlation profiles between all decorrelated input variables (macro correlationscatters.C(1))", + buttonType ); + + cbar->AddButton( " PCA-transformed Variable Correlations (scatter profiles) ", + Form(".x correlationscatters.C\(\"%s\",2)",fName), + "Plots signal and background correlation profiles between all PCA-transformed input variables (macro correlationscatters.C(2))", + buttonType ); + + cbar->AddButton( "Variable Correlations (summary)", + Form(".x correlations.C(\"%s\")",fName), + "Plots signal and background correlation summaries for all input variables (macro correlations.C)", + buttonType ); + + cbar->AddButton( "Output MVA Variables", + Form(".x mvas.C(\"%s\")",fName), + "Plots the 
output variable of each method (macro mvas.C)", + buttonType ); + + cbar->AddButton( "Mu-transforms (summary)", + Form(".x mutransform.C(\"%s\")",fName), + "Plots the mu-transformed signal and background MVAs of each method (macro mutransform.C)", + buttonType ); + + cbar->AddButton( "Background Rejection vs Signal Efficiencies", + Form(".x efficiencies.C(\"%s\")",fName), + "Plots background rejection vs signal efficiencies (macro efficiencies.C)", + buttonType ); + + cbar->AddButton( "Likelihood Reference Distributiuons (if exist)", + Form(".x likelihoodrefs.C(\"%s\")",fName), + "Plots to verify the likelihood reference distributions (macro likelihoodrefs.C)", + buttonType ); + + cbar->AddButton( "Network Architecture (if exists)", + Form(".x network.C(\"%s\")",fName), + "Plots the MLP weights (macro network.C)", + buttonType ); + + cbar->AddButton( "Network Convergence Test (if exists)", + Form(".x annconvergencetest.C(\"%s\")",fName), + "Plots error estimator versus training epoch for training and test samples (macro annconvergencetest.C)", + buttonType ); + + cbar->AddButton( "Decision Tree (#1)", + Form(".x BDT.C",fName), + "Plots the Decision Tree (#1); to plot other trees (i) call macro BDT.C(i) from command line", + buttonType ); + + + cbar->AddButton( "Quit", ".q", "Quit", buttonType ); + + // set the style + cbar->SetTextColor("black"); + + // there seems to be a bug in ROOT: font jumps back to default after pressing on >2 different buttons + // cbar->SetFont("-adobe-helvetica-bold-r-*-*-12-*-*-*-*-*-iso8859-1"); + + // draw + cbar->Show(); + + + + gROOT->SaveContext(); +} diff --git a/tmva/test/TMVAlogon.C b/tmva/test/TMVAlogon.C new file mode 100644 index 00000000000..44f2dacc68c --- /dev/null +++ b/tmva/test/TMVAlogon.C @@ -0,0 +1,57 @@ + +{ + + gSystem->Load("libMLP.so"); + + // load TMVA shared library created in local release + TString libTMVA( "../lib/libTMVA.1.so" ); + gSystem->Load( libTMVA ); + + // welcome the user + TMVA::Tools::TMVAWelcomeMessage(); + cout << "TMVAlogon: loaded TMVA library: \"" << libTMVA << "\"" << endl; + + // some basic style settings + TStyle *TMVAStyle = gROOT->GetStyle("Plain"); // our style is based on Plain + // new TStyle("TMVA","TMVA plots style"); + // the pretty color palette of old + TMVAStyle->SetPalette(1,0); + + // use plain black on white colors + TMVAStyle->SetFrameBorderMode(0); + TMVAStyle->SetCanvasBorderMode(0); + TMVAStyle->SetPadBorderMode(0); + TMVAStyle->SetPadColor(0); + TMVAStyle->SetCanvasColor(0); + TMVAStyle->SetTitleFillColor(0); + TMVAStyle->SetFillStyle(0); + TMVAStyle->SetLegendBorderSize(0); + + // set the paper & margin sizes + TMVAStyle->SetPaperSize(20,26); + TMVAStyle->SetPadTopMargin(0.10); + TMVAStyle->SetPadRightMargin(0.05); + TMVAStyle->SetPadBottomMargin(0.11); + TMVAStyle->SetPadLeftMargin(0.12); + + // use bold lines and markers + TMVAStyle->SetMarkerStyle(21); + TMVAStyle->SetMarkerSize(0.3); + TMVAStyle->SetHistLineWidth(1.85); + TMVAStyle->SetLineStyleString(2,"[12 12]"); // postscript dashes + + // do not display any of the standard histogram decorations + TMVAStyle->SetOptTitle(1); + TMVAStyle->SetTitleH(0.052); + + TMVAStyle->SetOptStat(0); + TMVAStyle->SetOptFit(0); + + // put tick marks on top and RHS of plots + TMVAStyle->SetPadTickX(1); + TMVAStyle->SetPadTickY(1); + + gROOT->SetStyle("Plain"); + cout << "TMVAlogon: use " << gStyle->GetName() << " style with \"Pretty Color Palette\"" << endl; + cout << endl; +} diff --git a/tmva/test/TMVAnalysis.C b/tmva/test/TMVAnalysis.C index 
5d2007e0837..6b449a95016 100644 --- a/tmva/test/TMVAnalysis.C +++ b/tmva/test/TMVAnalysis.C @@ -1,71 +1,60 @@ +// @(#)root/tmva $Id: TMVAnalysis.C,v 1.62 2006/11/17 00:21:35 stelzer Exp $ /********************************************************************************** * Project : TMVA - a Root-integrated toolkit for multivariate data analysis * * Package : TMVA * * Root Macro: TMVAnalysis * * * - * This exectutable provides an example on training and testing of several * - * Multivariate Analyser (MVA) methods * + * This macro gives an example on training and testing of several * + * Multivariate Analyser (MVA) methods. * * * - * As input file we use a standard Root example of a Higgs analysis, which * - * is part of the Root tutorial (and hence present in any Root release) * + * As input file we use a toy MC sample (you find it in TMVA/examples/data). * * * - * The methods to be used can be switched on and off by means of the boolians * - * below * + * The methods to be used can be switched on and off by means of booleans. * * * * The output file "TMVA.root" can be analysed with the use of dedicated * - * macros (simply say: root -l <macro.C>) : * - * * - * - variables.C ==> show us the MVA input variables for signal and backgr * - * - correlations.C ==> show us the correlations between the MVA input vars * - * - mvas.C ==> show the trained MVAs for the test events * - * - efficiencies.C ==> show us the background rejections versus signal effs * - * for all MVAs used * - * * - * TMVA allows to train and test multiple MVAs for different phase space * - * regions. This is however not realised in this simple example. * - * * + * macros (simply say: root -l <macro.C>), which can be conveniently * + * invoked through a GUI that will appear at the end of the run of this macro. 
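A typical session with this macro, assuming it is started from tmva/test so that TMVAlogon.C (added in this patch and hooked in via .rootrc) loads the local libTMVA, looks as follows; file names other than the defaults are of course possible:

   root -l TMVAnalysis.C      (trains and tests the selected methods, writes TMVA.root)
   root -l TMVAGui.C          (re-opens the plotting GUI on TMVA.root at any later time)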
* **********************************************************************************/ -void TMVAnalysis() { - - // initialisation - gSystem->Load("libMLP"); // ROOT's Multilayer Perceptron library is needed - gSystem->Load("libTMVA"); // and of course the TMVA library - gROOT->ProcessLine(".L loader.C+"); - - // --------------------------------------------------------------- - // choose MVA methods to be trained + tested - Bool_t Use_Cuts = 1; - Bool_t Use_Likelihood = 1; - Bool_t Use_LikelihoodD = 1; - Bool_t Use_PDERS = 1; - Bool_t Use_HMatrix = 1; - Bool_t Use_Fisher = 1; - Bool_t Use_CFMlpANN = 1; - Bool_t Use_TMlpANN = 1; - Bool_t Use_BDT_GiniIndex = 1; // default BDT method - Bool_t Use_BDT_CrossEntro = 0; - Bool_t Use_BDT_SdivStSpB = 0; - Bool_t Use_BDT_MisClass = 0; - Bool_t Use_BDT_Bagging_Gini= 0; - // --------------------------------------------------------------- - Bool_t EvaluateVariables = 0; // perform evaluation for each input variable - // --------------------------------------------------------------- +#include <iostream> + +#include "TCut.h" +#include "TFile.h" +#include "TSystem.h" +#include "TTree.h" + +#include "TMVAGui.C" + +// --------------------------------------------------------------- +// choose MVA methods to be trained + tested +Bool_t Use_Cuts = 1; +Bool_t Use_CutsD = 0; +Bool_t Use_Likelihood = 1; +Bool_t Use_LikelihoodD = 1; // the "D" extension indicates decorrelated input variables (see option strings) +Bool_t Use_PDERS = 1; +Bool_t Use_PDERSD = 0; +Bool_t Use_HMatrix = 1; +Bool_t Use_Fisher = 1; +Bool_t Use_MLP = 1; // this is the recommended ANN +Bool_t Use_CFMlpANN = 0; +Bool_t Use_TMlpANN = 0; +Bool_t Use_BDT = 1; +Bool_t Use_BDTD = 0; +Bool_t Use_RuleFit = 0; + +// read input data file with ascii format (otherwise ROOT) ? +Bool_t ReadDataFromAsciiIFormat = kFALSE; + +void TMVAnalysis() +{ + // explicit loading of the shared libTMVA is done in TMVAlogon.C, defined in .rootrc + // if you use your private .rootrc, or run from a different directory, please copy the + // corresponding lines from .rootrc - cout << "Start Test TMVAnalysis" << endl - << "======================" << endl - << endl; - cout << "Testing all methods takes about 10-20 minutes. By excluding" << endl - << "some of the computing expensive analysis methods the demonstration" << endl - << "will finish much faster." << endl - << " 1) All methods (15 min)" << endl - << " 2) Fast methods only (1 min)" << endl - << "Your choice (1:default): " << flush; - int selection=1; - char selc='1'; - cin.get(selc); - if(selc=='2') - Use_Cuts = Use_Likelihood = Use_PDERS = Use_HMatrix = Use_CFMlpANN = Use_TMlpANN = Use_BDT_GiniIndex = 0; + std::cout << "Start Test TMVAnalysis" << std::endl + << "======================" << std::endl + << std::endl; + std::cout << "Testing all standard methods may take about 4 minutes..." << std::endl; // Create a new root output file. TFile* outputFile = TFile::Open( "TMVA.root", "RECREATE" ); @@ -73,182 +62,168 @@ void TMVAnalysis() { // Create the factory object. Later you can choose the methods whose performance // you'd like to investigate. The factory will then run the performance analysis // for you. - TMVA::Factory *factory = new TMVA::Factory( "MVAnalysis", outputFile, "" ) ; - - // Define the signal and background event samples. 
- TFile *input(0); - const char *fname = "tmva_example.root"; - TFile *input = 0; - if (!gSystem->AccessPathName(fname)) { - input = TFile::Open(fname); - } else { - printf("accessing %s file from http://root.cern.ch/files\n",fname); - input = TFile::Open(Form("http://root.cern.ch/files/%s",fname)); + TMVA::Factory *factory = new TMVA::Factory( "MVAnalysis", outputFile, "" ); + + if (ReadDataFromAsciiIFormat) { + // load the signal and background event samples from ascii files + // format in file must be: + // var1/F:var2/F:var3/F:var4/F + // 0.04551 0.59923 0.32400 -0.19170 + // ... + + TString datFileS = "data/toy_sig_lincorr.dat"; + TString datFileB = "data/toy_bkg_lincorr.dat"; + if (!factory->SetInputTrees( datFileS, datFileB )) exit(1); } - if (!input) return; - - TTree *signal = (TTree*)input->Get("TreeS"); - TTree *background = (TTree*)input->Get("TreeB"); - if( ! factory->SetInputTrees( signal, background )) return; - - - // Define the input variables. These are used in the TMVA. - vector<TString>* inputVars = new vector<TString>; - inputVars->push_back("var1"); - inputVars->push_back("var2"); - inputVars->push_back("var3"); - inputVars->push_back("var4"); - factory->SetInputVariables( inputVars ); - - + else { + // load the signal and background event samples from ROOT trees + TFile *input(0); + if (!gSystem->AccessPathName("../examples/data/toy_sigbkg.root")) { + // ../examples/data/toy_sigbkg.root is + // available with the sourceforge installation + cout << "--- accessing ../examples/data/toy_sigbkg.root" << endl; + input = TFile::Open("../examples/data/toy_sigbkg.root"); + } else if(!gSystem->AccessPathName("tmva_example.root")) { + // if you downloaded the example from the ROOT site + // http://root.cern.ch/files/tmva_example.root + // into the local directory + cout << "--- accessing ./tmva_example.root" << endl; + input = TFile::Open("tmva_example.root"); + } else { + // try accessing the file via the web from the ROOT site + cout << "--- accessing tmva_example.root file from http://root.cern.ch/files" << endl; + cout << "--- for faster startup you may consider downloading it into you local directory" << endl; + input = TFile::Open("http://root.cern.ch/files/tmva_example.root"); + } + + if (!input) { + std::cout << "ERROR: could not open data file" << std::endl; + exit(1); + } + + TTree *signal = (TTree*)input->Get("TreeS"); + TTree *background = (TTree*)input->Get("TreeB"); + + // global event weights (see below for setting event-wise weights) + Double_t signalWeight = 1.0; + Double_t backgroundWeight = 1.0; + + // sanity check + if (!signal || !background) { + std::cout << "ERROR: unknown tree(s)" << std::endl; + exit(1); + } + if (!factory->SetInputTrees( signal, background, signalWeight, backgroundWeight)) exit(1); + } + + // Define the input variables that shall be used for the MVA training + // note that you may also use variable expressions, such as: "3*var1/var2*abs(var3)" + // [all types of expressions that can also be parsed by TTree::Draw( "expression" )] + factory->AddVariable("var1", 'F'); + factory->AddVariable("var2", 'F'); + factory->AddVariable("var3", 'F'); + factory->AddVariable("var4", 'F'); + + // This would set individual event weights (the variables defined in the + // expression need to exist in the original TTree) + // factory->SetWeightExpression("weight1*weight2"); + // Apply additional cuts on the signal and background sample. 
// Assumptions on size of training and testing sample: // a) equal number of signal and background events is used for training // b) any numbers of signal and background events are used for testing // c) an explicit syntax can violate a) // more Documentation with the Factory class - TCut mycut = ""; - factory->PrepareTrainingAndTestTree( mycut, 2000, 4000 ); + TCut mycut = ""; // for example: TCut mycut = "abs(var1)<0.5 && abs(var2-0.5)<1"; + + factory->PrepareTrainingAndTestTree( mycut, 1000, 40000 ); + // ---- book MVA methods + // + // please lookup the various method configuration options in the corresponding cxx files, eg: + // src/MethoCuts.cxx, etc. - // Book the MVA methods you like to investigate. - - // MethodCuts: - // format of option string: "OptMethod:EffMethod:Option_var1:...:Option_varn" - // "OptMethod" can be: - // - "GA" : Genetic Algorithm (recommended) - // - "MC" : Monte-Carlo optimization - // "EffMethod" can be: - // - "EffSel": compute efficiency by event counting - // - "EffPDF": compute efficiency from PDFs - // === For "GA" method ====== - // "Option_var1++" are (see GA for explanation of parameters): - // - fGa_nsteps - // - fGa_preCalc - // - fGa_SC_steps - // - fGa_SC_offsteps - // - fGa_SC_factor - // === For "MC" method ====== - // "Option_var1" is number of random samples - // "Option_var2++" can be - // - "FMax" : ForceMax (the max cut is fixed to maximum of variable i) - // - "FMin" : ForceMin (the min cut is fixed to minimum of variable i) - // - "FSmart": ForceSmart (the min or max cut is fixed to min/max, based on mean value) - // - Adding "All" to "option_vari", eg, "AllFSmart" will use this option for all variables - // - if "option_vari" is empty (== ""), no assumptions on cut min/max are made - // ---------------------------------------------------------------------------------- + // Cut optimisation if (Use_Cuts) - factory->BookMethod( "MethodCuts", "V:GA:EffSel:30:3:10:5:0.95" ); - // factory->BookMethod( "MethodCuts", "V:MC:EffSel:10000:AllFSmart" ); - - // MethodLikelihood options: - // histogram_interpolation_method:nsmooth:nsmooth:n_aveEvents_per_bin:Decorrelation + factory->BookMethod( TMVA::Types::kCuts, "Cuts", "!V:MC:EffSel:MC_NRandCuts=100000:AllFSmart" ); + + // alternatively, use the powerful cut optimisation with a Genetic Algorithm + // factory->BookMethod( TMVA::Types::Cuts, "CutsGA", + // "!V:GA:EffSel:GA_nsteps=40:GA_cycles=30:GA_popSize=100:GA_SC_steps=10:GA_SC_offsteps=5:GA_SC_factor=0.95" ); + + if (Use_CutsD) + factory->BookMethod( TMVA::Types::kCuts, "CutsD", "!V:MC:EffSel:MC_NRandCuts=200000:AllFSmart:Preprocess=Decorrelate" ); + + // Likelihood if (Use_Likelihood) - factory->BookMethod( TMVA::Types::Likelihood, "Spline2:3" ); - if (Use_LikelihoodD) - factory->BookMethod( TMVA::Types::Likelihood, "Spline2:10:25:D"); + factory->BookMethod( TMVA::Types::kLikelihood, "Likelihood", "!V:!TransformOutput:Spline=2:NSmooth=5" ); + + // test the decorrelated likelihood + if (Use_LikelihoodD) + factory->BookMethod( TMVA::Types::kLikelihood, "LikelihoodD", "!V:!TransformOutput:Spline=2:NSmooth=5:Preprocess=Decorrelate"); - // MethodFisher: + // Fisher: if (Use_Fisher) - factory->BookMethod( TMVA::Types::Fisher, "Fisher" ); // Fisher method ("Fi" or "Ma") - - // Method CF(Clermont-Ferrand)ANN: + factory->BookMethod( TMVA::Types::kFisher, "Fisher", "!V:Fisher" ); + + // the new TMVA ANN: MLP (recommended ANN) + if (Use_MLP) + factory->BookMethod( TMVA::Types::kMLP, "MLP", "!V:NCycles=200:HiddenLayers=N+1,N:TestRate=5" ); + + // 
CF(Clermont-Ferrand)ANN if (Use_CFMlpANN) - factory->BookMethod( TMVA::Types::CFMlpANN, "5000:N:N" ); // n_cycles:#nodes:#nodes:... + factory->BookMethod( TMVA::Types::kCFMlpANN, "CFMlpANN", "!V:H:NCycles=5000:HiddenLayers=N,N" ); // n_cycles:#nodes:#nodes:... - // Method CF(Root)ANN: + // Tmlp(Root)ANN if (Use_TMlpANN) - factory->BookMethod( TMVA::Types::TMlpANN, "200:N+1:N" ); // n_cycles:#nodes:#nodes:... + factory->BookMethod( TMVA::Types::kTMlpANN, "TMlpANN", "!V:NCycles=200:HiddenLayers=N+1,N" ); // n_cycles:#nodes:#nodes:... - // MethodHMatrix: + // HMatrix if (Use_HMatrix) - factory->BookMethod( TMVA::Types::HMatrix ); // H-Matrix (chi2-squared) method + factory->BookMethod( TMVA::Types::kHMatrix, "HMatrix", "!V" ); // H-Matrix (chi2-squared) method // PDE - RS method - // format and syntax of option string: "VolumeRangeMode:options" - // where: - // VolumeRangeMode - all methods defined in private enum "VolumeRangeMode" - // options - deltaFrac in case of VolumeRangeMode=MinMax/RMS - // - nEventsMin/Max, maxVIterations, scale for VolumeRangeMode=Adaptive if (Use_PDERS) - factory->BookMethod( TMVA::Types::PDERS, "Adaptive:50:100:50:0.99" ); - - // MethodBDT (Boosted Decision Trees) options: - // format and syntax of option string: "nTrees:BoostType:SeparationType: - // nEventsMin:dummy: - // nCuts:SignalFraction" - // nTrees: number of trees in the forest to be created - // BoostType: the boosting type for the trees in the forest (AdaBoost e.t.c..) - // SeparationType the separation criterion applied in the node splitting - // nEventsMin: the minimum number of events in a node (leaf criteria, stop splitting) - // dummy: dummy option to keep backward compatible - // continue splitting. !! - // !!! Needs to be set to zero, as it doesn't work properly otherwise - // ... it's strange though and not yet understood !!! - // nCuts: the number of steps in the optimisation of the cut for a node - // SignalFraction: scale parameter of the number of Bkg events - // applied to the training sample to simulate different initial purity - // of your data sample. 
- // - // known SeparationTypes are: - // - MisClassificationError - // - GiniIndex - // - CrossEntropy - // known BoostTypes are: - // - AdaBoost - // - Bagging - - if (Use_BDT_GiniIndex) - factory->BookMethod( TMVA::Types::BDT, "200:AdaBoost:GiniIndex:10:0.:20" ); - if (Use_BDT_CrossEntro) - factory->BookMethod( TMVA::Types::BDT, "200:AdaBoost:CrossEntropy:10:0.:20" ); - if (Use_BDT_SdivStSpB) - factory->BookMethod( TMVA::Types::BDT, "200:AdaBoost:SdivSqrtSplusB:10:0.:20" ); - if (Use_BDT_MisClass) - factory->BookMethod( TMVA::Types::BDT, "200:AdaBoost:MisClassificationError:10:0.:20" ); - if (Use_BDT_Bagging_Gini) - factory->BookMethod( TMVA::Types::BDT, "200:Bagging:GiniIndex:10:0.:20","bagging" ); + factory->BookMethod( TMVA::Types::kPDERS, "PDERS", + "!V:VolumeRangeMode=RMS:KernelEstimator=Teepee:MaxVIterations=50:InitialScale=0.99" ) ; + + if (Use_PDERSD) + factory->BookMethod( TMVA::Types::kPDERS, "PDERSD", + "!V:VolumeRangeMode=RMS:KernelEstimator=Teepee:MaxVIterations=50:InitialScale=0.99:Preprocess=Decorrelate" ) ; + // Boosted Decision Trees + if (Use_BDT) + factory->BookMethod( TMVA::Types::kBDT, "BDT", + "!V:NTrees=400:BoostType=AdaBoost:SeparationType=GiniIndex:nEventsMin=20:nCuts=20:PruneMethod=CostComplexity:PruneStrength=3.5"); + if (Use_BDTD) + factory->BookMethod( TMVA::Types::kBDT, "BDTD", + "!V:NTrees=400:BoostType=AdaBoost:SeparationType=GiniIndex:nEventsMin=20:nCuts=20:PruneMethod=CostComplexity:PruneStrength=3.5:Preprocess=Decorrelate"); + + // Friedman's RuleFit method + if (Use_RuleFit) + factory->BookMethod( TMVA::Types::kRuleFit, "RuleFit", + "!V:NTrees=20:SampleFraction=-1:nEventsMin=60:nCuts=20:MinImp=0.001:Model=ModLinear:GDTau=0.6:GDStep=0.01:GDNSteps=100000:SeparationType=GiniIndex:RuleMaxDist=0.00001" ); - // Now you can tell the factory to train, test, and evaluate the MVAs. + // ---- Now you can tell the factory to train, test, and evaluate the MVAs. // Train MVAs. factory->TrainAllMethods(); - // Test MVAs. factory->TestAllMethods(); - - // Evaluate variables. - if (EvaluateVariables) factory->EvaluateAllVariables(); - - // Evaluate MVAs factory->EvaluateAllMethods(); - // Save the output. outputFile->Close(); - - cout << "==> wrote root file TMVA.root" << endl; - cout << "==> TMVAnalysis is done!" << endl; - + std::cout << "==> wrote root file TMVA.root" << std::endl; + std::cout << "==> TMVAnalysis is done!" 
<< std::endl; // clean up delete factory; - delete inputVars; - - gROOT->Reset(); - gStyle->SetScreenFactor(1); //if you have a large screen, select 1,2 or 1.4 - bar = new TControlBar("vertical", "Checks",0,0); - bar->AddButton("Input Variables", ".x variables.C", "Plots all input variables (macro variables.C)"); - bar->AddButton("Variable Correlations", ".x correlations.C", "Plots correlations between all input variables (macro variables.C)"); - bar->AddButton("Output MVA Variables", ".x mvas.C", "Plots the output variable of each method (macro mvas.C)"); - bar->AddButton("Background Rejection vs Signal Efficiencies", ".x efficiencies.C", "Plots background rejection vs signal efficiencies (macro efficiencies.C)"); - bar->AddButton("Quit", ".q", "Quit"); - bar->Show(); - gROOT->SaveContext(); + // open the GUI for the root macros + TMVAGui(); } diff --git a/tmva/test/TMVAnalysis.py b/tmva/test/TMVAnalysis.py new file mode 100644 index 00000000000..68840bcebd0 --- /dev/null +++ b/tmva/test/TMVAnalysis.py @@ -0,0 +1,257 @@ +#!/usr/bin/env python +# @(#)root/tmva $Id: TMVAnalysis.py,v 1.3 2006/11/15 11:00:51 andreas.hoecker Exp $ +# ------------------------------------------------------------------------------ # +# Project : TMVA - a Root-integrated toolkit for multivariate data analysis # +# Package : TMVA # +# Python script: TMVAnalysis.py # +# # +# This python script gives an example on training and testing of several # +# Multivariate Analyser (MVA) methods through PyROOT. Note that PyROOT requires # +# that you have a python version > 2.2 installed on your computer. # +# # +# As input file we use a toy MC sample (you find it in TMVA/examples/data) # +# # +# The methods to be used can be switched on and off by means of booleans. # +# # +# The output file "TMVA.root" can be analysed with the use of dedicated # +# macros (simply say: root -l <../macros/macro.C>), which can be conveniently # +# invoked through a GUI that will appear at the end of the run of this macro. # +# # +# for help type "python TMVAnalysis.py --help" # +# ------------------------------------------------------------------------------ # + +# -------------------------------------------- +# standard python import +import sys # exit +import time # time accounting +import getopt # command line parser + +# -------------------------------------------- + +# default settings for command line arguments +DEFAULT_OUTFNAME = "TMVA.root" +DEFAULT_INFNAME = "../examples/data/toy_sigbkg.root" +DEFAULT_TREESIG = "TreeS" +DEFAULT_TREEBKG = "TreeB" +DEFAULT_METHODS = "Cuts CutsD Likelihood LikelihoodD PDERS HMatrix Fisher MLP BDT" + +# print help +def usage(): + print " " + print "Usage: python %s [options]" % sys.argv[0] + print " -m | --methods : gives methods to be run (default: all methods)" + print " -i | --inputfile : name of input ROOT file (default: '%s')" % DEFAULT_INFNAME + print " -o | --outputfile : name of output ROOT file containing results (default: '%s')" % DEFAULT_OUTFNAME + print " -t | --inputtrees : input ROOT Trees for signal and background (default: '%s %s')" \ + % (DEFAULT_TREESIG, DEFAULT_TREEBKG) + print " -v | --verbose" + print " -? | --usage : print this help message" + print " -h | --help : print this help message" + print " " + +# main routine +def main(): + + try: + # retrive command line options + shortopts = "m:i:t:o:vh?" 
+ longopts = ["methods=", "inputfile=", "inputtrees=", "outputfile=", "verbose", "help", "usage"] + opts, args = getopt.getopt( sys.argv[1:], shortopts, longopts ) + + except getopt.GetoptError: + # print help information and exit: + print "ERROR: unknown options in argument %s" % sys.argv[1:] + usage() + sys.exit(1) + + infname = DEFAULT_INFNAME + treeNameSig = DEFAULT_TREESIG + treeNameBkg = DEFAULT_TREEBKG + outfname = DEFAULT_OUTFNAME + methods = DEFAULT_METHODS + verbose = False + for o, a in opts: + if o in ("-?", "-h", "--help", "--usage"): + usage() + sys.exit(0) + elif o in ("-m", "--methods"): + methods = a + elif o in ("-i", "--inputfile"): + infname = a + elif o in ("-o", "--outputfile"): + outfname = a + elif o in ("-t", "--inputtrees"): + a.strip() + trees = a.rsplit( ' ' ) + trees.sort() + trees.reverse() + if len(trees)-trees.count('') != 2: + print "ERROR: need to give two trees (each one for signal and background)" + print trees + sys.exit(1) + treeNameSig = trees[0] + treeNameBkg = trees[1] + elif o in ("-v", "--verbose"): + verbose = True + + # print methods + mlist = methods.split(' ') + print "=== TMVAnalysis: use methods..." + for m in mlist: + if m != '': + print "=== ... <%s>" % m + + # import ROOT classes + from ROOT import gSystem, gROOT, gApplication, TFile, TTree, TCut + + # load TMVA library and GUI + gSystem.Load( 'libTMVA.1.so' ) + gROOT.LoadMacro( '../macros/TMVAGui.C' ) + + # import TMVA classes from ROOT + from ROOT import TMVA + + # output file + outputFile = TFile( outfname, 'RECREATE' ) + + # create einstance of factory + factory = TMVA.Factory( "MVAnalysis", outputFile, "" ) + + # set verbosity + factory.SetVerbose( verbose ) + + # read input data + if not gSystem.AccessPathName( infname ): + input = TFile( infname ) + else: + print "ERROR: could not access data file %s\n" % infname + + signal = input.Get( treeNameSig ) + background = input.Get( treeNameBkg ) + + # global event weights (see below for setting event-wise weights) + signalWeight = 1.0 + backgroundWeight = 1.0 + + if not factory.SetInputTrees( signal, background, signalWeight, backgroundWeight ): + print "ERROR: could not set input trees\n" + sys.exit(1) + + # Define the input variables that shall be used for the MVA training + # note that you may also use variable expressions, such as: "3*var1/var2*abs(var3)" + # [all types of expressions that can also be parsed by TTree::Draw( "expression" )] + factory.AddVariable("var1", 'F') + factory.AddVariable("var2", 'F') + factory.AddVariable("var3", 'F') + factory.AddVariable("var4", 'F') + + # This would set individual event weights (the variables defined in the + # expression need to exist in the original TTree) + # factory->SetWeightExpression("weight1*weight2") + # + # Apply additional cuts on the signal and background sample. 
+ # Assumptions on size of training and testing sample: + # a) equal number of signal and background events is used for training + # b) any numbers of signal and background events are used for testing + # c) an explicit syntax can violate a) + # more Documentation with the Factory class + # example for cut: mycut = TCut( "abs(var1)<0.5 && abs(var2-0.5)<1" ) + mycut = TCut( "" ) + + # here, the relevant variables are copied over in new, slim trees that are + # used for TMVA training and testing + factory.PrepareTrainingAndTestTree( mycut, 2000, 4000 ) + + # Cut optimisation + if methods.find( "Cuts" ) != -1: + factory.BookMethod( TMVA.Types.Cuts, "Cuts", "!V:MC:EffSel:MC_NRandCuts=100000:AllFSmart" ) + + # Cut optimisation with a Genetic Algorithm + if methods.find( "Cuts" ) != -1: + factory.BookMethod( TMVA.Types.Cuts, "CutsGA", + "!V:GA:EffSel:GA_nsteps=40:GA_cycles=30:GA_popSize=100:GA_SC_steps=10:GA_SC_offsteps=5:GA_SC_factor=0.95" ) + + # Cut optimisation using decorrelated input variables + if methods.find( "CutsD" ) != -1: + factory.BookMethod( TMVA.Types.Cuts, "CutsD", "!V:MC:EffSel:MC_NRandCuts=200000:AllFSmart:Preprocess=Decorrelate" ) + + # Likelihood + if methods.find( "Likelihood" ) != -1: + factory.BookMethod( TMVA.Types.Likelihood, "Likelihood", "!V:!TransformOutput:Spline=2:NSmooth=5" ) + + # test the decorrelated likelihood + if methods.find( "LikelihoodD" ) != -1: + factory.BookMethod( TMVA.Types.Likelihood, "LikelihoodD", "!V:!TransformOutput:Spline=2:NSmooth=5:Preprocess=Decorrelate") + + # Fisher: + if methods.find( "Fisher" ) != -1: + factory.BookMethod( TMVA.Types.Fisher, "Fisher", "!V:Fisher" ) + + # the new TMVA ANN: MLP (recommended ANN) + if methods.find( "MLP" ) != -1: + factory.BookMethod( TMVA.Types.MLP, "MLP", "!V:NCycles=200:HiddenLayers=N+1,N:TestRate=5" ) + + # CF(Clermont-Ferrand)ANN + if methods.find( "CFMlpANN" ) != -1: + factory.BookMethod( TMVA.Types.CFMlpANN, "CFMlpANN", "!V:H:NCycles=5000:HiddenLayers=N,N" ) # n_cycles:#nodes:#nodes:... + + # Tmlp(Root)ANN + if methods.find( "TMlpANN" ) != -1: + factory.BookMethod( TMVA.Types.TMlpANN, "TMlpANN", "!V:NCycles=200:HiddenLayers=N+1,N" ) # n_cycles:#nodes:#nodes:... + + # HMatrix + if methods.find( "HMatrix" ) != -1: + factory.BookMethod( TMVA.Types.HMatrix, "HMatrix", "!V" ) # H-Matrix (chi2-squared) method + + # PDE - RS method + if methods.find( "PDERS" ) != -1: + factory.BookMethod( TMVA.Types.PDERS, "PDERS", + "!V:VolumeRangeMode=RMS:KernelEstimator=Teepee:MaxVIterations=50:InitialScale=0.99" ) + if methods.find( "PDERSD" ) != -1: + factory.BookMethod( TMVA.Types.PDERS, "PDERSD", + "!V:VolumeRangeMode=RMS:KernelEstimator=Teepee:MaxVIterations=50:InitialScale=0.99:Preprocess=Decorrelate" ) + + # Boosted Decision Trees + if methods.find( "BDT" ) != -1: + factory.BookMethod( TMVA.Types.BDT, "BDT", + "!V:NTrees=400:BoostType=AdaBoost:SeparationType=GiniIndex:nEventsMin=20:SignalFraction=0.:nCuts=20:PruneMethod=CostComplexity:PruneStrength=3.5" ); + + # Friedman's RuleFit method + if methods.find( "RuleFit" ) != -1: + factory.BookMethod( TMVA.Types.RuleFit, "RuleFit", + "!V:NTrees=20:SampleFraction=-1:nEventsMin=60:nCuts=20:MinImp=0.001:Model=ModLinear:GDTau=0.6:GDStep=0.01:GDNSteps=100000:SeparationType=GiniIndex:RuleMaxDist=0.00001" ) + + # Bayesian classifier + if methods.find( "BayesClassifier" ) != -1: + factory.BookMethod( TMVA.Types.BayesClassifier, "BayesClassifier", "!V:myOptions" ) + + # ---- Now you can tell the factory to train, test, and evaluate the MVAs. 
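    # Illustration (not part of the original script): the script defined above is normally
    # launched from the shell; only the methods listed after --methods are booked, and the
    # defaults at the top of the file are used for any option not given explicitly, e.g.
    #
    #    python TMVAnalysis.py --methods "Fisher MLP BDT" --inputfile ../examples/data/toy_sigbkg.root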
+ + # Train MVAs + factory.TrainAllMethods() + + # Test MVAs + factory.TestAllMethods() + + # Evaluate MVAs + factory.EvaluateAllMethods() + + # Save the output. + outputFile.Close() + + # clean up + factory.IsA().Destructor( factory ) + + print "=== wrote root file %s\n" % outfname + print "=== TMVAnalysis is done!\n" + + # open the GUI for the result macros + gROOT.ProcessLine( "TMVAGui(\"%s\")" % outfname ); + + # keep the ROOT thread running + gApplication.Run() + +# ---------------------------------------------------------- + +if __name__ == "__main__": + main() diff --git a/tmva/test/TMVApplication.C b/tmva/test/TMVApplication.C new file mode 100644 index 00000000000..67c1d8aefc7 --- /dev/null +++ b/tmva/test/TMVApplication.C @@ -0,0 +1,176 @@ +/********************************************************************************** + * Project : TMVA - a Root-integrated toolkit for multivariate data analysis * + * Package : TMVA * + * Exectuable: TMVApplication * + * * + * This exectutable provides a simple example on how to use the trained MVAs * + * within a C++ analysis module * + * * + * ------------------------------------------------------------------------------ * + * see also the alternative (slightly faster) way to retrieve the MVA values in * + * examples/TMVApplicationAlternative.cxx * + * ------------------------------------------------------------------------------ * + **********************************************************************************/ + +// --------------------------------------------------------------- +// choose MVA methods to be trained + tested +Bool_t Use_Cuts = 1; +Bool_t Use_CutsD = 1; +Bool_t Use_Likelihood = 1; +Bool_t Use_LikelihoodD = 1; // the "D" extension indicates decorrelated input variables (see option strings) +Bool_t Use_PDERS = 1; +Bool_t Use_PDERSD = 1; +Bool_t Use_HMatrix = 1; +Bool_t Use_Fisher = 1; +Bool_t Use_MLP = 1; // this is the recommended ANN +Bool_t Use_CFMlpANN = 1; +Bool_t Use_TMlpANN = 1; +Bool_t Use_BDT = 1; +Bool_t Use_BDTD = 1; +Bool_t Use_RuleFit = 1; +// --------------------------------------------------------------- + +void TMVApplication() +{ + cout << endl; + cout << "==> start TMVApplication" << endl; + + // + // create the Reader object + // + TMVA::Reader *reader = new TMVA::Reader(); + + // create a set of variables and declare them to the reader + // - the variable names must corresponds in name and type to + // those given in the weight file(s) that you use + Float_t var1, var2, var3, var4; + reader->AddVariable( "var1", &var1 ); + reader->AddVariable( "var2", &var2 ); + reader->AddVariable( "var3", &var3 ); + reader->AddVariable( "var4", &var4 ); + + // + // book the MVA methods + // + string dir = "weights/"; + string prefix = "MVAnalysis"; + + if (Use_Cuts) reader->BookMVA( "Cuts method", dir + prefix + "_Cuts.weights.txt" ); + if (Use_CutsD) reader->BookMVA( "CutsD method", dir + prefix + "_CutsD.weights.txt" ); + if (Use_Likelihood) reader->BookMVA( "Likelihood method", dir + prefix + "_Likelihood.weights.txt" ); + if (Use_LikelihoodD) reader->BookMVA( "LikelihoodD method", dir + prefix + "_LikelihoodD.weights.txt" ); + if (Use_PDERS) reader->BookMVA( "PDERS method", dir + prefix + "_PDERS.weights.txt" ); + if (Use_PDERSD) reader->BookMVA( "PDERSD method", dir + prefix + "_PDERSD.weights.txt" ); + if (Use_HMatrix) reader->BookMVA( "HMatrix method", dir + prefix + "_HMatrix.weights.txt" ); + if (Use_Fisher) reader->BookMVA( "Fisher method", dir + prefix + "_Fisher.weights.txt" ); + if (Use_MLP) 
reader->BookMVA( "MLP method", dir + prefix + "_MLP.weights.txt" ); + if (Use_CFMlpANN) reader->BookMVA( "CFMlpANN method", dir + prefix + "_CFMlpANN.weights.txt" ); + if (Use_TMlpANN) reader->BookMVA( "TMlpANN method", dir + prefix + "_TMlpANN.weights.txt" ); + if (Use_BDT) reader->BookMVA( "BDT method", dir + prefix + "_BDT.weights.txt" ); + if (Use_BDTD) reader->BookMVA( "BDTD method", dir + prefix + "_BDTD.weights.txt" ); + if (Use_RuleFit) reader->BookMVA( "RuleFit method", dir + prefix + "_RuleFit.weights.txt" ); + + // + // Prepare input tree (this must be replaced by your data source) + // in this example, there is a toy tree with signal and one with background events + // we'll later on use only the "signal" events for the test in this example. + // + TFile *input = new TFile("../examples/data/toy_sigbkg.root"); + TTree *signal = (TTree*)input->Get("TreeS"); + TTree *background = (TTree*)input->Get("TreeB"); + + // + // book output histograms + UInt_t nbin = 100; + TH1F *histLk, *histLkD, *histPD, *histPDD, *histHm, *histFi, *histNn, *histNnC, *histNnT, *histBdt, *histRf; + if (Use_Likelihood) histLk = new TH1F( "MVA_Likelihood", "MVA_Likelihood", nbin, 0, 1 ); + if (Use_LikelihoodD) histLkD = new TH1F( "MVA_LikelihoodD", "MVA_LikelihoodD", nbin, 0, 1 ); + if (Use_PDERS) histPD = new TH1F( "MVA_PDERS", "MVA_PDERS", nbin, 0, 1 ); + if (Use_PDERSD) histPDD = new TH1F( "MVA_PDERSD", "MVA_PDERSD", nbin, 0, 1 ); + if (Use_HMatrix) histHm = new TH1F( "MVA_HMatrix", "MVA_HMatrix", nbin, -0.95, 1.55 ); + if (Use_Fisher) histFi = new TH1F( "MVA_Fisher", "MVA_Fisher", nbin, -4, 4 ); + if (Use_MLP) histNn = new TH1F( "MVA_MLP", "MVA_MLP", nbin, -0.25, 1.5 ); + if (Use_CFMlpANN) histNnC = new TH1F( "MVA_CFMlpANN", "MVA_CFMlpANN", nbin, 0, 1 ); + if (Use_TMlpANN) histNnT = new TH1F( "MVA_TMlpANN", "MVA_TMlpANN", nbin, -1, 1 ); + if (Use_BDT) histBdt = new TH1F( "MVA_BDT", "MVA_BDT", nbin, -0.4, 0.6 ); + if (Use_BDTD) histBdtD= new TH1F( "MVA_BDTD", "MVA_BDTD", nbin, -0.4, 0.6 ); + if (Use_RuleFit) histRf = new TH1F( "MVA_RuleFit", "MVA_RuleFit", nbin, -1.3, 1.3 ); + + // + // prepare the tree + // - here the variable names have to corresponds to your tree + // - you can use the same variables as above which is slightly faster, + // but of course you can use different ones and copy the values inside the event loop + // + TTree* theTree = signal; + theTree->SetBranchAddress( "var1", &var1 ); + theTree->SetBranchAddress( "var2", &var2 ); + theTree->SetBranchAddress( "var3", &var3 ); + theTree->SetBranchAddress( "var4", &var4 ); + + // efficiency calculator for cut method + int nSelCuts = 0; + double effS = 0.7; + + cout << "--- processing: " << theTree->GetEntries() << " events" << endl; + for (Long64_t ievt=0; ievt<theTree->GetEntries();ievt++) { + + theTree->GetEntry(ievt); + + if (ievt%1000 == 0) + cout << "--- ... 
processing event: " << ievt << endl; + + // + // return the MVA + // + if (Use_Cuts) { + // give the desired signal efficienciy + Bool_t passed = reader->EvaluateMVA( "Cuts method", effS ); + if (passed) nSelCuts++; + } + + // + // fill histograms + // + if (Use_Likelihood ) histLk ->Fill( reader->EvaluateMVA( "Likelihood method" ) ); + if (Use_LikelihoodD) histLkD ->Fill( reader->EvaluateMVA( "LikelihoodD method" ) ); + if (Use_PDERS ) histPD ->Fill( reader->EvaluateMVA( "PDERS method" ) ); + if (Use_PDERSD ) histPDD ->Fill( reader->EvaluateMVA( "PDERSD method" ) ); + if (Use_HMatrix ) histHm ->Fill( reader->EvaluateMVA( "HMatrix method" ) ); + if (Use_Fisher ) histFi ->Fill( reader->EvaluateMVA( "Fisher method" ) ); + if (Use_MLP ) histNn ->Fill( reader->EvaluateMVA( "MLP method" ) ); + if (Use_CFMlpANN ) histNnC ->Fill( reader->EvaluateMVA( "CFMlpANN method" ) ); + if (Use_TMlpANN ) histNnT ->Fill( reader->EvaluateMVA( "TMlpANN method" ) ); + if (Use_BDT ) histBdt ->Fill( reader->EvaluateMVA( "BDT method" ) ); + if (Use_BDTD ) histBdtD->Fill( reader->EvaluateMVA( "BDTD method" ) ); + if (Use_RuleFit ) histRf ->Fill( reader->EvaluateMVA( "RuleFit method" ) ); + } + cout << "--- end of event loop" << endl; + // get elapsed time + if (Use_Cuts) cout << "--- efficiency for cut method: " << double(nSelCuts)/theTree->GetEntries() + << " (for a required signal efficiency of " << effS << ")" << endl; + + // + // write histograms + // + TFile *target = new TFile( "TMVApp.root","RECREATE" ); + if (Use_Likelihood ) histLk ->Write(); + if (Use_LikelihoodD ) histLkD ->Write(); + if (Use_PDERS ) histPD ->Write(); + if (Use_PDERSD ) histPDD ->Write(); + if (Use_HMatrix ) histHm ->Write(); + if (Use_Fisher ) histFi ->Write(); + if (Use_MLP ) histNn ->Write(); + if (Use_CFMlpANN ) histNnC ->Write(); + if (Use_TMlpANN ) histNnT ->Write(); + if (Use_BDT ) histBdt ->Write(); + if (Use_BDTD ) histBdtD->Write(); + if (Use_RuleFit ) histRf ->Write(); + target->Close(); + + cout << "--- created root file: \"TMVApp.root\" containing the MVA output histograms" << endl; + + delete reader; + + cout << "==> TMVApplication is done!" << endl << endl; +} diff --git a/tmva/test/annconvergencetest.C b/tmva/test/annconvergencetest.C new file mode 100644 index 00000000000..a97402a5c2f --- /dev/null +++ b/tmva/test/annconvergencetest.C @@ -0,0 +1,67 @@ +#include "tmvaglob.C" + +// this macro serves to assess the convergence of the MLP ANN. +// It compares the error estimator for the training and testing samples. +// If overtraining occurred, the estimator for the training sample should +// monotoneously decrease, while the estimator of the testing sample should +// show a minimum after which it increases. 
+ +// input: - Input file (result from TMVA), +// - use of TMVA plotting TStyle +void annconvergencetest( TString fin = "TMVA.root", bool useTMVAStyle=kTRUE ) +{ + // set style and remove existing canvas' + TMVAGlob::Initialize( useTMVAStyle ); + + // checks if file with name "fin" is already open, and if not opens one + TMVAGlob::OpenFile( fin ); + + TDirectory * dir = (TDirectory*)gDirectory->Get("MLP"); + if (dir==0) { + cout << "Could not locate directory MLP in file " << fin << endl; + return; + } + dir->cd(); + + TCanvas* c = new TCanvas( "MLPConvergenceTest", "MLP Convergence Test", 150, 0, 600, 580*0.8 ); + + Double_t m1 = estimatorHistTrain->GetMaximum(); + Double_t m2 = estimatorHistTest ->GetMaximum(); + Double_t max = TMath::Max( m1, m2 ); + m1 = estimatorHistTrain->GetMinimum(); + m2 = estimatorHistTest ->GetMinimum(); + Double_t min = TMath::Min( m1, m2 ); + estimatorHistTrain->SetMaximum( max + 0.1*(max - min) ); + estimatorHistTrain->SetMinimum( min - 0.1*(max - min) ); + estimatorHistTrain->SetLineColor( 2 ); + estimatorHistTrain->SetLineWidth( 2 ); + estimatorHistTrain->SetTitle( TString("MLP Convergence test") ); + + estimatorHistTest->SetLineColor( 4 ); + estimatorHistTest->SetLineWidth( 2 ); + + estimatorHistTrain->GetXaxis()->SetTitle( "Epochs" ); + estimatorHistTrain->GetYaxis()->SetTitle( "Estimator" ); + estimatorHistTrain->GetXaxis()->SetTitleOffset( 1.20 ); + estimatorHistTrain->GetYaxis()->SetTitleOffset( 1.65 ); + + estimatorHistTrain->Draw(); + estimatorHistTest ->Draw("same"); + + // need a legend + TLegend *legend= new TLegend( 1 - gPad->GetRightMargin() - 0.45, 1-gPad->GetTopMargin() - 0.20, + 1 - gPad->GetRightMargin() - 0.05, 1-gPad->GetTopMargin() - 0.05 ); + + legend->AddEntry(estimatorHistTrain,"Training Sample","l"); + legend->AddEntry(estimatorHistTest,"Test sample","l"); + legend->Draw("same"); + legend->SetBorderSize(1); + legend->SetMargin( 0.3 ); + + c->cd(); + TMVAGlob::plot_logo(); // don't understand why this doesn't work ... :-( + c->Update(); + + TString fname = "plots/annconvergencetest"; + TMVAGlob::imgconv( c, fname ); +} diff --git a/tmva/test/compareanapp.C b/tmva/test/compareanapp.C new file mode 100644 index 00000000000..7f2029391b4 --- /dev/null +++ b/tmva/test/compareanapp.C @@ -0,0 +1,155 @@ +#include "tmvaglob.C" + +void compareanapp( TString finAn = "TMVA.root", TString finApp = "TMVApp.root", bool useTMVAStyle=kTRUE ) +{ + // set style and remove existing canvas' + TMVAGlob::Initialize( useTMVAStyle ); + + // switches + const Bool_t Draw_CFANN_Logy = kFALSE; + const Bool_t Save_Images = kTRUE; + + cout << "Reading file: " << finApp << endl; + TFile *fileApp = new TFile( finApp ); + + // define Canvas layout here! 
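// (a single pad per canvas in this macro; up to noCanvas canvases are booked
//  on demand inside the loop over the MVA methods below)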
+ Int_t xPad = 1; // no of plots in x + Int_t yPad = 1; // no of plots in y + Int_t noPad = xPad * yPad ; + const Int_t width = 600; // size of canvas + + // this defines how many canvases we need + const Int_t noCanvas = 10; + TCanvas **c = new TCanvas*[noCanvas]; + for (Int_t ic=0; ic<noCanvas; ic++) c[ic] = 0; + + // counter variables + Int_t countCanvas = 0; + Int_t countPad = 1; + + // list of existing MVAs + const Int_t nmva = 16; + TString prefix = ""; + + // list of existing MVAs + const Int_t nmva = 16; + TString prefix = ""; + TString mvaName[nmva] = { "MVA_Likelihood", + "MVA_LikelihoodD", + "MVA_Fisher", + "MVA_Fisher_Fi", + "MVA_Fisher_Ma", + "MVA_CFMlpANN", + "MVA_TMlpANN", + "MVA_HMatrix", + "MVA_PDERS" , + "MVA_BDT", + "MVA_BDTGini", + "MVA_BDTMisCl", + "MVA_BDTStatSig", + "MVA_BDTCE", + "MVA_MLP", + "MVA_RuleFit" }; + char fname[200]; + + // loop over MVAs + for (Int_t imva=0; imva<nmva; imva++) { + + // retrieve corresponding signal and background histograms + TH1* all = (TH1*)gDirectory->Get( prefix + mvaName[imva] ); + + // only a single type (from TMVApplication.cpp) + if (NULL != all) { + + TString hname = all->GetName(); + cout << "--- Found (all) histogram: " << hname << endl; + + // create new canvas + if ((c[countCanvas]==NULL) || (countPad>noPad)) { + cout << "--- Book canvas no: " << countCanvas << endl; + char cn[20]; + sprintf( cn, "canvas%d", countCanvas+1 ); + c[countCanvas] = new TCanvas( cn, "MVA Output Variables", + countCanvas*50, countCanvas*20, width, width*0.8 ); + // style + c[countCanvas]->SetBorderMode(0); + c[countCanvas]->SetFillColor(10); + + c[countCanvas]->Divide(xPad,yPad); + countPad = 1; + } + + // chop off useless stuff + TString title(all->GetTitle()); + title.ReplaceAll(prefix, ""); + all->SetTitle( TString("MVA output for method: ") + title ); + + // set the histogram style + all->SetLineWidth( 2 ); + all->SetLineColor( 1 ); + + // normalize 'all' + all->Scale( 1.0/all->GetSumOfWeights() ); + + // frame limits (choose judicuous x range) + Float_t nrms = 4; + cout << "--- mean and RMS: " << all->GetMean() << ", " << all->GetRMS() << endl; + Float_t xmin = TMath::Max( all->GetMean() - nrms*all->GetRMS(), + all->GetXaxis()->GetXmin() ); + Float_t xmax = TMath::Min( all->GetMean() + nrms*all->GetRMS(), + all->GetXaxis()->GetXmax() ); + Float_t ymin = 0; + Float_t ymax = all->GetMaximum()*1.2 ; + + if (Draw_CFANN_Logy && mvaName[imva] == "CFANN") ymin = 0.01; + + // build a frame + Int_t nb = 500; + TH2F* frame = new TH2F( TString("frame") + all->GetName(), all->GetTitle(), + nb, xmin, xmax, nb, ymin, ymax ); + frame->GetXaxis()->SetTitle(title); + frame->GetYaxis()->SetTitle("Normalized"); + TMVAGlob::SetFrameStyle( frame ); + + // eventually: draw the frame + frame->Draw(); + + if (Draw_CFANN_Logy && mvaName[imva] == "CFANN") gPad->SetLogy(); + + // add the reference histograms + // delete fileApp; + TFile *fileAn = new TFile( finAn ); + TH1* sig = (TH1*)gDirectory->Get( prefix + mvaName[imva] + "_S" ); + TH1* bgd = (TH1*)gDirectory->Get( prefix + mvaName[imva] + "_B" ); + if (NULL != sig && NULL != bgd) { + // set the histogram style + TMVAGlob::SetSignalAndBackgroundStyle( sig, bgd ); + // overlay signal and background histograms + sig->Scale( 1.0/sig->GetSumOfWeights() ); + bgd->Scale( 1.0/bgd->GetSumOfWeights() ); + sig->Draw("samehist"); + bgd->Draw("samehist"); + } + + // overlay signal and background histograms + all->Draw("samehist"); + + // reopen TMVApplication output + TFile *fileApp = new TFile( finApp ); + + // redraw axes + 
frame->Draw("sameaxis"); + + // save canvas to file + c[countCanvas]->cd(countPad); + countPad++; + if (countPad > noPad) { + TMVAGlob::plot_logo(); + c[countCanvas]->Update(); + sprintf( fname, "plots/compareanapp_c%i", countCanvas+1 ); + if (Save_Images) TMVAGlob::imgconv( c[countCanvas], &fname[0] ); + countCanvas++; + } + } + } +} diff --git a/tmva/test/correlations.C b/tmva/test/correlations.C index a97b2a9fece..b97ee50f21a 100644 --- a/tmva/test/correlations.C +++ b/tmva/test/correlations.C @@ -1,24 +1,30 @@ #include "tmvaglob.C" -void correlations( TString fin = "TMVA.root", Bool_t greyScale = kFALSE ) +// this macro plots the correlation matrix of the various input +// variables used in TMVA (e.g. running TMVAnalysis.C). Signal and +// Background are plotted separately + +// input: - Input file (result from TMVA), +// - use of colors or grey scale +// - use of TMVA plotting TStyle +void correlations( TString fin = "TMVA.root", Bool_t greyScale = kFALSE, Bool_t useTMVAStyle = kTRUE ) { - gROOT->SetStyle("Plain"); - gStyle->SetOptStat(0); - TList * loc = gROOT->GetListOfCanvases(); - TListIter itc(loc); - TObject *o(0); - while( (o = itc()) ) delete o; + // set style and remove existing canvas' + TMVAGlob::Initialize( useTMVAStyle ); + + // checks if file with name "fin" is already open, and if not opens one + TFile* file = TMVAGlob::OpenFile( fin ); // signal and background const TString hName[2] = { "CorrelationMatrixS", "CorrelationMatrixB" }; const Int_t width = 600; for (Int_t ic=0; ic<2; ic++) { - TFile *file = new TFile( fin ); - TCanvas* c = new TCanvas( hName[ic], Form("Correlations between MVA input variables (%s)", (ic==0?"signal":"background")), - ic*(width+5)+300, 0, width, width ); + TCanvas* c = new TCanvas( hName[ic], + Form("Correlations between MVA input variables (%s)", (ic==0?"signal":"background")), + ic*(width+5)+200, 0, width, width ); Float_t newMargin1 = 0.13; - Float_t newMargin2 = 0.18; + Float_t newMargin2 = 0.25; gPad->SetGrid(); gPad->SetTicks(); @@ -32,7 +38,7 @@ void correlations( TString fin = "TMVA.root", Bool_t greyScale = kFALSE ) h2->SetMarkerSize( 1.5 ); h2->SetMarkerColor( 0 ); - Float_t labelSize = 0.050; + Float_t labelSize = 0.040; h2->GetXaxis()->SetLabelSize( labelSize ); h2->GetYaxis()->SetLabelSize( labelSize ); h2->LabelsOption( "d" ); @@ -49,14 +55,13 @@ void correlations( TString fin = "TMVA.root", Bool_t greyScale = kFALSE ) h2->Draw("textsame"); // add text // add comment - TText* t = new TText( 0.31, 0.88, "absolute values for correlation coefficients given in %" ); + TText* t = new TText( 0.53, 0.88, "linear correlation coefficients in %" ); t->SetNDC(); t->SetTextSize( 0.026 ); t->AppendPad(); - c->Modified(); - - TMVAGlob::plot_logo( 0.85 ); + TMVAGlob::plot_logo( ); + c->Update(); TString fname = "plots/"; fname += hName[ic]; diff --git a/tmva/test/correlationscatters.C b/tmva/test/correlationscatters.C new file mode 100644 index 00000000000..65b82b3b0b9 --- /dev/null +++ b/tmva/test/correlationscatters.C @@ -0,0 +1,160 @@ +#include "tmvaglob.C" + +// this macro plots the correlations (as scatter plots) of +// the various input variable combinations used in TMVA (e.g. running +// TMVAnalysis.C). 
Signal and Background are plotted separately + +const TString extensions[TMVAGlob::kNumOfMethods] = { "", + "_decorr", + "_PCA" }; + +// input: - Input file (result from TMVA), +// - normal/decorrelated/PCA +// - use of TMVA plotting TStyle +void correlationscatters( TString fin = "TMVA.root", TMVAGlob::TypeOfPlot type = TMVAGlob::kNormal, + bool useTMVAStyle = kTRUE ) +{ + // set style and remove existing canvas' + TMVAGlob::Initialize( useTMVAStyle ); + + // checks if file with name "fin" is already open, and if not opens one + TFile* file = TMVAGlob::OpenFile( fin ); + + TString dirName = "CorrelationPlots" + extensions[type]; + + TDirectory* dir = (TDirectory*)gDirectory->Get( dirName ); + if (dir==0) { + cout << "Could not locate directory: " << dirName << " in file " << fin << endl; + return; + } + dir->cd(); + + TListIter keyIt(dir->GetListOfKeys()); + Int_t noPlots = 0; + TKey * key = 0; + // how many plots are in the directory? + Int_t noPlots = ((dir->GetListOfKeys())->GetEntries()) / 2; + cout << "--- Found: " << noPlots << " plots in directory: " << dirName << endl; + + // define Canvas layout here! + // default setting + Int_t xPad; // no of plots in x + Int_t yPad; // no of plots in y + Int_t width; // size of canvas + Int_t height; + switch (noPlots) { + case 1: + xPad = 1; yPad = 1; width = 500; height = width; break; + case 2: + xPad = 2; yPad = 1; width = 600; height = 0.7*width; break; + case 3: + xPad = 3; yPad = 1; width = 800; height = 0.5*width; break; + case 4: + xPad = 2; yPad = 2; width = 600; height = width; break; + default: + xPad = 3; yPad = 2; width = 800; height = 0.7*width; break; + } + Int_t noPad = xPad * yPad ; + + // this defines how many canvases we need + const Int_t noCanvas = 1 + (Int_t)(noPlots/noPad); + TCanvas **c = new TCanvas*[noCanvas]; + for (Int_t ic=0; ic<noCanvas; ic++) c[ic] = 0; + + cout << "--- Found: " << noPlots << " plots; " + << "will produce: " << noCanvas << " canvas" << endl; + + // counter variables + Int_t countCanvas = 0; + Int_t countPad = 1; + + // loop over all objects in "input_variables" directory + TString thename[2] = { "_sig", "_bgd" }; + for (UInt_t itype = 0; itype < 2; itype++) { + + TIter next(gDirectory->GetListOfKeys()); + TKey* key = 0; + + while ((key = (TKey*)next())) { + + if (key->GetCycle() != 1) continue; + + // make sure, that we only look at histograms + TClass *cl = gROOT->GetClass(key->GetClassName()); + if (!cl->InheritsFrom("TH1")) continue; + TH1 *scat = (TH1*)key->ReadObj(); + TString hname= scat->GetName(); + + // check for all signal histograms + if (hname.EndsWith( thename[itype] + extensions[type] ) && + hname.BeginsWith( "scat_" )) { // found a new signal plot + + cout << hname << endl; + + // create new canvas + if ((c[countCanvas]==NULL) || (countPad>noPad)) { + cout << "--- Book canvas no: " << countCanvas << endl; + char cn[20]; + sprintf( cn, "canvas%d", countCanvas+1 ); + c[countCanvas] = new TCanvas( cn, "Correlation Profiles", + countCanvas*50+200, countCanvas*20, width, height ); + // style + c[countCanvas]->SetBorderMode(0); + c[countCanvas]->SetFillColor(0); + + c[countCanvas]->Divide(xPad,yPad); + countPad = 1; + } + + // save canvas to file + c[countCanvas]->cd(countPad); + countPad++; + + // find the corredponding backgrouns histo + TString bgname = hname; + bgname.ReplaceAll("scat_","prof_"); + TH1 *prof = (TH1*)gDirectory->Get(bgname); + if (prof == NULL) { + cout << "ERROR!!! 
couldn't find backgroung histo for" << hname << endl; + exit; + } + // this is set but not stored during plot creation in MVA_Factory + TMVAGlob::SetSignalAndBackgroundStyle( scat, prof ); + + // chop off "signal" + TMVAGlob::SetFrameStyle( scat, 1.2 ); + + // finally plot and overlay + Float_t sc = 1.1; + if (countPad==2) sc = 1.3; + scat->SetMarkerColor( 4); + scat->Draw(); + prof->SetMarkerColor( 2 ); + prof->SetMarkerSize( 0.2 ); + prof->SetLineColor( 2 ); + prof->SetLineWidth( 1 ); + prof->SetFillStyle( 3002 ); + prof->SetFillColor( 46 ); + prof->Draw("samee1"); + + // redraw axes + scat->Draw("sameaxis"); + + // save canvas to file + if (countPad > noPad) { + c[countCanvas]->Update(); + + TString fname = Form( "plots/correlationscatter_%s_c%i", extensions[type].Data(), countCanvas+1 ); + TMVAGlob::imgconv( c[countCanvas], &fname[0] ); + countCanvas++; + } + } + } + } + if (countPad <= noPad) { + c[countCanvas]->Update(); + TString fname = Form( "plots/correlationscatter_%s_c%i", extensions[type].Data(), countCanvas+1 ); + TMVAGlob::imgconv( c[countCanvas], &fname[0] ); + } + +} diff --git a/tmva/test/efficiencies.C b/tmva/test/efficiencies.C index 49a615d5b0b..1a3c99f3b9b 100644 --- a/tmva/test/efficiencies.C +++ b/tmva/test/efficiencies.C @@ -1,54 +1,17 @@ #include "tmvaglob.C" -void efficiencies( TString fin = "TMVA.root", Int_t type = 2 ) -{ - // argument: type = 1 --> plot efficiency(B) versus eff(S) - // type = 2 --> plot rejection (B) versus efficiency (S) - - gROOT->SetStyle("Plain"); - gStyle->SetOptStat(0); - TList * loc = gROOT->GetListOfCanvases(); - TListIter itc(loc); - TObject *o(0); - while( (o = itc()) ) delete o; - cout << "Reading file: " << fin << endl; - TFile *file = new TFile( fin ); - // check if multi-cut MVA or only one set of MVAs - Bool_t multiMVA=kFALSE; - TIter nextDir(file->GetListOfKeys()); - TKey *key; - // loop over all directories and check if - // one contains the key word 'multicutMVA' - while (key = (TKey*)nextDir()) { - TClass *cl = gROOT->GetClass(key->GetClassName()); - if (!cl->InheritsFrom("TDirectory")) continue; - TDirectory *d = (TDirectory*)key->ReadObj(); - TString path(d->GetPath()); - if ((TString(d->GetPath())).Contains("multicutMVA")){ - multiMVA=kTRUE; - plot_efficiencies(fin,type,d); - } - } - plot_efficiencies(fin,type,gDirectory); - -} - -void plot_efficiencies( TString fin = "TMVA.root", Int_t type = 2 , TDirectory* BinDir) +void plot_efficiencies( TFile* file, Int_t type = 2 , TDirectory* BinDir) { - // argument: type = 1 --> plot efficiency(B) versus eff(S) - // type = 2 --> plot rejection (B) versus efficiency (S) + // input: - Input file (result from TMVA), + // - type = 1 --> plot efficiency(B) versus eff(S) + // = 2 --> plot rejection (B) versus efficiency (S) Bool_t __PRINT_LOGO__ = kTRUE; Bool_t __SAVE_IMAGE__ = kTRUE; - cout <<"Bindir="<<BinDir->GetName()<<endl; - gROOT->Reset(); - gROOT->SetStyle("Plain"); - gStyle->SetOptStat(0); - - TFile *file = new TFile( fin ); + // cout << "Bindir=" << BinDir->GetName() << endl; // the coordinates Float_t x1 = 0; @@ -61,13 +24,13 @@ void plot_efficiencies( TString fin = "TMVA.root", Int_t type = 2 , TDirectory* Float_t z = y1; y1 = 1 - y2; y2 = 1 - z; - cout << "--- type==2: plot background rejection versus signal efficiency" << endl; + // cout << "--- type==2: plot background rejection versus signal efficiency" << endl; + } + else { + // cout << "--- type==1: plot background efficiency versus signal efficiency" << endl; } - else - cout << "--- type==1: plot background efficiency 
versus signal efficiency" << endl; - // create canvas - TCanvas* c = new TCanvas( "c", "the canvas", 300, 0, 650, 500 ); + TCanvas* c = new TCanvas( "c", "the canvas", 200, 0, 650, 500 ); c->SetBorderMode(0); c->SetFillColor(10); @@ -77,7 +40,6 @@ void plot_efficiencies( TString fin = "TMVA.root", Int_t type = 2 , TDirectory* // legend Float_t x0L = 0.107, y0H = 0.899; - //Float_t dxL = 0.557-x0L, dyH = 0.22; Float_t dxL = 0.457-x0L, dyH = 0.22; if (type == 2) { x0L = 0.15; @@ -88,6 +50,7 @@ void plot_efficiencies( TString fin = "TMVA.root", Int_t type = 2 , TDirectory* legend->SetTextSize( 0.05 ); legend->SetHeader( "MVA Method:" ); legend->SetMargin( 0.4 ); + legend->SetFillColor(0); TString xtit = "Signal efficiency"; TString ytit = "Background efficiency"; @@ -99,6 +62,7 @@ void plot_efficiencies( TString fin = "TMVA.root", Int_t type = 2 , TDirectory* ftit += (BinDir->GetTitle()); } // draw empty frame + if(gROOT->FindObject("frame")!=0) gROOT->FindObject("frame")->Delete(); TH2F* frame = new TH2F( "frame", ftit, 500, x1, x2, 500, y1, y2 ); frame->GetXaxis()->SetTitle( xtit ); frame->GetYaxis()->SetTitle( ytit ); @@ -111,25 +75,44 @@ void plot_efficiencies( TString fin = "TMVA.root", Int_t type = 2 , TDirectory* TIter next(file->GetListOfKeys()); TKey *key; - TString hName = "effBvsS"; - if (type == 2) hName = "rejBvsS"; + TString hNameRef = "effBvsS"; + if (type == 2) hNameRef = "rejBvsS"; + + TList hists; // loop over all histograms with that name while (key = (TKey*)next()) { TClass *cl = gROOT->GetClass(key->GetClassName()); if (!cl->InheritsFrom("TH1")) continue; TH1 *h = (TH1*)key->ReadObj(); - if ((TString(h->GetName()).Contains( hName))&& - (TString(h->GetName()).Contains( "MVA_" ))){ + TString hname = h->GetName(); + + if (hname.Contains( hNameRef ) && hname.BeginsWith( "MVA_" )) { h->SetLineWidth(3); h->SetLineColor(color); color++; if (color == 5 || color == 10 || color == 11) color++; - legend->AddEntry(h,TString(h->GetTitle()).ReplaceAll("MVA_",""),"l"); h->Draw("csame"); + hists.Add(h); nmva++; } } - + + while (hists.GetSize()) { + TListIter hIt(&hists); + TH1* hist(0); + Double_t largestInt=0; + TH1* histWithLargestInt(0); + while ((hist = (TH1*)hIt())!=0) { + Double_t integral = hist->Integral(1,hist->FindBin(0.9999)); + if (integral>largestInt) { + largestInt = integral; + histWithLargestInt = hist; + } + } + legend->AddEntry(histWithLargestInt,TString(histWithLargestInt->GetTitle()).ReplaceAll("MVA_",""),"l"); + hists.Remove(histWithLargestInt); + } + // rescale legend box size // current box size has been tuned for 3 MVAs + 1 title if (type == 1) { @@ -142,8 +125,7 @@ void plot_efficiencies( TString fin = "TMVA.root", Int_t type = 2 , TDirectory* } // redraw axes - frame->Draw("sameaxis"); - + frame->Draw("sameaxis"); legend->Draw("same"); // ============================================================ @@ -154,14 +136,46 @@ void plot_efficiencies( TString fin = "TMVA.root", Int_t type = 2 , TDirectory* c->Update(); - TString fname = "plots/" + hName; + TString fname = "plots/" + hNameRef; if (TString(BinDir->GetName()).Contains("multicut")){ TString fprepend(BinDir->GetName()); fprepend.ReplaceAll("multicutMVA_",""); - TString fname = "plots/" + fprepend + "_" + hName; - }else { - String fname = "plots/" + hName; + fname = "plots/" + fprepend + "_" + hNameRef; } if (__SAVE_IMAGE__) TMVAGlob::imgconv( c, fname ); + return; +} + +void efficiencies( TString fin = "TMVA.root", Int_t type = 2, Bool_t useTMVAStyle = kTRUE ) +{ + // argument: type = 1 --> plot 
efficiency(B) versus eff(S) + // type = 2 --> plot rejection (B) versus efficiency (S) + + // set style and remove existing canvas' + TMVAGlob::Initialize( useTMVAStyle ); + + // checks if file with name "fin" is already open, and if not opens one + TFile* file = TMVAGlob::OpenFile( fin ); + + // check if multi-cut MVA or only one set of MVAs + Bool_t multiMVA=kFALSE; + TIter nextDir(file->GetListOfKeys()); + TKey *key; + // loop over all directories and check if + // one contains the key word 'multicutMVA' + while ((key = (TKey*)nextDir())) { + TClass *cl = gROOT->GetClass(key->GetClassName()); + if (!cl->InheritsFrom("TDirectory")) continue; + TDirectory *d = (TDirectory*)key->ReadObj(); + TString path(d->GetPath()); + if ((TString(d->GetPath())).Contains("multicutMVA")){ + multiMVA=kTRUE; + plot_efficiencies( file, type, d ); + } + } + plot_efficiencies( file, type, gDirectory ); + + return; } + diff --git a/tmva/test/likelihoodrefs.C b/tmva/test/likelihoodrefs.C new file mode 100644 index 00000000000..bf8b59a0820 --- /dev/null +++ b/tmva/test/likelihoodrefs.C @@ -0,0 +1,181 @@ +#include <vector> +#include <string> +#include "tmvaglob.C" + + +// this macro plots the reference distribuions for the Likelihood +// methods for the various input variables used in TMVA (e.g. running +// TMVAnalysis.C). Signal and Background are plotted separately + + +// input: - Input file (result from TMVA), +// - use of TMVA plotting TStyle +void likelihoodrefs( TString fin = "TMVA.root", Bool_t useTMVAStyle = kTRUE ) +{ + // set style and remove existing canvas' + TMVAGlob::Initialize( useTMVAStyle ); + + // checks if file with name "fin" is already open, and if not opens one + TFile* file = TMVAGlob::OpenFile( fin ); + + file->cd("Likelihood"); + TDirectory *current_sourcedir = gDirectory; + Int_t color=1; + TIter next(current_sourcedir->GetListOfKeys()); + TKey *key; + TLegend *legS = new TLegend(0.14,0.7,0.87,0.87); + TLegend *legB = new TLegend(0.14,0.7,0.87,0.87); + + Bool_t newCanvas = kTRUE; + + const UInt_t maxCanvas = 200; + TCanvas** c = new TCanvas*[maxCanvas]; + Int_t width = 500; + Int_t height = 500; + + // avoid duplicated printing + std::vector<std::string> hasBeenUsed; + + UInt_t ic = -1; + + while ((key = (TKey*)next())) { + TClass *cl = gROOT->GetClass(key->GetClassName()); + if (!cl->InheritsFrom("TH1")) continue; + TH1 *h = (TH1*)key->ReadObj(); + TH1F *b( 0 ); + TString hname( h->GetName() ); + + // avoid duplicated plotting + Bool_t found = kFALSE; + for (UInt_t j = 0; j < hasBeenUsed.size(); j++) { + if (hasBeenUsed[j] == hname.Data()) found = kTRUE; + } + if (!found) { + + // draw original plots + if (hname.EndsWith("_sig")) { + + if (newCanvas) { + char cn[20]; + sprintf( cn, "canvas%d", ic+1 ); + ++ic; + TString n = hname; + c[ic] = new TCanvas( cn, Form( "Likelihood reference for variable: %s", + (n.ReplaceAll("_sig","")).Data() ), + ic*50+200, ic*20, width, height ); + c[ic]->Divide(2,2); + newCanvas = kFALSE; + } + + // signal + h->SetMaximum(h->GetMaximum()*1.1); + color = 4; + c[ic]->cd(1); + TString plotname = hname; + legS->Clear(); + legS->SetBorderSize(1); + h->SetMarkerColor(color); + h->SetMarkerSize( 0.7 ); + h->SetMarkerStyle( 20 ); + h->SetLineWidth(1); + h->SetLineColor(color); + color++; + legS->AddEntry(h,"Input data (signal)","p"); + h->Draw("e1"); + + // background + TString bname( hname ); + b = (TH1F*)gDirectory->Get( bname.ReplaceAll("_sig","_bgd") ); + c[ic]->cd(3); + color = 2; + legB->Clear(); + legB->SetBorderSize(1); + 
b->SetMaximum(b->GetMaximum()*1.1); + b->SetLineWidth(1); + b->SetLineColor(color); + b->SetMarkerColor(color); + b->SetMarkerSize( 0.7 ); + b->SetMarkerStyle( 20 ); + legB->AddEntry(b,"Input data (backgr.)","p"); + b->Draw("e1"); + + // register + hasBeenUsed.push_back( bname.Data() ); + + // the smooth histograms + TString hsmooth = hname + "_smooth"; + h = (TH1F*)gDirectory->Get( hsmooth ); + if (h == 0) { + cout << "ERROR in likelihoodrefs.C: unknown histogram: " << hsmooth << endl; + return; + } + b = (TH1F*)gDirectory->Get( hsmooth.ReplaceAll("_sig","_bgd") ); + + color = 1; + c[ic]->cd(1); + h->SetLineWidth(2); + h->SetLineColor(color); + h->SetMarkerColor(color); + color++; + legS->AddEntry(h,"Smoothed histogram (signal)","l"); + h->Draw("histsame"); + + color = 1; + c[ic]->cd(3); + b->SetLineWidth(2); + b->Draw("histsame"); + legB->AddEntry(b,"Smoothed histogram (backgr.)","l"); + + hasBeenUsed.push_back( hname.Data() ); + + // the splines + for (int i=0; i<= 5; i++) { + TString hspline = hname + Form( "_smooth_hist_from_spline%i", i ); + h = (TH1F*)gDirectory->Get( hspline ); + if (h) { + b = (TH1F*)gDirectory->Get( hspline.ReplaceAll("_sig","_bgd") ); + break; + } + } + if (h == 0 || b == 0) { + cout << "--- likelihoodrefs.C: did not find spline for histogram: " << hname.Data() << endl; + } + else { + + h->SetMaximum(h->GetMaximum()*1.5); + color = 4; + c[ic]->cd(2); + h->SetLineWidth(2); + h->SetLineColor(color); + legS->AddEntry(h,"Splined PDF (norm. signal)","l"); + h->Draw("hist"); + legS->Draw(); + + b->SetMaximum(b->GetMaximum()*1.5); + color = 2; + c[ic]->cd(4); + b->SetLineColor(color); + b->SetLineWidth(2); + legB->AddEntry(b,"Splined PDF (norm. backgr.)","l"); + b->Draw("hist"); + + // draw the legends + legB->Draw(); + + hasBeenUsed.push_back( hname.Data() ); + } + + c[ic]->Update(); + + // write to file + TString fname = Form( "plots/likelihoodrefs_c%i", ic+1 ); + TMVAGlob::imgconv( c[ic], fname ); + // c[ic]->Update(); + + newCanvas = kTRUE; + hasBeenUsed.push_back( hname.Data() ); + } + } + } +} + diff --git a/tmva/test/line-small.png b/tmva/test/line-small.png new file mode 100644 index 0000000000000000000000000000000000000000..9e9d90045a7a93206e94b7f1f534a3e3120d82bb GIT binary patch literal 294 zcmeAS@N?(olHy`uVBq!ia0y~yU@!t<4kiW$hKaHA&lngOSkfJR9T^xl_H+M9WMyDr zxa;ZS7!u)r_5vf{VFezRgCG982QLcAnk{5-Y~SwMgBjjyzY46LQe-kMB==&a*M%iV zciF|Ryz|R!<%T>p&TIQS&%c^=VvV}+WK)BS5y`^srlx`sV#2dhO&lZ3g!{IpFl`L; z<(PJO%DGQ7iz4%am9mddIeN=v>4tB<AJ+&38)%;rImu<xx-rSua7{rlgZAMqr@f4F zH-7S|3tM&a<SDbt2!_i>+{sbN2YtA8mYh7rG}}jKPv*HrGt_jZWgbzQB^MbJJm1y4 yG=giHwfB)Phc6k{Mg%Q0_dZ*)iN&|~E7KuG?q=^zqRk8p3=E#GelF{r5}E+a_;m;X literal 0 HcmV?d00001 diff --git a/tmva/test/mutransform.C b/tmva/test/mutransform.C new file mode 100644 index 00000000000..631a0a48db8 --- /dev/null +++ b/tmva/test/mutransform.C @@ -0,0 +1,137 @@ +#include "tmvaglob.C" + +// this macro plots the mu-transformation results of the MVA output +// variables from the various MVA methods run in TMVA (e.g. running +// TMVAnalysis.C). Since the mu-transform is flat for background, only +// signal is shown. The more signal ispeaked towards one, the better is +// the discrimination of the MVA method. 
See this reference for more +// information (in French): +// http://tel.archives-ouvertes.fr/documents/archives0/00/00/29/91/index_fr.html +// +// input: - Input file (result from TMVA), +// - use log/lin scale +// - use of TMVA plotting TStyle +void mutransform( TString fin = "TMVA.root", Bool_t logy = kFALSE, Bool_t useTMVAStyle = kTRUE ) +{ + // set style and remove existing canvas' + TMVAGlob::Initialize( useTMVAStyle ); + + // checks if file with name "fin" is already open, and if not opens one + TFile* file = TMVAGlob::OpenFile( fin ); + + // the coordinates + Float_t x1 = 0.0; + Float_t x2 = 1; + Float_t y1 = 0; + Float_t y2 = -1; + Float_t ys = 1.08; + + enum { nskip = 1 }; + TString hskip[nskip] = { "Variable" }; + + // loop over all histograms with that name + // search for maximum ordinate + TIter next(file->GetListOfKeys()); + TKey * key; + Int_t nmva = 0; + TString hName = "muTransform_S"; // ignore background + while (key = (TKey*)next()) { + TClass *cl = gROOT->GetClass(key->GetClassName()); + if (!cl->InheritsFrom("TH1")) continue; + TH1 *h = (TH1*)key->ReadObj(); + Bool_t skip = !TString(h->GetName()).Contains( hName ); + for (Int_t iskip=0; iskip<nskip; iskip++) + if (TString(h->GetName()).Contains( hskip[iskip] )) skip = kTRUE; + if (!skip) { + if (h->GetMaximum() > y2) y2 = h->GetMaximum()*ys; + nmva++; + } + } + if (y2 == -1) { + cout << "No mu-transforms found" << endl; + return; + } + + // create canvas + TCanvas* c = new TCanvas( "c", "the canvas", 150, 0, 650, 500 ); + c->SetBorderMode(0); + c->SetFillColor(10); + + // global style settings + gPad->SetTicks(); + if (logy) { + y1 = 0.1; + ys = 2.0; + gPad->SetLogy(); + } + + // legend + Float_t x0L = 0.140, y0H = 0.86; + Float_t dxL = 0.48, dyH = 0.22; + TLegend *legend = new TLegend( x0L, y0H-dyH, x0L+dxL, y0H ); + legend->SetBorderSize(1); + legend->SetTextSize( 0.05 ); + legend->SetHeader( "MVA Method:" ); + legend->SetMargin( 0.4 ); + + TString xtit = "mu-transform"; + TString ytit = ""; + TString ftit = "Signal " + xtit; + + cout << "--- set frame maximum to: " << y2 << endl; + next.Reset(); + + // rescale legend box size + // current box size has been tuned for 3 MVAs + 1 title + dyH *= (1.0 + Float_t(nmva - 3.0)/4.0); + legend->SetY1( y0H - dyH ); + + // draw empty frame + TH2F* frame = new TH2F( "frame", ftit, 500, x1, x2, 500, y1, y2 ); + frame->GetXaxis()->SetTitle( xtit ); + frame->GetYaxis()->SetTitle( ytit ); + TMVAGlob::SetFrameStyle( frame, 1.0 ); + + frame->Draw(); + + // loop over all histograms with that name + // plot + Int_t color = 1; + while (key = (TKey*)next()) { + TClass *cl = gROOT->GetClass(key->GetClassName()); + if (!cl->InheritsFrom("TH1")) continue; + TH1 *h = (TH1*)key->ReadObj(); + Bool_t skip = !TString(h->GetName()).Contains( hName ); + for (Int_t iskip=0; iskip<nskip; iskip++) + if (TString(h->GetName()).Contains( hskip[iskip] )) skip = kTRUE; + if (!skip) { + // signal or background ? 
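// (with hName = "muTransform_S" only the signal transforms survive the filter
//  above, so the dashed background branch below is effectively never taken)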
+ if (TString(h->GetName()).Contains( "_S" )) { + h->SetLineStyle( 1 ); + h->SetLineWidth( 3 ); + } + else { + h->SetLineStyle( 2 ); + h->SetLineWidth( 3 ); + } + h->SetLineColor(color); + color++; + TString tit = h->GetTitle(); + tit.ReplaceAll( "mu-Transform", "" ); + tit.ReplaceAll( "(S)", "" ); + tit.ReplaceAll( ":", "" ); + legend->AddEntry( h, tit, "l" ); + h->Draw("same"); + } + } + + // redraw axes + frame->Draw("sameaxis"); + + legend->Draw("same"); + c->Update(); + + TString fname = "plots/mutransform"; + TMVAGlob::imgconv( c, fname ); +} + diff --git a/tmva/test/mvas.C b/tmva/test/mvas.C index cf765df936c..ea36eaa12c5 100644 --- a/tmva/test/mvas.C +++ b/tmva/test/mvas.C @@ -1,26 +1,29 @@ #include "tmvaglob.C" -void mvas( TString fin = "TMVA.root" ) +// this macro plots the resulting MVA distribution (Signal and +// Background overlayed) of different MVA methods run in TMVA +// (e.g. running TMVAnalysis.C). + + +// input: - Input file (result from TMVA) +// - use of TMVA plotting TStyle +void mvas( TString fin = "TMVA.root", Bool_t useTMVAStyle = kTRUE ) { + // set style and remove existing canvas' + TMVAGlob::Initialize( useTMVAStyle ); + // switches const Bool_t Draw_CFANN_Logy = kFALSE; const Bool_t Save_Images = kTRUE; - gROOT->SetStyle("Plain"); - gStyle->SetOptStat(0); - TList * loc = gROOT->GetListOfCanvases(); - TListIter itc(loc); - TObject *o(0); - while( (o = itc()) ) delete o; - - cout << "Reading file: " << fin << endl; - TFile *file = new TFile( fin ); + // checks if file with name "fin" is already open, and if not opens one + TFile* file = TMVAGlob::OpenFile( fin ); // define Canvas layout here! Int_t xPad = 1; // no of plots in x Int_t yPad = 1; // no of plots in y Int_t noPad = xPad * yPad ; - const Int_t width = 650; // size of canvas + const Int_t width = 600; // size of canvas // this defines how many canvases we need const Int_t noCanvas = 10; @@ -32,31 +35,38 @@ void mvas( TString fin = "TMVA.root" ) Int_t countPad = 1; // list of existing MVAs - const Int_t nmva = 14; - TString prefix = ""; - TString mvaName[nmva] = { "MVA_Likelihood", - "MVA_LikelihoodD", - "MVA_Fisher", - "MVA_Fisher_Fi", - "MVA_Fisher_Ma", - "MVA_CFMlpANN", - "MVA_TMlpANN", - "MVA_HMatrix", - "MVA_PDERS" , - "MVA_BDT", - "MVA_BDTGini", - "MVA_BDTMisCl", - "MVA_BDTStatSig", - "MVA_BDTCE" }; - char fname[200]; - - // loop over MVAs - for (Int_t imva=0; imva<nmva; imva++) { + const Int_t nveto = 1; + TString prefix = "MVA_"; + TString suffixSig = "_S"; + TString suffixBgd = "_B"; + TString vetoNames[nveto] = { "muTransform" }; + + // search for the right histograms in full list of keys + TIter next(file->GetListOfKeys()); + TKey *key; + char fname[200]; + while ((key = (TKey*)next())) { + + // make sure, that we only look at histograms + TClass *cl = gROOT->GetClass(key->GetClassName()); + if (!cl->InheritsFrom("TH1")) continue; + TH1 *th1 = (TH1*)key->ReadObj(); + TString hname= th1->GetName(); + + if (!hname.BeginsWith( prefix ) || !hname.EndsWith( suffixSig )) continue; + + // check if histogram is vetoed + Bool_t found = kFALSE; + for (UInt_t iv=0; iv<nveto; iv++) if (hname.Contains( vetoNames[iv] )) found = kTRUE; + if (found) continue; + + // remove the signal suffix + hname.ReplaceAll( suffixSig, "" ); // retrieve corresponding signal and background histograms - TH1* sig = (TH1*)gDirectory->Get( prefix + mvaName[imva] + "_S" ); - TH1* bgd = (TH1*)gDirectory->Get( prefix + mvaName[imva] + "_B" ); - TH1* all = (TH1*)gDirectory->Get( prefix + mvaName[imva] ); + TH1* sig = 
(TH1*)gDirectory->Get( hname + "_S" ); + TH1* bgd = (TH1*)gDirectory->Get( hname + "_B" ); + TH1* all = (TH1*)gDirectory->Get( hname ); // check that exist if (NULL != sig && NULL != bgd) { @@ -76,7 +86,7 @@ void mvas( TString fin = "TMVA.root" ) char cn[20]; sprintf( cn, "canvas%d", countCanvas+1 ); c[countCanvas] = new TCanvas( cn, Form("MVA Output Variables %s",title.Data()), - countCanvas*50+300, countCanvas*20, width, width*0.8 ); + countCanvas*50+200, countCanvas*20, width, width*0.78 ); // style c[countCanvas]->SetBorderMode(0); c[countCanvas]->SetFillColor(10); @@ -85,7 +95,6 @@ void mvas( TString fin = "TMVA.root" ) countPad = 1; } - // set the histogram style TMVAGlob::SetSignalAndBackgroundStyle( sig, bgd ); @@ -134,13 +143,12 @@ void mvas( TString fin = "TMVA.root" ) legend->SetMargin( 0.3 ); } - TMVAGlob::plot_logo(); - // save canvas to file c[countCanvas]->cd(countPad); countPad++; if (countPad > noPad) { c[countCanvas]->Update(); + TMVAGlob::plot_logo(); sprintf( fname, "plots/mva_c%i", countCanvas+1 ); if (Save_Images) TMVAGlob::imgconv( c[countCanvas], &fname[0] ); countCanvas++; @@ -207,12 +215,11 @@ void mvas( TString fin = "TMVA.root" ) // redraw axes frame->Draw("sameaxis"); - TMVAGlob::plot_logo(); - // save canvas to file c[countCanvas]->cd(countPad); countPad++; if (countPad > noPad) { + TMVAGlob::plot_logo(); c[countCanvas]->Update(); sprintf( fname, "plots/mva_all_c%i", countCanvas+1 ); if (Save_Images) TMVAGlob::imgconv( c[countCanvas], &fname[0] ); diff --git a/tmva/test/network.C b/tmva/test/network.C new file mode 100644 index 00000000000..ce587738d3d --- /dev/null +++ b/tmva/test/network.C @@ -0,0 +1,313 @@ +#include "tmvaglob.C" + +// this macro prints out a neural network generated by MethodMLP graphically +// @author: Matt Jachowski, jachowski@stanford.edu + +// input: - Input file (result from TMVA), +// - use of TMVA plotting TStyle +void network( TString fin = "TMVA.root", Bool_t useTMVAStyle = kTRUE ) +{ + // set style and remove existing canvas' + TMVAGlob::Initialize( useTMVAStyle ); + + // checks if file with name "fin" is already open, and if not opens one + TFile* file = TMVAGlob::OpenFile( fin ); + + TDirectory * dir = (TDirectory*)gDirectory->Get("MLP"); + if (dir==0) { + cout << "Could not locate directory MLP in file " << fin << endl; + return; + } + dir->cd(); + draw_network(dir); +} + +void draw_network(TDirectory* d) +{ + Bool_t __PRINT_LOGO__ = kTRUE; + + // create canvas + TCanvas* c = new TCanvas( "c", "Neural Network Layout", 100, 0, 1000, 650 ); + c->SetBorderMode(0); + c->SetFillColor(10); + + TIter next = d->GetListOfKeys(); + TKey *key; + TString hName = "weights_hist"; + Int_t numHists = 0; + + // loop over all histograms with hName in name + while (key = (TKey*)next()) { + TClass *cl = gROOT->GetClass(key->GetClassName()); + if (!cl->InheritsFrom("TH2F")) continue; + TH2F *h = (TH2F*)key->ReadObj(); + if (TString(h->GetName()).Contains( hName )) + numHists++; + } + + // loop over all histograms with hName in name again + next.Reset(); + Double_t maxWeight = 0; + + // find max weight + while (key = (TKey*)next()) { + + //cout << "Title: " << key->GetTitle() << endl; + TClass *cl = gROOT->GetClass(key->GetClassName()); + if (!cl->InheritsFrom("TH2F")) continue; + + TH2F* h = (TH2F*)key->ReadObj(); + if (TString(h->GetName()).Contains( hName )){ + + Int_t n1 = h->GetNbinsX(); + Int_t n2 = h->GetNbinsY(); + for (Int_t i = 0; i < n1; i++) { + for (Int_t j = 0; j < n2; j++) { + Double_t weight = TMath::Abs(h->GetBinContent(i+1, j+1)); 
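// remember the largest absolute weight: draw_layer() and draw_synapse() below
// scale the synapse line widths and colours relative to this maximum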
+ if (maxWeight < weight) maxWeight = weight; + } + } + } + } + + // draw network + next.Reset(); + Int_t count = 0; + while (key = (TKey*)next()) { + + TClass *cl = gROOT->GetClass(key->GetClassName()); + if (!cl->InheritsFrom("TH2F")) continue; + + TH2F* h = (TH2F*)key->ReadObj(); + if (TString(h->GetName()).Contains( hName )){ + draw_layer(c, h, count++, numHists+1, maxWeight); + } + } + + draw_layer_labels(numHists+1); + + // ============================================================ + if (__PRINT_LOGO__) TMVAGlob::plot_logo(); + // ============================================================ + + c->Update(); + + TString fname = "plots/network"; + TMVAGlob::imgconv( c, fname ); +} + +void draw_layer_labels(Int_t nLayers) +{ + const Double_t LABEL_HEIGHT = 0.03; + const Double_t LABEL_WIDTH = 0.20; + Double_t effWidth = 0.8*(1.0-LABEL_WIDTH)/nLayers; + Double_t height = 0.8*LABEL_HEIGHT; + Double_t margY = LABEL_HEIGHT - height; + + for (Int_t i = 0; i < nLayers; i++) { + TString label = Form("Layer %i", i); + Double_t cx = i*(1.0-LABEL_WIDTH)/nLayers+1.0/(2.0*nLayers)+LABEL_WIDTH; + Double_t x1 = cx-0.8*effWidth/2.0; + Double_t x2 = cx+0.8*effWidth/2.0; + Double_t y1 = margY; + Double_t y2 = margY + height; + + TPaveLabel *p = new TPaveLabel(x1, y1, x2, y2, label+"", "br"); + p->Draw(); + } +} + +void draw_input_labels(Int_t nInputs, Double_t* cy, + Double_t rad, Double_t layerWidth) +{ + const Double_t LABEL_HEIGHT = 0.03; + const Double_t LABEL_WIDTH = 0.20; + Double_t width = LABEL_WIDTH + (layerWidth-4*rad); + Double_t margX = 0.01; + Double_t effHeight = 0.8*LABEL_HEIGHT; + + TString *varNames = get_var_names(nInputs); + TString input; + + for (Int_t i = 0; i < nInputs; i++) { + if (i != nInputs-1) input = varNames[i]; + else input = "bias"; + Double_t x1 = margX; + Double_t x2 = margX + width; + Double_t y1 = cy[i] - effHeight; + Double_t y2 = cy[i] + effHeight; + + TPaveLabel *p = new TPaveLabel(x1, y1, x2, y2, input+"", "br"); + p->Draw(); + if (i == nInputs-1) p->SetTextColor(9); + } + + delete[] varNames; +} + +TString* get_var_names(Int_t nVars) +{ + TString fname = "weights/MVAnalysis_MLP.weights.txt"; + ifstream fin( fname ); + if (!fin.good( )) { // file not found --> Error + cout << "Error opening " << fname << endl; + exit(1); + } + + Int_t idummy; + Float_t fdummy; + TString dummy = ""; + + // file header with name + while (!dummy.Contains("#VAR")) fin >> dummy; + fin >> dummy >> dummy >> dummy; // the rest of header line + + // number of variables + fin >> dummy >> idummy; + // at this point, we should have idummy == nVars + + // variable mins and maxes + TString* vars = new TString[nVars]; + for (Int_t i = 0; i < idummy; i++) fin >> vars[i] >> dummy >> dummy >> dummy; + + fin.close(); + + return vars; +} + +void draw_activation(TCanvas* c, Double_t cx, Double_t cy, + Double_t radx, Double_t rady, Int_t whichActivation) +{ + TImage *activation = NULL; + + switch (whichActivation) { + case 0: + activation = TImage::Open("sigmoid-small.png"); + break; + case 1: + activation = TImage::Open("line-small.png"); + break; + default: + cout << "Activation index " << whichActivation << " is not known." << endl; + cout << "You messed up or you need to modify network.C to introduce a new " + << "activation function (and image) corresponding to this index" << endl; + } + + if (activation == NULL) { + cout << "Could not create an image... 
exit" << endl; + return; + } + + activation->SetConstRatio(kFALSE); + + radx *= 0.7; + rady *= 0.7; + TString name = Form("activation%f%f", cx, cy); + TPad* p = new TPad(name+"", name+"", cx-radx, cy-rady, cx+radx, cy+rady); + + p->Draw(); + p->cd(); + + activation->Draw(); + c->cd(); +} + +void draw_layer(TCanvas* c, TH2F* h, Int_t iHist, + Int_t nLayers, Double_t maxWeight) +{ + const Double_t MAX_NEURONS_NICE = 12; + const Double_t LABEL_HEIGHT = 0.03; + const Double_t LABEL_WIDTH = 0.20; + Double_t ratio = ((Double_t)(c->GetWindowHeight())) / c->GetWindowWidth(); + Double_t rad, cx1, *cy1, cx2, *cy2; + + // this is the smallest radius that will still display the activation images + rad = 0.04*650/c->GetWindowHeight(); + + Int_t nNeurons1 = h->GetNbinsX(); + cx1 = iHist*(1.0-LABEL_WIDTH)/nLayers + 1.0/(2.0*nLayers) + LABEL_WIDTH; + cy1 = new Double_t[nNeurons1]; + + Int_t nNeurons2 = h->GetNbinsY(); + cx2 = (iHist+1)*(1.0-LABEL_WIDTH)/nLayers + 1.0/(2.0*nLayers) + LABEL_WIDTH; + cy2 = new Double_t[nNeurons2]; + + Double_t effRad1 = rad; + if (nNeurons1 > MAX_NEURONS_NICE) + effRad1 = 0.8*(1.0-LABEL_HEIGHT)/(2.0*nNeurons1); + + + for (Int_t i = 0; i < nNeurons1; i++) { + cy1[nNeurons1-i-1] = i*(1.0-LABEL_HEIGHT)/nNeurons1 + + 1.0/(2.0*nNeurons1) + LABEL_HEIGHT; + + if (iHist == 0) { + + TEllipse *ellipse + = new TEllipse(cx1, cy1[nNeurons1-i-1], + effRad1*ratio, effRad1, 0, 360, 0); + ellipse->Draw(); + + if (i == 0) ellipse->SetLineColor(9); + + if (nNeurons1 > MAX_NEURONS_NICE) continue; + + Int_t whichActivation = 0; + if (iHist==0 || iHist==nLayers-1 || i==0) whichActivation = 1; + draw_activation(c, cx1, cy1[nNeurons1-i-1], + rad*ratio, rad, whichActivation); + } + } + + if (iHist == 0) draw_input_labels(nNeurons1, cy1, rad, (1.0-LABEL_WIDTH)/nLayers); + + Double_t effRad2 = rad; + if (nNeurons2 > MAX_NEURONS_NICE) + effRad2 = 0.8*(1.0-LABEL_HEIGHT)/(2.0*nNeurons2); + + for (Int_t i = 0; i < nNeurons2; i++) { + cy2[nNeurons2-i-1] = i*(1.0-LABEL_HEIGHT)/nNeurons2 + 1.0/(2.0*nNeurons2) + LABEL_HEIGHT; + + TEllipse *ellipse = + new TEllipse(cx2, cy2[nNeurons2-i-1], effRad2*ratio, effRad2, 0, 360, 0); + ellipse->Draw(); + + if (i == 0 && nNeurons2 > 1) ellipse->SetLineColor(9); + + if (nNeurons2 > MAX_NEURONS_NICE) continue; + + Int_t whichActivation = 0; + if (iHist+1==0 || iHist+1==nLayers-1 || i==0) whichActivation = 1; + draw_activation(c, cx2, cy2[nNeurons2-i-1], rad*ratio, rad, whichActivation); + } + + for (Int_t i = 0; i < nNeurons1; i++) { + for (Int_t j = 0; j < nNeurons2; j++) { + draw_synapse(cx1, cy1[i], cx2, cy2[j], effRad1*ratio, effRad2*ratio, + h->GetBinContent(i+1, j+1)/maxWeight); + } + } + + delete[] cy1; + delete[] cy2; +} + +void draw_synapse(Double_t cx1, Double_t cy1, Double_t cx2, Double_t cy2, + Double_t rad1, Double_t rad2, Double_t weightNormed) +{ + const Double_t TIP_SIZE = 0.01; + const Double_t MAX_WEIGHT = 8; + const Double_t MAX_COLOR = 100; // red + const Double_t MIN_COLOR = 60; // blue + + if (weightNormed == 0) return; + + gStyle->SetPalette(100, NULL); + + TArrow *arrow = new TArrow(cx1+rad1, cy1, cx2-rad2, cy2, TIP_SIZE, ">"); + arrow->SetFillColor(1); + arrow->SetFillStyle(1001); + arrow->SetLineWidth((Int_t)(TMath::Abs(weightNormed)*MAX_WEIGHT+0.5)); + arrow->SetLineColor((Int_t)((weightNormed+1.0)/2.0*(MAX_COLOR-MIN_COLOR)+MIN_COLOR+0.5)); + arrow->Draw(); +} diff --git a/tmva/test/plotall.C b/tmva/test/plotall.C new file mode 100644 index 00000000000..582effcf763 --- /dev/null +++ b/tmva/test/plotall.C @@ -0,0 +1,23 @@ +#include "variables.C" 
+#include "correlations.C" +#include "efficiencies.C" +#include "mvas.C" +#include "mutransform.C" + +void plotall( TString fin = "TMVA.root" ) +{ + cout << "=== execute: variables()" << endl; + variables( fin ); + + cout << "=== execute: correlations()" << endl; + correlations( fin ); + + cout << "=== execute: mvas()" << endl; + mvas( fin ); + + cout << "=== execute: efficiencies()" << endl; + efficiencies( fin ); + + cout << "=== execute: ztransform()" << endl; + mutransform( fin ); +} diff --git a/tmva/test/sigmoid-small.png b/tmva/test/sigmoid-small.png new file mode 100644 index 0000000000000000000000000000000000000000..c5882fb8942f829d03540953c87b3678c3ce3d2b GIT binary patch literal 363 zcmeAS@N?(olHy`uVBq!ia0y~yU@!t<4kiW$hKaHA&lngO7>k44ofy`glX=O&z`&C3 z=<CS9u(6-}Pa-P=1A{`cN02WALzNl>LqiJ#!!HH~hK3gm45bDP46hOx7_4S6Fo+k- z*%fHRz`*d*)5S3)!u{=yhg{7DJS-2s{*~YLip%xp1n=sdBEE_LU;U{PFIjUX%KqZ& zDMe~BOa2OPwR$QpaA4QPlKHc?&Yn=T!1si>VztBMl5GW(JFd2Ib_SbAhXqW&$(LwV zSgjPLU#XpKp><p+-A6y!*X`-lYh7z9CwoU}Z_-e!i#pkKKWoj36RK+3iyYHBkFaIx ztkaDA&~vHI<g)aWDKm5`L~Ir*O_{Og1zWmzNmfDE({!oyowMd0{nVFbuvRpZX|knm z@5y76X7cDP<xK7L74XjAm0bJ(k-gZTM`?L`?raTSWb!v9-P6aQ-uWIwY{K&y+=BVG Q3=9kmp00i_>zopr0GUvV`2YX_ literal 0 HcmV?d00001 diff --git a/tmva/test/tmvaglob.C b/tmva/test/tmvaglob.C index 0de08fe7721..58e020648ac 100644 --- a/tmva/test/tmvaglob.C +++ b/tmva/test/tmvaglob.C @@ -1,114 +1,169 @@ // global TMVA style settings +#ifndef TMVA_TMVAGLOB +#define TMVA_TMVAGLOB namespace TMVAGlob { - //signal - const Int_t FillColor__S = 38; - const Int_t FillStyle__S = 1001; - const Int_t LineColor__S = 1; - const Int_t LineWidth__S = 1; - - // background - const Int_t FillColor__B = 2; - const Int_t FillStyle__B = 3002; - const Int_t LineColor__B = 2; - const Int_t LineWidth__B = 1; - - // set the style - void SetSignalAndBackgroundStyle( TH1* sig, TH1* bgd, TH1* all = 0 ) - { - if (sig != NULL) { - sig->SetLineColor( LineColor__S ); - sig->SetLineWidth( LineWidth__S ); - sig->SetFillStyle( FillStyle__S ); - sig->SetFillColor( FillColor__S ); - } + enum TypeOfPlot { kNormal = 0, + kDecorrelated, + kPCA, + kNumOfMethods }; + + + // set the style + void SetSignalAndBackgroundStyle( TH1* sig, TH1* bgd, TH1* all = 0 ) + { + + //signal + const Int_t FillColor__S = 38; + const Int_t FillStyle__S = 1001; + const Int_t LineColor__S = 1; + const Int_t LineWidth__S = 2; + + // background + const Int_t FillColor__B = 2; + const Int_t FillStyle__B = 3554; + const Int_t LineColor__B = 2; + const Int_t LineWidth__B = 2; + + if (sig != NULL) { + sig->SetLineColor( LineColor__S ); + sig->SetLineWidth( LineWidth__S ); + sig->SetFillStyle( FillStyle__S ); + sig->SetFillColor( FillColor__S ); + } - if (bgd != NULL) { - bgd->SetLineColor( LineColor__B ); - bgd->SetLineWidth( LineWidth__B ); - bgd->SetFillStyle( FillStyle__B ); - bgd->SetFillColor( FillColor__B ); - } - - if (all != NULL) { - all->SetLineColor( LineColor__S ); - all->SetLineWidth( LineWidth__S ); - all->SetFillStyle( FillStyle__S ); - all->SetFillColor( FillColor__S ); - } - } - - // set frame styles - SetFrameStyle( TH1* frame, Float_t scale = 1.0 ) - { - frame->SetLabelOffset( 0.012, "X" );// label offset on x axis - frame->SetLabelOffset( 0.012, "Y" );// label offset on x axis - frame->GetXaxis()->SetTitleOffset( 1.25 ); - frame->GetYaxis()->SetTitleOffset( 1.22 ); - frame->GetXaxis()->SetTitleSize( 0.045*scale ); - frame->GetYaxis()->SetTitleSize( 0.045*scale ); - Float_t labelSize = 0.04*scale; - frame->GetXaxis()->SetLabelSize( labelSize ); - 
frame->GetYaxis()->SetLabelSize( labelSize ); - - // global style settings - gPad->SetTicks(); - gPad->SetLeftMargin ( 0.108*scale ); - gPad->SetRightMargin ( 0.050*scale ); - gPad->SetBottomMargin( 0.120*scale ); - } - - // used to create output file for canvas - void imgconv( TCanvas* c, TString fname ) - { - if (NULL == c) { - cout << "--- Error in TMVAGlob::imgconv: canvas is NULL" << endl; - } - else { - // create directory if not existing - TString f = fname; - TString dir = f.Remove( f.Last( '/' ), f.Length() - f.Last( '/' ) ); - gSystem->mkdir( dir ); - - TString pngName = fname + ".png"; - TString gifName = fname + ".gif"; - TString epsName = fname + ".eps"; - - // create png - TImage *img = TImage::Create(); - img->FromPad( c ); - img->WriteImage( gifName ); - img->WriteImage( pngName ); - - // create eps (other option: c->Print( epsName )) - c->SaveAs(epsName); - - cout << "--- Create output files: " << pngName << " (+ .gif and .eps)" << endl; - } - } - - void plot_logo( Float_t v_scale = 1.0 ) - { - TImage *img = TImage::Open("tmva_logo.gif"); - if (!img) { - printf("Could not create an image... exit\n"); - return; - } - img->SetConstRatio(kFALSE); - UInt_t h_ = img->GetHeight(); - UInt_t w_ = img->GetWidth(); - - Float_t d = 0.045; - // absolute coordinates - Float_t x1L = 0.7803; - Float_t y1L = 0.91; - TPad *p1 = new TPad("img", "img", x1L, y1L, x1L + d*w_/h_, y1L + d*1.5*v_scale ); - - p1->Draw(); - p1->cd(); - img->Draw(); - } + if (bgd != NULL) { + bgd->SetLineColor( LineColor__B ); + bgd->SetLineWidth( LineWidth__B ); + bgd->SetFillStyle( FillStyle__B ); + bgd->SetFillColor( FillColor__B ); + } + + if (all != NULL) { + all->SetLineColor( LineColor__S ); + all->SetLineWidth( LineWidth__S ); + all->SetFillStyle( FillStyle__S ); + all->SetFillColor( FillColor__S ); + } + } + + // set frame styles + SetFrameStyle( TH1* frame, Float_t scale = 1.0 ) + { + frame->SetLabelOffset( 0.012, "X" );// label offset on x axis + frame->SetLabelOffset( 0.012, "Y" );// label offset on x axis + frame->GetXaxis()->SetTitleOffset( 1.25 ); + frame->GetYaxis()->SetTitleOffset( 1.22 ); + frame->GetXaxis()->SetTitleSize( 0.045*scale ); + frame->GetYaxis()->SetTitleSize( 0.045*scale ); + Float_t labelSize = 0.04*scale; + frame->GetXaxis()->SetLabelSize( labelSize ); + frame->GetYaxis()->SetLabelSize( labelSize ); + + // global style settings + gPad->SetTicks(); + gPad->SetLeftMargin ( 0.108*scale ); + gPad->SetRightMargin ( 0.050*scale ); + gPad->SetBottomMargin( 0.120*scale ); + } + + // set style and remove existing canvas' + void Initialize( Bool_t useTMVAStyle = kTRUE ) + { + // set style + if (!useTMVAStyle) { + gROOT->SetStyle("Plain"); + gStyle->SetOptStat(0); + } + + // destroy canvas' + TList * loc = gROOT->GetListOfCanvases(); + TListIter itc(loc); + TObject *o(0); + while ((o = itc())) delete o; + } + + // checks if file with name "fin" is already open, and if not opens one + TFile* OpenFile( const TString& fin ) + { + TFile* file = gDirectory->GetFile(); + if (file==0 || fin != file->GetName()) { + if (file != 0) { + gROOT->cd(); + file->Close(); + } + cout << "Creating TFile: " << fin << endl; + file = new TFile( fin ); + } + else { + cout << "Retrieving TFile: " << fin << endl; + file = gDirectory->GetFile(); + } + + file->cd(); + return file; + } + + // used to create output file for canvas + void imgconv( TCanvas* c, TString fname ) + { + // return; + if (NULL == c) { + cout << "--- Error in TMVAGlob::imgconv: canvas is NULL" << endl; + } + else { + // create directory if not 
existing + TString f = fname; + TString dir = f.Remove( f.Last( '/' ), f.Length() - f.Last( '/' ) ); + gSystem->mkdir( dir ); + + TString pngName = fname + ".png"; + TString gifName = fname + ".gif"; + TString epsName = fname + ".eps"; + + // create eps (other option: c->Print( epsName )) + c->SaveAs(epsName); + cout << "If you want to save the image as gif or png, please comment out " + << "the corresponding lines (line no. 83+84) in tmvaglob.C" << endl; + // c->SaveAs(gifName); + c->SaveAs(pngName); + } + } + + void plot_logo( Float_t v_scale = 1.0 ) + { + TImage *img = TImage::Open("tmva_logo.gif"); + if (!img) { + printf("Could not create an image... exit\n"); + return; + } + img->SetConstRatio(kFALSE); + UInt_t h_ = img->GetHeight(); + UInt_t w_ = img->GetWidth(); + //cout << w_/h_ << endl; + + Float_t rgif = 405/108.; + Float_t rpad = gPad->GetWw()/gPad->GetWh(); + Float_t xperc = 0.3; + Float_t yperc = xperc * rpad / rgif; + + Float_t r = w_/h_; + + Float_t d = 0.045; + // absolute coordinates + Float_t x1L = 1 - gStyle->GetPadRightMargin(); + Float_t y1L = 0.91; + TPad *p1 = new TPad("img", "img", x1L - d*r, y1L, x1L, y1L + d*1.5*v_scale ); + p1->SetRightMargin(0); + p1->SetBottomMargin(0); + p1->SetLeftMargin(0); + p1->SetTopMargin(0); + p1->Draw(); + p1->cd(); + img->Draw(); + } } +#endif diff --git a/tmva/test/variables.C b/tmva/test/variables.C index 26a2b6a4dda..1fdcd09cc19 100644 --- a/tmva/test/variables.C +++ b/tmva/test/variables.C @@ -1,144 +1,166 @@ #include "tmvaglob.C" -void variables( TString fin = "TMVA.root" ) +// this macro plots the distributions of the different input variables +// used in TMVA (e.g. running TMVAnalysis.C). Signal and Background are overlayed. + + + +const TString directories[TMVAGlob::kNumOfMethods] = { "input_variables", + "decorrelated_input_variables", + "principal_component_analyzed_input_variables" }; + +const TString titles[TMVAGlob::kNumOfMethods] = { "TMVA Input Variables", + "Decorrelated TMVA Input Variables", + "Principal Component Transformed TMVA Input Variables" }; + +const TString outfname[TMVAGlob::kNumOfMethods] = { "variables", + "variables_decorr", + "variables_pca" }; + +// input: - Input file (result from TMVA), +// - normal/decorrelated/PCA +// - use of TMVA plotting TStyle +void variables( TString fin = "TMVA.root", TMVAGlob::TypeOfPlot type = TMVAGlob::kNormal, bool useTMVAStyle=kTRUE ) { - gROOT->Reset(); - gROOT->SetStyle("Plain"); - gStyle->SetOptStat(0); - TList * loc = gROOT->GetListOfCanvases(); - TListIter itc(loc); - TObject *o(0); - while( (o = itc()) ) delete o; - - TFile *file = new TFile( fin ); - - input_variables->cd(); - - // how many plots are in the "input_variables" directory? - Int_t noPlots = ((gDirectory->GetListOfKeys())->GetEntries()) / 2; - - // define Canvas layout here! 
- // default setting - Int_t xPad; // no of plots in x - Int_t yPad; // no of plots in y - Int_t width; // size of canvas - Int_t height; - switch (noPlots) { - case 1: - xPad = 1; yPad = 1; width = 500; height = width; break; - case 2: - xPad = 2; yPad = 1; width = 600; height = 0.7*width; break; - case 3: - xPad = 3; yPad = 1; width = 800; height = 0.5*width; break; - case 4: - xPad = 2; yPad = 2; width = 600; height = width; break; - default: - xPad = 3; yPad = 2; width = 800; height = 0.7*width; break; - } - Int_t noPad = xPad * yPad ; - - // this defines how many canvases we need - const Int_t noCanvas = 1 + (Int_t)(noPlots/noPad); - TCanvas **c = new TCanvas*[noCanvas]; - for (Int_t ic=0; ic<noCanvas; ic++) c[ic] = 0; - - cout << "--- Found: " << noPlots << " plots; " - << "will produce: " << noCanvas << " canvas" << endl; - - // counter variables - Int_t countCanvas = 0; - Int_t countPad = 1; - - // loop over all objects in "input_variables" directory - TIter next(gDirectory->GetListOfKeys()); - TKey *key; - char fname[200]; - while ((key = (TKey*)next())) { - - // make sure, that we only look at histograms - TClass *cl = gROOT->GetClass(key->GetClassName()); - if (!cl->InheritsFrom("TH1")) continue; - TH1 *sig = (TH1*)key->ReadObj(); - TString hname= sig->GetName(); - - // check for all signal histograms - if (hname.Contains("__S")){ // found a new signal plot - - // create new canvas - if ((c[countCanvas]==NULL) || (countPad>noPad)) { - cout << "--- Book canvas no: " << countCanvas << endl; - char cn[20]; - sprintf( cn, "canvas%d", countCanvas+1 ); - c[countCanvas] = new TCanvas( cn, "MVA Input Variables", - countCanvas*50+300, countCanvas*20, width, height ); - // style - c[countCanvas]->SetBorderMode(0); - c[countCanvas]->SetFillColor(10); - - c[countCanvas]->Divide(xPad,yPad); - countPad = 1; - } - - // save canvas to file - c[countCanvas]->cd(countPad); - countPad++; - if (countPad > noPad) { - c[countCanvas]->Update(); - sprintf( fname, "plots/variables_c%i", countCanvas+1 ); - TMVAGlob::imgconv( c[countCanvas], &fname[0] ); - countCanvas++; + // set style and remove existing canvas' + TMVAGlob::Initialize( useTMVAStyle ); + + // checks if file with name "fin" is already open, and if not opens one + TFile* file = TMVAGlob::OpenFile( fin ); + + TDirectory* dir = (TDirectory*)gDirectory->Get( directories[type] ); + if (dir==0) { + cout << "Could not locate directory '" << directories[type] << "' in file: " << fin << endl; + return; + } + dir->cd(); + + // how many plots are in the directory? + Int_t noPlots = ((dir->GetListOfKeys())->GetEntries()) / 2; + + // define Canvas layout here! 
+   // default setting
+   Int_t xPad; // no of plots in x
+   Int_t yPad; // no of plots in y
+   Int_t width; // size of canvas
+   Int_t height;
+   switch (noPlots) {
+   case 1:
+      xPad = 1; yPad = 1; width = 500; height = width; break;
+   case 2:
+      xPad = 2; yPad = 1; width = 600; height = 0.7*width; break;
+   case 3:
+      xPad = 3; yPad = 1; width = 800; height = 0.5*width; break;
+   case 4:
+      xPad = 2; yPad = 2; width = 400; height = width; break;
+   default:
+      xPad = 3; yPad = 2; width = 800; height = 0.7*width; break;
+   }
+   Int_t noPad = xPad * yPad ;
+
+   // this defines how many canvases we need
+   const Int_t noCanvas = 1 + (Int_t)((noPlots - 0.001)/noPad);
+   TCanvas **c = new TCanvas*[noCanvas];
+   for (Int_t ic=0; ic<noCanvas; ic++) c[ic] = 0;
+
+   cout << "--- Found: " << noPlots << " plots; will produce: " << noCanvas << " canvas(es)" << endl;
+
+   // counter variables
+   Int_t countCanvas = 0;
+   Int_t countPad = 1;
+
+   // loop over all objects in directory
+   TIter next(dir->GetListOfKeys());
+   TKey *key;
+   while ((key = (TKey*)next())) {
+
+      // make sure that we only look at histograms
+      TClass *cl = gROOT->GetClass(key->GetClassName());
+      if (!cl->InheritsFrom("TH1")) continue;
+      TH1 *sig = (TH1*)key->ReadObj();
+      TString hname= sig->GetName();
+
+      // check for all signal histograms
+      if (hname.Contains("__S")){ // found a new signal plot
+
+         // create new canvas
+         if ((c[countCanvas]==NULL) || (countPad>noPad)) {
+            cout << "--- Book canvas no: " << countCanvas << endl;
+            char cn[20];
+            sprintf( cn, "canvas%d", countCanvas+1 );
+            c[countCanvas] = new TCanvas( cn, titles[type],
+                                          countCanvas*50+200, countCanvas*20, width, height );
+            // style
+            c[countCanvas]->SetBorderMode(0);
+            c[countCanvas]->SetFillColor(0);
+
+            c[countCanvas]->Divide(xPad,yPad);
+            countPad = 1;
+         }
+
+         // save canvas to file
+         c[countCanvas]->cd(countPad);
+         countPad++;
+
+         // find the corresponding background histo
+         TString bgname = hname;
+         bgname.ReplaceAll("__S","__B");
+         TH1 *bgd = (TH1*)dir->Get(bgname);
+         if (bgd == NULL) {
+            cout << "ERROR!!! couldn't find background histo for " << hname << endl;
+            return;
+         }
+
+         // this is set but not stored during plot creation in MVA_Factory
+         TMVAGlob::SetSignalAndBackgroundStyle( sig, bgd );
+
+         // chop off "signal"
+         TString title(sig->GetTitle());
+         title.ReplaceAll("signal","");
+         sig->SetTitle( TString( titles[type] ) + title );
+         TMVAGlob::SetFrameStyle( sig, 1.2 );
+
+         // finally plot and overlay
+         Float_t sc = 1.1;
+         if (countPad==2) sc = 1.3;
+         sig->SetMaximum( TMath::Max( sig->GetMaximum(), bgd->GetMaximum() )*sc );
+         sig->Draw();
+
+         bgd->Draw("same");
+         sig->GetXaxis()->SetTitle( title );
+         sig->GetYaxis()->SetTitleOffset( 1.35 );
+         sig->GetYaxis()->SetTitle("Normalized");
+
+         // redraw axes
+         sig->Draw("sameaxis");
+
+         // Draw legend
+         if (countPad==2){
+            TLegend *legend= new TLegend( gPad->GetLeftMargin(),
+                                          1-gPad->GetTopMargin()-.18,
+                                          gPad->GetLeftMargin()+.4,
+                                          1-gPad->GetTopMargin() );
+            legend->AddEntry(sig,"Signal","F");
+            legend->AddEntry(bgd,"Background","F");
+            legend->Draw("same");
+            legend->SetBorderSize(1);
+            legend->SetMargin( 0.3 );
+         }
+
+         // save canvas to file
+         if (countPad > noPad) {
+            c[countCanvas]->Update();
+            TString fname = Form( "plots/%s_c%i", outfname[type].Data(), countCanvas+1 );
+            TMVAGlob::imgconv( c[countCanvas], &fname[0] );
+            // TMVAGlob::plot_logo(); // don't understand why this doesn't work ... :-(
+            countCanvas++;
+         }
       }
+   }
-         // find the corredponding backgrouns histo
-         TString bgname = hname;
-         bgname.ReplaceAll("__S","__B");
-         TH1 *bgd = (TH1*)gDirectory->Get(bgname);
-         if (bgd == NULL){
-            cout << "ERROR!!! couldn't find backgroung histo for" << hname << endl;
-            exit;
-         }
-         // this is set but not stored during plot creation in MVA_Factory
-         TMVAGlob::SetSignalAndBackgroundStyle( sig, bgd );
-
-         // chop off "signal"
-         TString title(sig->GetTitle());
-         title.ReplaceAll("signal","");
-         sig->SetTitle( TString("MVA input variable: ") + title );
-
-         TMVAGlob::SetFrameStyle( sig, 1.2 );
-
-         // finally plot and overlay
-         Float_t sc = 1.1;
-         if (countPad==2) sc = 1.3;
-         sig->SetMaximum( TMath::Max( sig->GetMaximum(), bgd->GetMaximum() )*sc );
-         sig->Draw();
-         bgd->SetLineColor( 2 );
-         bgd->SetLineWidth( 1 );
-         bgd->SetFillStyle( 3002 );
-         bgd->SetFillColor( 46 );
-         bgd->Draw("same");
-         sig->GetYaxis()->SetTitleOffset( 1.35 );
-         sig->GetYaxis()->SetTitle("Normalized");
-
-         // redraw axes
-         sig->Draw("sameaxis");
-
-         // Draw legend
-         if (countPad==2){
-            TLegend *legend= new TLegend( 0.131, 0.762, 0.531, 0.901 );
-            legend->AddEntry(sig,"Signal","F");
-            legend->AddEntry(bgd,"Background","F");
-            legend->Draw("same");
-            legend->SetBorderSize(1);
-            legend->SetMargin( 0.3 );
-         }
-
-      }
-   }
-   if (countPad <= noPad) {
-
-      c[countCanvas]->Update();
-      sprintf( fname, "plots/variables_c%i", countCanvas+1 );
-      TMVAGlob::imgconv( c[countCanvas], &fname[0] );
-   }
+   if (countPad <= noPad) {
+      c[countCanvas]->Update();
+      TString fname = Form( "plots/%s_c%i", outfname[type].Data(), countCanvas+1 );
+      TMVAGlob::imgconv( c[countCanvas], &fname[0] );
+   }
 }
--
GitLab
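
For reference, a minimal sketch of how the reworked variables() entry point above can be invoked from an interactive ROOT prompt, once TMVAnalysis.C has produced the TMVA.root output file. Only the enumerator TMVAGlob::kNormal is visible in this patch; the enumerator named in the commented line below is an assumption and would have to be replaced by whatever TMVAGlob::TypeOfPlot actually defines in tmvaglob.C.

   root [0] .L variables.C                               // also pulls in tmvaglob.C via its #include
   root [1] variables()                                  // defaults: "TMVA.root", TMVAGlob::kNormal, kTRUE
   root [2] variables( "TMVA.root", TMVAGlob::kNormal )  // same call, file and plot type spelled out
   // hypothetical: the decorrelated set would use a second TMVAGlob::TypeOfPlot value,
   // e.g. a kDecorrelated enumerator if tmvaglob.C defines one for "decorrelated_input_variables"

The third argument only controls whether the TMVA plotting TStyle is applied; the plots are written to the plots/ directory by TMVAGlob::imgconv() in either case.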