From ed2ce0cf883ec5f6d78a163a4f3adc3a45d4ae9d Mon Sep 17 00:00:00 2001
From: Olivier Couet <olivier.couet@cern.ch>
Date: Mon, 19 Dec 2016 15:21:32 +0100
Subject: [PATCH] - remove TABS - Doxygen format - in group TMVA - remove
 trailing spaces - spell check

---
 tmva/tmva/inc/TMVA/MethodTMlpANN.h          |  30 +--
 tmva/tmva/inc/TMVA/MisClassificationError.h |  12 +-
 tmva/tmva/inc/TMVA/MsgLogger.h              |  12 +-
 tmva/tmva/inc/TMVA/Node.h                   |  60 ++---
 tmva/tmva/inc/TMVA/NodekNN.h                | 232 ++++++++++----------
 tmva/tmva/src/MethodSVM.cxx                 |  79 ++++---
 tmva/tmva/src/MethodTMlpANN.cxx             |  68 +++---
 tmva/tmva/src/MinuitFitter.cxx              |  75 ++++---
 tmva/tmva/src/MinuitWrapper.cxx             |  51 +++--
 tmva/tmva/src/MisClassificationError.cxx    |  21 +-
 tmva/tmva/src/ModulekNN.cxx                 |  12 +-
 tmva/tmva/src/MsgLogger.cxx                 |  37 ++--
 tmva/tmva/src/Node.cxx                      |  86 ++++----
 13 files changed, 392 insertions(+), 383 deletions(-)

diff --git a/tmva/tmva/inc/TMVA/MethodTMlpANN.h b/tmva/tmva/inc/TMVA/MethodTMlpANN.h
index 5551ba3c16b..e0b69ec27a9 100644
--- a/tmva/tmva/inc/TMVA/MethodTMlpANN.h
+++ b/tmva/tmva/inc/TMVA/MethodTMlpANN.h
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$ 
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -19,9 +19,9 @@
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
@@ -36,7 +36,7 @@
 // MethodTMlpANN                                                        //
 //                                                                      //
 // Implementation of interface for Root-integrated artificial neural    //
-// network: TMultiLayerPerceptron                                       //  
+// network: TMultiLayerPerceptron                                       //
 //                                                                      //
 //////////////////////////////////////////////////////////////////////////
 
@@ -49,19 +49,19 @@ class TMultiLayerPerceptron;
 namespace TMVA {
 
    class MethodTMlpANN : public MethodBase {
-  
+
    public:
 
-      MethodTMlpANN( const TString& jobName, 
-                     const TString& methodTitle, 
+      MethodTMlpANN( const TString& jobName,
+                     const TString& methodTitle,
                      DataSetInfo& theData,
                      const TString& theOption = "3000:N-1:N-2");
 
-      MethodTMlpANN( DataSetInfo& theData, 
+      MethodTMlpANN( DataSetInfo& theData,
                      const TString& theWeightFile);
 
       virtual ~MethodTMlpANN( void );
-    
+
       virtual Bool_t HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets );
 
       // training method
@@ -78,7 +78,7 @@ namespace TMVA {
 
       // calculate the MVA value ...
       // - here it is just a dummy, as it is done in the overwritten
-      // - PrepareEvaluationtree... ugly but necessary due to the strucure 
+      // - PrepareEvaluationtree... ugly but necessary due to the structure
       //   of TMultiLayerPercepton in ROOT grr... :-(
       Double_t GetMvaValue( Double_t* err = 0, Double_t* errUpper = 0 );
 
@@ -111,9 +111,9 @@ namespace TMVA {
 
       TMultiLayerPerceptron* fMLP; // the TMLP
       TTree*                 fLocalTrainingTree; // local copy of training tree
-     
-      TString  fHiddenLayer;        // string containig the hidden layer structure
-      Int_t    fNcycles;            // number of training cylcles
+
+      TString  fHiddenLayer;        // string containing the hidden layer structure
+      Int_t    fNcycles;            // number of training cycles
       Double_t fValidationFraction; // fraction of events in training tree used for cross validation
       TString  fMLPBuildOptions;    // option string to build the mlp
 
diff --git a/tmva/tmva/inc/TMVA/MisClassificationError.h b/tmva/tmva/inc/TMVA/MisClassificationError.h
index a84fc02d116..d164f7f0576 100644
--- a/tmva/tmva/inc/TMVA/MisClassificationError.h
+++ b/tmva/tmva/inc/TMVA/MisClassificationError.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -18,9 +18,9 @@
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      Heidelberg U., Germany                                                    * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      Heidelberg U., Germany                                                    *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
@@ -49,7 +49,7 @@ namespace TMVA {
 
    public:
 
-      // consturctor for the Misclassification error
+      // constructor for the Misclassification error
       MisClassificationError() { fName = "MisCl"; }
 
       // copy constructor
@@ -64,7 +64,7 @@ namespace TMVA {
 
       ClassDef(MisClassificationError,0); // Implementation of the MisClassificationError as separation criterion
    };
- 
+
 } // namespace TMVA
 
 #endif
diff --git a/tmva/tmva/inc/TMVA/MsgLogger.h b/tmva/tmva/inc/TMVA/MsgLogger.h
index 44505dd03d5..65e7964d448 100644
--- a/tmva/tmva/inc/TMVA/MsgLogger.h
+++ b/tmva/tmva/inc/TMVA/MsgLogger.h
@@ -85,22 +85,22 @@ namespace TMVA {
 
       // Stream modifier(s)
       static MsgLogger& Endmsg( MsgLogger& logger );
-      
+
       // Accept stream modifiers
       MsgLogger& operator<< ( MsgLogger& ( *_f )( MsgLogger& ) );
       MsgLogger& operator<< ( std::ostream& ( *_f )( std::ostream& ) );
       MsgLogger& operator<< ( std::ios& ( *_f )( std::ios& ) );
-      
+
       // Accept message type specification
       MsgLogger& operator<< ( EMsgType type );
-      
+
       // For all the "conventional" inputs
       template <class T> MsgLogger& operator<< ( T arg ) {
          *(std::ostringstream*)this << arg;
          return *this;
       }
 
-      // Temporaly disables all the loggers (Caution! Use with care !)
+      // Temporarily disables all the loggers (Caution! Use with care !)
       static void  InhibitOutput();
       static void  EnableOutput();
 
@@ -118,13 +118,13 @@ namespace TMVA {
       EMsgType                 fActiveType;       // active type
       static const UInt_t      fgMaxSourceSize;   // maximum length of source name
 #if __cplusplus > 199711L
-      static std::atomic<Bool_t> fgOutputSupressed; // disable the output globaly (used by generic booster)
+      static std::atomic<Bool_t> fgOutputSupressed; // disable the output globally (used by generic booster)
       static std::atomic<Bool_t> fgInhibitOutput;   // flag to suppress all output
 
       static std::atomic<const std::map<EMsgType, std::string>*> fgTypeMap;   // matches output types with strings
       static std::atomic<const std::map<EMsgType, std::string>*> fgColorMap;  // matches output types with terminal colors
 #else
-      static Bool_t            fgOutputSupressed; // disable the output globaly (used by generic booster)
+      static Bool_t            fgOutputSupressed; // disable the output globally (used by generic booster)
       static Bool_t            fgInhibitOutput;   // flag to suppress all output
 
       static const std::map<EMsgType, std::string>* fgTypeMap;   // matches output types with strings
diff --git a/tmva/tmva/inc/TMVA/Node.h b/tmva/tmva/inc/TMVA/Node.h
index b17e29bbca9..9d40cd5f694 100644
--- a/tmva/tmva/inc/TMVA/Node.h
+++ b/tmva/tmva/inc/TMVA/Node.h
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$    
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -16,9 +16,9 @@
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
@@ -63,12 +63,12 @@ namespace TMVA {
       friend std::ostream& operator << (std::ostream& os, const Node& node);
       // output operator with a pointer to the node (which still prints the node itself)
       friend std::ostream& operator << (std::ostream& os, const Node* node);
-    
+
    public:
 
-      // constructor of a node 
+      // constructor of a node
       Node();
-      
+
       // constructor of a daughter node as a daughter of 'p'
       Node( Node* p, char pos );
 
@@ -80,30 +80,30 @@ namespace TMVA {
 
       virtual Node* CreateNode() const = 0;
 
-      // test event if i{ decends the tree at this node to the right  
+      // test event if i{ descends the tree at this node to the right
       virtual Bool_t GoesRight( const Event& ) const = 0;
-      // test event if it decends the tree at this node to the left 
+      // test event if it descends the tree at this node to the left
 
       virtual Bool_t GoesLeft ( const Event& ) const = 0;
-      // test event if it is equal to the event that "makes the node" (just for the "search tree"  
+      // test event if it is equal to the event that "makes the node" (just for the "search tree"
 
       // return pointer to the left/right daughter or parent node
       inline virtual Node* GetLeft  () const { return fLeft;   }
       inline virtual Node* GetRight () const { return fRight;  }
       inline virtual Node* GetParent() const { return fParent; }
-    
+
       // set pointer to the left/right daughter or parent node
-      inline virtual void SetLeft  (Node* l) { fLeft   = l;} 
-      inline virtual void SetRight (Node* r) { fRight  = r;} 
-      inline virtual void SetParent(Node* p) { fParent = p;} 
-    
+      inline virtual void SetLeft  (Node* l) { fLeft   = l;}
+      inline virtual void SetRight (Node* r) { fRight  = r;}
+      inline virtual void SetParent(Node* p) { fParent = p;}
+
       //recursively go through the part of the tree below this node and count all daughters
       Int_t  CountMeAndAllDaughters() const;
-    
+
       // printout of the node
       virtual void Print( std::ostream& os ) const = 0;
 
-      // recursive printout of the node and it daughters 
+      // recursive printout of the node and it daughters
       virtual void PrintRec ( std::ostream& os ) const = 0;
 
       void* AddXMLTo(void* parent) const;
@@ -113,21 +113,21 @@ namespace TMVA {
 
       // Set depth, layer of the where the node is within the tree, seen from the top (root)
       void SetDepth(UInt_t d){fDepth=d;}
-      
+
       // Return depth, layer of the where the node is within the tree, seen from the top (root)
       UInt_t GetDepth() const {return fDepth;}
-      
-      // set node position, i.e, the node is a left (l) or right (r) daugther
+
+      // set node position, i.e, the node is a left (l) or right (r) daughter
       void SetPos(char s) {fPos=s;}
-      
-      // Return the node position, i.e, the node is a left (l) or right (r) daugther
+
+      // Return the node position, i.e, the node is a left (l) or right (r) daughter
       char GetPos() const {return fPos;}
 
-      // Return the pointer to the Parent tree to which the Node belongs 
+      // Return the pointer to the Parent tree to which the Node belongs
       virtual TMVA::BinaryTree* GetParentTree() const {return fParentTree;}
 
-      // set the pointer to the Parent Tree to which the Node belongs 
-      virtual void SetParentTree(TMVA::BinaryTree* t) {fParentTree = t;} 
+      // set the pointer to the Parent Tree to which the Node belongs
+      virtual void SetParentTree(TMVA::BinaryTree* t) {fParentTree = t;}
 
       int GetCount();
 
@@ -135,17 +135,17 @@ namespace TMVA {
       virtual void ReadAttributes(void* node, UInt_t tmva_Version_Code = TMVA_VERSION_CODE  ) = 0;
       virtual void ReadContent(std::stringstream& s) =0;
 
-   protected: 
+   protected:
 
       Node*   fParent;              // the previous (parent) node
       Node*   fLeft;                // pointers to the two "daughter" nodes
       Node*   fRight;               // pointers to the two "daughter" nodes
 
-      char    fPos;                 // position, i.e. it is a left (l) or right (r) daughter 
+      char    fPos;                 // position, i.e. it is a left (l) or right (r) daughter
       UInt_t  fDepth;               // depth of the node within the tree (seen from root node)
 
-      BinaryTree*  fParentTree;     // pointer to the parent tree to which the Node belongs 
-   private: 
+      BinaryTree*  fParentTree;     // pointer to the parent tree to which the Node belongs
+   private:
 
       static Int_t fgCount;         // counter of all nodes present.. for debug.. to spot memory leaks...
 
diff --git a/tmva/tmva/inc/TMVA/NodekNN.h b/tmva/tmva/inc/TMVA/NodekNN.h
index b78f271a98f..333031c08ed 100644
--- a/tmva/tmva/inc/TMVA/NodekNN.h
+++ b/tmva/tmva/inc/TMVA/NodekNN.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Rustem Ospanov 
+// Author: Rustem Ospanov
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -14,8 +14,8 @@
  *      Rustem Ospanov <rustem@fnal.gov> - U. of Texas at Austin, USA             *
  *                                                                                *
  * Copyright (c) 2007:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      MPI-K Heidelberg, Germany                                                 *
  *      U. of Texas at Austin, USA                                                *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
@@ -37,31 +37,28 @@
 #include "Rtypes.h"
 #endif
 
-//////////////////////////////////////////////////////////////////////////
-//                                                                      //
-// kNN::Node                                                            //
-//                                                                      //
-// This file contains binary tree and global function template          //
-// that searches tree for k-nearest neigbors                            //
-//                                                                      //
-// Node class template parameter T has to provide these functions:      //
-//   rtype GetVar(UInt_t) const;                                        //
-//   - rtype is any type convertible to Float_t                         //
-//   UInt_t GetNVar(void) const;                                        //
-//   rtype GetWeight(void) const;                                       //
-//   - rtype is any type convertible to Double_t                        //
-//                                                                      //
-// Find function template parameter T has to provide these functions:   //
-// (in addition to above requirements)                                  //
-//   rtype GetDist(Float_t, UInt_t) const;                              //
-//   - rtype is any type convertible to Float_t                         //
-//   rtype GetDist(const T &) const;                                    //
-//   - rtype is any type convertible to Float_t                         //
-//                                                                      //
-//   where T::GetDist(Float_t, UInt_t) <= T::GetDist(const T &)         //
-//   for any pair of events and any variable number for these events    //
-//                                                                      //
-//////////////////////////////////////////////////////////////////////////
+/*! \class TMVA::kNN::Node
+\ingroup TMVA
+This file contains binary tree and global function template
+that searches tree for k-nearest neighbors
+
+Node class template parameter T has to provide these functions:
+  rtype GetVar(UInt_t) const;
+  - rtype is any type convertible to Float_t
+  UInt_t GetNVar(void) const;
+  rtype GetWeight(void) const;
+  - rtype is any type convertible to Double_t
+
+Find function template parameter T has to provide these functions:
+(in addition to above requirements)
+  rtype GetDist(Float_t, UInt_t) const;
+  - rtype is any type convertible to Float_t
+  rtype GetDist(const T &) const;
+  - rtype is any type convertible to Float_t
+
+  where T::GetDist(Float_t, UInt_t) <= T::GetDist(const T &)
+  for any pair of events and any variable number for these events
+*/
 
 namespace TMVA
 {
@@ -72,21 +69,21 @@ namespace TMVA
          {
 
          public:
-      
+
             Node(const Node *parent, const T &event, Int_t mod);
             ~Node();
 
             const Node* Add(const T &event, UInt_t depth);
-      
+
             void SetNodeL(Node *node);
             void SetNodeR(Node *node);
-      
+
             const T& GetEvent() const;
 
             const Node* GetNodeL() const;
             const Node* GetNodeR() const;
             const Node* GetNodeP() const;
-      
+
             Double_t GetWeight() const;
 
             Float_t GetVarDis() const;
@@ -98,7 +95,7 @@ namespace TMVA
             void Print() const;
             void Print(std::ostream& os, const std::string &offset = "") const;
 
-         private: 
+         private:
 
             // these methods are private and not implemented by design
             // use provided public constructor for all uses of this template class
@@ -109,12 +106,12 @@ namespace TMVA
          private:
 
             const Node* fNodeP;
-      
+
             Node* fNodeL;
-            Node* fNodeR;      
-      
+            Node* fNodeR;
+
             const T fEvent;
-      
+
             const Float_t fVarDis;
 
             Float_t fVarMin;
@@ -123,7 +120,7 @@ namespace TMVA
             const UInt_t fMod;
          };
 
-      // recursive search for k-nearest neighbor: k = nfind 
+      // recursive search for k-nearest neighbor: k = nfind
       template<class T>
          UInt_t Find(std::list<std::pair<const Node<T> *, Float_t> > &nlist,
                      const Node<T> *node, const T &event, UInt_t nfind);
@@ -142,7 +139,7 @@ namespace TMVA
       //template <class T>
       //std::ostream& operator<<(std::ostream& os, const Node<T> &node);
 
-      // 
+      //
       // Inlined functions for Node template
       //
       template <class T>
@@ -211,7 +208,7 @@ namespace TMVA
             return fMod;
          }
 
-      // 
+      //
       // Inlined global function(s)
       //
       template <class T>
@@ -224,9 +221,9 @@ namespace TMVA
    } // end of kNN namespace
 } // end of TMVA namespace
 
-//-------------------------------------------------------------------------------------------
+////////////////////////////////////////////////////////////////////////////////
 template<class T>
-TMVA::kNN::Node<T>::Node(const Node<T> *parent, const T &event, const Int_t mod) 
+TMVA::kNN::Node<T>::Node(const Node<T> *parent, const T &event, const Int_t mod)
 :fNodeP(parent),
    fNodeL(0),
    fNodeR(0),
@@ -237,7 +234,7 @@ TMVA::kNN::Node<T>::Node(const Node<T> *parent, const T &event, const Int_t mod)
    fMod(mod)
 {}
 
-//-------------------------------------------------------------------------------------------
+////////////////////////////////////////////////////////////////////////////////
 template<class T>
 TMVA::kNN::Node<T>::~Node()
 {
@@ -245,21 +242,22 @@ TMVA::kNN::Node<T>::~Node()
    if (fNodeR) delete fNodeR;
 }
 
-//-------------------------------------------------------------------------------------------
+////////////////////////////////////////////////////////////////////////////////
+/// This is a Node member function that adds a new node to a binary tree.
+/// Each node contains the maximum and minimum values of the splitting variable;
+/// left or right nodes are added based on the value of the splitting variable.
+
 template<class T>
 const TMVA::kNN::Node<T>* TMVA::kNN::Node<T>::Add(const T &event, const UInt_t depth)
 {
-   // This is Node member function that adds a new node to a binary tree.
-   // each node contains maximum and minimum values of splitting variable
-   // left or right nodes are added based on value of splitting variable
-   
+
    assert(fMod == depth % event.GetNVar() && "Wrong recursive depth in Node<>::Add");
-   
+
    const Float_t value = event.GetVar(fMod);
-   
+
    fVarMin = std::min(fVarMin, value);
    fVarMax = std::max(fVarMax, value);
-   
+
    Node<T> *node = 0;
    if (value < fVarDis) {
       if (fNodeL)
@@ -278,36 +276,36 @@ const TMVA::kNN::Node<T>* TMVA::kNN::Node<T>::Add(const T &event, const UInt_t d
       else {
          fNodeR = new Node<T>(this, event, (depth + 1) % event.GetNVar());
          node = fNodeR;
-      }      
+      }
    }
-   
+
    return node;
 }
-   
-//-------------------------------------------------------------------------------------------
+
+////////////////////////////////////////////////////////////////////////////////
 template<class T>
 void TMVA::kNN::Node<T>::Print() const
 {
    Print(std::cout);
 }
-   
-//-------------------------------------------------------------------------------------------
+
+////////////////////////////////////////////////////////////////////////////////
 template<class T>
 void TMVA::kNN::Node<T>::Print(std::ostream& os, const std::string &offset) const
 {
    os << offset << "-----------------------------------------------------------" << std::endl;
-   os << offset << "Node: mod " << fMod 
-      << " at " << fVarDis 
+   os << offset << "Node: mod " << fMod
+      << " at " << fVarDis
       << " with weight: " << GetWeight() << std::endl
       << offset << fEvent;
-   
+
    if (fNodeL) {
       os << offset << "Has left node " << std::endl;
    }
    if (fNodeR) {
       os << offset << "Has right node" << std::endl;
    }
-   
+
    if (fNodeL) {
       os << offset << "PrInt_t left node " << std::endl;
       fNodeL->Print(os, offset + " ");
@@ -316,32 +314,31 @@ void TMVA::kNN::Node<T>::Print(std::ostream& os, const std::string &offset) cons
       os << offset << "PrInt_t right node" << std::endl;
       fNodeR->Print(os, offset + " ");
    }
-   
+
    if (!fNodeL && !fNodeR) {
       os << std::endl;
    }
 }
 
-//-------------------------------------------------------------------------------------------
+////////////////////////////////////////////////////////////////////////////////
+/// This is a global templated function that searches for k-nearest neighbors.
+/// The list contains k or fewer nodes that are closest to the event.
+/// only nodes with positive weights are added to list.
+/// each node contains maximum and minimum values of splitting variable
+/// for all its children - this range is checked to avoid descending into
+/// nodes that are definitely outside current minimum neighbourhood.
+///
+/// This function should be modified with care.
+
 template<class T>
 UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t> > &nlist,
                        const TMVA::kNN::Node<T> *node, const T &event, const UInt_t nfind)
 {
-   // This is a global templated function that searches for k-nearest neighbors.
-   // list contains k or less nodes that are closest to event.
-   // only nodes with positive weights are added to list.
-   // each node contains maximum and minimum values of splitting variable
-   // for all its children - this range is checked to avoid descending into
-   // nodes that are defintely outside current minimum neighbourhood.
-   //
-   // This function should be modified with care.
-   //
-
    if (!node || nfind < 1) {
       return 0;
    }
 
-   const Float_t value = event.GetVar(node->GetMod());     
+   const Float_t value = event.GetVar(node->GetMod());
 
    if (node->GetWeight() > 0.0) {
 
@@ -350,24 +347,24 @@ UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t>
       if (!nlist.empty()) {
 
          max_dist = nlist.back().second;
-         
+
          if (nlist.size() == nfind) {
-            if (value > node->GetVarMax() && 
+            if (value > node->GetVarMax() &&
                 event.GetDist(node->GetVarMax(), node->GetMod()) > max_dist) {
                return 0;
-            }  
-            if (value < node->GetVarMin() && 
+            }
+            if (value < node->GetVarMin() &&
                 event.GetDist(node->GetVarMin(), node->GetMod()) > max_dist) {
                return 0;
             }
-         }      
+         }
       }
 
       const Float_t distance = event.GetDist(node->GetEvent());
-      
+
       Bool_t insert_this = kFALSE;
       Bool_t remove_back = kFALSE;
-      
+
       if (nlist.size() < nfind) {
          insert_this = kTRUE;
       }
@@ -381,12 +378,12 @@ UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t>
          std::cerr << "TMVA::kNN::Find() - logic error in recursive procedure" << std::endl;
          return 1;
       }
-      
+
       if (insert_this) {
-         // need typename keyword because qualified dependent names 
+         // need typename keyword because qualified dependent names
          // are not valid types unless preceded by 'typename'.
          typename std::list<std::pair<const Node<T> *, Float_t> >::iterator lit = nlist.begin();
-         
+
          // find a place where current node should be inserted
          for (; lit != nlist.end(); ++lit) {
             if (distance < lit->second) {
@@ -396,22 +393,22 @@ UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t>
                continue;
             }
          }
-         
+
          nlist.insert(lit, std::pair<const Node<T> *, Float_t>(node, distance));
-         
+
          if (remove_back) {
             nlist.pop_back();
          }
       }
    }
-   
+
    UInt_t count = 1;
    if (node->GetNodeL() && node->GetNodeR()) {
       if (value < node->GetVarDis()) {
          count += Find(nlist, node->GetNodeL(), event, nfind);
          count += Find(nlist, node->GetNodeR(), event, nfind);
       }
-      else { 
+      else {
          count += Find(nlist, node->GetNodeR(), event, nfind);
          count += Find(nlist, node->GetNodeL(), event, nfind);
       }
@@ -424,34 +421,33 @@ UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t>
          count += Find(nlist, node->GetNodeR(), event, nfind);
       }
    }
-   
+
    return count;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+/// This is a global templated function that searches for k-nearest neighbors.
+/// The list contains all nodes that are closest to the event
+/// and have a sum of event weights >= nfind.
+/// Only nodes with positive weights are added to list.
+/// Requirement for used classes:
+///  - each node contains maximum and minimum values of splitting variable
+///    for all its children
+///  - min and max range is checked to avoid descending into
+///    nodes that are definitely outside current minimum neighbourhood.
+///
+/// This function should be modified with care.
 
-//-------------------------------------------------------------------------------------------
 template<class T>
 UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t> > &nlist,
                        const TMVA::kNN::Node<T> *node, const T &event, const Double_t nfind, Double_t ncurr)
 {
-   // This is a global templated function that searches for k-nearest neighbors.
-   // list contains all nodes that are closest to event 
-   // and have sum of event weights >= nfind.
-   // Only nodes with positive weights are added to list.
-   // Requirement for used classes:
-   //  - each node contains maximum and minimum values of splitting variable
-   //    for all its children
-   //  - min and max range is checked to avoid descending into
-   //    nodes that are defintely outside current minimum neighbourhood.
-   //
-   // This function should be modified with care.
-   //
 
    if (!node || !(nfind < 0.0)) {
       return 0;
    }
 
-   const Float_t value = event.GetVar(node->GetMod());     
+   const Float_t value = event.GetVar(node->GetMod());
 
    if (node->GetWeight() > 0.0) {
 
@@ -460,23 +456,23 @@ UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t>
       if (!nlist.empty()) {
 
          max_dist = nlist.back().second;
-         
+
          if (!(ncurr < nfind)) {
-            if (value > node->GetVarMax() && 
+            if (value > node->GetVarMax() &&
                 event.GetDist(node->GetVarMax(), node->GetMod()) > max_dist) {
                return 0;
-            }  
-            if (value < node->GetVarMin() && 
+            }
+            if (value < node->GetVarMin() &&
                 event.GetDist(node->GetVarMin(), node->GetMod()) > max_dist) {
                return 0;
             }
-         }      
+         }
       }
 
       const Float_t distance = event.GetDist(node->GetEvent());
-      
+
       Bool_t insert_this = kFALSE;
-      
+
       if (ncurr < nfind) {
          insert_this = kTRUE;
       }
@@ -489,12 +485,12 @@ UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t>
          std::cerr << "TMVA::kNN::Find() - logic error in recursive procedure" << std::endl;
          return 1;
       }
-      
+
       if (insert_this) {
          // (re)compute total current weight when inserting a new node
          ncurr = 0;
 
-         // need typename keyword because qualified dependent names 
+         // need typename keyword because qualified dependent names
          // are not valid types unless preceded by 'typename'.
          typename std::list<std::pair<const Node<T> *, Float_t> >::iterator lit = nlist.begin();
 
@@ -506,9 +502,9 @@ UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t>
 
             ncurr += lit -> first -> GetWeight();
          }
-         
+
          lit = nlist.insert(lit, std::pair<const Node<T> *, Float_t>(node, distance));
-         
+
          for (; lit != nlist.end(); ++lit) {
             ncurr += lit -> first -> GetWeight();
             if (!(ncurr < nfind)) {
@@ -522,15 +518,15 @@ UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t>
                nlist.erase(lit, nlist.end());
             }
       }
-   }   
-   
+   }
+
    UInt_t count = 1;
    if (node->GetNodeL() && node->GetNodeR()) {
       if (value < node->GetVarDis()) {
          count += Find(nlist, node->GetNodeL(), event, nfind, ncurr);
          count += Find(nlist, node->GetNodeR(), event, nfind, ncurr);
       }
-      else { 
+      else {
          count += Find(nlist, node->GetNodeR(), event, nfind, ncurr);
          count += Find(nlist, node->GetNodeL(), event, nfind, ncurr);
       }
@@ -543,7 +539,7 @@ UInt_t TMVA::kNN::Find(std::list<std::pair<const TMVA::kNN::Node<T> *, Float_t>
          count += Find(nlist, node->GetNodeR(), event, nfind, ncurr);
       }
    }
-   
+
    return count;
 }
 
diff --git a/tmva/tmva/src/MethodSVM.cxx b/tmva/tmva/src/MethodSVM.cxx
index a0404d097b2..ba3c2259750 100644
--- a/tmva/tmva/src/MethodSVM.cxx
+++ b/tmva/tmva/src/MethodSVM.cxx
@@ -20,12 +20,12 @@
  *      Kamil Kraszewski      <kalq@cern.ch>     - IFJ PAN & UJ, Krakow, Poland   *
  *      Maciej Kruk           <mkruk@cern.ch>    - IFJ PAN & AGH, Krakow, Poland  *
  *                                                                                *
- * Introduction of kernel parameter optimisation                                  *   
- *            and additional kernel functions by:                                 *   
- *      Adrian Bevan          <adrian.bevan@cern.ch> -   Queen Mary               *   
- *                                                       University of London, UK *   
- *      Tom Stevenson <thomas.james.stevenson@cern.ch> - Queen Mary               *   
- *                                                       University of London, UK * 
+ * Introduction of kernel parameter optimisation                                  *
+ *            and additional kernel functions by:                                 *
+ *      Adrian Bevan          <adrian.bevan@cern.ch> -   Queen Mary               *
+ *                                                       University of London, UK *
+ *      Tom Stevenson <thomas.james.stevenson@cern.ch> - Queen Mary               *
+ *                                                       University of London, UK *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
  *      CERN, Switzerland                                                         *
@@ -37,10 +37,10 @@
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-//_______________________________________________________________________
-//
-// SMO Platt's SVM classifier with Keerthi & Shavade improvements
-//_______________________________________________________________________
+/*! \class TMVA::MethodSVM
+\ingroup TMVA
+SMO Platt's SVM classifier with Keerthi & Shevade improvements
+*/
 
 #include "TMVA/MethodSVM.h"
 
@@ -232,15 +232,15 @@ void TMVA::MethodSVM::DeclareOptions()
    DeclareOptionRef( fTheKernel = "RBF", "Kernel", "Pick which kernel ( RBF or MultiGauss )");
    // for gaussian kernel parameter(s)
    DeclareOptionRef( fGamma = 1., "Gamma", "RBF kernel parameter: Gamma (size of the Kernel)");
-   // for polynomial kernel parameter(s)                                              
+   // for polynomial kernel parameter(s)
    DeclareOptionRef( fOrder = 3, "Order", "Polynomial Kernel parameter: polynomial order");
    DeclareOptionRef( fTheta = 1., "Theta", "Polynomial Kernel parameter: polynomial theta");
-   // for multi-gaussian kernel parameter(s)                                          
+   // for multi-gaussian kernel parameter(s)
    DeclareOptionRef( fGammas = "", "GammaList", "MultiGauss parameters" );
 
-   // for range and step number for kernel paramter optimisation                      
+   // for range and step number for kernel parameter optimisation
    DeclareOptionRef( fTune = "All", "Tune", "Tune Parameters");
-   // for list of kernels to be used with product or sum kernel                       
+   // for list of kernels to be used with product or sum kernel
    DeclareOptionRef( fMultiKernels = "None", "KernelList", "Sum or product of kernels");
    DeclareOptionRef( fLoss = "hinge", "Loss", "Loss function");
 
@@ -265,7 +265,7 @@ void TMVA::MethodSVM::DeclareCompatibilityOptions()
    DeclareOptionRef( fTheKernel = "Gauss", "Kernel", "Uses kernel function");
    // for gaussian kernel parameter(s)
    DeclareOptionRef( fDoubleSigmaSquared = 2., "Sigma", "Kernel parameter: sigma");
-   // for polynomiarl kernel parameter(s)
+   // for polynomial kernel parameter(s)
    DeclareOptionRef( fOrder = 3, "Order", "Polynomial Kernel parameter: polynomial order");
    // for sigmoid kernel parameters
    DeclareOptionRef( fTheta = 1., "Theta", "Sigmoid Kernel parameter: theta");
@@ -301,9 +301,9 @@ void TMVA::MethodSVM::Train()
    Double_t CSig;
    Double_t CBkg;
 
-   // Use number of signal and background from above to weight the cost parameter     
-   // so that the training is not biased towards the larger dataset when the signal   
-   // and background samples are significantly different sizes.                       
+   // Use number of signal and background from above to weight the cost parameter
+   // so that the training is not biased towards the larger dataset when the signal
+   // and background samples are significantly different sizes.
    if(nSignal < nBackground){
       CSig = fCost;
       CBkg = CSig*((double)nSignal/nBackground);
@@ -313,7 +313,7 @@ void TMVA::MethodSVM::Train()
       CSig = CBkg*((double)nSignal/nBackground);
    }
 
-   // Loop over events and assign the correct cost parameter.                         
+   // Loop over events and assign the correct cost parameter.
    for (Int_t ievnt=0; ievnt<Data()->GetNEvents(); ievnt++){
       if (GetEvent(ievnt)->GetWeight() != 0){
          if(DataInfo().IsSignal(GetEvent(ievnt))){
@@ -327,9 +327,9 @@ void TMVA::MethodSVM::Train()
       }
    }
 
-   // Set the correct kernel function.                                                
-   // Here we only use valid Mercer kernels. In the literature some people have reported reasonable                                                                        
-   // results using Sigmoid kernel function however that is not a valid Mercer kernel and is not used here.                                                                           
+   // Set the correct kernel function.
+   // Here we only use valid Mercer kernels. In the literature some people have reported reasonable
+   // results using Sigmoid kernel function however that is not a valid Mercer kernel and is not used here.
    if( fTheKernel == "RBF"){
       fSVKernelFunction = new SVKernelFunction( SVKernelFunction::kRBF, fGamma);
    }
@@ -559,7 +559,7 @@ void  TMVA::MethodSVM::ReadWeightsFromStream( std::istream& istr )
    delete fSVKernelFunction;
    if (fTheKernel == "Gauss" ) {
       fSVKernelFunction = new SVKernelFunction(1/fDoubleSigmaSquared);
-   } 
+   }
    else {
       SVKernelFunction::EKernelType k = SVKernelFunction::kLinear;
       if(fTheKernel == "Linear")           k = SVKernelFunction::kLinear;
@@ -665,7 +665,7 @@ void TMVA::MethodSVM::MakeClassSpecific( std::ostream& fout, const TString& clas
    fout << "}" << std::endl;
    fout << std::endl;
 
-   // GetMvaValue__ function defninition
+   // GetMvaValue__ function definition
    fout << "inline double " << className << "::GetMvaValue__(const std::vector<double>& inputValues ) const" << std::endl;
    fout << "{" << std::endl;
    fout << "   double mvaval = 0; " << std::endl;
@@ -727,7 +727,7 @@ void TMVA::MethodSVM::GetHelpMessage() const
    Log() << Endl;
    Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
    Log() << Endl;
-   Log() << "The Support Vector Machine (SVM) builds a hyperplance separating" << Endl;
+   Log() << "The Support Vector Machine (SVM) builds a hyperplane separating" << Endl;
    Log() << "signal and background events (vectors) using the minimal subset of " << Endl;
    Log() << "all vectors used for training (support vectors). The extension to" << Endl;
    Log() << "the non-linear case is performed by mapping input vectors into a " << Endl;
@@ -761,8 +761,12 @@ void TMVA::MethodSVM::GetHelpMessage() const
 /// This is used to optimise the kernel function parameters and cost. All kernel parameters
 /// are optimised by default with default ranges, however the parameters to be optimised can
 /// be set when booking the method with the option Tune.
-/// Example: "Tune=Gamma[0.01;1.0;100]" would only tune the RBF Gamma between 0.01 and 1.0
+///
+/// Example:
+///
+/// "Tune=Gamma[0.01;1.0;100]" would only tune the RBF Gamma between 0.01 and 1.0
 /// with 100 steps.
+
 std::map<TString,Double_t> TMVA::MethodSVM::OptimizeTuningParameters(TString fomType, TString fitType)
 {
    // Call the Optimizer with the set of kernel parameters and ranges that are meant to be tuned.
@@ -815,7 +819,7 @@ std::map<TString,Double_t> TMVA::MethodSVM::OptimizeTuningParameters(TString fom
                exit(1);
             }
          }
-      }  
+      }
    }
    else if( fTheKernel == "MultiGauss" ){
       if (fTune == "All"){
@@ -913,13 +917,13 @@ std::map<TString,Double_t> TMVA::MethodSVM::OptimizeTuningParameters(TString fom
    }
    OptimizeConfigParameters optimize(this, tuneParameters, fomType, fitType);
    tunedParameters=optimize.optimize();
-  
+
    return tunedParameters;
 
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// Set the tuning parameters according to the arguement
+/// Set the tuning parameters according to the argument
 void TMVA::MethodSVM::SetTuneParameters(std::map<TString,Double_t> tuneParameters)
 {
    std::map<TString,Double_t>::iterator it;
@@ -943,7 +947,7 @@ void TMVA::MethodSVM::SetTuneParameters(std::map<TString,Double_t> tuneParameter
          stringstream s;
          s << fVarNames.at(i);
          string str = "Gamma_" + s.str();
-         Log() << kWARNING << tuneParameters.find(str)->first << " = " << tuneParameters.find(str)->second << Endl; 
+         Log() << kWARNING << tuneParameters.find(str)->first << " = " << tuneParameters.find(str)->second << Endl;
          fmGamma.push_back(tuneParameters.find(str)->second);
       }
       for(it=tuneParameters.begin(); it!=tuneParameters.end(); it++){
@@ -1017,9 +1021,9 @@ void TMVA::MethodSVM::SetTuneParameters(std::map<TString,Double_t> tuneParameter
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// Takes as input a string of values for multigaussian gammas and splits it, filling the 
+/// Takes as input a string of values for multigaussian gammas and splits it, filling the
 /// gamma vector required by the SVKernelFunction. Example: "GammaList=0.1,0.2,0.3" would
-/// make a vector with Gammas of 0.1,0.2 & 0.3 corresponding to input variables 1,2 & 3 
+/// make a vector with Gammas of 0.1,0.2 & 0.3 corresponding to input variables 1,2 & 3
 /// respectively.
 void TMVA::MethodSVM::SetMGamma(std::string & mg){
    std::stringstream tempstring(mg);
@@ -1051,8 +1055,12 @@ void TMVA::MethodSVM::GetMGamma(const std::vector<float> & gammas){
 /// Function providing string manipulation for product or sum of kernels functions
 /// to take list of kernels specified in the booking of the method and provide a vector
 /// of SV kernels to iterate over in SVKernelFunction.
-/// Example: "KernelList=RBF*Polynomial" would use a product of the RBF and Polynomial
+///
+/// Example:
+///
+/// "KernelList=RBF*Polynomial" would use a product of the RBF and Polynomial
 /// kernels.
+
 std::vector<TMVA::SVKernelFunction::EKernelType> TMVA::MethodSVM::MakeKernelList(std::string multiKernels, TString kernel)
 {
    std::vector<TMVA::SVKernelFunction::EKernelType> kernelsList;
@@ -1061,7 +1069,7 @@ std::vector<TMVA::SVKernelFunction::EKernelType> TMVA::MethodSVM::MakeKernelList
    if(kernel=="Prod"){
       while (std::getline(tempstring,value,'*')){
          if(value == "RBF"){ kernelsList.push_back(SVKernelFunction::kRBF);}
-         else if(value == "MultiGauss"){ 
+         else if(value == "MultiGauss"){
             kernelsList.push_back(SVKernelFunction::kMultiGauss);
             if(fGammas!=""){
                SetMGamma(fGammas);
@@ -1161,6 +1169,7 @@ std::map< TString,std::vector<Double_t> > TMVA::MethodSVM::GetTuningOptions()
 /// Calculates loss for testing dataset. The loss function can be specified when
 /// booking the method, otherwise defaults to hinge loss. Currently not used however
 /// is accesible if required.
+
 Double_t TMVA::MethodSVM::getLoss(TString lossFunction){
    Double_t loss = 0.0;
    Double_t sumW = 0.0;
@@ -1204,6 +1213,6 @@ Double_t TMVA::MethodSVM::getLoss(TString lossFunction){
       sumW += w;
    }
    loss = temp/sumW;
-  
+
    return loss;
 }
diff --git a/tmva/tmva/src/MethodTMlpANN.cxx b/tmva/tmva/src/MethodTMlpANN.cxx
index 68fb7b7e570..3f82c5e48cc 100644
--- a/tmva/tmva/src/MethodTMlpANN.cxx
+++ b/tmva/tmva/src/MethodTMlpANN.cxx
@@ -24,28 +24,24 @@
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-////////////////////////////////////////////////////////////////////////////////
+/*! \class TMVA::MethodTMlpANN
+\ingroup TMVA
+
+This is the TMVA TMultiLayerPerceptron interface class. It provides the
+training and testing of the ROOT internal MLP class in the TMVA framework.
+
+Available learning methods:
+
+  - Stochastic
+  - Batch
+  - SteepestDescent
+  - RibierePolak
+  - FletcherReeves
+  - BFGS
 
-/* Begin_Html
-
-   This is the TMVA TMultiLayerPerceptron interface class. It provides the
-   training and testing the ROOT internal MLP class in the TMVA framework.<be>
-
-   Available learning methods:<br>
-   <ul>
-   <li>Stochastic      </li>
-   <li>Batch           </li>
-   <li>SteepestDescent </li>
-   <li>RibierePolak    </li>
-   <li>FletcherReeves  </li>
-   <li>BFGS            </li>
-   </ul>
-   End_Html */
-//
-//  See the TMultiLayerPerceptron class description
-//  for details on this ANN.
-//
-//_______________________________________________________________________
+See the TMultiLayerPerceptron class description
+for details on this ANN.
+*/
 
 #include "TMVA/MethodTMlpANN.h"
 
@@ -192,15 +188,18 @@ void TMVA::MethodTMlpANN::CreateMLPOptions( TString layerSpec )
 
 ////////////////////////////////////////////////////////////////////////////////
 /// define the options (their key words) that can be set in the option string
+///
 /// know options:
-/// NCycles       <integer>    Number of training cycles (too many cycles could overtrain the network)
-/// HiddenLayers  <string>     Layout of the hidden layers (nodes per layer)
-///   * specifiactions for each hidden layer are separated by commata
-///   * for each layer the number of nodes can be either absolut (simply a number)
-///        or relative to the number of input nodes to the neural net (N)
-///   * there is always a single node in the output layer
+///
+///  - NCycles       <integer>    Number of training cycles (too many cycles could overtrain the network)
+///  - HiddenLayers  <string>     Layout of the hidden layers (nodes per layer)
+///     * specifications for each hidden layer are separated by comma
+///     * for each layer the number of nodes can be either absolute (simply a number)
+///          or relative to the number of input nodes to the neural net (N)
+///     * there is always a single node in the output layer
+///
 ///   example: a net with 6 input nodes and "Hiddenlayers=N-1,N-2" has 6,5,4,1 nodes in the
-///   layers 1,2,3,4, repectively
+///   layers 1,2,3,4, respectively
 
 void TMVA::MethodTMlpANN::DeclareOptions()
 {
@@ -257,12 +256,12 @@ Double_t TMVA::MethodTMlpANN::GetMvaValue( Double_t* err, Double_t* errUpper )
 /// performs TMlpANN training
 /// available learning methods:
 ///
-///       TMultiLayerPerceptron::kStochastic
-///       TMultiLayerPerceptron::kBatch
-///       TMultiLayerPerceptron::kSteepestDescent
-///       TMultiLayerPerceptron::kRibierePolak
-///       TMultiLayerPerceptron::kFletcherReeves
-///       TMultiLayerPerceptron::kBFGS
+///  - TMultiLayerPerceptron::kStochastic
+///  - TMultiLayerPerceptron::kBatch
+///  - TMultiLayerPerceptron::kSteepestDescent
+///  - TMultiLayerPerceptron::kRibierePolak
+///  - TMultiLayerPerceptron::kFletcherReeves
+///  - TMultiLayerPerceptron::kBFGS
 ///
 /// TMultiLayerPerceptron wants test and training tree at once
 /// so merge the training and testing trees from the MVA factory first:
@@ -350,7 +349,6 @@ void TMVA::MethodTMlpANN::Train( void )
    delete [] vArr;
 }
 
-
 ////////////////////////////////////////////////////////////////////////////////
 /// write weights to xml file
 
diff --git a/tmva/tmva/src/MinuitFitter.cxx b/tmva/tmva/src/MinuitFitter.cxx
index 0aa26612309..3017b2cd020 100644
--- a/tmva/tmva/src/MinuitFitter.cxx
+++ b/tmva/tmva/src/MinuitFitter.cxx
@@ -1,4 +1,4 @@
-// @(#)root/tmva $Id$ 
+// @(#)root/tmva $Id$
 // Author: Andraes Hoecker
 
 /**********************************************************************************
@@ -14,19 +14,18 @@
  *      Andreas Hoecker  <Andreas.Hocker@cern.ch> - CERN, Switzerland             *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      MPI-K Heidelberg, Germany                                                 *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-//_______________________________________________________________________
-//                                                                      
-// Fitter using MINUIT
-//_______________________________________________________________________
-
+/*! \class TMVA::MinuitFitter
+\ingroup TMVA
+Fitter using MINUIT
+*/
 #include "TMVA/MinuitFitter.h"
 
 #include "TMVA/Configurable.h"
@@ -45,19 +44,19 @@ ClassImp(TMVA::MinuitFitter)
 ////////////////////////////////////////////////////////////////////////////////
 /// constructor
 
-TMVA::MinuitFitter::MinuitFitter( IFitterTarget& target, 
-                                  const TString& name, 
-                                  std::vector<TMVA::Interval*>& ranges, 
-                                  const TString& theOption ) 
+TMVA::MinuitFitter::MinuitFitter( IFitterTarget& target,
+                                  const TString& name,
+                                  std::vector<TMVA::Interval*>& ranges,
+                                  const TString& theOption )
 : TMVA::FitterBase( target, name, ranges, theOption ),
    TMVA::IFitterTarget( )
 {
    // default parameters settings for Simulated Annealing algorithm
    DeclareOptions();
    ParseOptions();
-   
+
    Init();  // initialise the TFitter
-}            
+}
 
 ////////////////////////////////////////////////////////////////////////////////
 /// destructor
@@ -70,15 +69,15 @@ TMVA::MinuitFitter::~MinuitFitter( )
 ////////////////////////////////////////////////////////////////////////////////
 /// declare SA options
 
-void TMVA::MinuitFitter::DeclareOptions() 
+void TMVA::MinuitFitter::DeclareOptions()
 {
    DeclareOptionRef(fErrorLevel    =  1,     "ErrorLevel",    "TMinuit: error level: 0.5=logL fit, 1=chi-squared fit" );
    DeclareOptionRef(fPrintLevel    = -1,     "PrintLevel",    "TMinuit: output level: -1=least, 0, +1=all garbage" );
    DeclareOptionRef(fFitStrategy   = 2,      "FitStrategy",   "TMinuit: fit strategy: 2=best" );
    DeclareOptionRef(fPrintWarnings = kFALSE, "PrintWarnings", "TMinuit: suppress warnings" );
    DeclareOptionRef(fUseImprove    = kTRUE,  "UseImprove",    "TMinuit: use IMPROVE" );
-   DeclareOptionRef(fUseMinos      = kTRUE,  "UseMinos",      "TMinuit: use MINOS" );  
-   DeclareOptionRef(fBatch         = kFALSE, "SetBatch",      "TMinuit: use batch mode" );  
+   DeclareOptionRef(fUseMinos      = kTRUE,  "UseMinos",      "TMinuit: use MINOS" );
+   DeclareOptionRef(fBatch         = kFALSE, "SetBatch",      "TMinuit: use batch mode" );
    DeclareOptionRef(fMaxCalls      = 1000,   "MaxCalls",      "TMinuit: approximate maximum number of function calls" );
    DeclareOptionRef(fTolerance     = 0.1,    "Tolerance",     "TMinuit: tolerance to the function value at the minimum" );
 }
@@ -93,17 +92,17 @@ void TMVA::MinuitFitter::Init()
    // Execute fitting
    if (!fBatch) Log() << kINFO << "<MinuitFitter> Init " << Endl;
 
-   // timing of MC   
+   // timing of MC
    Timer timer;
 
    // initialize first -> prepare the fitter
 
    // instantiate minuit
-   // maximum number of fit parameters is equal to 
+   // maximum number of fit parameters is equal to
    // (2xnpar as workaround for TMinuit allocation bug (taken from RooMinuit))
    fMinWrap = new MinuitWrapper( fFitterTarget, 2*GetNpars() );
 
-   // output level      
+   // output level
    args[0] = fPrintLevel;
    fMinWrap->ExecuteCommand( "SET PRINTOUT", args, 1 );
 
@@ -111,14 +110,14 @@ void TMVA::MinuitFitter::Init()
 
    // set fitter object, and clear
    fMinWrap->Clear();
-   
+
    // error level: 1 (2*log(L) fit
    args[0] = fErrorLevel;
    fMinWrap->ExecuteCommand( "SET ERR", args, 1 );
 
-   // print warnings ?   
+   // print warnings ?
    if (!fPrintWarnings) fMinWrap->ExecuteCommand( "SET NOWARNINGS", args, 0 );
-      
+
    // define fit strategy
    args[0] = fFitStrategy;
    fMinWrap->ExecuteCommand( "SET STRATEGY", args, 1 );
@@ -140,20 +139,20 @@ Double_t TMVA::MinuitFitter::Run( std::vector<Double_t>& pars )
       Log() << kFATAL << "<Run> Mismatch in number of parameters: (a)"
             << GetNpars() << " != " << pars.size() << Endl;
 
-   // timing of MC   
+   // timing of MC
    Timer* timer = 0;
    if (!fBatch) timer = new Timer();
 
    // define fit parameters
    for (Int_t ipar=0; ipar<fNpars; ipar++) {
-      fMinWrap->SetParameter( ipar, Form( "Par%i",ipar ), 
-                              pars[ipar], fRanges[ipar]->GetWidth()/100.0, 
-                              fRanges[ipar]->GetMin(), fRanges[ipar]->GetMax() );      
+      fMinWrap->SetParameter( ipar, Form( "Par%i",ipar ),
+                              pars[ipar], fRanges[ipar]->GetWidth()/100.0,
+                              fRanges[ipar]->GetMin(), fRanges[ipar]->GetMax() );
       if (fRanges[ipar]->GetWidth() == 0.0) fMinWrap->FixParameter( ipar );
    }
 
    // --------- execute the fit
-   
+
    // continue with usual case
    args[0] = fMaxCalls;
    args[1] = fTolerance;
@@ -173,7 +172,7 @@ Double_t TMVA::MinuitFitter::Run( std::vector<Double_t>& pars )
    // retrieve fit result (statistics)
    Double_t chi2;
    Double_t edm;
-   Double_t errdef; 
+   Double_t errdef;
    Int_t    nvpar;
    Int_t    nparx;
    fMinWrap->GetStats( chi2, edm, errdef, nvpar, nparx );
@@ -191,27 +190,27 @@ Double_t TMVA::MinuitFitter::Run( std::vector<Double_t>& pars )
       pars[ipar] = currVal;
       fMinWrap->GetErrors( ipar, errp, errm, errsym, globcor );
    }
-            
+
    // clean up
 
-   // get elapsed time   
-   if (!fBatch) { 
-      Log() << kINFO << "Elapsed time: " << timer->GetElapsedTime() 
-            << "                            " << Endl;  
+   // get elapsed time
+   if (!fBatch) {
+      Log() << kINFO << "Elapsed time: " << timer->GetElapsedTime()
+            << "                            " << Endl;
       delete timer;
    }
 
    fMinWrap->Clear();
-   
+
    return chi2;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// performs the fit by calliung Run(pars)
+/// performs the fit by calling Run(pars)
 
 Double_t TMVA::MinuitFitter::EstimatorFunction( std::vector<Double_t>& pars )
-{ 
-   return Run( pars ); 
+{
+   return Run( pars );
 }
 
 
diff --git a/tmva/tmva/src/MinuitWrapper.cxx b/tmva/tmva/src/MinuitWrapper.cxx
index 58edcfcf653..3ca07674a07 100644
--- a/tmva/tmva/src/MinuitWrapper.cxx
+++ b/tmva/tmva/src/MinuitWrapper.cxx
@@ -1,4 +1,4 @@
-// @(#)root/tmva $Id$ 
+// @(#)root/tmva $Id$
 // Author: Peter Speckmayer
 
 /**********************************************************************************
@@ -14,19 +14,18 @@
  *      Peter Speckmayer <peter.speckmayer@cern.ch> - CERN, Switzerland           *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      MPI-K Heidelberg, Germany                                                 *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-//_______________________________________________________________________
-//                                                                      
-// Wrapper around MINUIT
-//_______________________________________________________________________
-
+/*! \class TMVA::MinuitWrapper
+\ingroup TMVA
+Wrapper around MINUIT
+*/
 #include "TMVA/MinuitWrapper.h"
 
 #include "TMVA/IFitterTarget.h"
@@ -52,7 +51,7 @@ TMVA::MinuitWrapper::MinuitWrapper( IFitterTarget& target, Int_t maxpar )
 Int_t TMVA::MinuitWrapper::Eval(Int_t /*npar*/, Double_t*, Double_t& f, Double_t* par, Int_t)
 {
    for (Int_t ipar=0; ipar<fNumPar; ipar++) fParameters[ipar] = par[ipar];
-   
+
    f = fFitterTarget.EstimatorFunction( fParameters );
    return 0;
 }
@@ -82,13 +81,13 @@ void TMVA::MinuitWrapper::Clear(Option_t *)
 
 ////////////////////////////////////////////////////////////////////////////////
 /// return global fit parameters
-///   amin     : chisquare
-///   edm      : estimated distance to minimum
-///   errdef
-///   nvpar    : number of variable parameters
-///   nparx    : total number of parameters
+///  - amin     : chisquare
+///  - edm      : estimated distance to minimum
+///  - errdef
+///  - nvpar    : number of variable parameters
+///  - nparx    : total number of parameters
 
-Int_t TMVA::MinuitWrapper::GetStats(Double_t &amin, Double_t &edm, Double_t &errdef, Int_t &nvpar, Int_t &nparx) 
+Int_t TMVA::MinuitWrapper::GetStats(Double_t &amin, Double_t &edm, Double_t &errdef, Int_t &nvpar, Int_t &nparx)
 {
    Int_t ierr = 0;
    mnstat(amin,edm,errdef,nvpar,nparx,ierr);
@@ -97,11 +96,11 @@ Int_t TMVA::MinuitWrapper::GetStats(Double_t &amin, Double_t &edm, Double_t &err
 
 ////////////////////////////////////////////////////////////////////////////////
 /// return current errors for a parameter
-///   ipar     : parameter number
-///   eplus    : upper error
-///   eminus   : lower error
-///   eparab   : parabolic error
-///   globcc   : global correlation coefficient
+///  - ipar     : parameter number
+///  - eplus    : upper error
+///  - eminus   : lower error
+///  - eparab   : parabolic error
+///  - globcc   : global correlation coefficient
 
 Int_t TMVA::MinuitWrapper::GetErrors(Int_t ipar,Double_t &eplus, Double_t &eminus, Double_t &eparab, Double_t &globcc)
 {
@@ -112,12 +111,12 @@ Int_t TMVA::MinuitWrapper::GetErrors(Int_t ipar,Double_t &eplus, Double_t &eminu
 
 ////////////////////////////////////////////////////////////////////////////////
 /// set initial values for a parameter
-///   ipar     : parameter number
-///   parname  : parameter name
-///   value    : initial parameter value
-///   verr     : initial error for this parameter
-///   vlow     : lower value for the parameter
-///   vhigh    : upper value for the parameter
+///  - ipar     : parameter number
+///  - parname  : parameter name
+///  - value    : initial parameter value
+///  - verr     : initial error for this parameter
+///  - vlow     : lower value for the parameter
+///  - vhigh    : upper value for the parameter
 
 Int_t TMVA::MinuitWrapper::SetParameter(Int_t ipar,const char *parname,Double_t value,Double_t verr,Double_t vlow, Double_t vhigh)
 {
diff --git a/tmva/tmva/src/MisClassificationError.cxx b/tmva/tmva/src/MisClassificationError.cxx
index f8eb04567ca..7bb19b37dfe 100644
--- a/tmva/tmva/src/MisClassificationError.cxx
+++ b/tmva/tmva/src/MisClassificationError.cxx
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -8,7 +8,7 @@
  * Web    : http://tmva.sourceforge.net                                           *
  *                                                                                *
  * Description: Implementation of the MisClassificationError as separation        *
- *              criterion:   1-max(p, 1-p) as 
+ *              criterion:   1-max(p, 1-p) as
  *                                                                                *
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
@@ -16,20 +16,19 @@
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      Heidelberg U., Germany                                                    * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      Heidelberg U., Germany                                                    *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-//_______________________________________________________________________
-//                                                                      
-// Implementation of the MisClassificationError as separation criterion 
-//                                                                      
-//_______________________________________________________________________
+/*! \class TMVA::MisClassificationError
+\ingroup TMVA
+Implementation of the MisClassificationError as separation criterion
+*/
 
 
 #include "TMVA/MisClassificationError.h"
@@ -39,7 +38,7 @@
 ClassImp(TMVA::MisClassificationError)
 
 ////////////////////////////////////////////////////////////////////////////////
-/// Misclassifiacton error   criterion: 1-max(p, 1-p)  (p: purity= s/(s+b))
+/// Misclassification error   criterion: 1-max(p, 1-p)  (p: purity= s/(s+b))
 
 Double_t  TMVA::MisClassificationError::GetSeparationIndex( const Double_t &s, const Double_t &b )
 {
diff --git a/tmva/tmva/src/ModulekNN.cxx b/tmva/tmva/src/ModulekNN.cxx
index d993eb43ba6..72d930aa0b8 100644
--- a/tmva/tmva/src/ModulekNN.cxx
+++ b/tmva/tmva/src/ModulekNN.cxx
@@ -23,6 +23,12 @@
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
+/*! \class TMVA::kNN
+\ingroup TMVA
+  kNN::Event describes point in input variable vector-space, with
+  additional functionality like distance between points
+*/
+
 #include "TMVA/ModulekNN.h"
 
 #include "TMVA/MsgLogger.h"
@@ -337,7 +343,7 @@ Bool_t TMVA::kNN::ModulekNN::Fill(const UShort_t odepth, const UInt_t ifrac, con
 /// find in tree
 /// if tree has been filled then search for nfind closest events
 /// if metic (fVarScale map) is computed then rescale event variables
-/// using previsouly computed width of variable distribution
+/// using previously computed width of variable distribution
 
 Bool_t TMVA::kNN::ModulekNN::Find(Event event, const UInt_t nfind, const std::string &option) const
 {
@@ -438,7 +444,7 @@ Bool_t TMVA::kNN::ModulekNN::Find(const UInt_t nfind, const std::string &option)
 ////////////////////////////////////////////////////////////////////////////////
 /// Optimize() balances binary tree for first odepth levels
 /// for each depth we split sorted depth % dimension variables
-/// into 2^odepth parts
+/// into \f$ 2^{odepth} \f$ parts
 
 TMVA::kNN::Node<TMVA::kNN::Event>* TMVA::kNN::ModulekNN::Optimize(const UInt_t odepth)
 {
@@ -529,7 +535,7 @@ TMVA::kNN::Node<TMVA::kNN::Event>* TMVA::kNN::ModulekNN::Optimize(const UInt_t o
 
 ////////////////////////////////////////////////////////////////////////////////
 /// compute scale factor for each variable (dimension) so that
-/// distance is computed uniformely along each dimension
+/// distance is computed uniformly along each dimension
 /// compute width of interval that includes (100 - 2*ifrac)% of events
 /// below, assume that in fVar each vector of values is sorted
 
diff --git a/tmva/tmva/src/MsgLogger.cxx b/tmva/tmva/src/MsgLogger.cxx
index 6c70bda5e72..9439b80076c 100644
--- a/tmva/tmva/src/MsgLogger.cxx
+++ b/tmva/tmva/src/MsgLogger.cxx
@@ -27,6 +27,11 @@
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
+/*! \class TMVA::MsgLogger
+\ingroup TMVA
+ostringstream derivative to redirect and format output
+*/
+
 // Local include(s):
 #include "TMVA/MsgLogger.h"
 
@@ -64,7 +69,7 @@ const std::map<TMVA::EMsgType, std::string>* TMVA::MsgLogger::fgColorMap = 0;
 #endif
 static std::unique_ptr<const std::map<TMVA::EMsgType, std::string> > gOwnTypeMap;
 static std::unique_ptr<const std::map<TMVA::EMsgType, std::string> > gOwnColorMap;
- 
+
 
 void   TMVA::MsgLogger::InhibitOutput() { fgInhibitOutput = kTRUE;  }
 void   TMVA::MsgLogger::EnableOutput()  { fgInhibitOutput = kFALSE; }
@@ -77,7 +82,7 @@ TMVA::MsgLogger::MsgLogger( const TObject* source, EMsgType minType )
      fActiveType( kINFO ),
      fMinType   ( minType )
 {
-   InitMaps();   
+   InitMaps();
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -125,7 +130,7 @@ TMVA::MsgLogger::~MsgLogger()
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// assingment operator
+/// assignment operator
 
 TMVA::MsgLogger& TMVA::MsgLogger::operator= ( const MsgLogger& parent )
 {
@@ -220,21 +225,21 @@ void TMVA::MsgLogger::WriteMsg( EMsgType type, const std::string& line ) const
   if ((stype = fgTypeMap.load()->find( type )) != fgTypeMap.load()->end()) {
     if (!gConfig().IsSilent() || type==kFATAL) {
       if (gConfig().UseColor()) {
-	// no text for INFO or VERBOSE
-	if (type == kHEADER || type ==kWARNING)
-	  std::cout << fgPrefix << line << std::endl; 
-	else if (type == kINFO || type == kVERBOSE)
-	  //std::cout << fgPrefix << line << std::endl; // no color for info
-	  std::cout << line << std::endl;
-	else{
-	  //std::cout<<"prefix='"<<fgPrefix<<"'"<<std::endl;
-	  std::cout << fgColorMap.load()->find( type )->second << "<" << stype->second << ">" << line << "\033[0m" << std::endl;
+   // no text for INFO or VERBOSE
+   if (type == kHEADER || type ==kWARNING)
+     std::cout << fgPrefix << line << std::endl;
+   else if (type == kINFO || type == kVERBOSE)
+     //std::cout << fgPrefix << line << std::endl; // no color for info
+     std::cout << line << std::endl;
+   else{
+     //std::cout<<"prefix='"<<fgPrefix<<"'"<<std::endl;
+     std::cout << fgColorMap.load()->find( type )->second << "<" << stype->second << ">" << line << "\033[0m" << std::endl;
 }
       }
 
       else {
-	if (type == kINFO) std::cout << fgPrefix << line << std::endl;
-	else               std::cout << fgPrefix << "<" << stype->second << "> " << line << std::endl;
+   if (type == kINFO) std::cout << fgPrefix << line << std::endl;
+   else               std::cout << fgPrefix << "<" << stype->second << "> " << line << std::endl;
       }
     }
   }
@@ -243,7 +248,7 @@ void TMVA::MsgLogger::WriteMsg( EMsgType type, const std::string& line ) const
    if (type == kFATAL) {
       std::cout << "***> abort program execution" << std::endl;
       throw std::runtime_error("FATAL error");
-      
+
       //std::exit(1);
       //assert(false);
    }
@@ -265,7 +270,7 @@ void TMVA::MsgLogger::InitMaps()
 {
    if(!fgTypeMap) {
       std::map<TMVA::EMsgType, std::string>*tmp  = new std::map<TMVA::EMsgType, std::string>();
-   
+
       (*tmp)[kVERBOSE]  = std::string("VERBOSE");
       (*tmp)[kDEBUG]    = std::string("DEBUG");
       (*tmp)[kINFO]     = std::string("INFO");
diff --git a/tmva/tmva/src/Node.cxx b/tmva/tmva/src/Node.cxx
index e84f36c76bd..091cabaa4d1 100644
--- a/tmva/tmva/src/Node.cxx
+++ b/tmva/tmva/src/Node.cxx
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$    
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate Data analysis       *
@@ -16,28 +16,26 @@
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
  * CopyRight (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-////////////////////////////////////////////////////////////////////////////////
+/*! \class TMVA::Node
+\ingroup TMVA
+Node for the BinarySearch or Decision Trees.
 
-/*
-  Node for the BinarySearch or Decision Trees.
-  
-  For the binary search tree, it basically consists of the EVENT, and
-  pointers to the parent and daughters
+For the binary search tree, it basically consists of the EVENT, and
+pointers to the parent and daughters
 
-  In case of the Decision Tree, it specifies parent and daughters, as
-  well as "which variable is used" in the selection of this node,
-  including the respective cut value.
+In case of the Decision Tree, it specifies parent and daughters, as
+well as "which variable is used" in the selection of this node,
+including the respective cut value.
 */
-//______________________________________________________________________
 
 #include <stdexcept>
 #include <iosfwd>
@@ -50,7 +48,7 @@ ClassImp(TMVA::Node)
 
 Int_t TMVA::Node::fgCount = 0;
 
-TMVA::Node::Node() 
+TMVA::Node::Node()
    : fParent( NULL ),
      fLeft  ( NULL),
      fRight ( NULL ),
@@ -65,13 +63,13 @@ TMVA::Node::Node()
 ////////////////////////////////////////////////////////////////////////////////
 /// constructor of a daughter node as a daughter of 'p'
 
-TMVA::Node::Node( Node* p, char pos ) 
-   : fParent ( p ), 
-     fLeft ( NULL ), 
-     fRight( NULL ),  
-     fPos  ( pos ), 
-     fDepth( p->GetDepth() + 1), 
-     fParentTree(p->GetParentTree()) 
+TMVA::Node::Node( Node* p, char pos )
+   : fParent ( p ),
+     fLeft ( NULL ),
+     fRight( NULL ),
+     fPos  ( pos ),
+     fDepth( p->GetDepth() + 1),
+     fParentTree(p->GetParentTree())
 {
    fgCount++;
    if (fPos == 'l' ) p->SetLeft(this);
@@ -79,16 +77,16 @@ TMVA::Node::Node( Node* p, char pos )
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// copy constructor, make sure you don't just copy the poiter to the node, but
-/// that the parents/daugthers are initialized to 0 (and set by the copy 
-/// constructors of the derived classes 
-
-TMVA::Node::Node ( const Node &n ) 
-   : fParent( NULL ), 
-     fLeft  ( NULL), 
-     fRight ( NULL ), 
-     fPos   ( n.fPos ), 
-     fDepth ( n.fDepth ), 
+/// copy constructor, make sure you don't just copy the pointer to the node, but
+/// that the parents/daughters are initialized to 0 (and set by the copy
+/// constructors of the derived classes
+
+TMVA::Node::Node ( const Node &n )
+   : fParent( NULL ),
+     fLeft  ( NULL),
+     fRight ( NULL ),
+     fPos   ( n.fPos ),
+     fDepth ( n.fDepth ),
      fParentTree( NULL )
 {
    fgCount++;
@@ -103,7 +101,7 @@ TMVA::Node::~Node()
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// retuns the global number of instantiated nodes
+/// returns the global number of instantiated nodes
 
 int TMVA::Node::GetCount()
 {
@@ -113,23 +111,23 @@ int TMVA::Node::GetCount()
 ////////////////////////////////////////////////////////////////////////////////
 ///recursively go through the part of the tree below this node and count all daughters
 
-Int_t TMVA::Node::CountMeAndAllDaughters() const 
+Int_t TMVA::Node::CountMeAndAllDaughters() const
 {
    Int_t n=1;
-   if (this->GetLeft() != NULL) 
-      n+= this->GetLeft()->CountMeAndAllDaughters(); 
-   if (this->GetRight() != NULL) 
-      n+= this->GetRight()->CountMeAndAllDaughters(); 
-  
+   if (this->GetLeft() != NULL)
+      n+= this->GetLeft()->CountMeAndAllDaughters();
+   if (this->GetRight() != NULL)
+      n+= this->GetRight()->CountMeAndAllDaughters();
+
    return n;
 }
 
 // print a node
 ////////////////////////////////////////////////////////////////////////////////
-/// output operator for a node  
+/// output operator for a node
 
 std::ostream& TMVA::operator<<( std::ostream& os, const TMVA::Node& node )
-{ 
+{
    node.Print(os);
    return os;                // Return the output stream.
 }
@@ -138,7 +136,7 @@ std::ostream& TMVA::operator<<( std::ostream& os, const TMVA::Node& node )
 /// output operator with a pointer to the node (which still prints the node itself)
 
 std::ostream& TMVA::operator<<( std::ostream& os, const TMVA::Node* node )
-{ 
+{
    if (node!=NULL) node->Print(os);
    return os;                // Return the output stream.
 }
@@ -179,7 +177,7 @@ void TMVA::Node::ReadXML( void* node,  UInt_t tmva_Version_Code )
       n->ReadXML(ch, tmva_Version_Code);
       if (n->GetPos()=='l')     { this->SetLeft(n);  }
       else if(n->GetPos()=='r') { this->SetRight(n); }
-      else { 
+      else {
          std::cout << "neither left nor right" << std::endl;
       }
       ch = gTools().GetNextChild(ch);
-- 
GitLab