diff --git a/tmva/Makefile b/tmva/Makefile
index 815695ae7d3a74bcfb53550720e99128a93247a2..f3247f6d33be130d511fe24cbed89d5a9da512b4 100644
--- a/tmva/Makefile
+++ b/tmva/Makefile
@@ -37,7 +37,7 @@ DICTH1       := Configurable.h Event.h Factory.h MethodBase.h MethodCompositeBas
 		MethodANNBase.h MethodTMlpANN.h MethodRuleFit.h MethodCuts.h MethodFisher.h \
 		MethodKNN.h MethodCFMlpANN.h MethodCFMlpANN_Utils.h MethodLikelihood.h \
 		MethodHMatrix.h MethodPDERS.h MethodBDT.h MethodDT.h MethodSVM.h MethodBayesClassifier.h \
-		MethodFDA.h MethodMLP.h MethodCommittee.h MethodSeedDistance.h MethodBoost.h \
+		MethodFDA.h MethodMLP.h MethodCommittee.h MethodBoost.h \
 		MethodPDEFoam.h MethodLD.h MethodCategory.h
 DICTH2       := TSpline2.h TSpline1.h PDF.h BinaryTree.h BinarySearchTreeNode.h BinarySearchTree.h \
 		Timer.h RootFinder.h CrossEntropy.h DecisionTree.h DecisionTreeNode.h MisClassificationError.h \
@@ -45,8 +45,8 @@ DICTH2       := TSpline2.h TSpline1.h PDF.h BinaryTree.h BinarySearchTreeNode.h
 		GeneticAlgorithm.h GeneticGenes.h GeneticPopulation.h GeneticRange.h GiniIndex.h \
 		GiniIndexWithLaplace.h SimulatedAnnealing.h
 DICTH3       := Config.h KDEKernel.h Interval.h FitterBase.h MCFitter.h GeneticFitter.h SimulatedAnnealingFitter.h \
-		MinuitFitter.h MinuitWrapper.h IFitterTarget.h IMetric.h MetricEuler.h MetricManhattan.h \
-		SeedDistance.h PDEFoam.h PDEFoamDistr.h PDEFoamVect.h PDEFoamCell.h BDTEventWrapper.h CCTreeWrapper.h \
+		MinuitFitter.h MinuitWrapper.h IFitterTarget.h \
+		PDEFoam.h PDEFoamDistr.h PDEFoamVect.h PDEFoamCell.h BDTEventWrapper.h CCTreeWrapper.h \
 		CCPruner.h CostComplexityPruneTool.h SVEvent.h
 DICTH4       := TNeuron.h TSynapse.h TActivationChooser.h TActivation.h TActivationSigmoid.h TActivationIdentity.h \
 		TActivationTanh.h TActivationRadial.h TNeuronInputChooser.h TNeuronInput.h TNeuronInputSum.h \
diff --git a/tmva/Module.mk b/tmva/Module.mk
index 7b33aa6d5f04bcfdd0929eaa5ffb7b6ac4bdf157..b29153ebb171aded49ef149389afc77ee94582ca 100644
--- a/tmva/Module.mk
+++ b/tmva/Module.mk
@@ -35,7 +35,7 @@ TMVAH1       := Configurable.h Event.h Factory.h MethodBase.h MethodCompositeBas
 		MethodANNBase.h MethodTMlpANN.h MethodRuleFit.h MethodCuts.h MethodFisher.h \
 		MethodKNN.h MethodCFMlpANN.h MethodCFMlpANN_Utils.h MethodLikelihood.h \
 		MethodHMatrix.h MethodPDERS.h MethodBDT.h MethodDT.h MethodSVM.h MethodBayesClassifier.h \
-		MethodFDA.h MethodMLP.h MethodCommittee.h MethodSeedDistance.h MethodBoost.h \
+		MethodFDA.h MethodMLP.h MethodCommittee.h MethodBoost.h \
 		MethodPDEFoam.h MethodLD.h MethodCategory.h
 TMVAH2       := TSpline2.h TSpline1.h PDF.h BinaryTree.h BinarySearchTreeNode.h BinarySearchTree.h \
 		Timer.h RootFinder.h CrossEntropy.h DecisionTree.h DecisionTreeNode.h MisClassificationError.h \
@@ -43,8 +43,8 @@ TMVAH2       := TSpline2.h TSpline1.h PDF.h BinaryTree.h BinarySearchTreeNode.h
 		GeneticAlgorithm.h GeneticGenes.h GeneticPopulation.h GeneticRange.h GiniIndex.h \
 		GiniIndexWithLaplace.h SimulatedAnnealing.h
 TMVAH3       := Config.h KDEKernel.h Interval.h FitterBase.h MCFitter.h GeneticFitter.h SimulatedAnnealingFitter.h \
-		MinuitFitter.h MinuitWrapper.h IFitterTarget.h IMetric.h MetricEuler.h MetricManhattan.h \
-		SeedDistance.h PDEFoam.h PDEFoamDistr.h PDEFoamVect.h PDEFoamCell.h BDTEventWrapper.h CCTreeWrapper.h \
+		MinuitFitter.h MinuitWrapper.h IFitterTarget.h \
+		PDEFoam.h PDEFoamDistr.h PDEFoamVect.h PDEFoamCell.h BDTEventWrapper.h CCTreeWrapper.h \
 		CCPruner.h CostComplexityPruneTool.h SVEvent.h
 TMVAH4       := TNeuron.h TSynapse.h TActivationChooser.h TActivation.h TActivationSigmoid.h TActivationIdentity.h \
 		TActivationTanh.h TActivationRadial.h TNeuronInputChooser.h TNeuronInput.h TNeuronInputSum.h \
diff --git a/tmva/doc/v526/index.html b/tmva/doc/v526/index.html
index 0eb9f22808155b458f4fc6ce94db2226667b42d3..b2e7eebcb45e84c9e40e83b947b9af8659fa664e 100644
--- a/tmva/doc/v526/index.html
+++ b/tmva/doc/v526/index.html
@@ -1,5 +1,5 @@
-<br/> 
-<hr/> 
+<br> 
+<hr> 
 <a name="tmva"></a> 
 <h3>TMVA</h3>
 
@@ -22,39 +22,39 @@
       modelling, and hence increases the classification and regression
       performance. Presently, the Category method works for
       classification only, but regression will follow soon. Please
-      contact us if urgently needed.<br/>
+      contact us if urgently needed.<br>
 
       Example scripts and data files illustrate how the new
       Category method is configured and used. Please check the macros
       <tt>test/TMVAClassificationCategory.C</tt> and
       <tt>test/TMVAClassificationCategoryApplication.C</tt> or the
-      corresponding executables.</li>
+      corresponding executables.
 
-      <li> Regression functionality for gradient boosted trees using a Huber loss function.</li>
+      <li> Regression functionality for gradient boosted trees using a Huber loss function.
         
     </ul>
 
     <h4>Comments</h4>
 
     <p>
-      <em>On Input Data:</em><br/> 
+      <em>On Input Data:</em><br> 
 
       New TMVA event vector building. The code for splitting the input
       data into training and test samples for all classes and the
      mixing of those samples into one training and one test sample has
       been rewritten completely. The new code is more performant and
       has a clearer structure. This fixes several bugs which have been
-      reported by the TMVA users.</p>
+      reported by the TMVA users.
 
     <p>
-      <em>On Minimization:</em><br/> 
+      <em>On Minimization:</em><br> 
 
       Variables, targets and spectators are now checked for being
       constant. (The execution of TMVA is stopped for variables and
-      targets, a warning is given for spectators.)</p>
+      targets; a warning is given for spectators.)
 
     <p>
-      <em>On Regression:</em><br/>
+      <em>On Regression:</em><br>
       
       The analysis type is no longer defined by calling a dedicated
       TestAllMethods-member-function of the Factory, but with the
@@ -62,66 +62,66 @@
       "Auto" where TMVA tries to determine the most suitable analysis
       type from the targets and classes the user has defined. Other
       values are "regression", "classification" and "multiclass" for
-      the forthcoming multiclass classification.<br/></p>
+      the forthcoming multiclass classification.<br>
 
-      <p>Missing regression evaluation plots for training sample were
-      added.</p>
+      Missing regression evaluation plots for the training sample were
+      added.
       
     <p>
-      <em>On Cut method:</em><br/>
+      <em>On Cut method:</em><br>
 
-      Removed obsolete option "FVerySmart" from Cuts method.</p>
+      Removed obsolete option "FVerySmart" from Cuts method.
 
     <p>
-      <em>On MLP method:</em><br/>
+      <em>On MLP method:</em><br>
       
-      Display of convergence information in the progress bar for MLP during training.<br/>
+      Display of convergence information in the progress bar for MLP during training.<br>
 
       Creation of animated gifs for MLP convergence monitoring (please
-      contact authors if you want to do this).</p>
+      contact authors if you want to do this).
                 
     <p>
-      <em>On Datasets:</em><br/> 
+      <em>On Datasets:</em><br> 
 
       Checks are performed to see whether events are involuntarily cut
       by using a non-filled array entry (e.g. "arr[4]" is used when the
       array does not always have at least 5 entries). A warning is given in that
-      case.</p>
+      case.
         
 
     <h4>Bug fixes</h4>
 
     <ul>
-      <li>Spectators and Targets could not be used with by-hand assignment of events.</li>
+      <li>Spectators and Targets could not be used with by-hand assignment of events.
 
-      <li>Corrected types (training/testing) for assigning single events.</li>
+      <li>Corrected types (training/testing) for assigning single events.
   
       <li>Changed message from FATAL to WARNING when the user requests more events for 
-      training or testing than available.</li>
+      training or testing than available.
 
       <li>Fixed bug which caused TMVA to crash if the number of input variables exceeded 
-        the allowed maximum for generating scatter plots.</li>
+        the allowed maximum for generating scatter plots.
 
-      <li>Prevent TMVA from crashing when running with an empty TTree or TChain.</li>
+      <li>Prevent TMVA from crashing when running with an empty TTree or TChain.
 
       <li>A variable expression like "Alt$(arr[3],0)" can now be used
       to give a default value for a variable if for some events the
       array doesn't contain enough elements (e.g. in two-jet events,
       sometimes only one jet is found and thus the array jetPt[] has
-      only one entry in that cases).</li>
+      only one entry in those cases).
         
-      <li>Plot ranges for scatter-plots showing the transformed events are now correct.</li>
+      <li>Plot ranges for scatter-plots showing the transformed events are now correct.
         
-      <li>User defined training/testing-trees are now handled correctly.</li>
+      <li>User defined training/testing-trees are now handled correctly.
         
-      <li>Fix bug in correlation computation for regression.</li>
+      <li>Fix bug in correlation computation for regression.
         
-      <li>Consistent use of variable labels (for the log output) and variable titles (in histograms).</li>
+      <li>Consistent use of variable labels (for the log output) and variable titles (in histograms).
 
-      <li>Drawing of variable labels in network architecture display for regression mode has been added.</li>
+      <li>Drawing of variable labels in network architecture display for regression mode has been added.
 
-      <li>Bug fixes to Cuts which improves performance on datasets with many variables.</li>
+      <li>Bug fixes to Cuts which improve performance on datasets with many variables.
 
-      <li>Bug fix in GaussTransformation which improves handling of gaussian tails.</li>
+      <li>Bug fix in GaussTransformation which improves handling of Gaussian tails.
 
     </ul>
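For illustration, "Alt$(arr[3],0)" is ROOT's tree-expression syntax for supplying a fallback value when an array entry is missing. A minimal sketch of how a user might declare such a variable -- the variable name jetPt and the factory object are illustrative assumptions, not part of this patch:

   // If an event has fewer than two jets, "jetPt[1]" falls back to 0
   // instead of the event being silently cut on the missing entry.
   factory->AddVariable( "Alt$(jetPt[1],0)", 'F' );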
diff --git a/tmva/doc/v528/index.html b/tmva/doc/v528/index.html
deleted file mode 100644
index 3eef2d58ef841cffc233eaffa393dc51bf4c4e3b..0000000000000000000000000000000000000000
--- a/tmva/doc/v528/index.html
+++ /dev/null
@@ -1,4 +0,0 @@
-<br> 
-<hr> 
-<a name="tmva"></a> 
-<h3>TMVA</h3>
diff --git a/tmva/inc/BinarySearchTreeNode.h b/tmva/inc/BinarySearchTreeNode.h
index e408e826aa9c06cae2649e02cf3cf2ef6889766d..c6f92d2dbceb00f64bd439aeae2175e990271045 100644
--- a/tmva/inc/BinarySearchTreeNode.h
+++ b/tmva/inc/BinarySearchTreeNode.h
@@ -113,7 +113,7 @@ namespace TMVA {
 
       Float_t     fWeight;
       // Float_t     fIsSignal;
-      Int_t       fClass;
+      UInt_t       fClass;
 
       Short_t     fSelector;       // index of variable used in node selection (decision tree) 
 
diff --git a/tmva/inc/BinaryTree.h b/tmva/inc/BinaryTree.h
index bd5f103cc749df796ba34cb5931aa3209338bf81..7a455c457d68808d9b7030b183cfc16a8f253b67 100644
--- a/tmva/inc/BinaryTree.h
+++ b/tmva/inc/BinaryTree.h
@@ -121,8 +121,8 @@ namespace TMVA {
       UInt_t     fNNodes;           // total number of nodes in the tree (counted)
       UInt_t     fDepth;            // maximal depth in tree reached
 
-      mutable MsgLogger* fLogger;   // message loggera    
-      MsgLogger& Log() const { return *fLogger; }
+      static MsgLogger* fgLogger;   // message logger, static to save resources    
+      MsgLogger& Log() const { return *fgLogger; }
 
       ClassDef(BinaryTree,0) // Base class for BinarySearch and Decision Trees
    };  
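The hunk above replaces the per-object logger with one logger shared by all BinaryTree instances; since trees can have very many nodes, this saves real memory. A sketch of the matching definition one would expect in the implementation file -- the constructor argument is an assumption:

   // BinaryTree.cxx: a single MsgLogger for every tree, instead of one per object
   TMVA::MsgLogger* TMVA::BinaryTree::fgLogger = new TMVA::MsgLogger( "BinaryTree" );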
diff --git a/tmva/inc/Config.h b/tmva/inc/Config.h
index 809162b7bf306571e2d61c3b0ba7efd58e42403a..c6d79f3e5727efdd5d7e539b49bb0ea1d821eb9b 100644
--- a/tmva/inc/Config.h
+++ b/tmva/inc/Config.h
@@ -52,8 +52,8 @@ namespace TMVA {
                
    public:
 
-      static Config& Instance();
-      static void    DestroyInstance();
+      static Config& Instance() { return fgConfigPtr ? *fgConfigPtr : *(fgConfigPtr = new Config()); }
+      static void    DestroyInstance() { if (fgConfigPtr != 0) { delete fgConfigPtr; fgConfigPtr = 0; } }
 
       Bool_t UseColor() const { return fUseColoredConsole; }
       void   SetUseColor( Bool_t uc ) { fUseColoredConsole = uc; }
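Instance() now lazily creates the singleton inline: the first call allocates the Config object and later calls reuse it, so no out-of-line definition is needed. Typical call sites, using only the accessors visible in this header:

   TMVA::Config::Instance().SetUseColor( kFALSE );   // first call creates the instance
   Bool_t useColor = TMVA::Config::Instance().UseColor();
   TMVA::Config::DestroyInstance();                  // optional cleanup at shutdown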
diff --git a/tmva/inc/DataInputHandler.h b/tmva/inc/DataInputHandler.h
index 1cd36751ebe343ab6234df27d77ccd0479ea1ebc..5af89c2eb76e2125a7311c35c6ccc8851dcf063a 100644
--- a/tmva/inc/DataInputHandler.h
+++ b/tmva/inc/DataInputHandler.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -13,6 +13,7 @@
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
  *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch>  - CERN, Switzerland          *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *                                                                                *
  * Copyright (c) 2006:                                                            *
diff --git a/tmva/inc/DataSet.h b/tmva/inc/DataSet.h
index 96c5bc7e373a90b65f1b0ae85da95af34ec513d6..1275a3ef2591c821b653c60a26433240b22fed0a 100644
--- a/tmva/inc/DataSet.h
+++ b/tmva/inc/DataSet.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -13,6 +13,7 @@
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
  *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch>  - CERN, Switzerland          *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *                                                                                *
  * Copyright (c) 2006:                                                            *
@@ -70,7 +71,7 @@
 #endif
 
 namespace TMVA {
-   
+
    class Event;
    class DataSetInfo;
    class MsgLogger;
@@ -92,8 +93,8 @@ namespace TMVA {
       Event*    GetEvent        ( Long64_t ievt ) const { fCurrentEventIdx = ievt; return GetEvent(); }
       Event*    GetTrainingEvent( Long64_t ievt ) const { return GetEvent(ievt, Types::kTraining); }
       Event*    GetTestEvent    ( Long64_t ievt ) const { return GetEvent(ievt, Types::kTesting); }
-      Event*    GetEvent        ( Long64_t ievt, Types::ETreeType type ) const { 
-         fCurrentTreeIdx = TreeIndex(type); fCurrentEventIdx = ievt; return GetEvent(); 
+      Event*    GetEvent        ( Long64_t ievt, Types::ETreeType type ) const {
+         fCurrentTreeIdx = TreeIndex(type); fCurrentEventIdx = ievt; return GetEvent();
       }
 
       UInt_t    GetNVariables() const;
@@ -115,13 +116,13 @@ namespace TMVA {
 
       Bool_t    HasNegativeEventWeights() const { return fHasNegativeEventWeights; }
 
-      Results*  GetResults   ( const TString &, 
+      Results*  GetResults   ( const TString &,
                                Types::ETreeType type,
                                Types::EAnalysisType analysistype );
 
       void      SetVerbose( Bool_t ) {}
 
-      // sets the number of blocks to which the training set is divided, 
+      // sets the number of blocks to which the training set is divided,
      // some of which are given to the Validation sample. By default they all belong to the Training set.
       void      DivideTrainingSet( UInt_t blockNum );
 
diff --git a/tmva/inc/DataSetFactory.h b/tmva/inc/DataSetFactory.h
index 2890fffb3ad2f481688e9be64c8005c4a74e054d..04856343b555c65eeaa4f6a845d5259b5b998d30 100644
--- a/tmva/inc/DataSetFactory.h
+++ b/tmva/inc/DataSetFactory.h
@@ -26,8 +26,6 @@
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-#define ALTERNATIVE_EVENT_VECTOR_BUILDING 1
-
 #ifndef ROOT_TMVA_DataSetFactory
 #define ROOT_TMVA_DataSetFactory
 
@@ -239,30 +237,7 @@ namespace TMVA {
 
       DataSet*  BuildInitialDataSet( DataSetInfo&, TMVA::DataInputHandler& );
       DataSet*  BuildDynamicDataSet( DataSetInfo& );
-
-#ifndef ALTERNATIVE_EVENT_VECTOR_BUILDING
-
-      void      BuildEventVector   ( DataSetInfo& dsi, 
-                                     DataInputHandler& dataInput, 
-                                     std::vector< std::vector< Event* > >& tmpEventVector, 
-                                     std::vector<Double_t>& sumOfWeights, 
-                                     std::vector<Double_t>& nTempEvents, 
-                                     std::vector<Double_t>& renormFactor,
-                                     std::vector< std::vector< std::pair< Long64_t, Types::ETreeType > > >& userDefinedEventTypes );
-      
-      DataSet*  MixEvents          ( DataSetInfo& dsi, 
-                                     std::vector< std::vector< Event* > >& tmpEventVector, 
-                                     std::vector< std::pair< Int_t, Int_t > >& nTrainTestEvents, 
-                                     const TString& splitMode, UInt_t splitSeed, 
-                                     std::vector<Double_t>& renormFactor,
-                                     std::vector< std::vector< std::pair< Long64_t, Types::ETreeType > > >& userDefinedEventTypes );
-
-      void      InitOptions        ( DataSetInfo& dsi, 
-                                     std::vector< std::pair< Int_t, Int_t > >& nTrainTestEvents, 
-                                     TString& normMode, UInt_t& splitSeed, TString& splitMode );
-      
-
-#else
+     
       // ---------- new versions
       void      BuildEventVector    ( DataSetInfo& dsi, 
                                       DataInputHandler& dataInput, 
@@ -275,7 +250,7 @@ namespace TMVA {
                                       const TString& mixMode, 
                                       const TString& normMode, 
                                       UInt_t splitSeed);
-
+      
       void      RenormEvents        ( DataSetInfo& dsi, 
                                       EventVectorOfClassesOfTreeType& tmpEventVector,
                                       const TString& normMode );
@@ -286,7 +261,6 @@ namespace TMVA {
       
 
       // ------------------------
-#endif
 
       // auxiliary functions to compute correlations
       TMatrixD* CalcCorrelationMatrix( DataSet*, const UInt_t classNumber );
diff --git a/tmva/inc/DataSetInfo.h b/tmva/inc/DataSetInfo.h
index 7de830c0f46ea6b4f6616ae2d2dd7131328d4ee8..eda3d001ab54d6bda2810730660ea8b4c8310673 100644
--- a/tmva/inc/DataSetInfo.h
+++ b/tmva/inc/DataSetInfo.h
@@ -1,5 +1,5 @@
 // // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -73,6 +73,7 @@ namespace TMVA {
    class DataSet;
    class VariableTransformBase;
    class MsgLogger;
+   class DataSetManager;
 
    class DataSetInfo : public TObject {
 
@@ -138,6 +139,7 @@ namespace TMVA {
       void               PrintClasses() const;
       UInt_t             GetNClasses() const { return fClasses.size(); }
       Bool_t             IsSignal( const Event* ev ) const;
+      std::vector<Float_t>* GetTargetsForMulticlass( const Event* ev );
 
       // by variable
       Int_t              FindVarIndex( const TString& )      const;
@@ -173,6 +175,15 @@ namespace TMVA {
 
    private:
 
+
+
+      TMVA::DataSetManager*            fDataSetManager; // DSMTEST
+      void                       SetDataSetManager( DataSetManager* dsm ) { fDataSetManager = dsm; } // DSMTEST
+      friend class DataSetManager;  // DSMTEST (datasetmanager test)
+
+
+
+
       DataSetInfo( const DataSetInfo& ) : TObject() {}
 
       void PrintCorrelationMatrix( TTree* theTree );
@@ -197,10 +208,13 @@ namespace TMVA {
       Bool_t                     fVerbose;           //! Verbosity
 
       UInt_t                     fSignalClass;       //! index of the class with the name signal
+
+      std::vector<Float_t>*      fTargetsForMulticlass;       //! all targets 0 except the one with index==classNumber
       
       mutable MsgLogger*         fLogger;            //! message logger
       MsgLogger& Log() const { return *fLogger; }
 
+
    };
 }
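GetTargetsForMulticlass() and fTargetsForMulticlass introduce a one-hot target vector for the forthcoming multiclass mode: per the comment above, all targets are 0 except the entry whose index equals the event's class number. A hypothetical usage sketch -- dsi and ev are assumed to exist:

   // For a 3-class problem and an event of class 1, this yields {0, 1, 0}.
   std::vector<Float_t>* targets = dsi.GetTargetsForMulticlass( ev );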
 
diff --git a/tmva/inc/DataSetManager.h b/tmva/inc/DataSetManager.h
index e45a7b0ef51fb0d0a1f46f1e202ec31b1a8709bd..864a5e5ab161b71c9b1fd6e539c19885bcd139a2 100644
--- a/tmva/inc/DataSetManager.h
+++ b/tmva/inc/DataSetManager.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -13,6 +13,7 @@
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
  *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch>  - CERN, Switzerland          *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *                                                                                *
  * Copyright (c) 2006:                                                            *
@@ -47,16 +48,24 @@ namespace TMVA {
    class DataSet;
    class DataSetInfo;
    class DataInputHandler;
+   class DataSetFactory; // DSMTEST
    class MsgLogger;
 
    class DataSetManager {
       
    public:
 
+
       // singleton class
-      static DataSetManager& Instance();
-      static void            CreateInstance( DataInputHandler& dataInput );
-      static void            DestroyInstance();
+//      static DataSetManager& Instance();
+//      static void            CreateInstance( DataInputHandler& dataInput );
+//      static void            DestroyInstance();
+
+      // private default constructor
+      DataSetManager(); // DSMTEST
+      DataSetManager( DataInputHandler& dataInput ); //DSMTEST
+      ~DataSetManager(); // DSMTEST
+
 
       // ownership stays with this handler
       DataSet*     CreateDataSet ( const TString& dsiName );
@@ -67,13 +76,15 @@ namespace TMVA {
 
    private:
 
-      ~DataSetManager();
+//      ~DataSetManager(); // DSMTEST moved to public
 
-      static DataSetManager* fgDSManager;
+//      static DataSetManager* fgDSManager; // removed DSMTEST
 
       // private default constructor
-      DataSetManager();
-      DataSetManager( DataInputHandler& dataInput );
+/*       DataSetManager(); */ // DSMTEST
+/*       DataSetManager( DataInputHandler& dataInput ); */ // DSMTEST
+
+// //      TMVA::DataSetFactory* fDatasetFactory; // DSMTEST
 
       // access to input data
       DataInputHandler& DataInput() { return fDataInput; }
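These changes demote DataSetManager from a global singleton to an ordinary class: the static Instance()/CreateInstance()/DestroyInstance() trio is commented out and the constructors and destructor become public, so each Factory can own its own manager (see fDataSetManager in the Factory.h hunk below). A sketch of the ownership this implies -- the wiring inside Factory is an assumption, not shown in this patch:

   // Hypothetical, inside TMVA::Factory's constructor:
   fDataSetManager = new DataSetManager( *fDataInputHandler );
   // ... and in its destructor:
   delete fDataSetManager;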
diff --git a/tmva/inc/DecisionTree.h b/tmva/inc/DecisionTree.h
index b0926691247db10f59504392cc4a5656a564d020..ee8b77f87ecfd112927bbaeb70c65bced8d4e0dc 100644
--- a/tmva/inc/DecisionTree.h
+++ b/tmva/inc/DecisionTree.h
@@ -82,6 +82,7 @@ namespace TMVA {
      // the constructor needed for building the decision tree via training with events
       DecisionTree( SeparationBase *sepType, Int_t minSize, 
                     Int_t nCuts,
+                    UInt_t cls =0,
                     Bool_t randomisedTree=kFALSE, Int_t useNvars=0, 
                     UInt_t nNodesMax=999999, UInt_t nMaxDepth=9999999, 
                     Int_t iSeed=fgRandomSeed, Float_t purityLimit=0.5,
@@ -99,7 +100,7 @@ namespace TMVA {
      // building of a tree by recursively splitting the nodes 
 
       UInt_t BuildTree( const EventList & eventSample, 
-                       DecisionTreeNode *node = NULL);
+                        DecisionTreeNode *node = NULL);
       // determine the way how a node is split (which variable, which cut value)
 
       Float_t TrainNode( const EventList & eventSample,  DecisionTreeNode *node ) { return TrainNodeFast( eventSample, node ); }
@@ -219,6 +220,7 @@ namespace TMVA {
 
       UInt_t     fNNodesMax;     // max # of nodes
       UInt_t     fMaxDepth;      // max depth
+      UInt_t     fClass;         // class which is treated as signal when building the tree
 
       static const Int_t  fgDebugLevel = 0;     // debug level determining some printout/control plots etc.
      Int_t     fTreeID;        // just an ID number given to the tree; makes debugging easier as the tree knows who it is.
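The new cls argument lets a tree treat one chosen class as signal, which is the building block for one-vs-rest multiclass boosting (compare the fClass member above and the cls parameters added to Boost/GradBoost in MethodBDT.h below). A hypothetical sketch using the constructor signature from this hunk:

   // Grow one tree per class, each treating class icls as signal.
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      DecisionTree* dt = new DecisionTree( sepType, minSize, nCuts, icls );
      dt->BuildTree( eventSample );
   }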
diff --git a/tmva/inc/DecisionTreeNode.h b/tmva/inc/DecisionTreeNode.h
index a95ee2f2b6524fc27828b8e5fea4af945c76124b..a2e9d367b4716002f690393f0ce0ebf7753f2832 100644
--- a/tmva/inc/DecisionTreeNode.h
+++ b/tmva/inc/DecisionTreeNode.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$    
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss, Eckhard von Toerne
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -14,11 +14,13 @@
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
+ *      Eckhard von Toerne <evt@physik.uni-bonn.de>  - U. of Bonn, Germany        *
  *                                                                                *
- * Copyright (c) 2005:                                                            *
+ * Copyright (c) 2009:                                                            *
  *      CERN, Switzerland                                                         * 
  *      U. of Victoria, Canada                                                    * 
  *      MPI-K Heidelberg, Germany                                                 * 
+ *      U. of Bonn, Germany                                                       *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
@@ -47,7 +49,64 @@
 #include <vector>
 #include <map>
 namespace TMVA {
-  
+
+   class DTNodeTrainingInfo
+   {
+   public:
+      DTNodeTrainingInfo():fSampleMin(), 
+                           fSampleMax(), 
+                           fNodeR(0),fSubTreeR(0),fAlpha(0),fG(0),fNTerminal(0),
+                           fNB(0),fNS(0),fSumTarget(0),fSumTarget2(0),fCC(0), 
+                           fNSigEvents ( 0 ), fNBkgEvents ( 0 ),
+                           fNEvents ( -1 ),
+                           fNSigEvents_unweighted ( 0 ),
+                           fNBkgEvents_unweighted ( 0 ),
+                           fNEvents_unweighted ( 0 ),
+                           fSeparationIndex (-1 ),
+                           fSeparationGain ( -1 )
+      {
+      }
+      std::vector< Float_t >  fSampleMin; // the minima for each ivar of the sample on the node during training
+      std::vector< Float_t >  fSampleMax; // the maxima for each ivar of the sample on the node during training      
+      Double_t fNodeR;           // node resubstitution estimate, R(t)
+      Double_t fSubTreeR;        // R(T) = Sum(R(t) : t in ~T)
+      Double_t fAlpha;           // critical alpha for this node
+      Double_t fG;               // minimum alpha in subtree rooted at this node
+      Int_t    fNTerminal;       // number of terminal nodes in subtree rooted at this node
+      Double_t fNB;              // sum of weights of background events from the pruning sample in this node
+      Double_t fNS;              // ditto for the signal events
+      Float_t  fSumTarget;       // sum of weight*target  used for the calculation of the variance (regression)
+      Float_t  fSumTarget2;      // sum of weight*target^2 used for the calculation of the variance (regression)
+      Double_t fCC;  // debug variable for cost complexity pruning .. 
+
+      Float_t  fNSigEvents;      // sum of weights of signal events in the node
+      Float_t  fNBkgEvents;      // sum of weights of backgr events in the node
+      Float_t  fNEvents;         // number of events that entered the node (during training)
+      Float_t  fNSigEvents_unweighted;      // number of signal events in the node (unweighted)
+      Float_t  fNBkgEvents_unweighted;      // number of backgr events in the node (unweighted)
+      Float_t  fNEvents_unweighted;         // number of unweighted events that entered the node (during training)
+      Float_t  fSeparationIndex; // measure of "purity" (separation between S and B) AT this node
+      Float_t  fSeparationGain;  // measure of "purity", separation, or information gained BY this nodes selection
+
+      // copy constructor
+      DTNodeTrainingInfo(const DTNodeTrainingInfo& n) :
+         fSampleMin(),fSampleMax(), // Samplemin and max are reset in copy constructor
+         fNodeR(n.fNodeR), fSubTreeR(n.fSubTreeR),
+         fAlpha(n.fAlpha), fG(n.fG),      
+         fNTerminal(n.fNTerminal),
+         fNB(n.fNB), fNS(n.fNS),
+         fSumTarget(0),fSumTarget2(0), // SumTarget reset in copy constructor
+         fCC(0),
+         fNSigEvents ( n.fNSigEvents ), fNBkgEvents ( n.fNBkgEvents ),
+         fNEvents ( n.fNEvents ),
+         fNSigEvents_unweighted ( n.fNSigEvents_unweighted ),
+         fNBkgEvents_unweighted ( n.fNBkgEvents_unweighted ),
+         fNEvents_unweighted ( n.fNEvents_unweighted ),
+         fSeparationIndex( n.fSeparationIndex ),
+         fSeparationGain ( n.fSeparationGain )
+      { } 
+   };
+
    class Event;
    class MsgLogger;
 
@@ -110,69 +169,69 @@ namespace TMVA {
       Float_t GetRMS( void ) const { return fRMS;}
 
       // set the sum of the signal weights in the node
-      void SetNSigEvents( Float_t s ) { fNSigEvents = s; }
+      void SetNSigEvents( Float_t s ) { fTrainInfo->fNSigEvents = s; }
     
       // set the sum of the backgr weights in the node
-      void SetNBkgEvents( Float_t b ) { fNBkgEvents = b; }
+      void SetNBkgEvents( Float_t b ) { fTrainInfo->fNBkgEvents = b; }
     
       // set the number of events that entered the node (during training)
-      void SetNEvents( Float_t nev ){ fNEvents =nev ; }
+      void SetNEvents( Float_t nev ){ fTrainInfo->fNEvents =nev ; }
     
       // set the sum of the unweighted signal events in the node
-      void SetNSigEvents_unweighted( Float_t s ) { fNSigEvents_unweighted = s; }
+      void SetNSigEvents_unweighted( Float_t s ) { fTrainInfo->fNSigEvents_unweighted = s; }
     
       // set the sum of the unweighted backgr events in the node
-      void SetNBkgEvents_unweighted( Float_t b ) { fNBkgEvents_unweighted = b; }
+      void SetNBkgEvents_unweighted( Float_t b ) { fTrainInfo->fNBkgEvents_unweighted = b; }
     
       // set the number of unweighted events that entered the node (during training)
-      void SetNEvents_unweighted( Float_t nev ){ fNEvents_unweighted =nev ; }
+      void SetNEvents_unweighted( Float_t nev ){ fTrainInfo->fNEvents_unweighted =nev ; }
     
       // increment the sum of the signal weights in the node
-      void IncrementNSigEvents( Float_t s ) { fNSigEvents += s; }
+      void IncrementNSigEvents( Float_t s ) { fTrainInfo->fNSigEvents += s; }
     
       // increment the sum of the backgr weights in the node
-      void IncrementNBkgEvents( Float_t b ) { fNBkgEvents += b; }
+      void IncrementNBkgEvents( Float_t b ) { fTrainInfo->fNBkgEvents += b; }
     
       // increment the number of events that entered the node (during training)
-      void IncrementNEvents( Float_t nev ){ fNEvents +=nev ; }
+      void IncrementNEvents( Float_t nev ){ fTrainInfo->fNEvents +=nev ; }
     
       // increment the sum of the signal weights in the node
-      void IncrementNSigEvents_unweighted( ) { fNSigEvents_unweighted += 1; }
+      void IncrementNSigEvents_unweighted( ) { fTrainInfo->fNSigEvents_unweighted += 1; }
     
       // increment the sum of the backgr weights in the node
-      void IncrementNBkgEvents_unweighted( ) { fNBkgEvents_unweighted += 1; }
+      void IncrementNBkgEvents_unweighted( ) { fTrainInfo->fNBkgEvents_unweighted += 1; }
     
       // increment the number of events that entered the node (during training)
-      void IncrementNEvents_unweighted( ){ fNEvents_unweighted +=1 ; }
+      void IncrementNEvents_unweighted( ){ fTrainInfo->fNEvents_unweighted +=1 ; }
     
       // return the sum of the signal weights in the node
-      Float_t GetNSigEvents( void ) const  { return fNSigEvents; }
+      Float_t GetNSigEvents( void ) const  { return fTrainInfo->fNSigEvents; }
     
       // return the sum of the backgr weights in the node
-      Float_t GetNBkgEvents( void ) const  { return fNBkgEvents; }
+      Float_t GetNBkgEvents( void ) const  { return fTrainInfo->fNBkgEvents; }
     
       // return  the number of events that entered the node (during training)
-      Float_t GetNEvents( void ) const  { return fNEvents; }
+      Float_t GetNEvents( void ) const  { return fTrainInfo->fNEvents; }
     
       // return the sum of unweighted signal weights in the node
-      Float_t GetNSigEvents_unweighted( void ) const  { return fNSigEvents_unweighted; }
+      Float_t GetNSigEvents_unweighted( void ) const  { return fTrainInfo->fNSigEvents_unweighted; }
     
       // return the sum of unweighted backgr weights in the node
-      Float_t GetNBkgEvents_unweighted( void ) const  { return fNBkgEvents_unweighted; }
+      Float_t GetNBkgEvents_unweighted( void ) const  { return fTrainInfo->fNBkgEvents_unweighted; }
     
       // return  the number of unweighted events that entered the node (during training)
-      Float_t GetNEvents_unweighted( void ) const  { return fNEvents_unweighted; }
+      Float_t GetNEvents_unweighted( void ) const  { return fTrainInfo->fNEvents_unweighted; }
     
     
      // set the chosen index, measure of "purity" (separation between S and B) AT this node
-      void SetSeparationIndex( Float_t sep ){ fSeparationIndex =sep ; }
+      void SetSeparationIndex( Float_t sep ){ fTrainInfo->fSeparationIndex =sep ; }
       // return the separation index AT this node
-      Float_t GetSeparationIndex( void ) const  { return fSeparationIndex; }
+      Float_t GetSeparationIndex( void ) const  { return fTrainInfo->fSeparationIndex; }
     
       // set the separation, or information gained BY this nodes selection
-      void SetSeparationGain( Float_t sep ){ fSeparationGain =sep ; }
+      void SetSeparationGain( Float_t sep ){ fTrainInfo->fSeparationGain =sep ; }
       // return the gain in separation obtained by this nodes selection
-      Float_t GetSeparationGain( void ) const  { return fSeparationGain; }
+      Float_t GetSeparationGain( void ) const  { return fTrainInfo->fSeparationGain; }
     
       // printout of the node
       virtual void Print( ostream& os ) const;
@@ -199,42 +258,42 @@ namespace TMVA {
       void SetSequence(ULong_t s) {fSequence=s;}
     
       // the node resubstitution estimate, R(t), for Cost Complexity pruning
-      inline void SetNodeR( Double_t r ) { fNodeR = r;    }
-      inline Double_t GetNodeR( ) const  { return fNodeR; }
+      inline void SetNodeR( Double_t r ) { fTrainInfo->fNodeR = r;    }
+      inline Double_t GetNodeR( ) const  { return fTrainInfo->fNodeR; }
 
       // the resubstitution estimate, R(T_t), of the tree rooted at this node
-      inline void SetSubTreeR( Double_t r ) { fSubTreeR = r;    }
-      inline Double_t GetSubTreeR( ) const  { return fSubTreeR; }
+      inline void SetSubTreeR( Double_t r ) { fTrainInfo->fSubTreeR = r;    }
+      inline Double_t GetSubTreeR( ) const  { return fTrainInfo->fSubTreeR; }
 
       //                             R(t) - R(T_t)
       // the critical point alpha =  -------------
       //                              |~T_t| - 1
-      inline void SetAlpha( Double_t alpha ) { fAlpha = alpha; }
-      inline Double_t GetAlpha( ) const      { return fAlpha;  }
+      inline void SetAlpha( Double_t alpha ) { fTrainInfo->fAlpha = alpha; }
+      inline Double_t GetAlpha( ) const      { return fTrainInfo->fAlpha;  }
     
       // the minimum alpha in the tree rooted at this node
-      inline void SetAlphaMinSubtree( Double_t g ) { fG = g;    }
-      inline Double_t GetAlphaMinSubtree( ) const  { return fG; }
+      inline void SetAlphaMinSubtree( Double_t g ) { fTrainInfo->fG = g;    }
+      inline Double_t GetAlphaMinSubtree( ) const  { return fTrainInfo->fG; }
 
       // number of terminal nodes in the subtree rooted here
-      inline void SetNTerminal( Int_t n ) { fNTerminal = n;    }
-      inline Int_t GetNTerminal( ) const  { return fNTerminal; }
+      inline void SetNTerminal( Int_t n ) { fTrainInfo->fNTerminal = n;    }
+      inline Int_t GetNTerminal( ) const  { return fTrainInfo->fNTerminal; }
 
       // number of background/signal events from the pruning validation sample
-      inline void SetNBValidation( Double_t b ) { fNB = b; }
-      inline void SetNSValidation( Double_t s ) { fNS = s; }
-      inline Double_t GetNBValidation( ) const  { return fNB; }
-      inline Double_t GetNSValidation( ) const  { return fNS; }
+      inline void SetNBValidation( Double_t b ) { fTrainInfo->fNB = b; }
+      inline void SetNSValidation( Double_t s ) { fTrainInfo->fNS = s; }
+      inline Double_t GetNBValidation( ) const  { return fTrainInfo->fNB; }
+      inline Double_t GetNSValidation( ) const  { return fTrainInfo->fNS; }
 
     
-      inline void SetSumTarget(Float_t t)  {fSumTarget = t; }
-      inline void SetSumTarget2(Float_t t2){fSumTarget2 = t2; }
+      inline void SetSumTarget(Float_t t)  {fTrainInfo->fSumTarget = t; }
+      inline void SetSumTarget2(Float_t t2){fTrainInfo->fSumTarget2 = t2; }
 
-      inline void AddToSumTarget(Float_t t)  {fSumTarget += t; }
-      inline void AddToSumTarget2(Float_t t2){fSumTarget2 += t2; }
+      inline void AddToSumTarget(Float_t t)  {fTrainInfo->fSumTarget += t; }
+      inline void AddToSumTarget2(Float_t t2){fTrainInfo->fSumTarget2 += t2; }
 
-      inline Float_t GetSumTarget()  const {return fSumTarget; }
-      inline Float_t GetSumTarget2() const {return fSumTarget2; }
+      inline Float_t GetSumTarget()  const {return fTrainInfo? fTrainInfo->fSumTarget : -9999;}
+      inline Float_t GetSumTarget2() const {return fTrainInfo? fTrainInfo->fSumTarget2: -9999;}
 
     
       // reset the pruning validation data
@@ -246,46 +305,26 @@ namespace TMVA {
       void PrintPrune( ostream& os ) const ;
       void PrintRecPrune( ostream& os ) const;
 
-      void     SetCC(Double_t cc) {fCC = cc;};
-      Double_t GetCC() const {return fCC;};
+      void     SetCC(Double_t cc);
+      Double_t GetCC() const {return (fTrainInfo? fTrainInfo->fCC : -1.);}
 
       Float_t GetSampleMin(UInt_t ivar) const;
       Float_t GetSampleMax(UInt_t ivar) const;
       void     SetSampleMin(UInt_t ivar, Float_t xmin);
       void     SetSampleMax(UInt_t ivar, Float_t xmax);
 
+      static bool fgIsTraining; // static variable to flag training phase in which we need fTrainInfo
+
    private:
 
       virtual void ReadAttributes(void* node, UInt_t tmva_Version_Code = TMVA_VERSION_CODE );
       virtual Bool_t ReadDataRecord( istream& is, UInt_t tmva_Version_Code = TMVA_VERSION_CODE );
       virtual void ReadContent(std::stringstream& s);
 
-      Double_t fNodeR;           // node resubstitution estimate, R(t)
-      Double_t fSubTreeR;        // R(T) = Sum(R(t) : t in ~T)
-      Double_t fAlpha;           // critical alpha for this node
-      Double_t fG;               // minimum alpha in subtree rooted at this node
-      Int_t    fNTerminal;       // number of terminal nodes in subtree rooted at this node
-      Double_t fNB;              // sum of weights of background events from the pruning sample in this node
-      Double_t fNS;              // ditto for the signal events
-
-      Float_t  fSumTarget;       // sum of weight*target  used for the calculatio of the variance (regression)
-      Float_t  fSumTarget2;      // sum of weight*target^2 used for the calculatio of the variance (regression)
-    
-
      Float_t  fCutValue;        // cut value applied on this node to discriminate bkg against sig
       Bool_t   fCutType;         // true: if event variable > cutValue ==> signal , false otherwise
       Short_t  fSelector;        // index of variable used in node selection (decision tree) 
     
-      Float_t  fNSigEvents;      // sum of weights of signal event in the node
-      Float_t  fNBkgEvents;      // sum of weights of backgr event in the node
-      Float_t  fNEvents;         // number of events in that entered the node (during training)
-    
-      Float_t  fNSigEvents_unweighted;      // sum of signal event in the node
-      Float_t  fNBkgEvents_unweighted;      // sum of backgr event in the node
-      Float_t  fNEvents_unweighted;         // number of events in that entered the node (during training)
-    
-      Float_t  fSeparationIndex; // measure of "purity" (separation between S and B) AT this node
-      Float_t  fSeparationGain;  // measure of "purity", separation, or information gained BY this nodes selection
       Float_t  fResponse;        // response value in case of regression
       Float_t  fRMS;             // response RMS of the regression node 
       Int_t    fNodeType;        // Type of node: -1 == Bkg-leaf, 1 == Signal-leaf, 0 = internal 
@@ -294,15 +333,9 @@ namespace TMVA {
 
       Bool_t   fIsTerminalNode;    //! flag to set node as terminal (i.e., without deleting its descendants)
 
-      Double_t fCC;              // debug variable for cost complexity pruing .. temporary bla
-
-      std::vector< Float_t >  fSampleMin; // the minima for each ivar of the sample on the node during training
-      std::vector< Float_t >  fSampleMax; // the maxima for each ivar of the sample on the node during training
-
-
-
       static MsgLogger* fgLogger;    // static because there is a huge number of nodes...
-    
+      mutable DTNodeTrainingInfo* fTrainInfo;
+
       ClassDef(DecisionTreeNode,0) // Node for the Decision Tree 
     
          };
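The refactoring above moves all training-time statistics (event counts, separation indices, pruning quantities) out of DecisionTreeNode into the separately allocated DTNodeTrainingInfo, so nodes stay slim once a trained forest is only being applied; accessors like GetCC() and GetSumTarget() already guard against a missing fTrainInfo. A sketch of the allocation pattern these declarations suggest -- the constructor body itself is an assumption:

   // Hypothetical, in the DecisionTreeNode constructor:
   if (fgIsTraining) fTrainInfo = new DTNodeTrainingInfo(); // full statistics for training
   else              fTrainInfo = 0;                        // slim node for application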
diff --git a/tmva/inc/Event.h b/tmva/inc/Event.h
index a0dc4b9d6eea34a7e95ad4b41f33ee9b1d64d414..e04572292df2db7d984e072765d5f1a7044ef293 100644
--- a/tmva/inc/Event.h
+++ b/tmva/inc/Event.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$   
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -13,6 +13,7 @@
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
  *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch>  - CERN, Switzerland          *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
@@ -70,14 +71,12 @@ namespace TMVA {
       ~Event();
 
       // accessors
-      Bool_t  IsSignal()          const { return (fClass==fSignalClass); } // deprecated: use <DataSetInfo>.IsSignal( Event* )
       Bool_t  IsDynamic()         const {return fDynamic; }
 
       Float_t GetWeight()         const { return fWeight*fBoostWeight; }
       Float_t GetOriginalWeight() const { return fWeight; }
       Float_t GetBoostWeight()    const { return TMath::Max(Float_t(0.0001),fBoostWeight); }
       UInt_t  GetClass()          const { return fClass; }  
-      Int_t   GetSignalClass()    const { return fSignalClass; } // intermediate solution to keep IsSignal() of Event working
 
       UInt_t  GetNVariables()        const;
       UInt_t  GetNTargets()          const;
@@ -101,7 +100,6 @@ namespace TMVA {
       void    SetClass              ( UInt_t t )  { fClass=t; }
       void    SetVal                ( UInt_t ivar, Float_t val );
       void    SetTarget             ( UInt_t itgt, Float_t value );
-      void    SetSignalClass        ( UInt_t cls ){ fSignalClass = cls; } // intermediate solution to keep IsSignal() of Event working. TODO: remove IsSignal() from Event
       void    SetSpectator          ( UInt_t ivar, Float_t value );
       void    SetVariableArrangement( std::vector<UInt_t>* const m ) const;
 
@@ -122,7 +120,6 @@ namespace TMVA {
       Float_t                        fWeight;          // event weight (product of global and individual weights)
       Float_t                        fBoostWeight;     // internal weight to be set by boosting algorithm
       Bool_t                         fDynamic;         // is set when the dynamic values are taken
-      UInt_t                         fSignalClass;     // intermediate solution to keep IsSignal() of Event working. TODO: remove IsSignal() from Event
       
       static Int_t                   fgCount;          // count instances of Event
    };
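With fSignalClass and IsSignal() removed from Event, the signal/background query now belongs to DataSetInfo, which owns the class definitions (see the IsSignal declaration in the DataSetInfo.h hunk above). Migration sketch for existing call sites -- dsi is assumed to be the relevant DataSetInfo:

   // before:  if (ev->IsSignal()) { ... }
   // now:
   if (dsi.IsSignal( ev )) { ... }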
diff --git a/tmva/inc/Factory.h b/tmva/inc/Factory.h
index d45dee22dc1e831312ee0bd26f6a73a14f035f91..e2e2dcb2ed741385240606e36d9d6358c3fddc03 100644
--- a/tmva/inc/Factory.h
+++ b/tmva/inc/Factory.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$   
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss, Kai Voss 
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -68,6 +68,7 @@ namespace TMVA {
    class MethodBase;
    class DataInputHandler;
    class DataSetInfo;
+   class DataSetManager;
    class VariableTransformBase;
 
    class Factory : public Configurable {
@@ -234,6 +235,9 @@ namespace TMVA {
 
       // data members
 
+
+      DataSetManager* fDataSetManager; // DSMTEST
+
       static TFile*                             fgTargetFile;     //! ROOT output file
 
       DataInputHandler*                         fDataInputHandler;
diff --git a/tmva/inc/IMetric.h b/tmva/inc/IMetric.h
deleted file mode 100644
index 9559484d4a918329c00873871f9dc07ec9e5af1b..0000000000000000000000000000000000000000
--- a/tmva/inc/IMetric.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// @(#)root/tmva $Id$ 
-// Author: Peter Speckmayer
-
-/**********************************************************************************
- * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
- * Package: TMVA                                                                  *
- * Class  : IMetric                                                         *
- * Web    : http://tmva.sourceforge.net                                           *
- *                                                                                *
- * Description:                                                                   *
- *       Fitter using a Genetic Algorithm                                         *
- *                                                                                *
- * Authors (alphabetical):                                                        *
- *      Peter Speckmayer <speckmay@mail.cern.ch>  - CERN, Switzerland             *
- *                                                                                *
- * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
- *                                                                                *
- * Redistribution and use in source and binary forms, with or without             *
- * modification, are permitted according to the terms listed in LICENSE           *
- * (http://tmva.sourceforge.net/LICENSE)                                          *
- **********************************************************************************/
-
-#ifndef ROOT_TMVA_IMetric
-#define ROOT_TMVA_IMetric
-
-#include <vector>
-
-#ifndef ROOT_Rtypes
-#include "Rtypes.h"
-#endif
-
-//////////////////////////////////////////////////////////////////////////
-//                                                                      //
-// IMetric                                                               //
-//                                                                      //
-// distance between two points in parameter space                       //
-//                                                                      //
-//////////////////////////////////////////////////////////////////////////
-
-
-namespace TMVA {
-
-
-   class IMetric {
-
-   public:
-
-      IMetric();
-      virtual ~IMetric() {}
-
-      virtual Double_t Distance( std::vector<Double_t>& pointA, std::vector<Double_t>& pointB ) = 0;
-      void SetParameters( std::vector<Double_t>* parameters ) { fParameters = parameters; };
-      std::vector<Double_t>* GetParameters() { return fParameters; };
-
-   protected:
-      std::vector<Double_t>* fParameters;
-      
-   private:
-      
-      ClassDef(IMetric,0) // calculates the "distance" between two points
-   };
-
-} // namespace TMVA
-
-#endif
-
-
diff --git a/tmva/inc/Interval.h b/tmva/inc/Interval.h
index 4a9f24bba2f196dffea893c88d038e175ee9718d..370d7f109043d79cc4510ee9e987ba4ca54a8ba8 100644
--- a/tmva/inc/Interval.h
+++ b/tmva/inc/Interval.h
@@ -68,8 +68,8 @@ namespace TMVA {
       Double_t fMin, fMax;    // the constraints of the Interval
       Int_t    fNbins;        // when >0 : number of bins (discrete interval); when =0 continuous interval
 
-      mutable MsgLogger* fLogger;   // message logger
-      MsgLogger& Log() const { return *fLogger; }          
+      static MsgLogger* fgLogger;   // message logger
+      MsgLogger& Log() const { return *fgLogger; }          
 
      ClassDef(Interval,0)    // Interval definition, continuous and discrete
    };
diff --git a/tmva/inc/LinkDef1.h b/tmva/inc/LinkDef1.h
index 15910f491c879a3d1ca812f22fd77b37c405e61c..3574eecf86bad7892b03dde4f69a7caad7f494d2 100644
--- a/tmva/inc/LinkDef1.h
+++ b/tmva/inc/LinkDef1.h
@@ -35,7 +35,6 @@
 #pragma link C++ class TMVA::MethodFDA+;
 #pragma link C++ class TMVA::MethodMLP+;
 #pragma link C++ class TMVA::MethodCommittee+;
-#pragma link C++ class TMVA::MethodSeedDistance+;
 #pragma link C++ class TMVA::MethodBoost+;
 #pragma link C++ class TMVA::MethodPDEFoam+;
 #pragma link C++ class TMVA::MethodLD+;
diff --git a/tmva/inc/LinkDef3.h b/tmva/inc/LinkDef3.h
index 63d559c49f6aefd6fd1cee268e6d0e00f727d030..6c64978df1cb7cddfb90496c6b03a99fbfd46c33 100644
--- a/tmva/inc/LinkDef3.h
+++ b/tmva/inc/LinkDef3.h
@@ -22,10 +22,6 @@
 #pragma link C++ class TMVA::MinuitFitter+;
 #pragma link C++ class TMVA::MinuitWrapper+;
 #pragma link C++ class TMVA::IFitterTarget+;
-#pragma link C++ class TMVA::IMetric+;
-#pragma link C++ class TMVA::MetricEuler+;
-#pragma link C++ class TMVA::MetricManhattan+;
-#pragma link C++ class TMVA::SeedDistance+;
 #pragma link C++ class TMVA::PDEFoam+;
 #pragma link C++ class TMVA::PDEFoamDistr+;
 #pragma link C++ class TMVA::PDEFoamVect+;
diff --git a/tmva/inc/MethodANNBase.h b/tmva/inc/MethodANNBase.h
index 479fd194a246188e7c2a7bd959258bc514d23964..1241a67d72ded43f0e1a09b5497f04788ce0208b 100644
--- a/tmva/inc/MethodANNBase.h
+++ b/tmva/inc/MethodANNBase.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Matt Jachowski
+// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -14,6 +14,7 @@
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker  <Andreas.Hocker@cern.ch> - CERN, Switzerland             *
  *      Matt Jachowski   <jachowski@stanford.edu> - Stanford University, USA      *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch>  - CERN, Switzerland          *
  *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>   - CERN, Switzerland             *
  *                                                                                *
  * Small changes (regression):                                                    *
@@ -122,6 +123,8 @@ namespace TMVA {
       virtual Double_t GetMvaValue( Double_t* err = 0 );
 
       virtual const std::vector<Float_t> &GetRegressionValues();
+
+      virtual const std::vector<Float_t> &GetMulticlassValues();
       
       // write method specific histos to target file
       virtual void WriteMonitoringHistosToFile() const;
@@ -133,8 +136,10 @@ namespace TMVA {
       virtual void DeclareOptions();
       virtual void ProcessOptions();
       
-      Bool_t Debug() const { return fgDEBUG; }
-      
+      Bool_t Debug() const;
+
+      enum EEstimator      { kMSE=0,kCE};        
+
    protected:
 
       virtual void MakeClassSpecific( std::ostream&, const TString& ) const;
@@ -159,10 +164,16 @@ namespace TMVA {
       TObjArray*    fNetwork;         // TObjArray of TObjArrays representing network
       TObjArray*    fSynapses;        // array of pointers to synapses, no structural data
       TActivation*  fActivation;      // activation function to be used for hidden layers
+      TActivation*  fOutput;          // activation function to be used for output layers, depending on estimator
       TActivation*  fIdentity;        // activation for input and output layers
       TRandom3*     frgen;            // random number generator for various uses
       TNeuronInput* fInputCalculator; // input calculator for all neurons
 
+      std::vector<Int_t>        fRegulatorIdx;  //index to different priors from every synapses 
+      std::vector<Double_t>     fRegulators;    //the priors as regulator        
+      EEstimator                fEstimator; 
+      TString                   fEstimatorS;
+
       // monitoring histograms
       TH1F* fEstimatorHistTrain; // monitors convergence of training sample
       TH1F* fEstimatorHistTest;  // monitors convergence of independent test sample
@@ -173,6 +184,12 @@ namespace TMVA {
      std::vector<TH1*> fEpochMonHistB; // epoch monitoring histograms for background
      std::vector<TH1*> fEpochMonHistW; // epoch monitoring histograms for weights
 
+      
+      // general
+      TMatrixD           fInvHessian;           // zjh
+      bool               fUseRegulator;         // zjh
+
+
    private:
       
       // helper functions for building network
@@ -200,11 +217,11 @@ namespace TMVA {
       TObjArray*              fInputLayer;      // cache this for fast access
       std::vector<TNeuron*>   fOutputNeurons;   // cache this for fast access
       TString                 fLayerSpec;       // layout specification option
-      
+      Int_t                   fRandomSeed;      // random seed for initial synapse weights
+
       // some static flags
       static const Bool_t fgDEBUG      = kTRUE;  // debug flag
-      static const Bool_t fgFIXED_SEED = kFALSE;  // fix rand generator seed
-          
+    
       ClassDef(MethodANNBase,0) // Base class for TMVA ANNs
    };
    
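
The new EEstimator enum (kMSE/kCE) selects the error function the network minimises, with fOutput holding the matching output-layer activation. As orientation, a minimal sketch of the two per-event error terms, assuming a single sigmoid output y in (0,1) and a 0/1 target t; the actual implementations are the GetMSEErr/GetCEErr members declared in MethodMLP.h further below:

   #include <cmath>

   // Sketch only, not TMVA code: per-event error for the two estimators.
   double MSEErr(double y, double t)
   {
      return 0.5 * (y - t) * (y - t);                 // mean-squared error
   }

   double CEErr(double y, double t)
   {
      return -t*std::log(y) - (1.-t)*std::log(1.-y);  // cross-entropy
   }
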
diff --git a/tmva/inc/MethodBDT.h b/tmva/inc/MethodBDT.h
index 05bbdab4b023be2f6fad09e5ad8fe50e0d877cc3..c0c00e97f7737c8ccdd315642fe4811cced2bdca 100644
--- a/tmva/inc/MethodBDT.h
+++ b/tmva/inc/MethodBDT.h
@@ -101,12 +101,13 @@ namespace TMVA {
       // calculate the MVA value
       Double_t GetMvaValue( Double_t* err = 0);
       Double_t GetMvaValue( Double_t* err , UInt_t useNTrees );
+      const std::vector<Float_t>& GetMulticlassValues();
 
       // regression response
       const std::vector<Float_t>& GetRegressionValues();
 
       // apply the boost algorithm to a tree in the collection
-      Double_t Boost( std::vector<TMVA::Event*>, DecisionTree *dt, Int_t iTree );
+      Double_t Boost( std::vector<TMVA::Event*>, DecisionTree *dt, Int_t iTree, UInt_t cls = 0);
 
       // ranking of input variables
       const Ranking* CreateRanking();
@@ -162,32 +163,31 @@ namespace TMVA {
       // binomial likelihood gradient boost for classification
       // (see Friedman: "Greedy Function Approximation: a Gradient Boosting Machine"
       // Technical report, Dept. of Statistics, Stanford University)
-      Double_t GradBoost( std::vector<TMVA::Event*>, DecisionTree *dt );
+      Double_t GradBoost( std::vector<TMVA::Event*>, DecisionTree *dt, UInt_t cls = 0);
       Double_t GradBoostRegression(std::vector<TMVA::Event*>, DecisionTree *dt );
       void InitGradBoost( std::vector<TMVA::Event*>);
-      void UpdateTargets( std::vector<TMVA::Event*>);
-      void UpdateTargetsRegression( std::vector<TMVA::Event*>,Bool_t first=kFALSE);
+      void UpdateTargets( std::vector<TMVA::Event*>, UInt_t cls = 0);
+      void UpdateTargetsRegression( std::vector<TMVA::Event*>,Bool_t first=kFALSE);    
       Double_t GetGradBoostMVA(TMVA::Event& e, UInt_t nTrees);
-      void GetRandomSubSample();
-      Double_t GetWeightedQuantile(std::vector<std::vector<Double_t> > &vec, const Double_t quantile, const Double_t SumOfWeights = 0.0);
+      void GetRandomSubSample();  
+      Double_t GetWeightedQuantile(std::vector<std::pair<Double_t, Double_t> > vec, const Double_t quantile, const Double_t SumOfWeights = 0.0);
 
       std::vector<TMVA::Event*>       fEventSample;     // the training events
       std::vector<TMVA::Event*>       fValidationSample;// the Validation events
       std::vector<TMVA::Event*>       fSubSample;       // subsample for bagged grad boost
-
       Int_t                           fNTrees;          // number of decision trees requested
       std::vector<DecisionTree*>      fForest;          // the collection of decision trees
       std::vector<double>             fBoostWeights;    // the weights applied in the individual boosts
-      std::vector<double>             fInitialWeights;  // the initial event weights
-      std::vector<double>             fRegResiduals;    // temporary storage for regression residuals
       TString                         fBoostType;       // string specifying the boost type
-      Double_t                        fSumOfWeights;    // total sum of all event weights
       Double_t                        fAdaBoostBeta;    // beta parameter for AdaBoost algorithm
       TString                         fAdaBoostR2Loss;  // loss type used in AdaBoostR2 (Linear,Quadratic or Exponential)
       Double_t                        fTransitionPoint; // break-down point for gradient regression
       Double_t                        fShrinkage;       // learning rate for gradient boost;
       Bool_t                          fBaggedGradBoost; // turn bagging in combination with grad boost on/off
       Double_t                        fSampleFraction;  // fraction of events used for bagged grad boost
+      Double_t                        fSumOfWeights;    // sum of all event weights
+      std::vector<std::pair<Double_t, Double_t> > fWeightedResiduals;  // weighted regression residuals
+      std::map< TMVA::Event*,std::vector<double> > fResiduals; // individual event residuals for gradient boost
 
       //options for the decision Tree
       SeparationBase                 *fSepType;         // the separation used in node splitting
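
Boost, GradBoost and UpdateTargets gain a cls argument, and fResiduals keeps one residual vector per event: the bookkeeping required to grow one forest per class in multiclass gradient boosting. A standalone toy of the underlying idea (Friedman's multiclass scheme, not the actual MethodBDT code): with per-class responses F_k, the pseudo-residual for class k is the 0/1 truth y_k minus the softmax probability p_k.

   #include <cmath>
   #include <vector>

   // Toy pseudo-residuals r_k = y_k - p_k with p_k = exp(F_k) / sum_l exp(F_l).
   std::vector<double> Residuals(const std::vector<double>& F, unsigned trueClass)
   {
      double norm = 0;
      for (unsigned k = 0; k < F.size(); k++) norm += std::exp(F[k]);
      std::vector<double> r(F.size());
      for (unsigned k = 0; k < F.size(); k++)
         r[k] = (k == trueClass ? 1. : 0.) - std::exp(F[k])/norm;
      return r;
   }
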
diff --git a/tmva/inc/MethodBase.h b/tmva/inc/MethodBase.h
index 802f8f91a1745076b5cb0209f861700a165329ff..10e92ccb377067fd49fe1fddf24bd2973b57c78f 100644
--- a/tmva/inc/MethodBase.h
+++ b/tmva/inc/MethodBase.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$   
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss, Kai Voss 
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -18,9 +18,9 @@
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
  *      LAPP, Annecy, France                                                      *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
@@ -75,26 +75,26 @@ namespace TMVA {
    class MethodCuts;
    class MethodBoost;
    class DataSetInfo;
-   
+
    class MethodBase : virtual public IMethod, public Configurable {
-      
+
    public:
 
       enum EWeightFileType { kROOT=0, kTEXT };
-      
+
       // default constructur
       MethodBase( const TString& jobName,
                   Types::EMVA methodType,
-                  const TString& methodTitle, 
+                  const TString& methodTitle,
                   DataSetInfo& dsi,
-                  const TString& theOption = "", 
+                  const TString& theOption = "",
                   TDirectory* theBaseDir = 0 );
-      
-      // constructor used for Testing + Application of the MVA, only (no training), 
+
+      // constructor used for Testing + Application of the MVA, only (no training),
       // using given weight file
       MethodBase( Types::EMVA methodType,
                   DataSetInfo& dsi,
-                  const TString& weightFile, 
+                  const TString& weightFile,
                   TDirectory* theBaseDir = 0 );
 
       // default destructur
@@ -126,6 +126,9 @@ namespace TMVA {
       // performs classifier testing
       virtual void     TestClassification();
 
+      // performs multiclass classifier testing
+      virtual void     TestMulticlass();
+
       // performs regression testing
       virtual void     TestRegression( Double_t& bias, Double_t& biasT, 
                                        Double_t& dev,  Double_t& devT, 
@@ -134,29 +137,41 @@ namespace TMVA {
                                        Double_t& corr, 
                                        Types::ETreeType type );
 
-      // classifier response - some methods may return a per-event error estimate (unless: *err = -1)
-      virtual Double_t GetMvaValue( Double_t* err = 0 ) = 0;
-
-      Double_t GetMvaValue( const TMVA::Event* const ev, Double_t* err = 0 );
-
       // options treatment
       virtual void     Init()           = 0;
       virtual void     DeclareOptions() = 0;
       virtual void     ProcessOptions() = 0;
       virtual void     DeclareCompatibilityOptions(); // declaration of past options
 
+      // classifier response - some methods may return a per-event error estimate (unless: *err = -1)
+      virtual Double_t GetMvaValue( Double_t* err = 0 ) = 0;
+
+      //zjh=>
+      virtual Double_t GetMvaValues( Double_t& errUpper, Double_t& errLower )
+         { Double_t mva = GetMvaValue(&errUpper); errLower = errUpper; return mva; }
+      //<=zjh
+
+      // signal/background classification response
+      Double_t GetMvaValue( const TMVA::Event* const ev, Double_t* err = 0 );
+
       // regression response
       virtual const std::vector<Float_t>& GetRegressionValues() {
          std::vector<Float_t>* ptr = new std::vector<Float_t>(0);
          return (*ptr);
       }
 
+      // multiclass classification response (default: an empty vector; multiclass-capable methods override this)
+      virtual const std::vector<Float_t>& GetMulticlassValues() {
+         std::vector<Float_t>* ptr = new std::vector<Float_t>(0);
+         return (*ptr);
+      }
+
       // probability of classifier response (mvaval) to be signal (requires "CreateMvaPdf" option set)
       virtual Double_t GetProba( Double_t mvaVal, Double_t ap_sig );
 
       // Rarity of classifier response (signal or background (default) is uniform in [0,1])
       virtual Double_t GetRarity( Double_t mvaVal, Types::ESBType reftype = Types::kBackground ) const;
- 
+
       // create ranking
       virtual const Ranking* CreateRanking() = 0;
 
@@ -176,7 +191,7 @@ namespace TMVA {
       void WriteStateToFile     () const;
       void ReadStateFromFile    ();
 
-   protected:      
+   protected:
       // the actual "weights"
       virtual void AddWeightsXMLTo      ( void* parent ) const = 0;
       virtual void ReadWeightsFromXML   ( void* wghtnode ) = 0;
@@ -191,9 +206,16 @@ namespace TMVA {
       void ReadStateFromXML     ( void* parent );
       void WriteStateToStream   ( std::ostream& tf ) const;   // needed for MakeClass
       void WriteVarsToStream    ( std::ostream& tf, const TString& prefix = "" ) const;  // needed for MakeClass
+
+
+   public: // these need to be public, they are used to read in-memory weight files
       void ReadStateFromStream  ( std::istream& tf );         // backward compatibility
       void ReadStateFromStream  ( TFile&        rf );         // backward compatibility
+#if ROOT_SVN_REVISION >= 32259
+      void ReadStateFromXMLString( const char* xmlstr );      // for reading from memory
+#endif
 
+   private:
       // the variable information
       void AddVarsXMLTo         ( void* parent  ) const;
       void AddSpectatorsXMLTo   ( void* parent  ) const;
@@ -215,9 +237,9 @@ namespace TMVA {
       // ---------- public evaluation methods --------------------------------------
 
       // individual initialistion for testing of each method
-      // overload this one for individual initialisation of the testing, 
-      // it is then called automatically within the global "TestInit" 
-      
+      // overload this one for individual initialisation of the testing,
+      // it is then called automatically within the global "TestInit"
+
       // variables (and private menber functions) for the Evaluation:
       // get the effiency. It fills a histogram for efficiency/vs/bkg
       // and returns the one value fo the efficiency demanded for 
@@ -321,6 +343,7 @@ namespace TMVA {
       virtual void          SetAnalysisType( Types::EAnalysisType type ) { fAnalysisType = type; }
       Types::EAnalysisType  GetAnalysisType() const { return fAnalysisType; }
       Bool_t                DoRegression() const { return fAnalysisType == Types::kRegression; }
+      Bool_t                DoMulticlass() const { return fAnalysisType == Types::kMulticlass; }
 
       // setter method for suppressing writing to XML and writing of standalone classes
       void                  DisableWriting(Bool_t setter){ fDisableWriting = setter; }
@@ -368,7 +391,7 @@ namespace TMVA {
       virtual void     MakeClassSpecificHeader( std::ostream&, const TString& = "" ) const {}
 
       // static pointer to this object - required for ROOT finder (to be solved differently)
-      static MethodBase* GetThisBase() { return fgThisBase; }
+      static MethodBase* GetThisBase();
 
       // some basic statistical analysis
       void Statistics( Types::ETreeType treeType, const TString& theVarName,
@@ -409,7 +432,7 @@ namespace TMVA {
       // ---------- private acccessors ---------------------------------------------
 
       // reset required for RootFinder
-      void             ResetThisBase() { fgThisBase = this; }
+      void             ResetThisBase();
 
       // ---------- private auxiliary methods --------------------------------------
 
@@ -427,6 +450,7 @@ namespace TMVA {
       virtual void     AddClassifierOutput    ( Types::ETreeType type );
       virtual void     AddClassifierOutputProb( Types::ETreeType type );
       virtual void     AddRegressionOutput    ( Types::ETreeType type );
+      virtual void     AddMulticlassOutput    ( Types::ETreeType type );
 
    private:
 
@@ -448,7 +472,8 @@ namespace TMVA {
 
       Types::EAnalysisType  fAnalysisType;         // method-mode : true --> regression, false --> classification
 
-      std::vector<Float_t>* fRegressionReturnVal;  // holds the return-value for the regression
+      std::vector<Float_t>* fRegressionReturnVal;  // holds the return-values for the regression
+      std::vector<Float_t>* fMulticlassReturnVal;  // holds the return-values for the multiclass classification
 
    private:
 
@@ -559,7 +584,7 @@ namespace TMVA {
       static MethodBase* fgThisBase;         // this pointer
 
 
-      // ===== depriciated options, kept for backward compatibility  =====
+      // ===== deprecated options, kept for backward compatibility  =====
    private:
 
       Bool_t           fNormalise;                   // normalise input variables
@@ -591,6 +616,7 @@ inline const TMVA::Event* TMVA::MethodBase::GetEvent( const TMVA::Event* ev ) co
    return GetTransformationHandler().Transform(ev);
 }
 
+//_______________________________________________________________________
 inline const TMVA::Event* TMVA::MethodBase::GetEvent() const 
 {
    if(fTmpEvent)
@@ -599,24 +625,28 @@ inline const TMVA::Event* TMVA::MethodBase::GetEvent() const
       return GetTransformationHandler().Transform(Data()->GetEvent());
 }
 
+//_______________________________________________________________________
 inline const TMVA::Event* TMVA::MethodBase::GetEvent( Long64_t ievt ) const 
 {
    assert(fTmpEvent==0);
    return GetTransformationHandler().Transform(Data()->GetEvent(ievt));
 }
 
+//_______________________________________________________________________
 inline const TMVA::Event* TMVA::MethodBase::GetEvent( Long64_t ievt, Types::ETreeType type ) const 
 {
    assert(fTmpEvent==0);
    return GetTransformationHandler().Transform(Data()->GetEvent(ievt, type));
 }
 
+//_______________________________________________________________________
 inline const TMVA::Event* TMVA::MethodBase::GetTrainingEvent( Long64_t ievt ) const 
 {
    assert(fTmpEvent==0);
    return GetEvent(ievt, Types::kTraining);
 }
 
+//_______________________________________________________________________
 inline const TMVA::Event* TMVA::MethodBase::GetTestingEvent( Long64_t ievt ) const 
 {
    assert(fTmpEvent==0);
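
Taken together these hooks define the multiclass contract of MethodBase: a method reports Types::kMulticlass support, overrides GetMulticlassValues (the base-class default returns an empty vector), and the framework collects the responses via AddMulticlassOutput/TestMulticlass. A hedged sketch of a typical override; MethodFoo and its per-class Response helper are hypothetical, while fMulticlassReturnVal, GetEvent() and DataInfo().GetNClasses() are the real members:

   const std::vector<Float_t>& MethodFoo::GetMulticlassValues()
   {
      if (fMulticlassReturnVal == 0) fMulticlassReturnVal = new std::vector<Float_t>();
      fMulticlassReturnVal->clear();
      const Event* ev = GetEvent();   // current event, transformations applied
      for (UInt_t icls = 0; icls < DataInfo().GetNClasses(); icls++)
         fMulticlassReturnVal->push_back( Response(ev, icls) );  // hypothetical helper
      return *fMulticlassReturnVal;
   }
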
diff --git a/tmva/inc/MethodBoost.h b/tmva/inc/MethodBoost.h
index 0091289bbdd129b6abdf23265e76c7bc96d00095..a7464b180af213cb072096326f79c14987522f0a 100644
--- a/tmva/inc/MethodBoost.h
+++ b/tmva/inc/MethodBoost.h
@@ -53,6 +53,10 @@
 
 namespace TMVA {
 
+   class Factory;  // DSMTEST
+   class Reader;   // DSMTEST
+   class DataSetManager;  // DSMTEST
+
    class MethodBoost : public MethodCompositeBase {
 
    public :
@@ -193,6 +197,11 @@ namespace TMVA {
       //whether to recalculate the MVA cut at every boosting step
       Bool_t            fRecalculateMVACut;
 
+
+      DataSetManager* fDataSetManager; // DSMTEST
+      friend class Factory; // DSMTEST
+      friend class Reader;  // DSMTEST
+
    protected:
 
       // get help message text
diff --git a/tmva/inc/MethodCFMlpANN.h b/tmva/inc/MethodCFMlpANN.h
index 493ce074fed62359080d42ce83e6c6df52c40e41..3b21ca3a1864dda930f473db368d051ebce9a737 100644
--- a/tmva/inc/MethodCFMlpANN.h
+++ b/tmva/inc/MethodCFMlpANN.h
@@ -136,7 +136,7 @@ namespace TMVA {
       Int_t    GetClass( Int_t ivar             ) const { return (*fClass)[ivar]; }
 
       // static pointer to this object (required for external functions
-      static MethodCFMlpANN* This( void ) { return fgThis; }  
+      static MethodCFMlpANN* This( void );
 
       // ranking of input variables
       const Ranking* CreateRanking() { return 0; }
diff --git a/tmva/inc/MethodCategory.h b/tmva/inc/MethodCategory.h
index 2db823d231912f8554b24efeba99d8d8cdb9557e..f05fcca2809771973c00533cb9c343dbe1e8094b 100644
--- a/tmva/inc/MethodCategory.h
+++ b/tmva/inc/MethodCategory.h
@@ -52,6 +52,11 @@
 
 namespace TMVA {
 
+   class Factory;  // DSMTEST
+   class Reader;   // DSMTEST
+   class MethodBoost;   // DSMTEST
+   class DataSetManager;  // DSMTEST
+
    class MethodCategory : public MethodCompositeBase {
 
    public :
@@ -123,6 +128,11 @@ namespace TMVA {
       TTree *                    fCatTree; //! needed in conjunction with TTreeFormulas for evaluation category expressions
       std::vector<TTreeFormula*> fCatFormulas;
 
+      DataSetManager* fDataSetManager; // DSMTEST
+      friend class Factory; // DSMTEST
+      friend class Reader;  // DSMTEST
+      friend class MethodBoost;  // DSMTEST
+
       ClassDef(MethodCategory,0)
    };
 }
diff --git a/tmva/inc/MethodFDA.h b/tmva/inc/MethodFDA.h
index 55417aac035a2004a59e9b2a780c3d057f77a2f0..8e73a85d80e3594332dde5f8fc0c21e5e237fe87 100644
--- a/tmva/inc/MethodFDA.h
+++ b/tmva/inc/MethodFDA.h
@@ -1,4 +1,4 @@
-// @(#)root/tmva $Id$    
+// @(#)root/tmva $Id$
 // Author: Andreas Hoecker, Peter Speckmayer
 
 /**********************************************************************************
@@ -19,7 +19,7 @@
  *      Andreas Hoecker  <Andreas.Hocker@cern.ch> - CERN, Switzerland             *
  *      Peter Speckmayer <speckmay@mail.cern.ch>  - CERN, Switzerland             *
  *                                                                                *
- * Copyright (c) 2005-2006:                                                       *
+ * Copyright (c) 2005-2010:                                                       *
  *      CERN, Switzerland                                                         *
  *      MPI-K Heidelberg, Germany                                                 *
  *                                                                                *
@@ -65,20 +65,20 @@ namespace TMVA {
 
    public:
 
-      MethodFDA( const TString& jobName, 
-                 const TString& methodTitle, 
+      MethodFDA( const TString& jobName,
+                 const TString& methodTitle,
                  DataSetInfo& theData,
                  const TString& theOption = "",
                  TDirectory* theTargetDir = 0 );
-      
-      MethodFDA( DataSetInfo& theData, 
-                 const TString& theWeightFile,  
+
+      MethodFDA( DataSetInfo& theData,
+                 const TString& theWeightFile,
                  TDirectory* theTargetDir = NULL );
-      
+
       virtual ~MethodFDA( void );
 
       Bool_t HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets );
-    
+
       // training method
       void Train( void );
 
@@ -92,7 +92,8 @@ namespace TMVA {
       // calculate the MVA value
       Double_t GetMvaValue( Double_t* err = 0 );
 
-      std::vector<Float_t>& GetRegressionValues();
+      virtual const std::vector<Float_t>& GetRegressionValues();
+      virtual const std::vector<Float_t>& GetMulticlassValues();
 
       void Init( void );
 
@@ -114,12 +115,16 @@ namespace TMVA {
 
    private:
 
+      // compute multiclass values
+      void CalculateMulticlassValues( const TMVA::Event*& evt, std::vector<Double_t>& parameters, std::vector<Float_t>& values);
+
+
       // create and interpret formula expression and compute estimator
       void     CreateFormula   ();
-      Double_t InterpretFormula( const Event*, std::vector<Double_t>& pars );
+      Double_t InterpretFormula( const Event*, std::vector<Double_t>::iterator begin, std::vector<Double_t>::iterator end );
 
-      // clean up 
-      void ClearAll();      
+      // clean up
+      void ClearAll();
 
       // print fit results
       void PrintResults( const TString&, std::vector<Double_t>&, const Double_t ) const;
@@ -129,12 +134,12 @@ namespace TMVA {
       void ProcessOptions();
 
       TString                fFormulaStringP;     // string with function
-      TString                fParRangeStringP;    // string with ranges of parameters      
+      TString                fParRangeStringP;    // string with ranges of parameters
       TString                fFormulaStringT;     // string with function
-      TString                fParRangeStringT;    // string with ranges of parameters      
+      TString                fParRangeStringT;    // string with ranges of parameters
 
       TFormula*              fFormula;            // the discrimination function
-      Int_t                  fNPars;              // number of parameters
+      UInt_t                 fNPars;              // number of parameters
       std::vector<Interval*> fParRange;           // ranges of parameters
       std::vector<Double_t>  fBestPars;           // the pars that optimise (minimise) the estimator
       TString                fFitMethod;          // estimator optimisation method
@@ -146,7 +151,10 @@ namespace TMVA {
       // sum of weights (this should become centrally available through the dataset)
       Double_t               fSumOfWeightsSig;    // sum of weights (signal)
       Double_t               fSumOfWeightsBkg;    // sum of weights (background)
-      Double_t               fSumOfWeights;       // sum of weights 
+      Double_t               fSumOfWeights;       // sum of weights
+
+      // multiple output values (multiclass / regression)
+      Int_t                  fOutputDimensions;   // number of output values
 
       ClassDef(MethodFDA,0)  // Function Discriminant Analysis
    };
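
InterpretFormula now takes an iterator range instead of the full parameter vector, so one flat vector of fNPars * fOutputDimensions parameters can be evaluated block by block, one block per output value. A self-contained toy of that slicing pattern (a dummy sum stands in for the real TFormula evaluation; assumes the vector size is a multiple of nPars):

   #include <vector>

   // Stand-in for InterpretFormula(evt, begin, end): evaluate one block.
   double EvalBlock(std::vector<double>::iterator begin,
                    std::vector<double>::iterator end)
   {
      double sum = 0;
      for (; begin != end; ++begin) sum += *begin;
      return sum;
   }

   // One output value per block of nPars parameters.
   std::vector<double> EvalAll(std::vector<double>& pars, unsigned nPars)
   {
      std::vector<double> out;
      for (std::vector<double>::iterator it = pars.begin(); it != pars.end(); it += nPars)
         out.push_back( EvalBlock(it, it + nPars) );
      return out;
   }
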
diff --git a/tmva/inc/MethodMLP.h b/tmva/inc/MethodMLP.h
index c6ceb4398fb5eab542bb985a0ef663c207d0f869..9503b13eb3da2b80e52c2c68adc9bb1abae8652b 100644
--- a/tmva/inc/MethodMLP.h
+++ b/tmva/inc/MethodMLP.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Matt Jachowski
+// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -111,6 +111,8 @@ namespace TMVA {
       enum ETrainingMethod { kBP=0, kBFGS, kGA };
       enum EBPTrainingMode { kSequential=0, kBatch };
 
+      Double_t GetMvaValues( Double_t& errUpper, Double_t& errLower );          //zjh
+
    protected:
 
       // make ROOT-independent C++ class for classifier response (classifier-specific implementation)
@@ -141,12 +143,13 @@ namespace TMVA {
       Bool_t   GetHessian( TMatrixD &Hessian, TMatrixD &Gamma, TMatrixD &Delta );
       void     SetDir( TMatrixD &Hessian, TMatrixD &Dir );
       Double_t DerivDir( TMatrixD &Dir );
-      Bool_t   LineSearch( TMatrixD &Dir, std::vector<Double_t> &Buffer );
+      Bool_t   LineSearch( TMatrixD &Dir, std::vector<Double_t> &Buffer, Double_t* dError=0 ); //zjh
       void     ComputeDEDw();
       void     SimulateEvent( const Event* ev );
       void     SetDirWeights( std::vector<Double_t> &Origin, TMatrixD &Dir, Double_t alpha );
       Double_t GetError();
-      Double_t GetSqrErr( const Event* ev, UInt_t index = 0 );
+      Double_t GetMSEErr( const Event* ev, UInt_t index = 0 );   //zjh
+      Double_t GetCEErr( const Event* ev, UInt_t index = 0 );   //zjh
 
       // backpropagation functions
       void     BackPropagationMinimize( Int_t nEpochs );
@@ -171,12 +174,20 @@ namespace TMVA {
 #ifdef MethodMLP_UseMinuit__
       // minuit functions -- commented out because they rely on a static pointer
       void MinuitMinimize();
-      static MethodMLP* GetThisPtr() { return fgThis; }
+      static MethodMLP* GetThisPtr();
       static void IFCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t ifl );
       void FCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t ifl );
 #endif
 
       // general
+      bool               fUseRegulator;         // zjh
+      Double_t           fPrior;                // zjh
+      std::vector<Double_t> fPriorDev;          // zjh
+      void               GetApproxInvHessian ( TMatrixD& InvHessian, bool regulate=true );   //rank-1 approximation, neglect 2nd derivatives. //zjh
+      void               UpdateRegulators();    // zjh
+      void               UpdatePriors();        // zjh
+      Int_t              fUpdateLimit;          // zjh
+
       ETrainingMethod fTrainingMethod; // method of training, BP or GA
       TString         fTrainMethodS;   // training method option param
 
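
The regulator members implement Bayesian learning for the MLP: UpdatePriors/UpdateRegulators adapt the weight priors during training, and GetApproxInvHessian supplies the inverse-Hessian estimate behind the asymmetric errors of GetMvaValues. A sketch of the standard Laplace-approximation error propagation this enables, using ROOT's matrix classes; sigma^2 ~ g^T H^-1 g is the textbook expression, not necessarily the exact TMVA code path:

   #include "TMatrixD.h"
   #include "TVectorD.h"

   // Response variance from output gradient g and approximate inverse Hessian.
   Double_t ResponseVariance(const TMatrixD& invHessian, const TVectorD& grad)
   {
      TVectorD tmp = grad;
      tmp *= invHessian;   // tmp = H^-1 * g
      return tmp * grad;   // dot product: g^T H^-1 g
   }
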
diff --git a/tmva/inc/MethodPDERS.h b/tmva/inc/MethodPDERS.h
index 00849c2cf7dbfa49756aa1339501df4c97e9d59e..271e0e549dda767336f9c8918ae40fa824f13ae6 100644
--- a/tmva/inc/MethodPDERS.h
+++ b/tmva/inc/MethodPDERS.h
@@ -107,7 +107,7 @@ namespace TMVA {
       Double_t         GetVolumeContentForRoot( Double_t );
 
       // static pointer to this object
-      static MethodPDERS* ThisPDERS( void ) { return fgThisPDERS; }
+      static MethodPDERS* ThisPDERS( void );
 
    protected:
 
@@ -221,7 +221,7 @@ namespace TMVA {
 
       // this carrier
       static MethodPDERS* fgThisPDERS; // this pointer (required by root finder)
-      void UpdateThis() { fgThisPDERS = this; }
+      void UpdateThis();
 
       void Init( void );
 
diff --git a/tmva/inc/MethodSeedDistance.h b/tmva/inc/MethodSeedDistance.h
deleted file mode 100644
index d74088918f1c4237653238aefa6296b0da5dd3ff..0000000000000000000000000000000000000000
--- a/tmva/inc/MethodSeedDistance.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// @(#)root/tmva $Id$    
-// Author: Andreas Hoecker, Peter Speckmayer
-
-/**********************************************************************************
- * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
- * Package: TMVA                                                                  *
- * Class  : MethodSeedDistance                                                    *
- * Web    : http://tmva.sourceforge.net                                           *
- *                                                                                *
- * Description:                                                                   *
- *                                                                                *
- * Authors (alphabetical):                                                        *
- *      Peter Speckmayer <speckmay@mail.cern.ch>  - CERN, Switzerland             *
- *                                                                                *
- * Copyright (c) 2005-2006:                                                       *
- *      CERN, Switzerland                                                         *
- *      MPI-K Heidelberg, Germany                                                 *
- *                                                                                *
- * Redistribution and use in source and binary forms, with or without             *
- * modification, are permitted according to the terms listed in LICENSE           *
- * (http://tmva.sourceforge.net/LICENSE)                                          *
- **********************************************************************************/
-
-#ifndef ROOT_TMVA_MethodSeedDistance
-#define ROOT_TMVA_MethodSeedDistance
-
-//////////////////////////////////////////////////////////////////////////
-//                                                                      //
-// MethodSeedDistance                                                   //
-//                                                                      //
-//////////////////////////////////////////////////////////////////////////
-
-#ifndef ROOT_TMVA_MethodBase
-#include "TMVA/MethodBase.h"
-#endif
-#ifndef ROOT_TMVA_IFitterTarget
-#include "TMVA/IFitterTarget.h"
-#endif
-
-class TFormula;
-
-namespace TMVA {
-
-   class Interval;
-   class Event;
-   class FitterBase;
-   class SeedDistance;
-   class IMetric;
-
-   class MethodSeedDistance : public MethodBase, public IFitterTarget {
-
-   public:
-
-      MethodSeedDistance( const TString& jobName, 
-                          const TString& methodTitle,
-                          DataSetInfo& theData,
-                          const TString& theOption = "",
-                          TDirectory* theTargetDir = 0 );
-      
-      MethodSeedDistance( DataSetInfo& theData,
-                          const TString& theWeightFile,
-                          TDirectory* theTargetDir = NULL );
-      
-      virtual ~MethodSeedDistance( void );
-    
-      virtual Bool_t HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets );
-
-      // training method
-      virtual void Train( void );
-
-      using MethodBase::ReadWeightsFromStream;
-
-      // write weights to file
-      void AddWeightsXMLTo( void* parent ) const;
-
-      // read weights from file
-      void ReadWeightsFromStream( istream& istr );
-      void ReadWeightsFromXML   ( void* /*wghtnode*/ ) {}
-
-      // calculate the MVA value
-      virtual Double_t GetMvaValue( Double_t* err = 0 );
-
-      void Init( void );
-
-      // ranking of input variables
-      const Ranking* CreateRanking() { return 0; }
-
-      Double_t EstimatorFunction( std::vector<Double_t>& );
-
-   protected:
-
-      // make ROOT-independent C++ class for classifier response (classifier-specific implementation)
-      virtual void MakeClassSpecific( std::ostream&, const TString& ) const;
-
-      // get help message text
-      void GetHelpMessage() const;
-
-      void MakeListFromStructure( std::vector<Double_t>& linear, 
-                                  std::vector< std::vector< Double_t > >& seeds,
-                                  std::vector<Double_t>& metricParams );
-
-      void MakeStructureFromList( std::vector<Double_t>& linear, 
-                                  std::vector< std::vector< Double_t > >& seeds,
-                                  std::vector<Double_t>& metricParams );
-
-   private:
-
-      // interpret formula expression and compute estimator
-      Double_t InterpretFormula( const Event&, std::vector<Double_t>& pars );
-
-      // clean up 
-      void ClearAll();
-
-      // print fit results
-      void PrintResults( const TString&, std::vector<Double_t>&, const Double_t ) const;
-
-      // the option handling methods
-      void DeclareOptions();
-      void ProcessOptions();
-
-      TString                fSeedRangeStringP;    // string with ranges of parameters      
-      TString                fSeedRangeStringT;    // string with ranges of parameters      
-      Bool_t                 fScalingFactor;
-
-      IMetric*               fMetric;
-      SeedDistance*          fSeedDistance;
-      std::vector< std::vector< Double_t > > fSeeds;    // the pars that optimise (minimise) the estimator
-      std::vector<Double_t>  fMetricPars;         // 
-      std::vector<Double_t>  fPars;           // the pars that optimise (minimise) the estimator
-
-      Int_t                  fDataSeeds;
-      Int_t                  fBackSeeds;
-      TString                fMetricType;
-
-      Bool_t                 fPow2Estimator;
-
-      Int_t                  fNPars;              // number of parameters
-      std::vector<TMVA::Interval*> fParRange;           // ranges of parameters
-      TString                fFitMethod;          // estimator optimisation method
-      TString                fConverger;          // fitmethod uses fConverger as intermediate step to converge into local minimas
-      FitterBase*            fFitter;             // the fitter used in the training
-      IFitterTarget*         fIntermediateFitter; // intermediate fitter
-
-
-      // speed up access to training events by caching
-      std::vector<const Event*>    fEventsSig;          // event cache (signal)
-      std::vector<const Event*>    fEventsBkg;          // event cache (background)
-
-      // sum of weights (this should become centrally available through the dataset)
-      Double_t               fSumOfWeightsSig;    // sum of weights (signal)
-      Double_t               fSumOfWeightsBkg;    // sum of weights (background)
-
-      ClassDef(MethodSeedDistance,0)  // Function Discriminant Analysis
-   };
-
-} // namespace TMVA
-
-#endif // MethodSeedDistance_H
diff --git a/tmva/inc/MetricEuler.h b/tmva/inc/MetricEuler.h
deleted file mode 100644
index b2562c2969e45a1e13f49eb9c5fc082f5b8db33e..0000000000000000000000000000000000000000
--- a/tmva/inc/MetricEuler.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// @(#)root/tmva $Id$ 
-// Author: Peter Speckmayer
-
-/**********************************************************************************
- * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
- * Package: TMVA                                                                  *
- * Class  : MetricEuler                                                         *
- * Web    : http://tmva.sourceforge.net                                           *
- *                                                                                *
- * Description:                                                                   *
- *       Fitter using a Genetic Algorithm                                         *
- *                                                                                *
- * Authors (alphabetical):                                                        *
- *      Peter Speckmayer <speckmay@mail.cern.ch>  - CERN, Switzerland             *
- *                                                                                *
- * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
- *                                                                                *
- * Redistribution and use in source and binary forms, with or without             *
- * modification, are permitted according to the terms listed in LICENSE           *
- * (http://tmva.sourceforge.net/LICENSE)                                          *
- **********************************************************************************/
-
-#ifndef ROOT_TMVA_MetricEuler
-#define ROOT_TMVA_MetricEuler
-
-#include <vector>
-
-#ifndef ROOT_TMVA_IMetric
-#ifndef ROOT_IMetric
-#include "IMetric.h"
-#endif
-#endif
-
-
-//////////////////////////////////////////////////////////////////////////
-//                                                                      //
-// MetricEuler                                                          //
-//                                                                      //
-// distance between two points in parameter space                       //
-//                                                                      //
-//////////////////////////////////////////////////////////////////////////
-
-
-namespace TMVA {
-
-   class IMetric;
-
-   class MetricEuler : public IMetric {
-
-   public:
-
-      MetricEuler();
-      virtual ~MetricEuler() {}
-
-      virtual Double_t Distance( std::vector<Double_t>& pointA, std::vector<Double_t>& pointB );
-
-   private:
-
-      ClassDef(MetricEuler,0) // calculates the "distance" between two points
-   };
-
-} // namespace TMVA
-
-#endif
-
-
diff --git a/tmva/inc/MetricManhattan.h b/tmva/inc/MetricManhattan.h
deleted file mode 100644
index 3467cfed65baaa790cfc5fa65e561a774bb7b46b..0000000000000000000000000000000000000000
--- a/tmva/inc/MetricManhattan.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// @(#)root/tmva $Id$ 
-// Author: Peter Speckmayer
-
-/**********************************************************************************
- * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
- * Package: TMVA                                                                  *
- * Class  : MetricManhattan                                                       *
- * Web    : http://tmva.sourceforge.net                                           *
- *                                                                                *
- * Description:                                                                   *
- *       Fitter using a Genetic Algorithm                                         *
- *                                                                                *
- * Authors (alphabetical):                                                        *
- *      Peter Speckmayer <speckmay@mail.cern.ch>  - CERN, Switzerland             *
- *                                                                                *
- * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
- *                                                                                *
- * Redistribution and use in source and binary forms, with or without             *
- * modification, are permitted according to the terms listed in LICENSE           *
- * (http://tmva.sourceforge.net/LICENSE)                                          *
- **********************************************************************************/
-
-#ifndef ROOT_TMVA_MetricManhattan
-#define ROOT_TMVA_MetricManhattan
-
-#include <vector>
-
-#ifndef ROOT_TMVA_IMetric
-#ifndef ROOT_IMetric
-#include "IMetric.h"
-#endif
-#endif
-
-//////////////////////////////////////////////////////////////////////////
-//                                                                      //
-// MetricManhattan                                                      //
-//                                                                      //
-// distance between two points in parameter space                       //
-//                                                                      //
-//////////////////////////////////////////////////////////////////////////
-
-
-namespace TMVA {
-
-   class IMetric;
-
-   class MetricManhattan : public IMetric {
-
-   public:
-
-      MetricManhattan();
-      virtual ~MetricManhattan() {}
-
-      virtual Double_t Distance( std::vector<Double_t>& pointA, std::vector<Double_t>& pointB );
-
-   private:
-
-      ClassDef(MetricManhattan,0) // calculates the "distance" between two points
-   };
-
-} // namespace TMVA
-
-#endif
-
-
diff --git a/tmva/inc/MsgLogger.h b/tmva/inc/MsgLogger.h
index eae938c6726b3a3b63ab5c5a0c297808a4c65f0a..3e32dc4e842e4cdee310491b1d2fcbd60cc046d7 100644
--- a/tmva/inc/MsgLogger.h
+++ b/tmva/inc/MsgLogger.h
@@ -71,7 +71,7 @@ namespace TMVA {
       std::string GetFormattedSource() const;
 
       static UInt_t GetMaxSourceSize()                    { return (UInt_t)fgMaxSourceSize; }
-      
+
       // Needed for copying
       MsgLogger& operator= ( const MsgLogger& parent );
 
@@ -105,14 +105,14 @@ namespace TMVA {
 
       const TObject*                  fObjSource;      // the source TObject (used for name)
       std::string                     fStrSource;      // alternative string source
-      const std::string               fPrefix;         // the prefix of the source name
-      const std::string               fSuffix;         // suffix following source name
+      static const std::string        fgPrefix;        // the prefix of the source name
+      static const std::string        fgSuffix;        // suffix following source name
       EMsgType                        fActiveType;     // active type
       static UInt_t                   fgMaxSourceSize; // maximum length of source name
       static Bool_t                   fgOutputSupressed; // disable the output globaly (used by generic booster)
 
-      std::map<EMsgType, std::string> fTypeMap;        // matches output types with strings
-      std::map<EMsgType, std::string> fColorMap;       // matches output types with terminal colors
+      static std::map<EMsgType, std::string> fgTypeMap;        // matches output types with strings
+      static std::map<EMsgType, std::string> fgColorMap;       // matches output types with terminal colors
       EMsgType                        fMinType;        // minimum type for output
 
       static Bool_t                   fgInhibitOutput; // flag to suppress all output
diff --git a/tmva/inc/Node.h b/tmva/inc/Node.h
index 502ffd3ed363f24308ea9720148594ff7cb2b380..27338b47d4345254f333b292b00798cb4ebdeb17 100644
--- a/tmva/inc/Node.h
+++ b/tmva/inc/Node.h
@@ -133,7 +133,7 @@ namespace TMVA {
       // set the pointer to the Parent Tree to which the Node belongs 
       void SetParentTree(TMVA::BinaryTree* t) {fParentTree = t;} 
 
-      int GetCount(){return fgCount;}
+      int GetCount();
 
       virtual Bool_t ReadDataRecord( std::istream&, UInt_t tmva_Version_Code = TMVA_VERSION_CODE ) = 0;
       virtual void ReadAttributes(void* node, UInt_t tmva_Version_Code = TMVA_VERSION_CODE  ) = 0;
diff --git a/tmva/inc/PDEFoamDistr.h b/tmva/inc/PDEFoamDistr.h
index e7b9fddac5585914b76cec4945acca82e29f34c3..8b39f8f75632536f5c4870a0eea8fae05828c3aa 100644
--- a/tmva/inc/PDEFoamDistr.h
+++ b/tmva/inc/PDEFoamDistr.h
@@ -74,7 +74,7 @@ namespace TMVA {
       TDensityCalc fDensityCalc;// method of density calculation
 
    protected:
-      Int_t fSignalClass;      // TODO: intermediate solution to keep IsSignal() of Event working. TODO: remove IsSignal() from Event
+      UInt_t fSignalClass;      // TODO: intermediate solution to keep IsSignal() of Event working. TODO: remove IsSignal() from Event
       Int_t fBackgroundClass;  // TODO: intermediate solution to keep IsSignal() of Event working. TODO: remove IsSignal() from Event
 
       mutable MsgLogger* fLogger;                     //! message logger
diff --git a/tmva/inc/PDEFoamVect.h b/tmva/inc/PDEFoamVect.h
index afcd14ed781d037150e8e988b14778eb6c1fbae2..d49740df27bb2c27cdbab9cd1cdd88b6a76143ad 100644
--- a/tmva/inc/PDEFoamVect.h
+++ b/tmva/inc/PDEFoamVect.h
@@ -44,8 +44,8 @@ namespace TMVA {
       Double_t   *fCoords;                  // [fDim] Coordinates
 
    protected:
-      mutable MsgLogger* fLogger;                     //! message logger
-      MsgLogger& Log() const { return *fLogger; }
+      static MsgLogger* fgLogger;                     //! message logger, static because there is a huge number of vectors...
+      MsgLogger& Log() const { return *fgLogger; }
 
    public:
       // constructor
diff --git a/tmva/inc/PDF.h b/tmva/inc/PDF.h
index baf00d03a948b80a4d41d3364c0144e3d1b53a3d..fe0f81e78d7648dbd5495292c2a1464c6b01d2f9 100644
--- a/tmva/inc/PDF.h
+++ b/tmva/inc/PDF.h
@@ -198,8 +198,8 @@ namespace TMVA {
       MsgLogger&               Log() const { return *fLogger; }    
 
       // static pointer to this object
-      static PDF*              fgThisPDF;             // this PDF pointer
-      static PDF*              ThisPDF( void ) { return fgThisPDF; }
+      static PDF*              fgThisPDF;             // this PDF pointer 
+      static PDF*              ThisPDF( void ); 
 
       // external auxiliary functions 
       static Double_t          IGetVal( Double_t*, Double_t* );
diff --git a/tmva/inc/Reader.h b/tmva/inc/Reader.h
index a2b686459ada99a475adece0d0993fcc9ee0593f..fbf06192a985961ed66038e9dc9bcc947126f33b 100644
--- a/tmva/inc/Reader.h
+++ b/tmva/inc/Reader.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$ 
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss, Kai Voss 
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -86,6 +86,9 @@ namespace TMVA {
   
       // book MVA method via weight file
       IMethod* BookMVA( const TString& methodTag, const TString& weightfile );
+#if ROOT_SVN_REVISION >= 32259
+      IMethod* BookMVA( TMVA::Types::EMVA methodType, const char* xmlstr );
+#endif
       IMethod* FindMVA( const TString& methodTag );
       // special function for Cuts to avoid dynamic_casts in ROOT macros, 
       // which are not properly handled by CINT
@@ -94,18 +97,25 @@ namespace TMVA {
 
       // returns the MVA response for given event
       Double_t EvaluateMVA( const std::vector<Float_t> &, const TString& methodTag, Double_t aux = 0 );    
-      Double_t EvaluateMVA( const std::vector<Double_t>&, const TString& methodTag, Double_t aux = 0 );    
+      Double_t EvaluateMVA( const std::vector<Double_t>&, const TString& methodTag, Double_t aux = 0 );
       Double_t EvaluateMVA( MethodBase* method,           Double_t aux = 0 );    
       Double_t EvaluateMVA( const TString& methodTag,     Double_t aux = 0 );    
 
       // returns error on MVA response for given event
       // NOTE: must be called AFTER "EvaluateMVA(...)" call !
       Double_t GetMVAError() const { return fMvaEventError; }
+      Double_t GetMVAError2() const { return fMvaEventError2; } //zjh
 
+      // regression response
       const std::vector< Float_t >& EvaluateRegression( const TString& methodTag, Double_t aux = 0 );
       const std::vector< Float_t >& EvaluateRegression( MethodBase* method, Double_t aux = 0 );
       Float_t  EvaluateRegression( UInt_t tgtNumber, const TString& methodTag, Double_t aux = 0 );
 
+      // multiclass response
+      const std::vector< Float_t >& EvaluateMulticlass( const TString& methodTag, Double_t aux = 0 );
+      const std::vector< Float_t >& EvaluateMulticlass( MethodBase* method, Double_t aux = 0 );
+      Float_t  EvaluateMulticlass( UInt_t clsNumber, const TString& methodTag, Double_t aux = 0 );
+
       // probability and rarity accessors (see Users Guide for definition of Rarity)
       Double_t GetProba ( const TString& methodTag, Double_t ap_sig=0.5, Double_t mvaVal=-9999999 ); 
       Double_t GetRarity( const TString& methodTag, Double_t mvaVal=-9999999 );
@@ -128,6 +138,9 @@ namespace TMVA {
   
    private:
 
+      DataSetManager* fDataSetManager; // DSMTEST
+
+
       TString GetMethodTypeFromFile( const TString& filename );
 
       // this booking method is internal
@@ -151,9 +164,12 @@ namespace TMVA {
       Bool_t    fColor;      // color mode
 
       Double_t  fMvaEventError; // per-event error returned by MVA (unless: -1)
+      Double_t  fMvaEventError2; // per-event error returned by MVA (unless: -1)  //zjh
 
       std::map<TString, IMethod*> fMethodMap; // map of methods
 
+      std::vector<Float_t>        fTmpEvalVec; // temporary evaluation vector (used when the user passes a std::vector<Double_t>)
+
       mutable MsgLogger* fLogger;   // message logger
       MsgLogger& Log() const { return *fLogger; }    
 
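
On the application side the Reader now mirrors the training-side additions: per-class responses via EvaluateMulticlass, a second per-event error via GetMVAError2, and (revision-gated) booking from an in-memory XML string. A hedged usage sketch; the weight-file path and variable names are illustrative:

   #include <vector>
   #include "TMVA/Reader.h"

   void ApplyMulticlass()
   {
      TMVA::Reader reader( "!Color:Silent" );
      Float_t var1 = 0, var2 = 0;
      reader.AddVariable( "var1", &var1 );
      reader.AddVariable( "var2", &var2 );
      reader.BookMVA( "MLP", "weights/TMVAMulticlass_MLP.weights.xml" );

      var1 = 0.3f; var2 = -1.2f;  // fill one event
      const std::vector<Float_t>& resp = reader.EvaluateMulticlass( "MLP" );
      Float_t p0 = reader.EvaluateMulticlass( 0, "MLP" );  // response for class 0
      (void)resp; (void)p0;
   }
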
diff --git a/tmva/inc/ResultsMulticlass.h b/tmva/inc/ResultsMulticlass.h
new file mode 100644
index 0000000000000000000000000000000000000000..8c036cf7720e288b5748ddb8852e3ed63bdc626c
--- /dev/null
+++ b/tmva/inc/ResultsMulticlass.h
@@ -0,0 +1,91 @@
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
+
+/**********************************************************************************
+ * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
+ * Package: TMVA                                                                  *
+ * Class  : ResultsMulticlass                                                     *
+ * Web    : http://tmva.sourceforge.net                                           *
+ *                                                                                *
+ * Description:                                                                   *
+ *      Stores the results of a multiclass classification                         *
+ *                                                                                *
+ * Authors (alphabetical):                                                        *
+ *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch>  - CERN, Switzerland          *
+ *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
+ *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
+ *                                                                                *
+ * Copyright (c) 2006:                                                            *
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
+ *                                                                                *
+ * Redistribution and use in source and binary forms, with or without             *
+ * modification, are permitted according to the terms listed in LICENSE           *
+ * (http://tmva.sourceforge.net/LICENSE)                                          *
+ **********************************************************************************/
+
+#ifndef ROOT_TMVA_ResultsMulticlass
+#define ROOT_TMVA_ResultsMulticlass
+
+//////////////////////////////////////////////////////////////////////////
+//                                                                      //
+// ResultsMulticlass                                                    //
+//                                                                      //
+// Class which takes the results of a multiclass classification         //
+//                                                                      //
+//////////////////////////////////////////////////////////////////////////
+
+#include <vector>
+
+#ifndef ROOT_TH1F
+#include "TH1F.h"
+#endif
+#ifndef ROOT_TH2F
+#include "TH2F.h"
+#endif
+
+#ifndef ROOT_TMVA_Results
+#include "TMVA/Results.h"
+#endif
+#ifndef ROOT_TMVA_Event
+#include "TMVA/Event.h"
+#endif
+
+namespace TMVA {
+
+   class MsgLogger;
+   
+   class ResultsMulticlass : public Results {
+
+   public:
+
+      ResultsMulticlass( const DataSetInfo* dsi );
+      ~ResultsMulticlass();
+
+      // setters
+      void     SetValue( std::vector<Float_t>& value, Int_t ievt );
+      void     Resize( Int_t entries )  { fMultiClassValues.resize( entries ); }
+      void     Clear()                  { fMultiClassValues.clear(); }
+
+      // getters
+      Long64_t GetSize() const        { return fMultiClassValues.size(); }
+      std::vector< Float_t >&              operator [] ( Int_t ievt ) const { return fMultiClassValues.at(ievt); }
+      std::vector<std::vector< Float_t> >* GetValueVector()  { return &fMultiClassValues; }
+
+      Types::EAnalysisType  GetAnalysisType() { return Types::kMulticlass; }
+
+      // histogramming
+      void     MakeHistograms();
+
+
+   private:
+
+      mutable std::vector<std::vector< Float_t> >  fMultiClassValues;        //! mva values (Results)
+      mutable MsgLogger* fLogger;                     //! message logger
+      MsgLogger& Log() const { return *fLogger; }
+   };
+}
+
+#endif
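
The container holds one vector of per-class responses per event: SetValue fills slot ievt, GetValueVector exposes the full matrix to the evaluation and histogramming code. A minimal filling sketch with toy values, using only the interface declared above:

   #include <vector>
   #include "TMVA/ResultsMulticlass.h"

   // Toy filler: a flat response of 1/nClasses for every event.
   void FillToy( TMVA::ResultsMulticlass& res, Int_t nEvents, UInt_t nClasses )
   {
      res.Resize( nEvents );
      for (Int_t ievt = 0; ievt < nEvents; ievt++) {
         std::vector<Float_t> v( nClasses, 1.0f/nClasses );
         res.SetValue( v, ievt );
      }
   }
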
diff --git a/tmva/inc/RuleEnsemble.h b/tmva/inc/RuleEnsemble.h
index 54a6965e90ed2183bae2b2b8b716ad4e5354e5d1..ccc038208c0e77b8daa0217af21d21835dfce0ac 100644
--- a/tmva/inc/RuleEnsemble.h
+++ b/tmva/inc/RuleEnsemble.h
@@ -58,8 +58,8 @@ namespace TMVA {
 
    class TBits;
    class MethodBase;
-   class MethodRuleFit;
    class RuleFit;
+   class MethodRuleFit;
    class RuleEnsemble;
    class MsgLogger;
 
diff --git a/tmva/inc/RuleFitParams.h b/tmva/inc/RuleFitParams.h
index 49e1819eb70762c6a871c6801153e7d2cc2fc0e7..b60c82cf4f8f913d5ec3fcf67e29141774d8b435 100644
--- a/tmva/inc/RuleFitParams.h
+++ b/tmva/inc/RuleFitParams.h
@@ -47,15 +47,13 @@
 #include "TMVA/Event.h"
 #endif
 
-
 class TTree;
 
 namespace TMVA {
 
    class RuleEnsemble;
-   class RuleFit;
    class MsgLogger;
-
+   class RuleFit;
    class RuleFitParams {
 
    public:
@@ -96,7 +94,7 @@ namespace TMVA {
       void SetGDTauPrec( Double_t p )  { fGDTauPrec=p; CalcGDNTau(); fGDTauVec.resize(fGDNTau); }
 
       // return type such that +1 = signal and -1 = background
-      Int_t Type( const Event * e ) const { return (e->IsSignal() ? 1:-1); }
+      Int_t Type( const Event * e ) const; // returns (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(e) ? 1 : -1)
       //
       UInt_t                            GetPathIdx1() const { return fPathIdx1; }
       UInt_t                            GetPathIdx2() const { return fPathIdx2; }
diff --git a/tmva/inc/SeedDistance.h b/tmva/inc/SeedDistance.h
deleted file mode 100644
index d0a4cb9a79b14ccf08837685fe11f6e5649ed005..0000000000000000000000000000000000000000
--- a/tmva/inc/SeedDistance.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// @(#)root/tmva $Id$ 
-// Author: Peter Speckmayer
-
-/**********************************************************************************
- * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
- * Package: TMVA                                                                  *
- * Class  : SeedDistance                                                          *
- * Web    : http://tmva.sourceforge.net                                           *
- *                                                                                *
- * Description:                                                                   *
- *       Searches for the nearest seed                                            *
- *                                                                                *
- * Authors (alphabetical):                                                        *
- *      Peter Speckmayer <speckmay@mail.cern.ch>  - CERN, Switzerland             *
- *                                                                                *
- * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
- *                                                                                *
- * Redistribution and use in source and binary forms, with or without             *
- * modification, are permitted according to the terms listed in LICENSE           *
- * (http://tmva.sourceforge.net/LICENSE)                                          *
- **********************************************************************************/
-
-#ifndef ROOT_TMVA_SeedDistance
-#define ROOT_TMVA_SeedDistance
-
-#include <vector>
-
-#ifndef ROOT_TMVA_IMetric
-#ifndef ROOT_IMetric
-#include "IMetric.h"
-#endif
-#endif
-
-
-//////////////////////////////////////////////////////////////////////////
-//                                                                      //
-// SeedDistance                                                          //
-//                                                                      //
-//                                                                      //
-//////////////////////////////////////////////////////////////////////////
-
-
-namespace TMVA {
-
-   class IMetric;
-
-   class SeedDistance {
-
-   public:
-
-      SeedDistance( IMetric& metric, std::vector< std::vector<Double_t> >& seeds );
-      virtual ~SeedDistance() {}
-
-      std::vector<Double_t>& GetDistances( std::vector<Double_t>& point );
-
-   protected:
-
-      std::vector< std::vector<Double_t> >& fSeeds;
-      std::vector<Double_t> fDistances;
-      IMetric& fMetric; 
-
-   private:
-
-      ClassDef(SeedDistance,0) // 
-   };
-
-} // namespace TMVA
-
-#endif
-
-
diff --git a/tmva/inc/TNeuron.h b/tmva/inc/TNeuron.h
index 9017e29e87df0606cb5340055187fcdd768cfd10..101b1413319e8f4e000d5a547e3fd5ea1a981bfe 100644
--- a/tmva/inc/TNeuron.h
+++ b/tmva/inc/TNeuron.h
@@ -163,8 +163,8 @@ namespace TMVA {
       TActivation*  fActivation;              // activation equation
       TNeuronInput* fInputCalculator;         // input calculator
 
-      mutable MsgLogger* fLogger;                     //! message logger
-      MsgLogger& Log() const { return *fLogger; }                       
+      static MsgLogger* fgLogger;                     //! message logger, static to save resources
+      MsgLogger& Log() const { return *fgLogger; }                       
 
       ClassDef(TNeuron,0) // Neuron class used by MethodANNBase derivative ANNs
    };
diff --git a/tmva/inc/TSynapse.h b/tmva/inc/TSynapse.h
index dbbce3f913257e4743cacf8d11ce304a9e523b6f..66107687d25c588355c8ecfed3e6e8c5c7057a6e 100644
--- a/tmva/inc/TSynapse.h
+++ b/tmva/inc/TSynapse.h
@@ -102,8 +102,8 @@ namespace TMVA {
       TNeuron* fPreNeuron;         // pointer to pre-neuron
       TNeuron* fPostNeuron;        // pointer to post-neuron
 
-      mutable MsgLogger* fLogger;                     //! message logger
-      MsgLogger& Log() const { return *fLogger; }                       
+      static MsgLogger* fgLogger;                     //! message logger, static to save resources
+      MsgLogger& Log() const { return *fgLogger; }                       
 
       ClassDef(TSynapse,0) // Synapse class used by MethodANNBase and derivatives
    };
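Both TNeuron and TSynapse switch from one MsgLogger per object to a single class-wide logger, which matters because an ANN allocates one logger per neuron and per synapse otherwise. A sketch, assuming the corresponding .cxx files (not shown in this section) follow the same lazy-initialization pattern that BinaryTree.cxx uses further down in this patch:

   // definition of the static member in the implementation file (sketch)
   TMVA::MsgLogger* TMVA::TNeuron::fgLogger = 0;

   TMVA::TNeuron::TNeuron()
   {
      // create the shared logger on first use instead of one per neuron
      if (!fgLogger) fgLogger = new MsgLogger("TNeuron");
   }
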
diff --git a/tmva/inc/Tools.h b/tmva/inc/Tools.h
index e6a68f1d670e6dcac1a9958e18097eae5c22104d..f88266e88a474e7fab28fc9c7cfe0ad8e25ad105 100644
--- a/tmva/inc/Tools.h
+++ b/tmva/inc/Tools.h
@@ -57,6 +57,10 @@
 #include "TVectorDfwd.h"
 #endif
 
+#ifndef ROOT_TVectorDfwd
+#include "TVectorDfwd.h"
+#endif
+
 #ifndef ROOT_TMVA_Types
 #include "TMVA/Types.h"
 #endif
@@ -68,6 +72,7 @@ class TH1;
 class TH2;
 class TH2F;
 class TSpline;
+class TXMLEngine;
 
 namespace TMVA {
 
@@ -91,9 +96,11 @@ namespace TMVA {
       static void   DestroyInstance();
 
       // simple statistics operations on tree entries
-      void  ComputeStat( const std::vector<TMVA::Event*>&, std::vector<Float_t>*,
+      void  ComputeStat( const std::vector<TMVA::Event*>&,
+                         std::vector<Float_t>*,
                          Double_t&, Double_t&, Double_t&,
-                         Double_t&, Double_t&, Double_t&, Int_t signalClass, Bool_t norm = kFALSE );
+                         Double_t&, Double_t&, Double_t&, Int_t signalClass,
+                         Bool_t norm = kFALSE );
 
       // compute variance from sums
       inline Double_t ComputeVariance( Double_t sumx2, Double_t sumx, Int_t nx );
@@ -216,17 +223,21 @@ namespace TMVA {
       void        ReadTVectorDFromXML( void* node, const char* name, TVectorD* vec );
       Bool_t      HistoHasEquidistantBins(const TH1& h);
 
+      Bool_t      HasAttr     ( void* node, const char* attrname );
       template<typename T>
-      inline void ReadAttr( void* node, const char* , T& value );
-
-      inline void ReadAttr( void* node, const char* attrname, TString& value );
-
+      inline void ReadAttr    ( void* node, const char* , T& value );
+      void        ReadAttr    ( void* node, const char* attrname, TString& value );
       template<typename T>
       void        AddAttr     ( void* node, const char* , const T& value, Int_t precision = 16 );
+      void        AddAttr     ( void* node, const char* attrname, const char* value );
       void*       AddChild    ( void* parent, const char* childname, const char* content = 0 );
+      Bool_t      AddRawLine  ( void* node, const char * raw );
+      Bool_t      AddComment  ( void* node, const char* comment );
+
       void*       GetChild    ( void* parent, const char* childname=0 );
       void*       GetNextChild( void* prevchild, const char* childname=0 );
       const char* GetContent  ( void* node );
+      const char* GetName     ( void* node );
 
       TXMLEngine& xmlengine() { return *fXMLEngine; }
       TXMLEngine* fXMLEngine;
@@ -244,31 +255,25 @@ namespace TMVA {
 
 //_______________________________________________________________________
 template<typename T>
-void TMVA::Tools::ReadAttr( void* node, const char* attrname, T& value ) 
+void TMVA::Tools::ReadAttr( void* node, const char* attrname, T& value )
 {
-   // add attribute from xml
-   const char* val = xmlengine().GetAttr(node, attrname);
-   std::stringstream s(val);
+   // read attribute from xml
+   TString val;
+   ReadAttr( node, attrname, val );
+   std::stringstream s(val.Data());
    s >> value;
 }
 
-//_______________________________________________________________________
-void TMVA::Tools::ReadAttr( void* node, const char* attrname, TString& value ) 
-{
-   // add attribute from xml
-   const char* val = xmlengine().GetAttr(node, attrname);
-   value = TString(val);
-}
 
 //_______________________________________________________________________
-template<typename T> 
-void TMVA::Tools::AddAttr( void* node, const char* attrname, const T& value, Int_t precision ) 
+template<typename T>
+void TMVA::Tools::AddAttr( void* node, const char* attrname, const T& value, Int_t precision )
 {
    // add attribute to xml
    std::stringstream s;
    s.precision( precision );
    s << std::scientific << value;
-   gTools().xmlengine().NewAttr(node, 0, attrname, s.str().c_str());
+   AddAttr( node, attrname, s.str().c_str() );
 }
 
 //_______________________________________________________________________
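With this change every typed attribute read funnels through the single non-template ReadAttr(..., TString&) overload, so only one routine (now defined in Tools.cxx) touches TXMLEngine directly; AddAttr delegates to the new const char* overload in the same way. A usage sketch, with illustrative node and attribute names:

   // the template converts via the TString overload and a stringstream
   Int_t   nvar = 0;
   TString name;
   TMVA::gTools().ReadAttr( node, "NVar", nvar );  // template instantiation
   TMVA::gTools().ReadAttr( node, "Name", name );  // non-template overload in Tools.cxx
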
diff --git a/tmva/inc/Types.h b/tmva/inc/Types.h
index e6b3d7a74c6cd13b5e40c1979380212b10f2a016..33ecfba7330056fa43be62b801e8fb0a0c57ff37 100644
--- a/tmva/inc/Types.h
+++ b/tmva/inc/Types.h
@@ -12,6 +12,7 @@
  *                                                                                *
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch>  - CERN, Switzerland          *
  *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *                                                                                *
@@ -73,7 +74,6 @@ namespace TMVA {
       enum EMVA {
          kVariable    = 0,
          kCuts           ,     
-         kSeedDistance   ,     
          kLikelihood     ,
          kPDERS          ,
          kHMatrix        ,
@@ -143,8 +143,8 @@ namespace TMVA {
 
    public:
 
-      static Types& Instance() { return fgTypesPtr ? *fgTypesPtr : *(fgTypesPtr = new Types()); }
-      static void   DestroyInstance() { if (fgTypesPtr != 0) { delete fgTypesPtr; fgTypesPtr = 0; } }
+      static Types& Instance();
+      static void   DestroyInstance(); 
       ~Types();
 
       Types::EMVA   GetMethodType( const TString& method ) const;
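Instance() and DestroyInstance() lose their inline bodies; the definitions presumably move to Types.cxx (that hunk is not part of this section). Judging from the removed inline code, they stay equivalent to:

   // sketch of the out-of-line definitions, based on the removed inline bodies
   TMVA::Types& TMVA::Types::Instance()
   {
      return fgTypesPtr ? *fgTypesPtr : *(fgTypesPtr = new Types());
   }

   void TMVA::Types::DestroyInstance()
   {
      if (fgTypesPtr != 0) { delete fgTypesPtr; fgTypesPtr = 0; }
   }
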
diff --git a/tmva/inc/VariableNormalizeTransform.h b/tmva/inc/VariableNormalizeTransform.h
index 805c9f043b03c7355b1d1757e6577fb67a5621a2..49e06bc7b879ab6fb6a89e143056929d93cb2e1d 100644
--- a/tmva/inc/VariableNormalizeTransform.h
+++ b/tmva/inc/VariableNormalizeTransform.h
@@ -49,7 +49,10 @@ namespace TMVA {
    class VariableNormalizeTransform : public VariableTransformBase {
 
    public:
-  
+
+      typedef std::vector<Float_t>       FloatVector;
+      typedef std::vector< FloatVector > VectorOfFloatVectors;
+
       VariableNormalizeTransform( DataSetInfo& dsi );
       virtual ~VariableNormalizeTransform( void );
 
@@ -80,8 +83,8 @@ namespace TMVA {
 
       //      mutable Event*           fTransformedEvent;
 
-      std::vector< std::vector<Float_t> >    fMin;       //! Min of source range
-      std::vector< std::vector<Float_t> >    fMax;       //! Max of source range
+      VectorOfFloatVectors                   fMin;       //! Min of source range
+      VectorOfFloatVectors                   fMax;       //! Max of source range
 
       ClassDef(VariableNormalizeTransform,0) // Variable transformation: normalization
    };
diff --git a/tmva/inc/VariablePCATransform.h b/tmva/inc/VariablePCATransform.h
index 93e5983beda3e4505dc79808d97149385521f614..5ba2224c74d1907cc3759e43a4f67915dad7f700 100644
--- a/tmva/inc/VariablePCATransform.h
+++ b/tmva/inc/VariablePCATransform.h
@@ -71,7 +71,8 @@ namespace TMVA {
    private:
 
       void CalculatePrincipalComponents( const std::vector<Event*>& );
-      std::vector<Float_t> X2P( const std::vector<Float_t>&, Int_t cls ) const;
+      void X2P( std::vector<Float_t>&, const std::vector<Float_t>&, Int_t cls ) const;
+      void P2X( std::vector<Float_t>&, const std::vector<Float_t>&, Int_t cls ) const;
 
       //      mutable Event*     fTransformedEvent;
 
diff --git a/tmva/inc/VariableTransformBase.h b/tmva/inc/VariableTransformBase.h
index 9e6098cc3e488558b66cf6228a78a4003f49317c..f318bb6cc54d5db1726d1287cc722f5e771da3f2 100644
--- a/tmva/inc/VariableTransformBase.h
+++ b/tmva/inc/VariableTransformBase.h
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -12,6 +12,7 @@
  *                                                                                *
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch> - CERN, Switzerland           *
  *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *                                                                                *
@@ -67,6 +68,10 @@ namespace TMVA {
 
    public:
 
+      typedef std::vector<std::pair<Char_t,UInt_t> > VectorOfCharAndInt;
+      typedef VectorOfCharAndInt::iterator       ItVarTypeIdx;
+      typedef VectorOfCharAndInt::const_iterator ItVarTypeIdxConst;
+
       VariableTransformBase( DataSetInfo& dsi, Types::EVariableTransform tf, const TString& trfName );
       virtual ~VariableTransformBase( void );
 
@@ -82,6 +87,12 @@ namespace TMVA {
       Bool_t IsCreated()    const { return fCreated; }
       Bool_t IsNormalised() const { return fNormalise; }
 
+      // variable selection
+      virtual void           SelectInput( const TString& inputVariables  );
+      virtual void           GetInput ( const Event* event, std::vector<Float_t>& input  ) const;
+      virtual void           SetOutput( Event* event, std::vector<Float_t>& output, const Event* oldEvent = 0 ) const;
+      virtual void           CountVariableTypes( UInt_t& nvars, UInt_t& ntgts, UInt_t& nspcts );
+
       void SetUseSignalTransform( Bool_t e=kTRUE) { fUseSignalTransform = e; }
       Bool_t UseSignalTransform() const { return fUseSignalTransform; }
 
@@ -91,14 +102,14 @@ namespace TMVA {
       virtual void WriteTransformationToStream ( std::ostream& o ) const = 0;
       virtual void ReadTransformationFromStream( std::istream& istr, const TString& classname="" ) = 0;
 
-      virtual void AttachXMLTo(void* parent) = 0;
-      virtual void ReadFromXML( void* trfnode ) = 0;
+      virtual void AttachXMLTo(void* parent);
+      virtual void ReadFromXML( void* trfnode );
 
       Types::EVariableTransform GetVariableTransform() const { return fVariableTransform; }
 
       // writer of function code
       virtual void MakeFunction( std::ostream& fout, const TString& fncName, Int_t part,
-                                 UInt_t trCounter, Int_t cls ) = 0;
+                                 UInt_t trCounter, Int_t cls );
 
       // provides string vector giving explicit transformation
       virtual std::vector<TString>* GetTransformationStrings( Int_t cls ) const;
@@ -119,8 +130,9 @@ namespace TMVA {
       void SetNVariables( UInt_t i )      { fNVars = i; }
       void SetName( const TString& c )    { fTransformName = c; }
 
-      UInt_t GetNVariables() const { return fDsi.GetNVariables(); }
-      UInt_t GetNTargets()   const { return fDsi.GetNTargets(); }
+      UInt_t GetNVariables()  const { return fDsi.GetNVariables();  }
+      UInt_t GetNTargets()    const { return fDsi.GetNTargets();    }
+      UInt_t GetNSpectators() const { return fDsi.GetNSpectators(); }
 
       DataSetInfo& fDsi;
 
@@ -132,6 +144,10 @@ namespace TMVA {
       mutable Event*           fTransformedEvent;     // holds the current transformed event
       mutable Event*           fBackTransformedEvent; // holds the current back-transformed event
 
+      // variable selection
+      VectorOfCharAndInt               fGet;           // get variables/targets/spectators
+
+
    private:
 
       Types::EVariableTransform fVariableTransform;  // Decorrelation, PCA, etc.
@@ -147,6 +163,7 @@ namespace TMVA {
       std::vector<TMVA::VariableInfo>  fVariables;          // event variables [saved to weight file]
       std::vector<TMVA::VariableInfo>  fTargets;            // event targets [saved to weight file --> TODO ]
 
+
    protected:
 
       TMVAVersion_t                    fTMVAVersion;
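The new fGet vector pairs a type tag with an index so a transformation can address an arbitrary mix of inputs. A sketch of how GetInput() presumably consumes it; the tag values 'v' (variable), 't' (target) and 's' (spectator) and the Event accessors are assumptions, not shown in this header:

   for (ItVarTypeIdxConst it = fGet.begin(); it != fGet.end(); ++it) {
      switch (it->first) {                                             // assumed type tags
      case 'v': input.push_back( event->GetValue    ( it->second ) ); break;
      case 't': input.push_back( event->GetTarget   ( it->second ) ); break;
      case 's': input.push_back( event->GetSpectator( it->second ) ); break;
      }
   }
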
diff --git a/tmva/inc/Version.h b/tmva/inc/Version.h
index 2a32abe03d6cd264453d8357250c483806274630..8d5f8466c8e0d5ee20ad62d9df227e642e47f458 100644
--- a/tmva/inc/Version.h
+++ b/tmva/inc/Version.h
@@ -38,10 +38,10 @@
 //                                                                      //
 //////////////////////////////////////////////////////////////////////////
 
-#define TMVA_RELEASE      "4.0.4"
-#define TMVA_RELEASE_DATE "Dec 11, 2009"
-#define TMVA_RELEASE_TIME "10:19:58"
-#define TMVA_VERSION_CODE 262148
+#define TMVA_RELEASE      "4.0.7"
+#define TMVA_RELEASE_DATE "Jun 14, 2010"
+#define TMVA_RELEASE_TIME "17:20:00"
+#define TMVA_VERSION_CODE 262151
 #define TMVA_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
 
 #endif
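The two defines stay consistent with the packing macro: TMVA_VERSION(4,0,7) = (4 << 16) + (0 << 8) + 7 = 262144 + 0 + 7 = 262151, the new TMVA_VERSION_CODE (and the old 262148 decodes back to 4.0.4). Client code can therefore gate on the release numerically:

   #include "TMVA/Version.h"

   #if TMVA_VERSION_CODE >= TMVA_VERSION(4,0,7)
   // code relying on behaviour introduced with this release
   #endif
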
diff --git a/tmva/src/BinarySearchTree.cxx b/tmva/src/BinarySearchTree.cxx
index f46c72a496c61155dfc018b57ca9f710a06e6bbd..ed3b8599db4c48ed0c1c8283e7af79a07cf603e0 100644
--- a/tmva/src/BinarySearchTree.cxx
+++ b/tmva/src/BinarySearchTree.cxx
@@ -27,7 +27,7 @@
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  *                                                                                *
  **********************************************************************************/
-      
+
 //////////////////////////////////////////////////////////////////////////
 //                                                                      //
 // BinarySearchTree                                                     //
@@ -85,7 +85,7 @@ TMVA::BinarySearchTree::BinarySearchTree( void ) :
    fCanNormalize( kFALSE )
 {
    // default constructor
-   fLogger->SetSource( "BinarySearchTree" );
+   fNEventsW[0]=fNEventsW[1]=0.;
 }
 
 //_______________________________________________________________________
@@ -98,7 +98,7 @@ TMVA::BinarySearchTree::BinarySearchTree( const BinarySearchTree &b)
      fCanNormalize( kFALSE )
 {
    // copy constructor that creates a true copy, i.e. a completely independent tree 
-   fLogger->SetSource( "BinarySearchTree" );
+   fNEventsW[0]=fNEventsW[1]=0.;
    Log() << kFATAL << " Copy constructor not implemented yet " << Endl;
 }
 
diff --git a/tmva/src/BinarySearchTreeNode.cxx b/tmva/src/BinarySearchTreeNode.cxx
index 6bf20cb2d5faea17d02ad9fbd2362570a2893ff0..a77f8d68be4adff5a4a143553062ec7fd32dd08d 100644
--- a/tmva/src/BinarySearchTreeNode.cxx
+++ b/tmva/src/BinarySearchTreeNode.cxx
@@ -55,7 +55,7 @@ TMVA::BinarySearchTreeNode::BinarySearchTreeNode( const Event* e )
      fEventV  ( std::vector<Float_t>() ),
      fTargets ( std::vector<Float_t>() ),
      fWeight  ( e==0?0:e->GetWeight()  ),
-     fClass   ( e==0?1:(e->IsSignal()?0:1) ), // see BinarySearchTree.h, line Mean() RMS() Min() and Max()
+     fClass   ( e==0?0:e->GetClass() ), // see BinarySearchTree.h: Mean(), RMS(), Min() and Max()
      fSelector( -1 )
 {
    // constructor of a node for the search tree
@@ -73,7 +73,7 @@ TMVA::BinarySearchTreeNode::BinarySearchTreeNode( BinarySearchTreeNode* parent,
    fEventV  ( std::vector<Float_t>() ),
    fTargets ( std::vector<Float_t>() ),
    fWeight  ( 0  ),
-   fClass   ( -1 ),
+   fClass   ( 0 ),
    fSelector( -1 )
 {
    // constructor of a daughter node as a daughter of 'p'
@@ -233,9 +233,11 @@ void TMVA::BinarySearchTreeNode::AddAttributesToNode(void* node) const {
 void TMVA::BinarySearchTreeNode::AddContentToNode( std::stringstream& s ) const 
 {
   // adding the event values and targets to the node content
+   std::ios_base::fmtflags ff = s.flags();
    s.precision( 16 );
-   for (UInt_t i=0; i<fEventV.size();  i++) s << std::scientific << " " << fEventV[i];   
+   for (UInt_t i=0; i<fEventV.size();  i++) s << std::scientific << " " << fEventV[i];
    for (UInt_t i=0; i<fTargets.size(); i++) s << std::scientific << " " << fTargets[i];
+   s.flags(ff);
 }
 //_______________________________________________________________________
 void TMVA::BinarySearchTreeNode::ReadContent( std::stringstream& s ) 
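Saving and restoring the format flags keeps the std::scientific manipulator from leaking into whatever is written to the same stringstream afterwards; note that flags() does not cover precision(), which this hunk still changes permanently. A self-contained illustration of the pattern:

   #include <sstream>

   void WriteScientific( std::stringstream& s, double v )
   {
      std::ios_base::fmtflags ff = s.flags(); // remember float format etc.
      s.precision( 16 );
      s << std::scientific << " " << v;       // temporary formatting
      s.flags( ff );                          // scientific no longer sticks
   }
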
diff --git a/tmva/src/BinaryTree.cxx b/tmva/src/BinaryTree.cxx
index 3de764dc95874f45c54899df2d8f4ce405803929..642428b386e0ed2dab6e9b3c6f0daeb9752ccf7a 100644
--- a/tmva/src/BinaryTree.cxx
+++ b/tmva/src/BinaryTree.cxx
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$    
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -17,10 +17,10 @@
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      DESY, Germany                                                             * 
- *      U. of Victoria, Canada                                                    * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      DESY, Germany                                                             *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
@@ -47,28 +47,29 @@
 
 ClassImp(TMVA::BinaryTree)
 
+TMVA::MsgLogger* TMVA::BinaryTree::fgLogger = 0;
+
 //_______________________________________________________________________
 TMVA::BinaryTree::BinaryTree( void )
-   : fRoot  ( NULL ), 
+   : fRoot  ( NULL ),
      fNNodes( 0 ),
-     fDepth ( 0 ),
-     fLogger( new MsgLogger("BinaryTree") )
+     fDepth ( 0 )
 {
    // constructor for a yet "empty" tree. Needs to be filled afterwards
+   if (!fgLogger) fgLogger = new MsgLogger("BinaryTree");
 }
 
 //_______________________________________________________________________
-TMVA::BinaryTree::~BinaryTree( void ) 
+TMVA::BinaryTree::~BinaryTree( void )
 {
   // destructor (deletes the nodes and "events" if owned by the tree)
    this->DeleteNode( fRoot );
-   delete fLogger;
    fRoot=0;
 }
 
 //_______________________________________________________________________
 void TMVA::BinaryTree::DeleteNode( TMVA::Node* node )
-{ 
+{
    // protected, recursive, function used by the class destructor and when Pruning
    if (node != NULL) { //If the node is not NULL...
       this->DeleteNode(node->GetLeft());  //Delete its left node.
@@ -100,7 +101,7 @@ UInt_t TMVA::BinaryTree::CountNodes(TMVA::Node *n)
    if (n == NULL){ //default, start at the tree top, then descend recursively
       n = (Node*)this->GetRoot();
       if (n == NULL) return 0 ;
-   } 
+   }
 
    UInt_t countNodes=1;
 
@@ -125,7 +126,8 @@ void TMVA::BinaryTree::Print(ostream & os) const
 //_______________________________________________________________________
 void* TMVA::BinaryTree::AddXMLTo(void* parent) const {
    // add attributes to XML
-   void* bdt = gTools().xmlengine().NewChild(parent, 0, "BinaryTree");
+
+   void* bdt = gTools().AddChild(parent, "BinaryTree");
    gTools().AddAttr( bdt, "type" , ClassName() );
    this->GetRoot()->AddXMLTo(bdt);
    return bdt;
@@ -136,7 +138,7 @@ void TMVA::BinaryTree::ReadXML(void* node, UInt_t tmva_Version_Code ) {
    // read attributes from XML
    this->DeleteNode( fRoot );
    fRoot= CreateNode();
-   void* trnode = gTools().xmlengine().GetChild(node);
+   void* trnode = gTools().GetChild(node);
    fRoot->ReadXML(trnode, tmva_Version_Code);
    this->SetTotalTreeDepth();
 }
diff --git a/tmva/src/CCTreeWrapper.cxx b/tmva/src/CCTreeWrapper.cxx
index 197403207d740e1c8f1146b5d53dd1aa74562c12..c712a03bdf26fe6938048fa772c9f3613bb15eed 100644
--- a/tmva/src/CCTreeWrapper.cxx
+++ b/tmva/src/CCTreeWrapper.cxx
@@ -28,22 +28,22 @@
 using namespace TMVA;
 
 //_______________________________________________________________________
-TMVA::CCTreeWrapper::CCTreeNode::CCTreeNode( DecisionTreeNode* n ) : fDTNode(n) {
+TMVA::CCTreeWrapper::CCTreeNode::CCTreeNode( DecisionTreeNode* n ) :
+   Node(),
+   fNLeafDaughters(0),
+   fNodeResubstitutionEstimate(-1.0),
+   fResubstitutionEstimate(-1.0),
+   fAlphaC(-1.0),
+   fMinAlphaC(-1.0),
+   fDTNode(n)
+{
    //constructor of the CCTreeNode
-
-   if(((DecisionTreeNode*) n->GetRight()) != NULL &&
-      ((DecisionTreeNode*) n->GetLeft()) != NULL ) {
+   if ( n != NULL && n->GetRight() != NULL && n->GetLeft() != NULL ) {
       SetRight( new CCTreeNode( ((DecisionTreeNode*) n->GetRight()) ) );
       GetRight()->SetParent(this);
       SetLeft( new CCTreeNode( ((DecisionTreeNode*) n->GetLeft()) ) );
       GetLeft()->SetParent(this);
    }
-
-   fNLeafDaughters = 0;
-   fNodeResubstitutionEstimate = -1.0;
-   fResubstitutionEstimate = -1.0;
-   fAlphaC = -1.0;
-   fMinAlphaC = -1.0;
 }
 
 //_______________________________________________________________________
@@ -178,7 +178,7 @@ Double_t TMVA::CCTreeWrapper::TestTreeQuality( const EventList* validationSample
    for (UInt_t ievt=0; ievt < validationSample->size(); ievt++) {
       Bool_t isSignalType = (CheckEvent(*(*validationSample)[ievt]) > fDTParent->GetNodePurityLimit() ) ? 1 : 0;
       
-      if (isSignalType == (*validationSample)[ievt]->IsSignal()) {
+      if (isSignalType == ((*validationSample)[ievt]->GetClass() == 0)) {
          ncorrect += (*validationSample)[ievt]->GetWeight();
       }
       else{
@@ -202,7 +202,7 @@ Double_t TMVA::CCTreeWrapper::TestTreeQuality( const DataSet* validationSample )
 
       Bool_t isSignalType = (CheckEvent(*ev) > fDTParent->GetNodePurityLimit() ) ? 1 : 0;
       
-      if (isSignalType == ev->IsSignal()) {
+      if (isSignalType == (ev->GetClass() == 0)) {
          ncorrect += ev->GetWeight();
       }
       else{
diff --git a/tmva/src/ClassifierFactory.cxx b/tmva/src/ClassifierFactory.cxx
index 78e5edd8d6b78a9cb6629a9278d233578dd1414c..734adb645de9d2bf7d277839f9a1d124e12cf0e2 100644
--- a/tmva/src/ClassifierFactory.cxx
+++ b/tmva/src/ClassifierFactory.cxx
@@ -144,5 +144,5 @@ void TMVA::ClassifierFactory::Print() const
    std::cout << "Print: ClassifierFactory<> knows about " << fCalls.size() << " objects" << std::endl;  
 
    CallMap::const_iterator it = fCalls.begin();
-   for (; it != fCalls.end(); ++it) std::cout << "Registerted object name " << it -> first << std::endl;
+   for (; it != fCalls.end(); ++it) std::cout << "Registered object name " << it -> first << std::endl;
 }
diff --git a/tmva/src/Config.cxx b/tmva/src/Config.cxx
index 367d5373c936e3f14d48198436d06a4edeeea959..f9def192ba991898ffcd76ee0b5824529540dd01 100644
--- a/tmva/src/Config.cxx
+++ b/tmva/src/Config.cxx
@@ -66,17 +66,3 @@ TMVA::Config::~Config()
    delete fLogger;
 }
 
-//_______________________________________________________________________
-void TMVA::Config::DestroyInstance()
-{
-   // static function: destroy TMVA instance
-   if (fgConfigPtr != 0) { delete fgConfigPtr; fgConfigPtr = 0;}
-}
-
-//_______________________________________________________________________
-TMVA::Config& TMVA::Config::Instance()
-{
-   // static function: returns  TMVA instance
-   return fgConfigPtr ? *fgConfigPtr :*(fgConfigPtr = new Config());
-}
-
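The out-of-line singleton accessors disappear from Config.cxx; their definitions must now live elsewhere, presumably inline in Config.h (that hunk is not part of this section). Based on the removed bodies they remain equivalent to:

   // sketch of the assumed inline replacements in Config.h
   static Config& Instance()        { return fgConfigPtr ? *fgConfigPtr : *(fgConfigPtr = new Config()); }
   static void    DestroyInstance() { if (fgConfigPtr != 0) { delete fgConfigPtr; fgConfigPtr = 0; } }
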
diff --git a/tmva/src/Configurable.cxx b/tmva/src/Configurable.cxx
index 37a9ecaa8c0683d71615a6864d9fa5b7216c1d1e..e5b9b2b882f6a43a6f8a381492f658eca9951232 100644
--- a/tmva/src/Configurable.cxx
+++ b/tmva/src/Configurable.cxx
@@ -46,15 +46,14 @@ End_Html */
 #include "TMatrix.h"
 #include "TMath.h"
 #include "TFile.h"
-#include "TKey.h" 
-#include "TXMLEngine.h" 
+#include "TKey.h"
 
 #include "TMVA/Configurable.h"
 #include "TMVA/Config.h"
 #include "TMVA/Tools.h"
 
 // don't change this flag without a good reason ! The FitterBase code won't work anymore !!!
-// #define TMVA_Configurable_SanctionUnknownOption kTRUE 
+// #define TMVA_Configurable_SanctionUnknownOption kTRUE
 
 ClassImp(TMVA::Configurable)
 
@@ -64,17 +63,17 @@ ClassImp(TMVA::Configurable)
 #endif
 
 //_______________________________________________________________________
-TMVA::Configurable::Configurable( const TString& theOption)  
+TMVA::Configurable::Configurable( const TString& theOption)
    : fOptions                    ( theOption ),
      fLooseOptionCheckingEnabled ( kTRUE ),
      fLastDeclaredOption         ( 0 ),
      fConfigName                 ( "Configurable" ), // must be replaced by name of class that uses the configurable
-     fConfigDescription          ( "No description" ), 
+     fConfigDescription          ( "No description" ),
      fReferenceFile              ( "None" ),
      fLogger                     ( new MsgLogger(this) )
 {
    // constructor
-   fListOfOptions.SetOwner();   
+   fListOfOptions.SetOwner();
 
    // check if verbosity "V" set in option
    if (gTools().CheckForVerboseOption( theOption )) Log().SetMinType( kVERBOSE );
@@ -191,7 +190,7 @@ void TMVA::Configurable::ParseOptions()
                   else {
                     // since we don't know what else is coming we just put everything into a map
                      if (!decOpt->SetValue(optval, idx))
-                        Log() << kFATAL << "Index " << idx << " too large for option " << decOpt->TheName()
+                        Log() << kFATAL << "Index " << idx << " too large (" << optval << ") for option " << decOpt->TheName()
                                 << ", allowed range is [0," << decOpt->GetArraySize()-1 << "]" << Endl;
                   }
                } 
@@ -336,7 +335,7 @@ void TMVA::Configurable::AddOptionsXMLTo( void* parent ) const
 {
    // write options to XML file
    if (!parent) return;
-   void* opts = gTools().xmlengine().NewChild(parent, 0, "Options");
+   void* opts = gTools().AddChild(parent, "Options");
    TListIter optIt( &fListOfOptions );
    while (OptionBase * opt = (OptionBase *) optIt()) {
       void* optnode = 0;
@@ -347,10 +346,10 @@ void TMVA::Configurable::AddOptionsXMLTo( void* parent ) const
             if(i>0) s << " ";
             s << std::scientific << opt->GetValue(i);
          }
-         optnode = gTools().xmlengine().NewChild(opts,0,"Option",s.str().c_str());
-      } 
+         optnode = gTools().AddChild(opts,"Option",s.str().c_str());
+      }
       else {
-         optnode = gTools().xmlengine().NewChild(opts,0,"Option", opt->GetValue());
+         optnode = gTools().AddChild(opts,"Option", opt->GetValue());
       }
       gTools().AddAttr(optnode, "name", opt->TheName());
       if (opt->IsArrayOpt()) {
@@ -361,18 +360,18 @@ void TMVA::Configurable::AddOptionsXMLTo( void* parent ) const
 }
 
 //______________________________________________________________________
-void TMVA::Configurable::ReadOptionsFromXML( void* node ) 
+void TMVA::Configurable::ReadOptionsFromXML( void* node )
 {
-   void* opt = gTools().xmlengine().GetChild(node);
+   void* opt = gTools().GetChild(node);
    TString optName, optValue;
    fOptions="";
    while (opt != 0) {
       if (fOptions.Length()!=0) fOptions += ":";
       gTools().ReadAttr(opt, "name", optName);
-      optValue = TString( gTools().xmlengine().GetNodeContent(opt) );
+      optValue = TString( gTools().GetContent(opt) );
       std::stringstream s("");
       s.precision( 16 );
-      if (gTools().xmlengine().HasAttr(opt, "size")) {
+      if (gTools().HasAttr(opt, "size")) {
          UInt_t size;
          gTools().ReadAttr(opt, "size", size);
          std::vector<TString> values = gTools().SplitString(optValue, ' ');
@@ -380,17 +379,17 @@ void TMVA::Configurable::ReadOptionsFromXML( void* node )
             if(i!=0) s << ":";
             s << std::scientific << optName << "[" << i << "]=" << values[i];
          }
-      } 
+      }
       else {
          s << std::scientific << optName << "=" << optValue;
       }
       fOptions += s.str().c_str();
-      opt = gTools().xmlengine().GetNext(opt);
+      opt = gTools().GetNextChild(opt);
    }
 }
 
 //______________________________________________________________________
-void TMVA::Configurable::WriteOptionsReferenceToFile()  
+void TMVA::Configurable::WriteOptionsReferenceToFile()
 {
    // write complete options to output stream
 
diff --git a/tmva/src/DataSet.cxx b/tmva/src/DataSet.cxx
index af346e854c0bb8a48e0d3ed03d59366c1b2fd686..b8320519295825a7b35a844a6fbe29c3962e3989 100644
--- a/tmva/src/DataSet.cxx
+++ b/tmva/src/DataSet.cxx
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -47,6 +47,9 @@
 #ifndef ROOT_TMVA_ResultsClassification
 #include "TMVA/ResultsClassification.h"
 #endif
+#ifndef ROOT_TMVA_ResultsMulticlass
+#include "TMVA/ResultsMulticlass.h"
+#endif
 #ifndef ROOT_TMVA_Configurable
 #include "TMVA/Configurable.h"
 #endif
@@ -58,7 +61,8 @@ TMVA::DataSet::DataSet(const DataSetInfo& dsi)
      fCurrentTreeIdx(0),
      fCurrentEventIdx(0),
      fHasNegativeEventWeights(kFALSE),
-     fLogger( new MsgLogger(TString(TString("Dataset:")+dsi.GetName()).Data()) )
+     fLogger( new MsgLogger(TString(TString("Dataset:")+dsi.GetName()).Data()) ),
+     fTrainingBlockSize(0)
 {
    // constructor
    for (UInt_t i=0; i<4; i++) fEventCollection[i] = new std::vector<Event*>();
@@ -266,8 +270,7 @@ TMVA::Results* TMVA::DataSet::GetResults( const TString & resultsName,
       newresults = new ResultsRegression(&fdsi);
       break;
    case Types::kMulticlass:
-//      newresults = new ResultsMulticlass(&fdsi);
-      newresults = new Results(&fdsi);
+      newresults = new ResultsMulticlass(&fdsi);
       break;
    case Types::kNoAnalysisType:
       newresults = new Results(&fdsi);
@@ -425,9 +428,9 @@ void TMVA::DataSet::CreateSampling() const
 
    if (!fSampling.at(treeIdx) ) return;
 
-   if (fSamplingRandom == 0 ) 
-      Log() << kWARNING 
-              << "no random generator present for creating a random/importance sampling (initialized?)" << Endl;
+   if (fSamplingRandom == 0 )
+      Log() << kFATAL
+            << "no random generator present for creating a random/importance sampling (initialized?)" << Endl;
 
    // delete the previous selection
    fSamplingSelected.at(treeIdx).clear();
@@ -554,10 +557,10 @@ TTree* TMVA::DataSet::GetTree( Types::ETreeType type )
    // replace by:  [Joerg]
    Float_t **metVals = new Float_t*[fResults.at(t).size()];
    for(UInt_t i=0; i<fResults.at(t).size(); i++ )
-      metVals[i] = new Float_t[fdsi.GetNTargets()+1];
+      metVals[i] = new Float_t[fdsi.GetNTargets()+fdsi.GetNClasses()];
 
    // create branches for event-variables
-   tree->Branch( "class", &cls, "class/I" ); 
+   tree->Branch( "classID", &cls, "classID/I" ); 
    classNameBranch = tree->Branch( "className",(void*)className, "className/C" ); 
 
 
@@ -593,11 +596,27 @@ TTree* TMVA::DataSet::GetTree( Types::ETreeType type )
    n = 0;
    for (std::map< TString, Results* >::iterator itMethod = fResults.at(t).begin(); 
         itMethod != fResults.at(t).end(); itMethod++) {
+
+
+      Log() << "analysis type " << (itMethod->second->GetAnalysisType()==Types::kRegression ? "Regression" :
+				     (itMethod->second->GetAnalysisType()==Types::kMulticlass ? "multiclass" : "classification" )) << Endl;
+
+
       if (itMethod->second->GetAnalysisType() == Types::kClassification) {
          // classification
          tree->Branch( itMethod->first, &(metVals[n][0]), itMethod->first + "/F" );
-      }
-      else if (itMethod->second->GetAnalysisType() == Types::kRegression) {
+      } else if (itMethod->second->GetAnalysisType() == Types::kMulticlass) {
+         // multiclass classification
+         TString leafList("");
+         for (UInt_t iCls = 0; iCls < fdsi.GetNClasses(); iCls++) {
+            if (iCls > 0) leafList.Append( ":" );
+            leafList.Append( fdsi.GetClassInfo( iCls )->GetName() );
+            leafList.Append( "/F" );
+         }
+         Log() << kDEBUG << "itMethod->first " << itMethod->first <<  "    LEAFLIST: " 
+               << leafList << "    itMethod->second " << itMethod->second <<  Endl;
+         tree->Branch( itMethod->first, (metVals[n]), leafList );
+      } else if (itMethod->second->GetAnalysisType() == Types::kRegression) {
          // regression
          TString leafList("");
          for (UInt_t iTgt = 0; iTgt < fdsi.GetNTargets(); iTgt++) {
@@ -644,6 +663,16 @@ TTree* TMVA::DataSet::GetTree( Types::ETreeType type )
             ResultsClassification *results = dynamic_cast<ResultsClassification*>( itMethod->second );
             metVals[n][0] = results->operator[](iEvt);
          }
+         else if (itMethod->second->GetAnalysisType() == Types::kMulticlass) {
+            // multiclass classification
+            ResultsMulticlass *results = dynamic_cast<ResultsMulticlass*>( itMethod->second );
+
+            std::vector< Float_t > vals = results->operator[](iEvt);
+            for (UInt_t nCls = 0, nClsEnd=fdsi.GetNClasses(); nCls < nClsEnd; nCls++) {
+               Float_t val = vals.at(nCls);
+               metVals[n][nCls] = val;
+            }
+         }
          else if (itMethod->second->GetAnalysisType() == Types::kRegression) {
             // regression
             ResultsRegression *results = dynamic_cast<ResultsRegression*>( itMethod->second );
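For a multiclass method the tree gains one Float_t leaf per class in a single branch, with the leaf list assembled from the class names. For two classes called Signal and Background (illustrative names, as is the method name) the loop above yields:

   // leafList as built by the loop above (sketch):
   //    "Signal/F:Background/F"
   Float_t vals[2];
   tree->Branch( "MyMulticlassMVA", vals, "Signal/F:Background/F" );
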
diff --git a/tmva/src/DataSetFactory.cxx b/tmva/src/DataSetFactory.cxx
index 7ee6e3bf3f12e7ca26aebfa678f5685eba3a3cf5..183996585ae57a53ba273d0cab82ab10c35974e4 100644
--- a/tmva/src/DataSetFactory.cxx
+++ b/tmva/src/DataSetFactory.cxx
@@ -175,594 +175,6 @@ TMVA::DataSet* TMVA::DataSetFactory::BuildDynamicDataSet( TMVA::DataSetInfo& dsi
 }
 
 
-#ifndef ALTERNATIVE_EVENT_VECTOR_BUILDING
-
-
-//_______________________________________________________________________
-void TMVA::DataSetFactory::InitOptions( TMVA::DataSetInfo& dsi, 
-                                        std::vector< std::pair< Int_t, Int_t > >& nTrainTestEvents, 
-                                        TString& normMode, UInt_t& splitSeed, 
-                                        TString& splitMode ) 
-{
-   // the dataset splitting
-   Configurable splitSpecs( dsi.GetSplitOptions() );
-   splitSpecs.SetConfigName("DataSetFactory");
-   splitSpecs.SetConfigDescription( "Configuration options given in the \"PrepareForTrainingAndTesting\" call; these options define the creation of the data sets used for training and expert validation by TMVA" );
-
-   splitMode = "Random";    // the splitting mode
-   splitSpecs.DeclareOptionRef( splitMode, "SplitMode",
-                                "Method of picking training and testing events (default: random)" );
-   splitSpecs.AddPreDefVal(TString("Random"));
-   splitSpecs.AddPreDefVal(TString("Alternate"));
-   splitSpecs.AddPreDefVal(TString("Block"));
-
-   splitSeed = 100;
-   splitSpecs.DeclareOptionRef( splitSeed, "SplitSeed",
-                                "Seed for random event shuffling" );   
-
-   normMode = "NumEvents";  // the weight normalisation modes
-   splitSpecs.DeclareOptionRef( normMode, "NormMode",
-                                "Overall renormalisation of event-by-event weights (NumEvents: average weight of 1 per event, independently for signal and background; EqualNumEvents: average weight of 1 per event for signal, and sum of weights for background equal to sum of weights for signal)" );
-   splitSpecs.AddPreDefVal(TString("None"));
-   splitSpecs.AddPreDefVal(TString("NumEvents"));
-   splitSpecs.AddPreDefVal(TString("EqualNumEvents"));
-
-   // the number of events
-   nTrainTestEvents.resize( dsi.GetNClasses() );
-   for (UInt_t cl = 0; cl < dsi.GetNClasses(); cl++) {
-      nTrainTestEvents.at(cl).first  = 0;
-      nTrainTestEvents.at(cl).second = 0;
-      TString clName = dsi.GetClassInfo(cl)->GetName();
-      TString titleTrain =  TString().Format("Number of training events of class %s (default: 0 = all)",clName.Data()).Data();
-      TString titleTest  =  TString().Format("Number of test events of class %s (default: 0 = all)",clName.Data()).Data();
-      splitSpecs.DeclareOptionRef( nTrainTestEvents.at(cl).first , TString("nTrain_")+clName, titleTrain );
-      splitSpecs.DeclareOptionRef( nTrainTestEvents.at(cl).second, TString("nTest_")+clName, titleTest  );
-   }
-
-   splitSpecs.DeclareOptionRef( fVerbose, "V", "Verbosity (default: true)" );
-
-   splitSpecs.DeclareOptionRef( fVerboseLevel=TString("Info"), "VerboseLevel", "VerboseLevel (Debug/Verbose/Info)" );
-   splitSpecs.AddPreDefVal(TString("Debug"));
-   splitSpecs.AddPreDefVal(TString("Verbose"));
-   splitSpecs.AddPreDefVal(TString("Info"));
-
-   splitSpecs.ParseOptions();
-   splitSpecs.CheckForUnusedOptions();
-
-   // output logging verbosity
-   if (Verbose()) fLogger->SetMinType( kVERBOSE );   
-   if (fVerboseLevel.CompareTo("Debug")   ==0) fLogger->SetMinType( kDEBUG );
-   if (fVerboseLevel.CompareTo("Verbose") ==0) fLogger->SetMinType( kVERBOSE );
-   if (fVerboseLevel.CompareTo("Info")    ==0) fLogger->SetMinType( kINFO );
-
-   // put all to upper case
-   splitMode.ToUpper(); normMode.ToUpper();
-}
-
-
-//_______________________________________________________________________
-void TMVA::DataSetFactory::BuildEventVector( TMVA::DataSetInfo& dsi, 
-                                             TMVA::DataInputHandler& dataInput, 
-                                             std::vector< std::vector< Event* > >& tmpEventVector, 
-                                             std::vector<Double_t>& sumOfWeights, 
-                                             std::vector<Double_t>& nTempEvents, 
-                                             std::vector<Double_t>& renormFactor,
-                                             std::vector< std::vector< std::pair< Long64_t, Types::ETreeType > > >& userDefinedEventTypes ) 
-{
-   // build event vector
-   tmpEventVector.resize(dsi.GetNClasses());
-
-   // create the type, weight and boostweight branches
-   const UInt_t nvars    = dsi.GetNVariables();
-   const UInt_t ntgts    = dsi.GetNTargets();
-   const UInt_t nvis     = dsi.GetNSpectators();
-   //   std::vector<Float_t> fmlEval(nvars+ntgts+1+1+nvis);     // +1+1 for results of evaluation of cut and weight ttreeformula  
-
-   // the sum of weights should be renormalised to the number of events
-   renormFactor.assign( dsi.GetNClasses(), -1 );
-
-
-   // number of signal and background events passing cuts
-   std::vector< Int_t >    nInitialEvents( dsi.GetNClasses() );
-   std::vector< Int_t >    nEvBeforeCut(   dsi.GetNClasses() );
-   std::vector< Int_t >    nEvAfterCut(    dsi.GetNClasses() );
-   std::vector< Float_t >  nWeEvBeforeCut( dsi.GetNClasses() );
-   std::vector< Float_t >  nWeEvAfterCut(  dsi.GetNClasses() );
-   std::vector< Double_t > nNegWeights(    dsi.GetNClasses() );
-   std::vector< Float_t* > varAvLength(    dsi.GetNClasses() );
-
-   Bool_t haveArrayVariable = kFALSE;
-   Bool_t *varIsArray = new Bool_t[nvars];
-
-   for (size_t i=0; i<varAvLength.size(); i++) {
-      varAvLength[i] = new Float_t[nvars];
-      for (UInt_t ivar=0; ivar<nvars; ivar++) {
-         //varIsArray[ivar] = kFALSE;
-         varAvLength[i][ivar] = 0;
-      }
-   }
-
-   // if we work with chains we need to remember the current tree
-   // if the chain jumps to a new tree we have to reset the formulas
-   for (UInt_t cl=0; cl<dsi.GetNClasses(); cl++) {
-
-      Log() << kINFO << "Create training and testing trees: looping over class " << dsi.GetClassInfo(cl)->GetName() 
-            << "..." << Endl;
-
-      // info output for weights
-      const TString tmpWeight = dsi.GetClassInfo(cl)->GetWeight();
-      if (tmpWeight!="") {
-         Log() << kINFO << "Weight expression for class \"" << dsi.GetClassInfo(cl)->GetName() << "\": \""
-               << tmpWeight << "\"" << Endl; 
-      }
-      else {
-         Log() << kINFO << "No weight expression defined for class \"" << dsi.GetClassInfo(cl)->GetName() 
-               << "\"" << Endl; 
-      }
-      
-      // used for chains only
-      TString currentFileName("");
-      
-      std::vector<TreeInfo>::const_iterator treeIt(dataInput.begin(dsi.GetClassInfo(cl)->GetName()));
-      for (;treeIt!=dataInput.end(dsi.GetClassInfo(cl)->GetName()); treeIt++) {
-
-         // read first the variables
-         std::vector<Float_t> vars(nvars);
-         std::vector<Float_t> tgts(ntgts);
-         std::vector<Float_t> vis(nvis);
-         TreeInfo currentInfo = *treeIt;
-         
-         Bool_t isChain = (TString("TChain") == currentInfo.GetTree()->ClassName());
-         currentInfo.GetTree()->LoadTree(0);
-         ChangeToNewTree( currentInfo, dsi );
-
-         // count number of events in tree before cut
-         nInitialEvents.at(cl) += currentInfo.GetTree()->GetEntries();
-         
-         std::vector< std::pair< Long64_t, Types::ETreeType > >& userEvType = userDefinedEventTypes.at(cl);
-         if (userEvType.size() == 0 || userEvType.back().second != currentInfo.GetTreeType()) {
-            userEvType.push_back( std::make_pair< Long64_t, Types::ETreeType >(tmpEventVector.at(cl).size(), currentInfo.GetTreeType()) );
-         }
-
-         // loop over events in ntuple
-         for (Long64_t evtIdx = 0; evtIdx < currentInfo.GetTree()->GetEntries(); evtIdx++) {
-            currentInfo.GetTree()->LoadTree(evtIdx);
-            
-            // may need to reload tree in case of chains
-            if (isChain) {
-               if (currentInfo.GetTree()->GetTree()->GetDirectory()->GetFile()->GetName() != currentFileName) {
-                  currentFileName = currentInfo.GetTree()->GetTree()->GetDirectory()->GetFile()->GetName();
-                  ChangeToNewTree( currentInfo, dsi );
-               }
-            }
-            currentInfo.GetTree()->GetEntry(evtIdx);
-            Int_t sizeOfArrays = 1;
-            Int_t prevArrExpr = 0;
-            
-            // ======= evaluate all formulas =================
-
-            // first we check if some of the formulas are arrays
-            for (UInt_t ivar=0; ivar<nvars; ivar++) {
-               Int_t ndata = fInputFormulas[ivar]->GetNdata();
-               varAvLength[cl][ivar] += ndata;
-               if (ndata == 1) continue;
-               haveArrayVariable = kTRUE;
-               varIsArray[ivar] = kTRUE;
-               if (sizeOfArrays == 1) {
-                  sizeOfArrays = ndata;
-                  prevArrExpr = ivar;
-               } 
-               else if (sizeOfArrays!=ndata) {
-                  Log() << kERROR << "ERROR while preparing training and testing trees:" << Endl;
-                  Log() << "   multiple array-type expressions of different length were encountered" << Endl;
-                  Log() << "   location of error: event " << evtIdx 
-                        << " in tree " << currentInfo.GetTree()->GetName()
-                        << " of file " << currentInfo.GetTree()->GetCurrentFile()->GetName() << Endl;
-                  Log() << "   expression " << fInputFormulas[ivar]->GetTitle() << " has " 
-                        << ndata << " entries, while" << Endl;
-                  Log() << "   expression " << fInputFormulas[prevArrExpr]->GetTitle() << " has "
-                        << fInputFormulas[prevArrExpr]->GetNdata() << " entries" << Endl;
-                  Log() << kFATAL << "Need to abort" << Endl;
-               }
-            }
-
-            // now we read the information
-            for (Int_t idata = 0;  idata<sizeOfArrays; idata++) {
-               Bool_t containsNaN = kFALSE;
-
-               TTreeFormula* formula = 0;
-
-               // the cut expression
-               Float_t cutVal = 1;
-               formula = fCutFormulas[cl];
-               if (formula) {
-                  Int_t ndata = formula->GetNdata();
-                  cutVal = (ndata==1 ? 
-                            formula->EvalInstance(0) :
-                            formula->EvalInstance(idata));
-                  if (TMath::IsNaN(cutVal)) {
-                     containsNaN = kTRUE;
-                     Log() << kWARNING << "Cut expression resolves to infinite value (NaN): " 
-                           << formula->GetTitle() << Endl;
-                  }
-               }
-               
-               // the input variable
-               for (UInt_t ivar=0; ivar<nvars; ivar++) {
-                  formula = fInputFormulas[ivar];
-                  Int_t ndata = formula->GetNdata();               
-                  vars[ivar] = (ndata == 1 ? 
-                                formula->EvalInstance(0) : 
-                                formula->EvalInstance(idata));
-                  if (TMath::IsNaN(vars[ivar])) {
-                     containsNaN = kTRUE;
-                     Log() << kWARNING << "Input expression resolves to infinite value (NaN): " 
-                           << formula->GetTitle() << Endl;
-                  }
-               }
-
-               // the targets
-               for (UInt_t itrgt=0; itrgt<ntgts; itrgt++) {
-                  formula = fTargetFormulas[itrgt];
-                  Int_t ndata = formula->GetNdata();               
-                  tgts[itrgt] = (ndata == 1 ? 
-                                 formula->EvalInstance(0) : 
-                                 formula->EvalInstance(idata));
-                  if (TMath::IsNaN(tgts[itrgt])) {
-                     containsNaN = kTRUE;
-                     Log() << kWARNING << "Target expression resolves to infinite value (NaN): " 
-                           << formula->GetTitle() << Endl;
-                  }
-               }
-
-               // the spectators
-               for (UInt_t itVis=0; itVis<nvis; itVis++) {
-                  formula = fSpectatorFormulas[itVis];
-                  Int_t ndata = formula->GetNdata();               
-                  vis[itVis] = (ndata == 1 ? 
-                                formula->EvalInstance(0) : 
-                                formula->EvalInstance(idata));
-                  if (TMath::IsNaN(vis[itVis])) {
-                     containsNaN = kTRUE;
-                     Log() << kWARNING << "Spectator expression resolves to infinite value (NaN): " 
-                           << formula->GetTitle() << Endl;
-                  }
-               }
-
-
-               // the weight
-               Float_t weight = currentInfo.GetWeight(); // multiply by tree weight
-               formula = fWeightFormula[cl];
-               if (formula!=0) {
-                  Int_t ndata = formula->GetNdata();
-                  weight *= (ndata == 1 ?
-                             formula->EvalInstance() :
-                             formula->EvalInstance(idata));
-                  if (TMath::IsNaN(weight)) {
-                     containsNaN = kTRUE;
-                     Log() << kWARNING << "Weight expression resolves to infinite value (NaN): " 
-                           << formula->GetTitle() << Endl;
-                  }
-               }
-            
-               // Count the events before rejection due to cut or NaN value
-               // (weighted and unweighted)
-               nEvBeforeCut.at(cl) ++;
-               if (!TMath::IsNaN(weight))
-                  nWeEvBeforeCut.at(cl) += weight;
-
-               // apply the cut
-               // skip rest if cut is not fulfilled
-               if (cutVal<0.5) continue;
-
-               // global flag if negative weights exist -> can be used by classifiers who may 
-               // require special data treatment (also print warning)
-               if (weight < 0) nNegWeights.at(cl)++;
-
-               // now read the event-values (variables and regression targets)
-
-               if (containsNaN) {
-                  Log() << kWARNING << "Event " << evtIdx;
-                  if (sizeOfArrays>1) Log() << kWARNING << " rejected" << Endl;
-                  continue;
-               }
-
-               // Count the events after rejection due to cut or NaN value
-               // (weighted and unweighted)
-               nEvAfterCut.at(cl) ++;
-               nWeEvAfterCut.at(cl) += weight;
-
-               // event accepted, fill temporary ntuple
-               tmpEventVector.at(cl).push_back(new Event(vars, tgts , vis, cl , weight));
-
-               // --------------- this is to keep <Event>->IsSignal() working. TODO: this should be removed on the long run
-               ClassInfo* ci = dsi.GetClassInfo("Signal");
-               if( ci == 0 ) tmpEventVector.at(cl).back()->SetSignalClass( 0 );
-               else          tmpEventVector.at(cl).back()->SetSignalClass( ci->GetNumber()   );
-               // ---------------
-
-               // add up weights
-               sumOfWeights.at(cl) += weight;
-               nTempEvents.at(cl)  += 1;
-            }
-         }
-         
-         currentInfo.GetTree()->ResetBranchAddresses();
-      }
-
-      // compute renormalisation factors
-      renormFactor.at(cl) = nTempEvents.at(cl)/sumOfWeights.at(cl);
-   }
-
-   // for output, check maximum class name length
-   Int_t maxL = dsi.GetClassNameMaxLength();
-   
-   Log() << kINFO << "Number of events in input trees (after possible flattening of arrays):" << Endl;
-   for (UInt_t cl = 0; cl < dsi.GetNClasses(); cl++) {
-      Log() << kINFO << "    " 
-            << setiosflags(ios::left) << std::setw(maxL) << dsi.GetClassInfo(cl)->GetName() 
-            << "      -- number of events       : "
-            << std::setw(5) << nEvBeforeCut.at(cl) 
-            << "  / sum of weights: " << std::setw(5) << nWeEvBeforeCut.at(cl) << Endl;
-   }
-
-   for (UInt_t cl = 0; cl < dsi.GetNClasses(); cl++) {
-      Log() << kINFO << "    " << std::setw(maxL) << dsi.GetClassInfo(cl)->GetName() 
-            <<" tree -- total number of entries: " 
-            << std::setw(5) << dataInput.GetEntries(dsi.GetClassInfo(cl)->GetName()) << Endl;
-   }
-
-   Log() << kINFO << "Preselection:" << Endl;
-   if (dsi.HasCuts()) {
-      for (UInt_t cl = 0; cl< dsi.GetNClasses(); cl++) {
-         Log() << kINFO << "    " << setiosflags(ios::left) << std::setw(maxL) << dsi.GetClassInfo(cl)->GetName() 
-               << " requirement: \"" << dsi.GetClassInfo(cl)->GetCut() << "\"" << Endl;
-         Log() << kINFO << "    " 
-               << setiosflags(ios::left) << std::setw(maxL) << dsi.GetClassInfo(cl)->GetName() 
-               << "      -- number of events passed: "
-               << std::setw(5) << nEvAfterCut.at(cl)
-               << "  / sum of weights: " << std::setw(5) << nWeEvAfterCut.at(cl) << Endl;
-         Log() << kINFO << "    " 
-               << setiosflags(ios::left) << std::setw(maxL) << dsi.GetClassInfo(cl)->GetName() 
-               << "      -- efficiency             : "
-               << std::setw(6) << nWeEvAfterCut.at(cl)/nWeEvBeforeCut.at(cl) << Endl;
-      }
-   }
-   else Log() << kINFO << "    No preselection cuts applied on event classes" << Endl;
-
-   delete[] varIsArray;
-   for (size_t i=0; i<varAvLength.size(); i++)
-      delete[] varAvLength[i];
-
-}
-
-//_______________________________________________________________________
-TMVA::DataSet* TMVA::DataSetFactory::MixEvents( TMVA::DataSetInfo& dsi, 
-                                                std::vector< std::vector< TMVA::Event* > >& tmpEventVector, 
-                                                std::vector< std::pair< Int_t, Int_t > >& nTrainTestEvents,
-                                                const TString& splitMode, UInt_t splitSeed, 
-                                                std::vector<Double_t>& renormFactor,
-                                                std::vector< std::vector< std::pair< Long64_t, Types::ETreeType > > >& userDefinedEventTypes )
-{
-   // create a dataset from the datasetinfo object
-   DataSet* ds = new DataSet(dsi);
-   
-   typedef std::vector<Event*>::size_type EVTVSIZE;
-
-   std::vector<EVTVSIZE> origSize(dsi.GetNClasses());
-
-   Log() << kVERBOSE << "Number of available training events:" << Endl;
-   for ( UInt_t cl = 0; cl<dsi.GetNClasses(); cl++ ) {
-      origSize.at(cl) = tmpEventVector.at(cl).size();
-      Log() << kVERBOSE << "  " << dsi.GetClassInfo(cl)->GetName() << "    : " << origSize.at(cl) << Endl;
-   }
-
-   std::vector< std::vector< EVTVSIZE > > finalNEvents( dsi.GetNClasses() );
-   for (UInt_t cl = 0; cl < dsi.GetNClasses(); cl++) {
-      finalNEvents.at(cl).resize(2); // resize: training and test
-      finalNEvents[cl][0] = nTrainTestEvents.at(cl).first;
-      finalNEvents[cl][1] = nTrainTestEvents.at(cl).second;
-   }
-
-   // loop over all classes
-   for ( UInt_t cl = 0; cl<dsi.GetNClasses(); cl++) { 
-      
-      if (finalNEvents.at(cl).at(0)>origSize.at(cl)) // training
-         Log() << kFATAL << "More training events requested than available for the class " << dsi.GetClassInfo(cl)->GetName() << Endl;
-      
-      if (finalNEvents.at(cl).at(1)>origSize.at(cl)) // testing
-         Log() << kFATAL << "More testing events requested than available for the class" << dsi.GetClassInfo(cl)->GetName() << Endl;
-      
-      if (finalNEvents.at(cl).at(0)>origSize.at(cl) || finalNEvents.at(cl).at(1)>origSize.at(cl) ) // training and testing
-         Log() << kFATAL << "More testing and training events requested than available for the class" << dsi.GetClassInfo(cl)->GetName() << Endl;
-
-      if (finalNEvents.at(cl).at(0)==0 || finalNEvents.at(cl).at(1)==0) {   // if events requested for training or testing are 0 (== all)
-         if (finalNEvents.at(cl).at(0)==0 && finalNEvents.at(cl).at(1)==0) { // if both, training and testing are 0
-            finalNEvents.at(cl).at(0) = finalNEvents.at(cl).at(1) = origSize.at(cl)/2;  // use half of the events for training, the other half for testing
-         }
-     
-    else if (finalNEvents.at(cl).at(1)==0) { // if testing is chosen "all"
-            finalNEvents.at(cl).at(1)  = origSize.at(cl) - finalNEvents.at(cl).at(0); // take the remaining events (not training) for testing
-         } 
-         else {          // the other way around
-            finalNEvents.at(cl).at(0)  = origSize.at(cl) - finalNEvents.at(cl).at(1); // take the remaining events (not testing) for training
-         }
-      }
-   }
-
-   TRandom3 rndm( splitSeed ); 
-
-   // create event-lists for mixing
-   std::vector< std::vector< TEventList* > > evtList(dsi.GetNClasses());
-   for (UInt_t cl = 0; cl < dsi.GetNClasses(); cl++) {
-      evtList.at(cl).resize(2);
-      evtList.at(cl).at(0) = new TEventList();
-      evtList.at(cl).at(1) = new TEventList();
-   }
-
-   std::vector< std::vector< std::vector<EVTVSIZE>::size_type > > userDefN(dsi.GetNClasses());  // class/training-testing/<size>
-
-   for ( UInt_t cl = 0; cl<dsi.GetNClasses(); cl++ ) { // loop over the different classes
-   
-      const std::vector<EVTVSIZE>::size_type size = origSize.at(cl);
-
-      userDefN[cl].resize(2,0); // 0 training, 1 testing
-
-      if (splitMode == "RANDOM") {
-
-         Log() << kINFO << "Randomly shuffle events in training and testing trees for " << dsi.GetClassInfo(cl)->GetName() << Endl;
-
-         // the index array
-         std::vector<EVTVSIZE> idxArray(0);
-         idxArray.reserve(size);
-         //         std::vector<Char_t>   allPickedIdxArray(size);
-         //         allPickedIdxArray.assign( size, Char_t(kFALSE) );
-
-         // search for all events of which the trees have been defined as training or testing by the user
-         std::vector< std::pair< Long64_t, Types::ETreeType > >::iterator it = userDefinedEventTypes[cl].begin();
-         Types::ETreeType currentType = Types::kMaxTreeType;
-         
-         Bool_t haveWarnedBefore = kFALSE;
-         for (EVTVSIZE i = 0; i < size; i++) {
-            // if i is larger than the eventnumber of the current entry of the user defined types
-            if (it!=userDefinedEventTypes[cl].end() && Long64_t(i) >= (*it).first) {
-               // then take the treetype as currentType and increse the iterator by one to point at the next entry
-               currentType = (*it).second;
-               it++;
-            }
-            // now things depending on the current tree type (the tree type of the event)
-            // if (currentType == Types::kMaxTreeType ) ===> do nothing
-            if (currentType == Types::kTraining || currentType == Types::kTesting) {
-               Int_t tp = (currentType == Types::kTraining?0:1);
-               evtList[cl][tp]->Enter(Long64_t(i));       // add the eventnumber of the picked event to the TEventList
-               (userDefN[cl][tp])++;                      // one more has been picked
-               //               allPickedIdxArray[i] = Char_t(kTRUE);              // mark as picked
-
-               if ( !haveWarnedBefore && (finalNEvents.at(cl).at(tp) < userDefN[cl][tp]) ) {
-                  Log() << kWARNING << "More events requested for " << (currentType == Types::kTraining?"training":"testing") 
-                        << " than provided explicitly as " << (currentType == Types::kTraining?"training":"testing") << "-events "
-                        << "(by having given TMVA::Types::kTraining and/or TMVA::Types::kTesting at the AddTree-commands)." 
-                        << "--> all defined " <<(currentType == Types::kTraining?"training":"testing") << "-events have been taken." << Endl;
-                  haveWarnedBefore = kTRUE;
-               }
-            } else {
-               idxArray.push_back( i );
-            }
-         }
-
-         //         for (EVTVSIZE i=0; i<size; i++) { idxArray.at(i)=i; }
-      
-         for (Int_t itype=0; itype<2; itype++) {  // training (0) and then testing (1)
-            // consistency check of the user-input
-            if( (userDefN[cl][itype]+idxArray.size()) < finalNEvents.at(cl).at(itype) )
-               Log() << kWARNING << "More " << (currentType == Types::kTraining?"training":"testing")  
-                     << " events [" << finalNEvents[cl][itype] << "] requested than available for the class "
-                     << dsi.GetClassInfo(cl)->GetName() << " [defined:  " << userDefN[cl][itype] 
-                     << " + undefined: " << idxArray.size() 
-                     << "]." << Endl;
-
-            EVTVSIZE pos = 0;
-            Int_t nSelected = 0;
-            while( !idxArray.empty() && (userDefN[cl][itype]+nSelected)<finalNEvents.at(cl).at(itype)  ){
-               pos = rndm.Integer(idxArray.size() );
-               evtList.at(cl).at(itype)->Enter(Long64_t(idxArray.at(pos)));
-               idxArray.erase( idxArray.begin()+pos );
-               nSelected++;
-            }
-
-
-            //             // the selected events
-            //             std::vector<Char_t> thisPickedIdxArray(size);
-            //             thisPickedIdxArray.assign( size, Char_t(kFALSE) );
-
-
-            //             EVTVSIZE pos = 0;
-            //             for (EVTVSIZE i=0; i<finalNEvents.at(cl).at(itype); i++) {
-            //                // throw random positions until one is found where the event hasn't been picked yet
-            //                do { 
-            //                   pos = EVTVSIZE(size * rndm.Rndm()); 
-            //                } while (allPickedIdxArray.at(idxArray.at(pos)) == Char_t(kTRUE) );
-            //                // pick the found event
-            //                thisPickedIdxArray.at(idxArray.at(pos)) = Char_t(kTRUE);
-            //                allPickedIdxArray .at(idxArray.at(pos)) = Char_t(kTRUE);
-            //             }
-            //             // write all for this class and this event type picked events into the according TEventList
-            //             for (EVTVSIZE i=0; i<size; i++) if (thisPickedIdxArray.at(i)==Char_t(kTRUE)) evtList.at(cl).at(itype)->Enter(Long64_t(i)); 
-         }
-      } 
-      else if (splitMode == "ALTERNATE") {
-         Log() << kINFO << "Pick alternating training and test events from input tree for " 
-               << dsi.GetClassInfo(cl)->GetName() << Endl;
-      
-         Int_t ntrain = finalNEvents.at(cl).at(0);   // training
-         Int_t ntest  = finalNEvents.at(cl).at(1);   // testing
-
-         UInt_t lcd       = LargestCommonDivider(ntrain,ntest);
-         UInt_t trainfrac = ntrain/lcd;
-         UInt_t modulo    = (ntrain+ntest)/lcd;
-
-         for (EVTVSIZE i=0; i<finalNEvents.at(cl).at(0)+finalNEvents.at(cl).at(1); i++) {
-            Bool_t isTrainingEvent = (i%modulo)<trainfrac;
-            evtList.at(cl).at(isTrainingEvent ? 0:1)->Enter( i );
-         }
-      }
-      else if (splitMode == "BLOCK") {
-         Log() << kINFO << "Pick block-wise training and test events from input tree for " 
-               << dsi.GetClassInfo(cl)->GetName() << Endl;
-      
-         for (EVTVSIZE i=0; i<finalNEvents.at(cl).at(0); i++)     // training events
-            evtList.at(cl).at(0)->Enter( i );                     // write them into the training-eventlist of that class
-         for (EVTVSIZE i=0; i<finalNEvents.at(cl).at(1); i++)     // test events 
-            evtList.at(cl).at(1)->Enter( i + finalNEvents.at(cl).at(0));  // write them into test-eventlist of that class
-
-      }
-      else Log() << kFATAL << "Unknown type: " << splitMode << Endl;
-   }
-
-   // merge signal and background trees, and renormalise the event weights in this step   
-   for (Int_t itreeType=0; itreeType<2; itreeType++) {
-
-      Log() << kINFO << "Create internal " << (itreeType == 0 ? "training" : "testing") << " tree" << Endl;        
-
-      std::vector<Event*>* newEventVector = new std::vector<Event*>();
-      // hand the event vector over to the dataset, which will have to take care of destroying it
-      ds->SetEventCollection(newEventVector, (itreeType==0? Types::kTraining : Types::kTesting) ); 
-
-      EVTVSIZE newVectSize = 0;
-      for (UInt_t cl = 0; cl < dsi.GetNClasses(); cl++) {
-         newVectSize += evtList.at(cl).at(itreeType)->GetN();
-      }
-      newEventVector->reserve( newVectSize );
-
-      for ( UInt_t cl=0; cl<dsi.GetNClasses(); cl++) {
-
-         // renormalise only if non-trivial renormalisation factor
-         for (EVTVSIZE ievt=0; ievt<tmpEventVector.at(cl).size(); ievt++) {
-            if (!evtList.at(cl).at(itreeType)->Contains(Long64_t(ievt))) continue;
-
-            newEventVector->push_back(tmpEventVector.at(cl)[ievt] );
-            newEventVector->back()->ScaleWeight( renormFactor.at(cl) );
-
-            ds->IncrementNClassEvents( itreeType, cl );
-         }
-      }
-   }
-
-   for (UInt_t cl = 0; cl < dsi.GetNClasses(); cl++) {
-      tmpEventVector.at(cl).clear(); 
-      tmpEventVector.at(cl).resize(0);
-   }
-
-   for (UInt_t cl = 0; cl < dsi.GetNClasses(); cl++) {
-      delete evtList.at(cl).at(0);
-      delete evtList.at(cl).at(1);
-   }
-   return ds;
-}
-
-#endif
-
-
 //_______________________________________________________________________
 TMVA::DataSet* TMVA::DataSetFactory::BuildInitialDataSet( DataSetInfo& dsi, DataInputHandler& dataInput ) 
 {
@@ -783,7 +195,6 @@ TMVA::DataSet* TMVA::DataSetFactory::BuildInitialDataSet( DataSetInfo& dsi, Data
    TString mixMode;
    UInt_t splitSeed;
 
-#ifdef ALTERNATIVE_EVENT_VECTOR_BUILDING
    // ======= build event-vector tentative new ordering =================================
    
    TMVA::EventVectorOfClassesOfTreeType tmpEventVector;
@@ -796,68 +207,7 @@ TMVA::DataSet* TMVA::DataSetFactory::BuildInitialDataSet( DataSetInfo& dsi, Data
    DataSet* ds = MixEvents( dsi, tmpEventVector, nTrainTestEvents, splitMode, mixMode, normMode, splitSeed);
 
 
-#else
-
-   std::vector< std::pair< Int_t, Int_t > > nTrainTestEvents;
-   std::vector< std::vector< std::pair< Long64_t, Types::ETreeType > > > userDefinedEventTypes( dsi.GetNClasses() ); // class/automatically growing/startindex+treetype
-
-   InitOptions( dsi, nTrainTestEvents, normMode, splitSeed, splitMode );
-
-   // ======= build event-vector =================================
-   
-   std::vector< std::vector< Event* > > tmpEventVector;
-
-   std::vector<Double_t> sumOfWeights( dsi.GetNClasses() );
-   std::vector<Double_t> nTempEvents ( dsi.GetNClasses() );
-   std::vector<Double_t> renormFactor( dsi.GetNClasses() );
-   BuildEventVector( dsi, dataInput, tmpEventVector, sumOfWeights, nTempEvents, renormFactor, userDefinedEventTypes );
-
-   // ============================================================
-   // create training and test tree
-   // ============================================================
-
-   Log() << kINFO << "Prepare training and Test samples:" << Endl;
-
-   // ============================================================
-   // renormalisation
-   // ============================================================
-
-   // print rescaling info
-   if (normMode == "NONE") {
-      Log() << kINFO << "No weight renormalisation applied: use original event weights" << Endl;
-      renormFactor.assign( dsi.GetNClasses(), 1.0 );
-   }
-   else if (normMode == "NUMEVENTS") {
-      Log() << kINFO << "Weight renormalisation mode: \"NumEvents\": renormalise the different classes" << Endl;
-      Log() << kINFO << "... weights independently so that Sum[i=1..N_j]{w_i} = N_j, j=0,1,2..." << Endl;
-      Log() << kINFO << "... (note that N_j is the sum of training and test events)" << Endl;
-      for (UInt_t cl=0; cl<dsi.GetNClasses(); cl++) { 
-         Log() << kINFO << "Rescale " << dsi.GetClassInfo(cl)->GetName() << " event weights by factor: " << renormFactor.at(cl) << Endl;
-      }
-   }
-   else if (normMode == "EQUALNUMEVENTS") {
-      Log() << kINFO << "Weight renormalisation mode: \"EqualNumEvents\": renormalise weights of events of classes" << Endl;
-      Log() << kINFO << "   so that Sum[i=1..N_j]{w_i} = N_classA, j=classA, classB, ..." << Endl;
-      Log() << kINFO << "   (note that N_j is the sum of training and test events)" << Endl;
-
-      for (UInt_t cl = 1; cl < dsi.GetNClasses(); cl++ ) {
-         renormFactor.at(cl) *= nTempEvents.at(0)/nTempEvents.at(cl);
-      }
-      for (UInt_t cl=0; cl<dsi.GetNClasses(); cl++) { 
-         Log() << kINFO << "Rescale " << dsi.GetClassInfo(cl)->GetName() << " event weights by factor: " << renormFactor.at(cl) << Endl;
-      }
-   }
-   else {
-      Log() << kFATAL << "<PrepareForTrainingAndTesting> Unknown NormMode: " << normMode << Endl;
-   }
-   dsi.SetNormalization( normMode );
-
-   // ============= now the events have to be mixed and put into training- and test-eventcollections =============
-   
-   DataSet* ds = MixEvents( dsi, tmpEventVector, nTrainTestEvents, splitMode, splitSeed, renormFactor, userDefinedEventTypes );
-
-#endif
-   
+
    Int_t maxL = dsi.GetClassNameMaxLength();
    Log() << kINFO << "Collected:" << Endl;
    for (UInt_t cl = 0; cl < dsi.GetNClasses(); cl++) {
@@ -1225,7 +575,6 @@ TMatrixD* TMVA::DataSetFactory::CalcCovarianceMatrix( DataSet * ds, const UInt_t
 
 
 
-#ifdef ALTERNATIVE_EVENT_VECTOR_BUILDING
 // --------------------------------------- new versions
 
 
@@ -1545,11 +894,6 @@ void  TMVA::DataSetFactory::BuildEventVector( TMVA::DataSetInfo& dsi,
                // event accepted, fill temporary ntuple
                tmpEventVector.find(currentInfo.GetTreeType())->second.at(cl).push_back(new Event(vars, tgts , vis, cl , weight));
 
-               // --------------- this is to keep <Event>->IsSignal() working. TODO: this should be removed on the long run
-               ClassInfo* ci = dsi.GetClassInfo("Signal");
-               if( ci == 0 ) tmpEventVector[currentInfo.GetTreeType()].at(cl).back()->SetSignalClass( 0 );
-               else          tmpEventVector[currentInfo.GetTreeType()].at(cl).back()->SetSignalClass( ci->GetNumber()   );
-               // ---------------
             }
          }
          
@@ -1616,7 +960,7 @@ TMVA::DataSet*  TMVA::DataSetFactory::MixEvents( DataSetInfo& dsi,
 
    // check if the vectors of all classes are empty
    for( Int_t cls = 0, clsEnd = dsi.GetNClasses(); cls < clsEnd; ++cls ){
-      emptyUndefined |= tmpEventVector[Types::kMaxTreeType].at(cls).empty();
+      emptyUndefined &= tmpEventVector[Types::kMaxTreeType].at(cls).empty();
    }
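+   // Note on the operator change above -- a sketch of the intended logic,
+   // assuming emptyUndefined is initialised to kTRUE before this loop: with
+   // "|=" a single class with an empty "undefined" event vector forced the
+   // flag to kTRUE; with "&=" it stays kTRUE only if the vectors of *all*
+   // classes are empty, e.g. class 0 empty, class 1 non-empty gives kTRUE
+   // under "|=" but kFALSE under "&=".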
 
    TMVA::RandomGenerator rndm( splitSeed );
@@ -2136,4 +1480,3 @@ void  TMVA::DataSetFactory::RenormEvents( TMVA::DataSetInfo& dsi,
 
 
 
-#endif
diff --git a/tmva/src/DataSetInfo.cxx b/tmva/src/DataSetInfo.cxx
index 4a0a4da58bd4acadd53b3eacd00906fe5bf92af0..1b5ac04b16f2be848d407e53f867749990793383 100644
--- a/tmva/src/DataSetInfo.cxx
+++ b/tmva/src/DataSetInfo.cxx
@@ -60,6 +60,7 @@
 //_______________________________________________________________________
 TMVA::DataSetInfo::DataSetInfo(const TString& name) 
    : TObject(),
+     fDataSetManager(NULL),
      fName(name),
      fDataSet( 0 ),
      fNeedsRebuilding( kTRUE ),
@@ -72,6 +73,7 @@ TMVA::DataSetInfo::DataSetInfo(const TString& name)
      fOwnRootDir(0),
      fVerbose( kFALSE ),
      fSignalClass(0),
+     fTargetsForMulticlass(0),
      fLogger( new MsgLogger("DataSetInfo", kINFO) )
 {
    // constructor
@@ -82,9 +84,13 @@ TMVA::DataSetInfo::DataSetInfo(const TString& name)
 TMVA::DataSetInfo::~DataSetInfo() 
 {
    // destructor
-   if(fDataSet!=0) delete fDataSet;
+   ClearDataSet();
    
-   for(UInt_t i=0; i<fClasses.size(); i++) delete fClasses[i];
+   for(UInt_t i=0, iEnd = fClasses.size(); i<iEnd; ++i) {
+      delete fClasses[i];
+   }
+
+   delete fTargetsForMulticlass;
 
    delete fLogger;
 }
@@ -144,7 +150,17 @@ void TMVA::DataSetInfo::PrintClasses() const
 //_______________________________________________________________________
 Bool_t TMVA::DataSetInfo::IsSignal( const TMVA::Event* ev ) const 
 {
-    return (ev->GetClass()  == fSignalClass); 
+   return (ev->GetClass()  == fSignalClass); 
+}
+
+//_______________________________________________________________________
+std::vector<Float_t>*  TMVA::DataSetInfo::GetTargetsForMulticlass( const TMVA::Event* ev ) 
+{
+   if( !fTargetsForMulticlass ) fTargetsForMulticlass = new std::vector<Float_t>( GetNClasses() );
+//   fTargetsForMulticlass->resize( GetNClasses() );
+   fTargetsForMulticlass->assign( GetNClasses(), 0.0 );
+   fTargetsForMulticlass->at( ev->GetClass() ) = 1.0;
+   return fTargetsForMulticlass; 
 }
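+// A usage sketch (hypothetical caller): for an event of class 2 in a
+// four-class problem the returned vector is the one-hot encoding of the class,
+//
+//    std::vector<Float_t>* tgt = dsi.GetTargetsForMulticlass( ev );
+//    // *tgt == {0.0, 0.0, 1.0, 0.0}
+//
+// The buffer is owned and reused by DataSetInfo (deleted in the destructor,
+// overwritten on every call), so callers must copy the values if they need
+// them beyond the next call and must never delete the pointer themselves.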
 
 
@@ -397,7 +413,13 @@ TMVA::DataSet* TMVA::DataSetInfo::GetDataSet() const
    // returns data set
    if (fDataSet==0 || fNeedsRebuilding) {
       if(fDataSet!=0) ClearDataSet();
-      fDataSet = DataSetManager::Instance().CreateDataSet(GetName());
+//      fDataSet = DataSetManager::Instance().CreateDataSet(GetName()); //DSMTEST replaced by following lines
+      if( !fDataSetManager )
+	 Log() << kFATAL << "DataSetManager has not been set in DataSetInfo (GetDataSet() )." << Endl;
+      fDataSet = fDataSetManager->CreateDataSet(GetName());
       fNeedsRebuilding = kFALSE;
    }
    return fDataSet;
diff --git a/tmva/src/DataSetManager.cxx b/tmva/src/DataSetManager.cxx
index 4f4694d6a73dcccc5b643125b61ef9eb597b7c57..caa8119d719dc647a0a69e05a864275481cdde07 100644
--- a/tmva/src/DataSetManager.cxx
+++ b/tmva/src/DataSetManager.cxx
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -44,10 +44,11 @@ using std::endl;
 #include "TMVA/MsgLogger.h"
 #endif
 
-TMVA::DataSetManager* TMVA::DataSetManager::fgDSManager = 0;
-TMVA::DataSetManager& TMVA::DataSetManager::Instance() { return *fgDSManager; }      
-void TMVA::DataSetManager::CreateInstance( DataInputHandler& dataInput ) { fgDSManager = new DataSetManager(dataInput); }
-void TMVA::DataSetManager::DestroyInstance() { if (fgDSManager) { delete fgDSManager; fgDSManager=0; } }
+//TMVA::DataSetManager* TMVA::DataSetManager::fgDSManager = 0; // DSMTEST removed
+//TMVA::DataSetManager& TMVA::DataSetManager::Instance() { return *fgDSManager; }      // DSMTEST removed
+// void TMVA::DataSetManager::CreateInstance( DataInputHandler& dataInput ) { fgDSManager = new DataSetManager(dataInput); } // DSMTEST removed
+
+// void TMVA::DataSetManager::DestroyInstance() { if (fgDSManager) { delete fgDSManager; fgDSManager=0; } } // DSMTEST removed
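+// DSMTEST: ownership after this change, as wired up in this patch: each
+// Factory creates and deletes its own DataSetManager, AddDataSetInfo() hands
+// the manager to every registered DataSetInfo via SetDataSetManager(), and
+// DataSetInfo::GetDataSet() uses that back-pointer instead of the former
+// global Instance().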
 
 //_______________________________________________________________________
 TMVA::DataSetManager::DataSetManager( DataInputHandler& dataInput ) 
@@ -62,10 +63,10 @@ TMVA::DataSetManager::DataSetManager( DataInputHandler& dataInput )
 TMVA::DataSetManager::~DataSetManager() 
 {
    // destructor
-   fDataSetInfoCollection.SetOwner();
-
-   TMVA::DataSetFactory::destroyInstance();
+//   fDataSetInfoCollection.SetOwner(); // DSMTEST --> created a segfault because the DataSetInfo-objects got deleted twice
 
+   TMVA::DataSetFactory::destroyInstance(); 
+   
    delete fLogger;
 }
 
@@ -91,6 +92,9 @@ TMVA::DataSetInfo* TMVA::DataSetManager::GetDataSetInfo(const TString& dsiName)
 TMVA::DataSetInfo& TMVA::DataSetManager::AddDataSetInfo(DataSetInfo& dsi) 
 {
    // stores a copy of the dataset info object
+
+   dsi.SetDataSetManager( this ); // DSMTEST
+
    DataSetInfo * dsiInList = GetDataSetInfo(dsi.GetName());
    if (dsiInList!=0) return *dsiInList;
    fDataSetInfoCollection.Add( const_cast<DataSetInfo*>(&dsi) );
diff --git a/tmva/src/DecisionTree.cxx b/tmva/src/DecisionTree.cxx
index f8fd0281f408b0b47c670b8b767ba4f43d3f49cd..9c0e7f97f129555fcc38a6727ae3adb8cf490726 100644
--- a/tmva/src/DecisionTree.cxx
+++ b/tmva/src/DecisionTree.cxx
@@ -99,19 +99,18 @@ TMVA::DecisionTree::DecisionTree():
    fMyTrandom  (NULL),
    fNNodesMax(999999),
    fMaxDepth(999999),
+   fClass(0),
    fTreeID(0)
 {
    // default constructor using the GiniIndex as separation criterion, 
   // no restrictions on minimum number of events in a leaf node or the
    // separation gain in the node splitting
-   
-   fLogger->SetSource( "DecisionTree" );
 }
 
 //_______________________________________________________________________
-TMVA::DecisionTree::DecisionTree( TMVA::SeparationBase *sepType,Int_t minSize, Int_t nCuts, 
+TMVA::DecisionTree::DecisionTree( TMVA::SeparationBase *sepType,Int_t minSize, Int_t nCuts, UInt_t cls, 
                                   Bool_t randomisedTree, Int_t useNvars, UInt_t nNodesMax,
-                                  UInt_t nMaxDepth, Int_t iSeed, Float_t purityLimit, Int_t treeID ):
+                                  UInt_t nMaxDepth, Int_t iSeed, Float_t purityLimit, Int_t treeID):
    BinaryTree(),
    fNvars          (0),
    fNCuts          (nCuts),
@@ -124,6 +123,7 @@ TMVA::DecisionTree::DecisionTree( TMVA::SeparationBase *sepType,Int_t minSize, I
    fMyTrandom      (new TRandom3(iSeed)),
    fNNodesMax      (nNodesMax),
    fMaxDepth       (nMaxDepth),
+   fClass          (cls),
    fTreeID         (treeID)
 {
    // constructor specifying the separation type, the min number of
@@ -131,8 +131,6 @@ TMVA::DecisionTree::DecisionTree( TMVA::SeparationBase *sepType,Int_t minSize, I
    // number of bins in the grid used in applying the cut for the node
    // splitting.
    
-   fLogger->SetSource( "DecisionTree" );
-
    if (sepType == NULL) { // it is interpreted as a regression tree, where
                           // currently the separation type (simple least square)
                           // cannot be chosen freely)
@@ -140,7 +138,7 @@ TMVA::DecisionTree::DecisionTree( TMVA::SeparationBase *sepType,Int_t minSize, I
       fRegType = new RegressionVariance();
       if ( nCuts <=0 ) {
          fNCuts = 200;
-         Log() << kWarning << " You had choosen the training mode using optimal cuts, not\n"
+         Log() << kWARNING << " You had chosen the training mode using optimal cuts, not\n"
                << " based on a grid of " << fNCuts << " by setting the option NCuts < 0\n"
                << " as this doesn't exist yet, I set it to " << fNCuts << " and use the grid"
                << Endl;
@@ -164,15 +162,15 @@ TMVA::DecisionTree::DecisionTree( const DecisionTree &d ):
    fMyTrandom      (new TRandom3(fgRandomSeed)),  // well, that means it's not an identical copy. But I only ever intend to really copy trees that are "outgrown" already. 
    fNNodesMax  (d.fNNodesMax),
    fMaxDepth   (d.fMaxDepth),
+   fClass      (d.fClass),
    fTreeID     (d.fTreeID),
    fAnalysisType(d.fAnalysisType)
 {
-   // copy constructor that creates a true copy, i.e. a completely independent tree 
-   // the node copy will recursively copy all the nodes 
+   // copy constructor that creates a true copy, i.e. a completely independent tree
+   // the node copy will recursively copy all the nodes
    this->SetRoot( new DecisionTreeNode ( *((DecisionTreeNode*)(d.GetRoot())) ) );
    this->SetParentTreeInNodes();
    fNNodes = d.fNNodes;
-   fLogger->SetSource( "DecisionTree" );
 }
 
 
@@ -180,9 +178,9 @@ TMVA::DecisionTree::DecisionTree( const DecisionTree &d ):
 TMVA::DecisionTree::~DecisionTree()
 {
    // destructor
-  
+
    // desctruction of the tree nodes done in the "base class" BinaryTree
-  
+
    if (fMyTrandom) delete fMyTrandom;
 }
 
@@ -190,24 +188,24 @@ TMVA::DecisionTree::~DecisionTree()
 void TMVA::DecisionTree::SetParentTreeInNodes( DecisionTreeNode *n )
 {
    // descend a tree to find all its leaf nodes, fill max depth reached in the
-   // tree at the same time. 
-  
+   // tree at the same time.
+
    if (n == NULL) { //default, start at the tree top, then descend recursively
       n = (DecisionTreeNode*) this->GetRoot();
       if (n == NULL) {
          Log() << kFATAL << "SetParentTreeNodes: started with undefined ROOT node" <<Endl;
          return ;
       }
-   } 
-  
+   }
+
    if ((this->GetLeftDaughter(n) == NULL) && (this->GetRightDaughter(n) != NULL) ) {
       Log() << kFATAL << " Node with only one daughter?? Something went wrong" << Endl;
       return;
    }  else if ((this->GetLeftDaughter(n) != NULL) && (this->GetRightDaughter(n) == NULL) ) {
       Log() << kFATAL << " Node with only one daughter?? Something went wrong" << Endl;
       return;
-   } 
-   else { 
+   }
+   else {
       if (this->GetLeftDaughter(n) != NULL) {
          this->SetParentTreeInNodes( this->GetLeftDaughter(n) );
       }
@@ -222,9 +220,9 @@ void TMVA::DecisionTree::SetParentTreeInNodes( DecisionTreeNode *n )
 
 //_______________________________________________________________________
 UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
-                                      TMVA::DecisionTreeNode *node ) 
+                                      TMVA::DecisionTreeNode *node)
 {
-   // building the decision tree by recursively calling the splitting of 
+   // building the decision tree by recursively calling the splitting of
    // one (root-) node into two daughter nodes (returns the number of nodes)
 
    Bool_t IsRootNode=kFALSE;
@@ -232,14 +230,14 @@ UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
       IsRootNode = kTRUE;
       //start with the root node
       node = new TMVA::DecisionTreeNode();
-      fNNodes = 1;   
+      fNNodes = 1;
       this->SetRoot(node);
       // have to use "s" for start as "r" for "root" would be the same as "r" for "right"
       this->GetRoot()->SetPos('s');
       this->GetRoot()->SetDepth(0);
       this->GetRoot()->SetParentTree(this);
    }
-  
+
    UInt_t nevents = eventSample.size();
 
    if (nevents > 0 ) {
@@ -247,20 +245,20 @@ UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
       fVariableImportance.resize(fNvars);
    }
    else Log() << kFATAL << ":<BuildTree> eventsample Size == 0 " << Endl;
-  
+
    Float_t s=0, b=0;
    Float_t suw=0, buw=0;
    Float_t target=0, target2=0;
    const UInt_t cNvars = fNvars;
-   Float_t *xmin = new Float_t[Int_t(cNvars)]; 
-   Float_t *xmax = new Float_t[Int_t(cNvars)]; 
+   Float_t *xmin = new Float_t[Int_t(cNvars)];
+   Float_t *xmax = new Float_t[Int_t(cNvars)];
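+   // With the multiclass changes, "signal" in this loop means events whose
+   // class index matches this tree's fClass (set in the constructor); all
+   // other classes are counted as background, so each tree effectively sees
+   // a one-vs-rest view of the sample.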
    for (UInt_t iev=0; iev<eventSample.size(); iev++) {
       const TMVA::Event* evt = eventSample[iev];
       const Float_t weight = evt->GetWeight();
-      if (evt->IsSignal()) {
+      if (evt->GetClass() == fClass) {
          s += weight;
          suw += 1;
-      } 
+      }
       else {
          b += weight;
          buw += 1;
@@ -274,14 +272,14 @@ UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
       for (UInt_t ivar=0; ivar<fNvars; ivar++) {
          const Float_t val = evt->GetValue(ivar);
          if (iev==0) xmin[ivar]=xmax[ivar]=val;
-         if (val < xmin[ivar]) xmin[ivar]=val; 
-         if (val > xmax[ivar]) xmax[ivar]=val; 
+         if (val < xmin[ivar]) xmin[ivar]=val;
+         if (val > xmax[ivar]) xmax[ivar]=val;
       }
    }
-  
+
    if (s+b < 0) {
-      Log() << kWARNING << " One of the Decision Tree nodes has negative total number of signal or background events. " 
-            << "(Nsig="<<s<<" Nbkg="<<b<<" Probaby you use a Monte Carlo with negative weights. That should in principle " 
+      Log() << kWARNING << " One of the Decision Tree nodes has negative total number of signal or background events. "
+            << "(Nsig="<<s<<" Nbkg="<<b<<" Probaby you use a Monte Carlo with negative weights. That should in principle "
             << "be fine as long as on average you end up with something positive. For this you have to make sure that the "
             << "minimul number of (unweighted) events demanded for a tree node (currently you use: nEventsMin="<<fMinSize
             << ", you can set this via the BDT option string when booking the classifier) is large enough to allow for "
@@ -290,15 +288,15 @@ UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
             << "with negative weight in the training." << Endl;
       double nBkg=0.;
       for (UInt_t i=0; i<eventSample.size(); i++) {
-         if (!(eventSample[i]->IsSignal())) {
+         if (eventSample[i]->GetClass() != fClass) {
             nBkg += eventSample[i]->GetWeight();
-            std::cout << "Event "<< i<< " has (original) weight: " <<  eventSample[i]->GetWeight()/eventSample[i]->GetBoostWeight() 
-                      << " boostWeight: " << eventSample[i]->GetBoostWeight() << std::endl;
+            Log() << kINFO << "Event "<< i<< " has (original) weight: " <<  eventSample[i]->GetWeight()/eventSample[i]->GetBoostWeight() 
+		  << " boostWeight: " << eventSample[i]->GetBoostWeight() << Endl;
          }
       }
-      std::cout << " that gives in total: " << nBkg<<std::endl;
-   } 
-  
+      Log() << kINFO << " that gives in total: " << nBkg<<Endl;
+   }
+
    node->SetNSigEvents(s);
    node->SetNBkgEvents(b);
    node->SetNSigEvents_unweighted(suw);
@@ -313,7 +311,7 @@ UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
    }
    delete[] xmin;
    delete[] xmax;
-  
+
    // I now demand the minimum number of events for both daughter nodes. Hence if the number
    // of events in the parent node is not at least two times as big, I don't even need to try
    // splitting
@@ -325,9 +323,9 @@ UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
          separationGain = this->TrainNodeFast(eventSample, node);
       else
          separationGain = this->TrainNodeFull(eventSample, node);
-      
-      if (separationGain < std::numeric_limits<double>::epsilon()) { // we could not gain anything, e.g. all events are in one bin, 
-         /// if (separationGain < 0.00000001) { // we could not gain anything, e.g. all events are in one bin, 
+
+      if (separationGain < std::numeric_limits<double>::epsilon()) { // we could not gain anything, e.g. all events are in one bin,
+         /// if (separationGain < 0.00000001) { // we could not gain anything, e.g. all events are in one bin,
          // no cut can actually do anything to improve the node
          // hence, naturally, the current node is a leaf node
          if (DoRegression()) {
@@ -360,7 +358,7 @@ UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
             }
          }
 
-      
+
          // sanity check
          if (leftSample.size() == 0 || rightSample.size() == 0) {
             Log() << kFATAL << "<TrainNode> all events went to the same branch" << Endl
@@ -370,7 +368,7 @@ UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
                   << "--- this should never happen, please write a bug report to Helge.Voss@cern.ch"
                   << Endl;
          }
-      
+
          // continue building daughter nodes for the left and the right eventsample
          TMVA::DecisionTreeNode *rightNode = new TMVA::DecisionTreeNode(node,'r');
          fNNodes++;
@@ -378,14 +376,14 @@ UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
          //         rightNode->SetDepth( node->GetDepth() + 1 );
          rightNode->SetNEvents(nRight);
          rightNode->SetNEvents_unweighted(rightSample.size());
-      
+
          TMVA::DecisionTreeNode *leftNode = new TMVA::DecisionTreeNode(node,'l');
          fNNodes++;
          //         leftNode->SetPos('l');
          //         leftNode->SetDepth( node->GetDepth() + 1 );
          leftNode->SetNEvents(nLeft);
          leftNode->SetNEvents_unweighted(leftSample.size());
-      
+
          node->SetNodeType(0);
          node->SetLeft(leftNode);
          node->SetRight(rightNode);
@@ -393,7 +391,7 @@ UInt_t TMVA::DecisionTree::BuildTree( const vector<TMVA::Event*> & eventSample,
          this->BuildTree(rightSample, rightNode);
          this->BuildTree(leftSample,  leftNode );
       }
-   } 
+   }
    else{ // it is a leaf node
       if (DoRegression()) {
          node->SetSeparationIndex(fRegType->GetSeparationIndex(s+b,target,target2));
@@ -439,13 +437,13 @@ void TMVA::DecisionTree::FillEvent( TMVA::Event & event,
    node->IncrementNEvents( event.GetWeight() );
    node->IncrementNEvents_unweighted( );
   
-   if (event.IsSignal()) {
+   if (event.GetClass() == fClass) {
       node->IncrementNSigEvents( event.GetWeight() );
       node->IncrementNSigEvents_unweighted( );
    } 
    else {
       node->IncrementNBkgEvents( event.GetWeight() );
-      node->IncrementNSigEvents_unweighted( );
+      node->IncrementNBkgEvents_unweighted( );
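+      // (previously this branch incremented the *signal* unweighted counter,
+      // skewing the unweighted background statistics of the node)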
    }
    node->SetSeparationIndex(fSepType->GetSeparationIndex(node->GetNSigEvents(),
                                                          node->GetNBkgEvents()));
@@ -636,7 +634,7 @@ void TMVA::DecisionTree::CheckEventWithPrunedTree( const Event& e ) const
    }
 
    while(current != NULL) {
-      if(e.IsSignal())
+      if(e.GetClass() == fClass)
          current->SetNSValidation(current->GetNSValidation() + e.GetWeight());
       else
          current->SetNBValidation(current->GetNBValidation() + e.GetWeight());
@@ -906,9 +904,8 @@ Float_t TMVA::DecisionTree::TrainNodeFast( const vector<TMVA::Event*> & eventSam
    nTotS=0; nTotB=0;
    nTotS_unWeighted=0; nTotB_unWeighted=0;   
    for (UInt_t iev=0; iev<nevents; iev++) {
-      Bool_t eventType = eventSample[iev]->IsSignal();
       Float_t eventWeight =  eventSample[iev]->GetWeight(); 
-      if (eventType) {
+      if (eventSample[iev]->GetClass() == fClass) {
          nTotS+=eventWeight;
          nTotS_unWeighted++;
       }
@@ -925,7 +922,7 @@ Float_t TMVA::DecisionTree::TrainNodeFast( const vector<TMVA::Event*> & eventSam
             Float_t eventData = eventSample[iev]->GetValue(ivar); 
             // "maximum" is nbins-1 (the "-1" because we start counting from 0 !!
             iBin = TMath::Min(Int_t(nBins-1),TMath::Max(0,int (nBins*(eventData-xmin[ivar])/(xmax[ivar]-xmin[ivar]) ) ));
-            if (eventType) {
+            if (eventSample[iev]->GetClass() == fClass) {
                nSelS[ivar][iBin]+=eventWeight;
                nSelS_unWeighted[ivar][iBin]++;
             } 
@@ -1085,7 +1082,7 @@ Float_t TMVA::DecisionTree::TrainNodeFull( const vector<TMVA::Event*> & eventSam
    // Initialize (un)weighted counters for signal & background
    // Construct a list of event wrappers that point to the original data
    for( vector<TMVA::Event*>::const_iterator it = eventSample.begin(); it != eventSample.end(); ++it ) {
-      if( (*it)->IsSignal() ) { // signal or background event
+      if((*it)->GetClass() == fClass) { // signal or background event
          nTotS += (*it)->GetWeight();
          ++nTotS_unWeighted;
       }
@@ -1128,7 +1125,7 @@ Float_t TMVA::DecisionTree::TrainNodeFull( const vector<TMVA::Event*> & eventSam
       Float_t bkgWeightCtr = 0.0, sigWeightCtr = 0.0;
       vector<TMVA::BDTEventWrapper>::iterator it = bdtEventSample.begin(), it_end = bdtEventSample.end();
       for( ; it != it_end; ++it ) {
-         if( (**it)->IsSignal() ) // specify signal or background event
+         if((**it)->GetClass() == fClass ) // specify signal or background event
             sigWeightCtr += (**it)->GetWeight();
          else 
             bkgWeightCtr += (**it)->GetWeight(); 
@@ -1250,8 +1247,8 @@ Float_t  TMVA::DecisionTree::SamplePurity( vector<TMVA::Event*> eventSample )
   
    Float_t sumsig=0, sumbkg=0, sumtot=0;
    for (UInt_t ievt=0; ievt<eventSample.size(); ievt++) {
-      if (!(eventSample[ievt]->IsSignal())) sumbkg+=eventSample[ievt]->GetWeight();
-      if ((eventSample[ievt]->IsSignal())) sumsig+=eventSample[ievt]->GetWeight();
+      if (eventSample[ievt]->GetClass() != fClass) sumbkg+=eventSample[ievt]->GetWeight();
+      else sumsig+=eventSample[ievt]->GetWeight();
       sumtot+=eventSample[ievt]->GetWeight();
    }
    // sanity check
diff --git a/tmva/src/DecisionTreeNode.cxx b/tmva/src/DecisionTreeNode.cxx
index 2122a8cfbde6dc1533307b0bae33eefc23c81e73..6126d858317a7d2ab4edc07f4650a342d731164d 100644
--- a/tmva/src/DecisionTreeNode.cxx
+++ b/tmva/src/DecisionTreeNode.cxx
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$    
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss, Eckhard von Toerne 
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -14,11 +14,13 @@
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
+ *      Eckhard von Toerne <evt@physik.uni-bonn.de>  - U. of Bonn, Germany        *
  *                                                                                *
- * CopyRight (c) 2005:                                                            *
+ * CopyRight (c) 2009:                                                            *
  *      CERN, Switzerland                                                         * 
  *      U. of Victoria, Canada                                                    * 
  *      MPI-K Heidelberg, Germany                                                 * 
+ *      U. of Bonn, Germany                                                      *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
@@ -48,37 +50,30 @@ using std::string;
 ClassImp(TMVA::DecisionTreeNode)
 
 TMVA::MsgLogger* TMVA::DecisionTreeNode::fgLogger = 0;
-   
+bool     TMVA::DecisionTreeNode::fgIsTraining = false;
+
 //_______________________________________________________________________
 TMVA::DecisionTreeNode::DecisionTreeNode()
    : TMVA::Node(),
      fCutValue(0),
      fCutType ( kTRUE ),
-     fSelector ( -1 ),  
-     fNSigEvents ( 0 ),
-     fNBkgEvents ( 0 ),
-     fNEvents ( -1 ),
-     fNSigEvents_unweighted ( 0 ),
-     fNBkgEvents_unweighted ( 0 ),
-     fNEvents_unweighted ( 0 ),
-     fSeparationIndex (-1 ),
-     fSeparationGain ( -1 ),
+     fSelector ( -1 ),       
      fResponse(-99 ),
      fNodeType (-99 ),
      fSequence ( 0 ),
-     fIsTerminalNode( kFALSE ),
-     fCC(0)
+     fIsTerminalNode( kFALSE )
 {
    // constructor of an essentially "empty" node floating in space
    if (!fgLogger) fgLogger = new TMVA::MsgLogger( "DecisionTreeNode" );
 
-   fNodeR    = 0;      // node resubstitution estimate, R(t)
-   fSubTreeR = 0;      // R(T) = Sum(R(t) : t in ~T)
-   fAlpha    = 0;      // critical alpha for this node
-   fG        = 0;      // minimum alpha in subtree rooted at this node
-   fNTerminal  = 0;      // number of terminal nodes in subtree rooted at this node
-   fNB  = 0;      // sum of weights of background events from the pruning sample in this node
-   fNS  = 0;      // ditto for the signal events
+   if (fgIsTraining){
+      fTrainInfo = new DTNodeTrainingInfo();
+      //std::cout << "Node constructor with TrainingINFO"<<std::endl;
+   }
+   else {
+      //std::cout << "**Node constructor WITHOUT TrainingINFO"<<std::endl;
+      fTrainInfo = 0;
+   }
 }
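+
+// Sketch of how the conditional payload is meant to be driven (an assumption
+// based on this patch; the call site is not shown here): training code sets
+//
+//    TMVA::DecisionTreeNode::fgIsTraining = true;
+//
+// before nodes are created, so every node allocates a DTNodeTrainingInfo;
+// when trees are merely read back for application the flag stays false and
+// the per-node training block is skipped to save memory.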
 
 //_______________________________________________________________________
@@ -87,14 +82,6 @@ TMVA::DecisionTreeNode::DecisionTreeNode(TMVA::Node* p, char pos)
      fCutValue( 0 ),
      fCutType ( kTRUE ),
      fSelector( -1 ),  
-     fNSigEvents ( 0 ),
-     fNBkgEvents ( 0 ),
-     fNEvents ( -1 ),
-     fNSigEvents_unweighted ( 0 ),
-     fNBkgEvents_unweighted ( 0 ),
-     fNEvents_unweighted ( 0 ),
-     fSeparationIndex( -1 ),
-     fSeparationGain ( -1 ),
      fResponse(-99 ),
      fNodeType( -99 ),
      fSequence( 0 ),
@@ -110,13 +97,15 @@ TMVA::DecisionTreeNode::DecisionTreeNode(TMVA::Node* p, char pos)
    } else {
       fSequence =  ((DecisionTreeNode*)p)->GetSequence();
    }      
-   fNodeR    = 0;      // node resubstitution estimate, R(t)
-   fSubTreeR = 0;      // R(T) = Sum(R(t) : t in ~T)
-   fAlpha    = 0;      // critical alpha for this node
-   fG        = 0;      // minimum alpha in subtree rooted at this node
-   fNTerminal  = 0;      // number of terminal nodes in subtree rooted at this node
-   fNB  = 0;      // sum of weights of background events from the pruning sample in this node
-   fNS  = 0;      // ditto for the signal events
+
+   if (fgIsTraining){
+      fTrainInfo = new DTNodeTrainingInfo();
+      //std::cout << "Node constructor with TrainingINFO"<<std::endl;
+   }
+   else {
+      //std::cout << "**Node constructor WITHOUT TrainingINFO"<<std::endl;
+      fTrainInfo = 0;
+   }
 }
 
 //_______________________________________________________________________
@@ -126,14 +115,6 @@ TMVA::DecisionTreeNode::DecisionTreeNode(const TMVA::DecisionTreeNode &n,
      fCutValue( n.fCutValue ),
      fCutType ( n.fCutType ),
      fSelector( n.fSelector ),  
-     fNSigEvents ( n.fNSigEvents ),
-     fNBkgEvents ( n.fNBkgEvents ),
-     fNEvents ( n.fNEvents ),
-     fNSigEvents_unweighted ( n.fNSigEvents_unweighted ),
-     fNBkgEvents_unweighted ( n.fNBkgEvents_unweighted ),
-     fNEvents_unweighted ( n.fNEvents_unweighted ),
-     fSeparationIndex( n.fSeparationIndex ),
-     fSeparationGain ( n.fSeparationGain ),
      fResponse( n.fResponse ),
      fNodeType( n.fNodeType ),
      fSequence( n.fSequence ),
@@ -150,13 +131,14 @@ TMVA::DecisionTreeNode::DecisionTreeNode(const TMVA::DecisionTreeNode &n,
    if (n.GetRight() == 0 ) this->SetRight(NULL);
    else this->SetRight( new DecisionTreeNode( *((DecisionTreeNode*)(n.GetRight())),this));
    
-   fNodeR    = n.fNodeR; 
-   fSubTreeR = n.fSubTreeR;
-   fAlpha    = n.fAlpha;   
-   fG        = n.fG;      
-   fNTerminal  = n.fNTerminal;
-   fNB  = n.fNB;
-   fNS  = n.fNS;
+   if (fgIsTraining){
+      fTrainInfo = new DTNodeTrainingInfo(*(n.fTrainInfo));
+      //std::cout << "Node constructor with TrainingINFO"<<std::endl;
+   }
+   else {
+      //std::cout << "**Node constructor WITHOUT TrainingINFO"<<std::endl;
+      fTrainInfo = 0;
+   }
 }
 
 
@@ -334,14 +316,14 @@ Bool_t TMVA::DecisionTreeNode::ReadDataRecord( istream& is, UInt_t tmva_Version_
 void TMVA::DecisionTreeNode::ClearNodeAndAllDaughters()
 {
    // clear the nodes (their S/N, Nevents etc), just keep the structure of the tree
-   fNSigEvents=0;
-   fNBkgEvents=0;
-   fNEvents = 0;
-   fNSigEvents_unweighted=0;
-   fNBkgEvents_unweighted=0;
-   fNEvents_unweighted = 0;
-   fSeparationIndex=-1;
-   fSeparationGain=-1;
+   SetNSigEvents(0);
+   SetNBkgEvents(0);
+   SetNEvents(0);
+   SetNSigEvents_unweighted(0);
+   SetNBkgEvents_unweighted(0);
+   SetNEvents_unweighted(0);
+   SetSeparationIndex(-1);
+   SetSeparationGain(-1);
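+   // (the setters are used instead of direct member access because these
+   // statistics now live in the optional fTrainInfo block; presumably they
+   // guard against a missing block, as SetCC() does further down)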
 
    if (this->GetLeft()  != NULL) ((DecisionTreeNode*)(this->GetLeft()))->ClearNodeAndAllDaughters();
    if (this->GetRight() != NULL) ((DecisionTreeNode*)(this->GetRight()))->ClearNodeAndAllDaughters();
@@ -367,11 +349,11 @@ void TMVA::DecisionTreeNode::PrintPrune( ostream& os ) const {
    // printout of the node (can be read in with ReadDataRecord)
 
    os << "----------------------" << std::endl 
-      << "|~T_t| " << fNTerminal << std::endl 
-      << "R(t): " << fNodeR << std::endl 
-      << "R(T_t): " << fSubTreeR << std::endl
-      << "g(t): " << fAlpha << std::endl
-      << "G(t): "  << fG << std::endl;
+      << "|~T_t| " << GetNTerminal() << std::endl 
+      << "R(t): " << GetNodeR() << std::endl 
+      << "R(T_t): " << GetSubTreeR() << std::endl
+      << "g(t): " << GetAlpha() << std::endl
+      << "G(t): "  << GetAlphaMinSubtree() << std::endl;
 }
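+
+// Key to the printout, taken from the former field comments: R(t) is the node
+// resubstitution estimate, R(T_t) the summed estimate of the subtree, g(t)
+// the critical alpha of the node, G(t) the minimum alpha in the subtree
+// rooted here, and |~T_t| the number of terminal nodes below it.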
 
 //_______________________________________________________________________
@@ -385,11 +367,18 @@ void TMVA::DecisionTreeNode::PrintRecPrune( ostream& os ) const {
    }
 }
 
+//_______________________________________________________________________
+void TMVA::DecisionTreeNode::SetCC(Double_t cc) 
+{
+   if (fTrainInfo) fTrainInfo->fCC = cc; 
+   else *fgLogger << kFATAL << "call to SetCC without trainingInfo" << Endl;
+}
+
 //_______________________________________________________________________
 Float_t TMVA::DecisionTreeNode::GetSampleMin(UInt_t ivar) const {
    // return the minimum of variable ivar from the training sample 
    // that pass/end up in this node 
-   if (ivar < fSampleMin.size()) return fSampleMin[ivar];
+   if (fTrainInfo && ivar < fTrainInfo->fSampleMin.size()) return fTrainInfo->fSampleMin[ivar];
    else *fgLogger << kFATAL << "You asked for Min of the event sample in node for variable " 
                  << ivar << " that is out of range" << Endl;
    return -9999;
@@ -399,7 +388,7 @@ Float_t TMVA::DecisionTreeNode::GetSampleMin(UInt_t ivar) const {
 Float_t TMVA::DecisionTreeNode::GetSampleMax(UInt_t ivar) const {
    // return the maximum of variable ivar from the training sample 
    // that pass/end up in this node 
-   if (ivar < fSampleMin.size()) return fSampleMax[ivar];
+   if (fTrainInfo && ivar < fTrainInfo->fSampleMax.size()) return fTrainInfo->fSampleMax[ivar];
    else *fgLogger << kFATAL << "You asked for Max of the event sample in node for variable " 
                  << ivar << " that is out of range" << Endl;
    return 9999;
@@ -409,39 +398,52 @@ Float_t TMVA::DecisionTreeNode::GetSampleMax(UInt_t ivar) const {
 void TMVA::DecisionTreeNode::SetSampleMin(UInt_t ivar, Float_t xmin){
    // set the minimum of variable ivar from the training sample 
    // that pass/end up in this node 
-   if ( ivar >= fSampleMin.size()) fSampleMin.resize(ivar+1);
-   fSampleMin[ivar]=xmin;
+   if ( !fTrainInfo ) return; // nothing to store outside of training
+   if ( ivar >= fTrainInfo->fSampleMin.size()) fTrainInfo->fSampleMin.resize(ivar+1);
+   fTrainInfo->fSampleMin[ivar]=xmin;
 }
 
 //_______________________________________________________________________
 void TMVA::DecisionTreeNode::SetSampleMax(UInt_t ivar, Float_t xmax){
    // set the maximum of variable ivar from the training sample 
    // that pass/end up in this node 
-   if ( ivar >= fSampleMax.size()) fSampleMax.resize(ivar+1);
-   fSampleMax[ivar]=xmax;
+   if ( !fTrainInfo ) return; // nothing to store outside of training
+   if ( ivar >= fTrainInfo->fSampleMax.size()) fTrainInfo->fSampleMax.resize(ivar+1);
+   fTrainInfo->fSampleMax[ivar]=xmax;
 }
 
 //_______________________________________________________________________
 void TMVA::DecisionTreeNode::ReadAttributes(void* node, UInt_t /* tmva_Version_Code */  ) 
 {   
+   Float_t  tempNSigEvents, tempNBkgEvents, tempNEvents;
+   Float_t  tempNSigEvents_unweighted, tempNBkgEvents_unweighted, tempNEvents_unweighted;
+   Float_t  tempSeparationIndex, tempSeparationGain;
+   Double_t tempCC;
 
    // read attribute from xml
    gTools().ReadAttr(node, "Seq",   fSequence               );
    gTools().ReadAttr(node, "IVar",  fSelector               );
    gTools().ReadAttr(node, "Cut",   fCutValue               );
    gTools().ReadAttr(node, "cType", fCutType                );
-   gTools().ReadAttr(node, "nS",    fNSigEvents             );
-   gTools().ReadAttr(node, "nB",    fNBkgEvents             );
-   gTools().ReadAttr(node, "nEv",   fNEvents                );
-   gTools().ReadAttr(node, "nSuw",  fNSigEvents_unweighted  );
-   gTools().ReadAttr(node, "nBuw",  fNBkgEvents_unweighted  );
-   gTools().ReadAttr(node, "nEvuw", fNEvents_unweighted     );
-   gTools().ReadAttr(node, "sepI",  fSeparationIndex        );
-   gTools().ReadAttr(node, "sepG",  fSeparationGain         );
+   gTools().ReadAttr(node, "nS",    tempNSigEvents             );
+   gTools().ReadAttr(node, "nB",    tempNBkgEvents             );
+   gTools().ReadAttr(node, "nEv",   tempNEvents                );
+   gTools().ReadAttr(node, "nSuw",  tempNSigEvents_unweighted  );
+   gTools().ReadAttr(node, "nBuw",  tempNBkgEvents_unweighted  );
+   gTools().ReadAttr(node, "nEvuw", tempNEvents_unweighted     );
+   gTools().ReadAttr(node, "sepI",  tempSeparationIndex        );
+   gTools().ReadAttr(node, "sepG",  tempSeparationGain         );
    gTools().ReadAttr(node, "res",   fResponse               );
    gTools().ReadAttr(node, "rms",   fRMS                    );
    gTools().ReadAttr(node, "nType", fNodeType               );
-   gTools().ReadAttr(node, "CC",    fCC                     );
+   gTools().ReadAttr(node, "CC",    tempCC                  );
+   if (fTrainInfo){
+      SetNSigEvents(tempNSigEvents);
+      SetNBkgEvents(tempNBkgEvents);
+      SetNEvents(tempNEvents);
+      SetNSigEvents_unweighted(tempNSigEvents_unweighted);
+      SetNBkgEvents_unweighted(tempNBkgEvents_unweighted);
+      SetNEvents_unweighted(tempNEvents_unweighted);
+      SetSeparationIndex(tempSeparationIndex);
+      SetSeparationGain(tempSeparationGain);
+      SetCC(tempCC);  
+   }
 }
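+
+// Reading into temporaries keeps the XML attribute layout unchanged while the
+// statistics themselves now live in the optional fTrainInfo block: every
+// attribute is still consumed from the node, but the values are stored only
+// when a DTNodeTrainingInfo exists, and are silently discarded otherwise.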
 
 
diff --git a/tmva/src/Event.cxx b/tmva/src/Event.cxx
index b3d3dc52bb47815fbd945b50d3c09094bf6c1162..1f958db4534694f8ff051ae99875d774b92178e8 100644
--- a/tmva/src/Event.cxx
+++ b/tmva/src/Event.cxx
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$   
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -12,6 +12,7 @@
  *                                                                                *
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch> - CERN, Switzerland           *
  *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *                                                                                *
@@ -46,8 +47,7 @@ TMVA::Event::Event()
      fClass(1),
      fWeight(1.0),
      fBoostWeight(1.0),
-     fDynamic(kFALSE),
-     fSignalClass( 100 ) // TODO: remove this.. see "IsSignal"
+     fDynamic(kFALSE)
 {
   // default constructor
    fgCount++; 
@@ -66,8 +66,7 @@ TMVA::Event::Event( const std::vector<Float_t>& ev,
      fClass(cls),
      fWeight(weight),
      fBoostWeight(boostweight),
-     fDynamic(kFALSE),
-     fSignalClass( 100 ) // TODO: remove this.. see "IsSignal"
+     fDynamic(kFALSE)
 {
    // constructor
    fgCount++;
@@ -87,8 +86,7 @@ TMVA::Event::Event( const std::vector<Float_t>& ev,
      fClass(cls),
      fWeight(weight),
      fBoostWeight(boostweight),
-     fDynamic(kFALSE),
-     fSignalClass( 100 ) // TODO: remove this.. see "IsSignal"
+     fDynamic(kFALSE)
 {
    // constructor
    fgCount++;
@@ -106,8 +104,7 @@ TMVA::Event::Event( const std::vector<Float_t>& ev,
      fClass(cls),
      fWeight(weight),
      fBoostWeight(boostweight),
-     fDynamic(kFALSE),
-     fSignalClass( 100 ) // TODO: remove this.. see "IsSignal"
+     fDynamic(kFALSE)
 {
    // constructor
    fgCount++;
@@ -122,8 +119,7 @@ TMVA::Event::Event( const std::vector<Float_t*>*& evdyn, UInt_t nvar )
      fClass(0),
      fWeight(0),
      fBoostWeight(0),
-     fDynamic(true),
-     fSignalClass( 100 ) // TODO: remove this.. see "IsSignal" ... !!!!!! NOT CLEAR TO ME WHAT VALUE TO SET HERE...
+     fDynamic(true)
 {
    // constructor for single events
    fgValuesDynamic = (std::vector<Float_t*>*) evdyn;
@@ -139,8 +135,7 @@ TMVA::Event::Event( const Event& event )
      fClass(event.fClass),
      fWeight(event.fWeight),
      fBoostWeight(event.fBoostWeight),
-     fDynamic(event.fDynamic),
-     fSignalClass( event.fSignalClass ) // TODO: remove this.. see "IsSignal"
+     fDynamic(event.fDynamic)
 {
    // copy constructor
    fgCount++; 
@@ -182,10 +177,14 @@ void TMVA::Event::CopyVarValues( const Event& other )
 {
    // copies only the variable values
    fValues      = other.fValues;
+   fTargets     = other.fTargets;
+   fSpectators  = other.fSpectators;
+
+   fVariableArrangement = other.fVariableArrangement;
+
    fClass       = other.fClass;
    fWeight      = other.fWeight;
    fBoostWeight = other.fBoostWeight;
-   fSignalClass = other.fSignalClass;      // TODO: remove this.. see "IsSignal"
 }
 
 //____________________________________________________________
@@ -224,10 +223,10 @@ const std::vector<Float_t>& TMVA::Event::GetValues() const
       assert(0);
    }
    if (fDynamic) {
-      if (fgValuesDynamic->size()-GetNSpectators() != fValues.size()) {
-         std::cout << "ERROR Event::GetValues() is trying to change the size of the variable vector, exiting ..." << std::endl;
-         assert(0);
-      }
+//       if (fgValuesDynamic->size()-GetNSpectators() != fValues.size()) {
+//          std::cout << "ERROR Event::GetValues() is trying to change the size of the variable vector, exiting ..." << std::endl;
+//          assert(0);
+//       }
       fValues.clear();
       for (std::vector<Float_t*>::const_iterator it = fgValuesDynamic->begin(); 
            it != fgValuesDynamic->end()-GetNSpectators(); it++) { 
diff --git a/tmva/src/Factory.cxx b/tmva/src/Factory.cxx
index 836b54a7f25a46e24a45e099368d59065a50bd36..83970f55fc44765b858b98e0fe48436ac4db892d 100644
--- a/tmva/src/Factory.cxx
+++ b/tmva/src/Factory.cxx
@@ -1,5 +1,5 @@
 // @(#)Root/tmva $Id$   
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss, Kai Voss 
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -64,6 +64,7 @@
 #include "TMVA/DataSetManager.h"
 #include "TMVA/DataSetInfo.h"
 #include "TMVA/MethodBoost.h"
+#include "TMVA/MethodCategory.h"
 
 #include "TMVA/VariableIdentityTransform.h"
 #include "TMVA/VariableDecorrTransform.h"
@@ -73,6 +74,7 @@
 
 #include "TMVA/ResultsClassification.h"
 #include "TMVA/ResultsRegression.h"
+#include "TMVA/ResultsMulticlass.h"
 
 const Int_t  MinNoTrainingEvents = 10;
 const Int_t  MinNoTestEvents     = 1;
@@ -86,6 +88,7 @@ ClassImp(TMVA::Factory)
 //_______________________________________________________________________
 TMVA::Factory::Factory( TString jobName, TFile* theTargetFile, TString theOption )
   : Configurable          ( theOption ),
+    fDataSetManager       ( NULL ), //DSMTEST
     fDataInputHandler     ( new DataInputHandler ),
     fTransformations      ( "" ),
     fVerbose              ( kFALSE ),
@@ -102,7 +105,9 @@ TMVA::Factory::Factory( TString jobName, TFile* theTargetFile, TString theOption
 
    fgTargetFile = theTargetFile;
 
-   DataSetManager::CreateInstance(*fDataInputHandler);
+//   DataSetManager::CreateInstance(*fDataInputHandler); // DSMTEST removed
+   fDataSetManager = new DataSetManager( *fDataInputHandler ); // DSMTEST 
+
 
    // render silent
    if (gTools().CheckForSilentOption( GetOptions() )) Log().InhibitOutput(); // make sure is silent if wanted to
@@ -176,7 +181,9 @@ TMVA::Factory::~Factory( void )
    delete fDataInputHandler;
 
    // destroy singletons
-   DataSetManager::DestroyInstance();
+//   DataSetManager::DestroyInstance(); // DSMTEST replaced by following line
+   delete fDataSetManager; // DSMTEST
+
    // problem with call of REGISTER_METHOD macro ...
    //   ClassifierFactory::DestroyInstance();
    //   Types::DestroyInstance();
@@ -206,17 +213,20 @@ void TMVA::Factory::SetVerbose( Bool_t v )
 //_______________________________________________________________________
 TMVA::DataSetInfo& TMVA::Factory::AddDataSet( DataSetInfo &dsi )
 {
-   return DataSetManager::Instance().AddDataSetInfo(dsi);
+//   return DataSetManager::Instance().AddDataSetInfo(dsi); // DSMTEST replaced by following line
+   return fDataSetManager->AddDataSetInfo(dsi); // DSMTEST
 }
 
 //_______________________________________________________________________
 TMVA::DataSetInfo& TMVA::Factory::AddDataSet( const TString& dsiName )
 {
-   DataSetInfo* dsi = DataSetManager::Instance().GetDataSetInfo(dsiName);
+//   DataSetInfo* dsi = DataSetManager::Instance().GetDataSetInfo(dsiName); // DSMTEST replaced by following line
+   DataSetInfo* dsi = fDataSetManager->GetDataSetInfo(dsiName); // DSMTEST
 
    if (dsi!=0) return *dsi;
    
-   return DataSetManager::Instance().AddDataSetInfo(*(new DataSetInfo(dsiName)));
+//   return DataSetManager::Instance().AddDataSetInfo(*(new DataSetInfo(dsiName))); // DSMTEST replaced by following line
+   return fDataSetManager->AddDataSetInfo(*(new DataSetInfo(dsiName))); // DSMTEST
 }
 
 
@@ -230,7 +240,7 @@ TTree* TMVA::Factory::CreateEventAssignTrees( const TString& name )
    // create the data assignment tree (for event-wise data assignment by user)
    TTree * assignTree = new TTree( name, name );
    assignTree->Branch( "type",   &fATreeType,   "ATreeType/I" );
-   assignTree->Branch( "weight", &fATreeWeight, "ATreeWeight/I" );
+   assignTree->Branch( "weight", &fATreeWeight, "ATreeWeight/F" );
 
    std::vector<VariableInfo>& vars = DefaultDataSetInfo().GetVariableInfos();
    std::vector<VariableInfo>& tgts = DefaultDataSetInfo().GetTargetInfos();
@@ -381,6 +391,8 @@ void TMVA::Factory::AddTree( TTree* tree, const TString& className, Double_t wei
    if( fAnalysisType == Types::kNoAnalysisType && DefaultDataSetInfo().GetNClasses() > 2 )
       fAnalysisType = Types::kMulticlass;
 
+   Log() << kINFO << "Add Tree " << tree->GetName() << " of type " << className 
+         << " with " << tree->GetEntries() << " events" << Endl;
    DataInput().AddTree(tree, className, weight, cut, tt );
 }
 
@@ -694,12 +706,25 @@ TMVA::MethodBase* TMVA::Factory::BookMethod( TString theMethodName, TString meth
                                                  methodTitle,
                                                  DefaultDataSetInfo(),
                                                  theOption );
-      (dynamic_cast<MethodBoost*>(im))->SetBoostedMethodName( theMethodName );
+      MethodBoost* methBoost = dynamic_cast<MethodBoost*>(im); // DSMTEST divided into two lines
+      if( !methBoost ) // DSMTEST
+         Log() << kFATAL << "Method with type kBoost cannot be cast to MethodBoost. /Factory" << Endl; // DSMTEST
+      methBoost->SetBoostedMethodName( theMethodName ); // DSMTEST check the cast before using the pointer
+      methBoost->fDataSetManager = fDataSetManager; // DSMTEST
+
    }
 
    MethodBase *method = (dynamic_cast<MethodBase*>(im));
 
 
+   // set fDataSetManager if MethodCategory (to enable Category to create datasetinfo objects) // DSMTEST
+   if( method->GetMethodType() == Types::kCategory ){ // DSMTEST
+      MethodCategory *methCat = (dynamic_cast<MethodCategory*>(im)); // DSMTEST
+      if( !methCat ) // DSMTEST
+         Log() << kFATAL << "Method with type kCategory cannot be cast to MethodCategory. /Factory" << Endl; // DSMTEST
+      methCat->fDataSetManager = fDataSetManager; // DSMTEST
+   } // DSMTEST
+
 
    if (!method->HasAnalysisType( fAnalysisType, 
                                  DefaultDataSetInfo().GetNClasses(),
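For orientation, outside the patch: the DSMTEST changes in this file replace the process-wide DataSetManager singleton with an instance owned by each Factory, which is then lent as a raw, non-owning pointer to the composite methods (MethodBoost, MethodCategory) that create DataSetInfo objects of their own. A simplified sketch of that ownership pattern, using hypothetical stand-in classes:

// Hypothetical stand-ins, not the actual TMVA classes.
class DataSetManager { };

struct Method {
   Method() : fDataSetManager(0) {}
   DataSetManager* fDataSetManager;   // borrowed pointer, never deleted here
};

class Factory {
public:
   Factory() : fDataSetManager(new DataSetManager) {}
   ~Factory() { delete fDataSetManager; }   // the Factory is the sole owner
   void Wire(Method& m) { m.fDataSetManager = fDataSetManager; }
private:
   DataSetManager* fDataSetManager;
};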
@@ -841,26 +866,63 @@ void TMVA::Factory::WriteDataInformation()
          }
          delete trClsList;
 
+         TString variables = "";  // per-transformation default, filled below
+
+         VariableTransformBase* transformation = NULL;
          if (trName=='I') {
-            trfs.back()->AddTransformation( new VariableIdentityTransform ( DefaultDataSetInfo() ), idxCls );
+	    transformation = new VariableIdentityTransform ( DefaultDataSetInfo() );
             identityTrHandler = trfs.back();
-         } 
-         else if (trName=='D') {
-            trfs.back()->AddTransformation( new VariableDecorrTransform   ( DefaultDataSetInfo() ), idxCls );
-         } 
-         else if (trName=='P') {
-            trfs.back()->AddTransformation( new VariablePCATransform      ( DefaultDataSetInfo() ), idxCls );
-         } 
-         else if (trName=='G') {
-            trfs.back()->AddTransformation( new VariableGaussTransform    ( DefaultDataSetInfo() ), idxCls );
-         } 
-         else if (trName=='N') {
-            trfs.back()->AddTransformation( new VariableNormalizeTransform( DefaultDataSetInfo() ), idxCls );
-         } 
-         else {
-            Log() << kINFO << "The transformation " << *trfsDefIt << " definition is not valid, the \n"
-                    << "transformation " << trName << " is not known!" << Endl;
-         }
+         }
+         else if (trName == "D" || trName == "Deco" || trName == "Decorrelate"){
+            if( variables.Length() == 0 )
+               variables = "_V_";
+            transformation = new VariableDecorrTransform( DefaultDataSetInfo());
+         }
+         else if (trName == "P" || trName == "PCA"){
+            if( variables.Length() == 0 )
+               variables = "_V_";
+            transformation = new VariablePCATransform   ( DefaultDataSetInfo());
+         }
+         else if (trName == "G" || trName == "Gauss"){
+            if( variables.Length() == 0 )
+               variables = "_V_,_T_";
+            transformation = new VariableGaussTransform ( DefaultDataSetInfo());
+         }
+         else if (trName == "N" || trName == "Norm" || trName == "Normalise" || trName == "Normalize"){
+            if( variables.Length() == 0 )
+               variables = "_V_,_T_";
+            transformation = new VariableNormalizeTransform( DefaultDataSetInfo());
+         }
+         else
+            Log() << kFATAL << "<WriteDataInformation> Variable transform '"
+                  << trName << "' unknown." << Endl;
+
+         if( transformation ){
+            if( variables.Length() == 0 ) variables = "_V_";  // identity transform keeps the default
+            transformation->SelectInput( variables );
+            trfs.back()->AddTransformation(transformation, idxCls);
+         }
+
       }
    }
 
@@ -904,7 +966,8 @@ void TMVA::Factory::TrainAllMethods()
 
    // here the training starts
    Log() << kINFO << "Train all methods for " 
-         << (fAnalysisType == Types::kRegression ? "Regression" : "Classification") << " ..." << Endl;
+         << (fAnalysisType == Types::kRegression ? "Regression" : 
+	     (fAnalysisType == Types::kMulticlass ? "Multiclass" : "Classification") ) << " ..." << Endl;
 
    MVector::iterator itrMethod;
 
@@ -922,7 +985,8 @@ void TMVA::Factory::TrainAllMethods()
       }
 
       Log() << kINFO << "Train method: " << mva->GetMethodName() << " for " 
-            << (fAnalysisType == Types::kRegression ? "Regression" : "Classification") << Endl;
+            << (fAnalysisType == Types::kRegression ? "Regression" : 
+		(fAnalysisType == Types::kMulticlass ? "Multiclass classification" : "Classification")) << Endl;
       mva->TrainMethod();
       Log() << kINFO << "Training finished" << Endl;
    }
@@ -1002,7 +1066,8 @@ void TMVA::Factory::TestAllMethods()
       MethodBase* mva = dynamic_cast<MethodBase*>(*itrMethod);
       Types::EAnalysisType analysisType = mva->GetAnalysisType();
       Log() << kINFO << "Test method: " << mva->GetMethodName() << " for " 
-              << (analysisType == Types::kRegression ? "Regression" : "Classification") << " performance" << Endl;
+              << (analysisType == Types::kRegression ? "Regression" : 
+		  (analysisType == Types::kMulticlass ? "Multiclass classification" : "Classification")) << " performance" << Endl;
       mva->AddOutput( Types::kTesting, analysisType );
    }
 }
@@ -1125,6 +1190,7 @@ void TMVA::Factory::EvaluateAllMethods( void )
    MVector methodsNoCuts; 
 
    Bool_t doRegression = kFALSE;
+   Bool_t doMulticlass = kFALSE;
 
    // iterate over methods and evaluate
    MVector::iterator itrMethod    = fMethods.begin();
@@ -1169,6 +1235,11 @@ void TMVA::Factory::EvaluateAllMethods( void )
          Log() << kINFO << "Write evaluation histograms to file" << Endl;
          theMethod->WriteEvaluationHistosToFile(Types::kTesting);
          theMethod->WriteEvaluationHistosToFile(Types::kTraining);
+      } else if (theMethod->DoMulticlass()) {
+         doMulticlass = kTRUE;
+         Log() << kINFO << "Evaluate multiclass classification method: " << theMethod->GetMethodName() << Endl;
+         theMethod->TestMulticlass();
       } else {
          
          Log() << kINFO << "Evaluate classifier: " << theMethod->GetMethodName() << Endl;
@@ -1245,9 +1316,11 @@ void TMVA::Factory::EvaluateAllMethods( void )
       rmstrainT[0]  = vtmp[15];
       minftestT[0]  = vtmp[16];
       minftrainT[0] = vtmp[17];
-   }
-   // now sort the variables according to the best 'eff at Beff=0.10'
-   else {
+   } else if( doMulticlass ) {
+      // do nothing for the moment
+      // TODO: fill in something meaningful
+   } else {
+      // now sort the variables according to the best 'eff at Beff=0.10'
       for (Int_t k=0; k<2; k++) {
          std::vector< std::vector<Double_t> > vtemp;
          vtemp.push_back( effArea[k] );  // this is the vector that is ranked
@@ -1291,7 +1364,7 @@ void TMVA::Factory::EvaluateAllMethods( void )
    
    const Int_t nmeth = methodsNoCuts.size();
    const Int_t nvar  = DefaultDataSetInfo().GetNVariables();
-   if (!doRegression) {
+   if (!doRegression && !doMulticlass ) {
 
       if (nmeth > 0) {
 
@@ -1493,7 +1566,9 @@ void TMVA::Factory::EvaluateAllMethods( void )
       Log() << kINFO << hLine << Endl;
       Log() << kINFO << Endl;
    }
-   else {
+   else if( doMulticlass ){
+      // TODO: add a multiclass evaluation summary here
+   } else {
       Log() << Endl;
       TString hLine = "--------------------------------------------------------------------------------";
       Log() << kINFO << "Evaluation results ranked by best signal efficiency and purity (area)" << Endl;
diff --git a/tmva/src/FitterBase.cxx b/tmva/src/FitterBase.cxx
index e706de0b62dd8f3ec6a374f7faac011065126929..2ef6628c3295f02826c1d7500f4126f2ebc8106b 100644
--- a/tmva/src/FitterBase.cxx
+++ b/tmva/src/FitterBase.cxx
@@ -57,7 +57,7 @@ TMVA::FitterBase::FitterBase( IFitterTarget& target,
      fFitterTarget( target ),
      fRanges( ranges ),
      fNpars( ranges.size() ),
-     fLogger( new MsgLogger(this) ),
+     fLogger( new MsgLogger("FitterBase", kINFO) ),
      fClassName( name )
 {
    // constructor   
diff --git a/tmva/src/GiniIndex.cxx b/tmva/src/GiniIndex.cxx
index 0cdd52a9d0214d63998430daf546387d0d052e86..8b70ba3fa7f61f1c26dee3655234b6105e537e40 100644
--- a/tmva/src/GiniIndex.cxx
+++ b/tmva/src/GiniIndex.cxx
@@ -53,6 +53,9 @@ Double_t TMVA::GiniIndex::GetSeparationIndex( const Double_t &s, const Double_t
    //            c(k) is the number of elements that belong to class k     
    //     for just Signal and Background classes this boils down to:       
    //     Gini(Sample) = 2s*b/(s+b)^2    ( = 2 * purity * (1-purity) )                                     
+   //   
+   // Note: what is computed here is 2*Gini; for the later use the
+   //       constant factor 2 is irrelevant, so this multiplication is saved
 
    if (s+b <= 0)      return 0;
    if (s<=0 || b <=0) return 0;
diff --git a/tmva/src/IMetric.cxx b/tmva/src/IMetric.cxx
deleted file mode 100644
index ba2d360fd3bd3bbd8855567a6e64b53603a465d8..0000000000000000000000000000000000000000
--- a/tmva/src/IMetric.cxx
+++ /dev/null
@@ -1,42 +0,0 @@
-// @(#)root/tmva $Id$ 
-// Author: Andreas Hoecker, Peter Speckmayer
-
-/**********************************************************************************
- * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
- * Package: TMVA                                                                  *
- * Class  : IMetric                                                         *
- * Web    : http://tmva.sourceforge.net                                           *
- *                                                                                *
- * Description:                                                                   *
- *      Implementation                                                            *
- *                                                                                *
- * Authors (alphabetical):                                                        *
- *      Peter Speckmayer <speckmay@mail.cern.ch> - CERN, Switzerland              *
- *                                                                                *
- * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
- *                                                                                *
- * Redistribution and use in source and binary forms, with or without             *
- * modification, are permitted according to the terms listed in LICENSE           *
- * (http://tmva.sourceforge.net/LICENSE)                                          *
- **********************************************************************************/
-
-//_______________________________________________________________________
-//                                                                      
-// interface for a metric
-//
-//_______________________________________________________________________
-
-#include "TMVA/IMetric.h"
-
-ClassImp(TMVA::IMetric)
-
-//_______________________________________________________________________
-TMVA::IMetric::IMetric() 
-   : fParameters( NULL )
-{
-   // constructor
-}            
-
-
diff --git a/tmva/src/Interval.cxx b/tmva/src/Interval.cxx
index 2d84a7ba6d0fbc02c80dcf4c6926803781e9a351..b3c90ff8b00976de026c599452cbd222308c5ad1 100644
--- a/tmva/src/Interval.cxx
+++ b/tmva/src/Interval.cxx
@@ -37,13 +37,16 @@
 
 ClassImp(TMVA::Interval)
 
+TMVA::MsgLogger* TMVA::Interval::fgLogger = 0;
+
 //_______________________________________________________________________
 TMVA::Interval::Interval( Double_t min, Double_t max, Int_t nbins ) : 
    fMin(min),
    fMax(max),
-   fNbins(nbins),
-   fLogger( new MsgLogger("Interval") )
+   fNbins(nbins)
 {
+   if (!fgLogger) fgLogger = new MsgLogger("Interval");
+
    // defines minimum and maximum of an interval
    // when nbins == 0, interval describes a discrete distribution (equally distributed in the interval)
    // when nbins > 0, interval describes a continous interval
@@ -62,15 +65,15 @@ TMVA::Interval::Interval( Double_t min, Double_t max, Int_t nbins ) :
 TMVA::Interval::Interval( const Interval& other ) :
    fMin  ( other.fMin ),
    fMax  ( other.fMax ),
-   fNbins( other.fNbins ),
-   fLogger( new MsgLogger("Interval") )
-{}
+   fNbins( other.fNbins )
+{
+   if (!fgLogger) fgLogger = new MsgLogger("Interval");
+}
 
 //_______________________________________________________________________
 TMVA::Interval::~Interval()
 {
    // destructor
-   delete fLogger;
 }
 
 //_______________________________________________________________________
diff --git a/tmva/src/MethodANNBase.cxx b/tmva/src/MethodANNBase.cxx
index 8b7ab16d2363ccf4cee89ee87e0e6eed71557182..122b26dff9158e2a7dacdd8ebb1c8334be7cece6 100644
--- a/tmva/src/MethodANNBase.cxx
+++ b/tmva/src/MethodANNBase.cxx
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Matt Jachowski
+// Author: Andreas Hoecker, Peter Speckmayer, Matt Jachowski
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -12,17 +12,14 @@
  *      from background.                                                          *
  *                                                                                *
  * Authors (alphabetical):                                                        *
- *      Andreas Hoecker  <Andreas.Hocker@cern.ch>   - CERN, Switzerland           *
- *      Matt Jachowski   <jachowski@stanford.edu>   - Stanford University, USA    *
- *      Joerg Stelzer    <stelzer@cern.ch>          - DESY, Germany               *
- *      Peter Speckmayer <peter.speckmayer@cern.ch> - CERN, Switzerland           *
- *                                                                                *
- * Small changes (regression):                                                    *
- *      Krzysztof Danielowski <danielow@cern.ch>  - IFJ PAN & AGH, Poland         *
- *      Kamil Kraszewski      <kalq@cern.ch>      - IFJ PAN & UJ , Poland         *
- *      Maciej Kruk           <mkruk@cern.ch>     - IFJ PAN & AGH, Poland         *
- *                                                                                *
- *                                                                                *
+ *      Krzysztof Danielowski <danielow@cern.ch>       - IFJ & AGH, Poland        *
+ *      Andreas Hoecker       <Andreas.Hocker@cern.ch> - CERN, Switzerland        *
+ *      Matt Jachowski        <jachowski@stanford.edu> - Stanford University, USA *
+ *      Kamil Kraszewski      <kalq@cern.ch>           - IFJ & UJ, Poland         *
+ *      Maciej Kruk           <mkruk@cern.ch>          - IFJ & AGH, Poland        *
+ *      Peter Speckmayer      <peter.speckmayer@cern.ch> - CERN, Switzerland      *
+ *      Joerg Stelzer         <stelzer@cern.ch>        - DESY, Germany            *
+ *      Jiahang Zhong         <Jiahang.Zhong@cern.ch>  - Academia Sinica, Taipei  *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
  *      CERN, Switzerland                                                         *
@@ -33,13 +30,14 @@
  **********************************************************************************/
 
 //_______________________________________________________________________
-//                                                                      
-// Base class for all TMVA methods using artificial neural networks      
-//                                                                      
+//
+// Base class for all TMVA methods using artificial neural networks
+//
 //_______________________________________________________________________
 
 #include <vector>
 #include <cstdlib>
+#include <stdexcept>
 
 #include "TString.h"
 #include "TTree.h"
@@ -48,7 +46,6 @@
 #include "TRandom3.h"
 #include "TH2F.h"
 #include "TH1.h"
-#include "TXMLEngine.h"
 
 #include "TMVA/MethodBase.h"
 #include "TMVA/MethodANNBase.h"
@@ -110,6 +107,13 @@ void TMVA::MethodANNBase::DeclareOptions()
    DeclareOptionRef( fNcycles    = 500,       "NCycles",         "Number of training cycles" );
    DeclareOptionRef( fLayerSpec  = "N,N-1",   "HiddenLayers",    "Specification of hidden layer architecture" );
    DeclareOptionRef( fNeuronType = "sigmoid", "NeuronType",      "Neuron activation function type" );
+   DeclareOptionRef( fRandomSeed = 0, "RandomSeed", "Random seed for initial synapse weights (0 means unique seed for each run)");
+
+   DeclareOptionRef(fEstimatorS="MSE", "EstimatorType",
+                    "MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood" ); //zjh
+   AddPreDefVal(TString("MSE"));  //zjh
+   AddPreDefVal(TString("CE"));   //zjh
+
 
    TActivationChooser aChooser;
    vector<TString>* names = aChooser.GetAllActivationNames();
@@ -131,6 +135,9 @@ void TMVA::MethodANNBase::DeclareOptions()
 void TMVA::MethodANNBase::ProcessOptions()
 {
    // do nothing specific at this moment
+   if ( DoRegression() || DoMulticlass() ) fEstimatorS = "MSE"; //zjh
+   if      (fEstimatorS == "MSE") fEstimator = kMSE; //zjh (to test all others)
+   else if (fEstimatorS == "CE")  fEstimator = kCE;  //zjh
    vector<Int_t>* layout = ParseLayoutString(fLayerSpec);
    BuildNetwork(layout);
 }
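For illustration, outside the patch: the new EstimatorType option ties the error estimator to the output activation chosen in BuildNetwork further down, a linear output for MSE and a sigmoid output for CE. A hedged sketch of the two error functions the option names, for targets t and network outputs y:

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch only: mean-square error (Gaussian likelihood) versus
// cross-entropy (Bernoulli likelihood; expects y in (0,1), hence sigmoid).
double MseError(const std::vector<double>& y, const std::vector<double>& t)
{
   double e = 0.;
   for (std::size_t i = 0; i < y.size(); ++i)
      e += (y[i] - t[i]) * (y[i] - t[i]);
   return e;
}

double CeError(const std::vector<double>& y, const std::vector<double>& t)
{
   double e = 0.;
   for (std::size_t i = 0; i < y.size(); ++i)
      e += -t[i] * std::log(y[i]) - (1. - t[i]) * std::log(1. - y[i]);
   return e;
}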
@@ -158,7 +165,9 @@ vector<Int_t>* TMVA::MethodANNBase::ParseLayoutString(TString layerSpec)
       layout->push_back(nNodes);
    }
    if( DoRegression() )
-      layout->push_back( DataInfo().GetNTargets() );  // one output node
+      layout->push_back( DataInfo().GetNTargets() );  // one output node for each target
+   else if( DoMulticlass() )
+      layout->push_back( DataInfo().GetNClasses() );  // one output node for each class
    else
       layout->push_back(1);  // one output node (for signal/background classification)
 
@@ -177,6 +186,7 @@ void TMVA::MethodANNBase::InitANNBase()
    fNetwork         = NULL;
    frgen            = NULL;
    fActivation      = NULL;
+   fOutput          = NULL; //zjh
    fIdentity        = NULL;
    fInputCalculator = NULL;
    fSynapses        = NULL;
@@ -192,8 +202,7 @@ void TMVA::MethodANNBase::InitANNBase()
    fInputLayer = NULL;
    fOutputNeurons.clear();
 
-   if (fgFIXED_SEED) frgen = new TRandom3(1);   // fix output for debugging
-   else              frgen = new TRandom3(0);   // seed = 0 means random seed
+   frgen = new TRandom3(fRandomSeed);
 
    fSynapses = new TObjArray();
 }
@@ -222,6 +231,7 @@ void TMVA::MethodANNBase::DeleteNetwork()
 
    if (frgen != NULL)            delete frgen;
    if (fActivation != NULL)      delete fActivation;
+   if (fOutput != NULL)          delete fOutput;  //zjh
    if (fIdentity != NULL)        delete fIdentity;
    if (fInputCalculator != NULL) delete fInputCalculator;
    if (fSynapses != NULL)        delete fSynapses;
@@ -229,6 +239,7 @@ void TMVA::MethodANNBase::DeleteNetwork()
    fNetwork         = NULL;
    frgen            = NULL;
    fActivation      = NULL;
+   fOutput          = NULL; //zjh
    fIdentity        = NULL;
    fInputCalculator = NULL;
    fSynapses        = NULL;
@@ -254,6 +265,14 @@ void TMVA::MethodANNBase::BuildNetwork( vector<Int_t>* layout, vector<Double_t>*
    // build network given a layout (number of neurons in each layer)
    // and optional weights array
 
+   if (fEstimator!=kMSE && fEstimator!=kCE) {
+      if      (fEstimatorS == "MSE") fEstimator = kMSE; //zjh
+      else if (fEstimatorS == "CE")  fEstimator = kCE;  //zjh
+   }
+   if (fEstimator!=kMSE && fEstimator!=kCE)
+      Log() << kWARNING << "Estimator type unspecified" << Endl; //zjh
+
    Log() << kINFO << "Building Network" << Endl;
 
    DeleteNetwork();
@@ -263,10 +282,14 @@ void TMVA::MethodANNBase::BuildNetwork( vector<Int_t>* layout, vector<Double_t>*
    TActivationChooser aChooser;
    fActivation = aChooser.CreateActivation(fNeuronType);
    fIdentity   = aChooser.CreateActivation("linear");
+   if (fEstimator==kMSE)  fOutput = aChooser.CreateActivation("linear");  //zjh
+   else if (fEstimator==kCE)   fOutput = aChooser.CreateActivation("sigmoid"); //zjh
    TNeuronInputChooser iChooser;
    fInputCalculator = iChooser.CreateNeuronInput(fNeuronInputType);
 
    fNetwork = new TObjArray();
+   fRegulatorIdx.clear();		//zjh
+   fRegulators.clear();			//zjh
    BuildLayers( layout, fromFile );
 
    // cache input layer and output neuron for fast access
@@ -299,18 +322,21 @@ void TMVA::MethodANNBase::BuildLayers( vector<Int_t>* layout, Bool_t fromFile )
    }
 
    // cache pointers to synapses for fast access, the order matters
-   for (Int_t i = 0; i < numLayers; i++) {                                       
-      TObjArray* layer = (TObjArray*)fNetwork->At(i);                             
-      Int_t numNeurons = layer->GetEntriesFast();                                 
-      for (Int_t j = 0; j < numNeurons; j++) {                                    
-         TNeuron* neuron = (TNeuron*)layer->At(j);                                 
-         Int_t numSynapses = neuron->NumPostLinks();                               
-         for (Int_t k = 0; k < numSynapses; k++) {                                 
-            TSynapse* synapse = neuron->PostLinkAt(k);                              
-            fSynapses->Add(synapse);                                   
-         }                                                                         
-      }                                                                           
-   }  
+   for (Int_t i = 0; i < numLayers; i++) {
+      TObjArray* layer = (TObjArray*)fNetwork->At(i);
+      Int_t numNeurons = layer->GetEntriesFast();
+      if (i!=0 && i!=numLayers-1) fRegulators.push_back(0.);  //zjh
+      for (Int_t j = 0; j < numNeurons; j++) {
+         if (i==0) fRegulators.push_back(0.);			//zjh
+         TNeuron* neuron = (TNeuron*)layer->At(j);
+         Int_t numSynapses = neuron->NumPostLinks();
+         for (Int_t k = 0; k < numSynapses; k++) {
+            TSynapse* synapse = neuron->PostLinkAt(k);
+            fSynapses->Add(synapse);
+            fRegulatorIdx.push_back(fRegulators.size()-1);	//zjh
+         }
+      }
+   }
 }
 
 //______________________________________________________________________________
@@ -343,7 +369,7 @@ void TMVA::MethodANNBase::BuildLayer( Int_t numNeurons, TObjArray* curLayer,
             // output layer
             if (layerIndex == numLayers-1) {
                neuron->SetOutputNeuron();
-               neuron->SetActivationEqn(fIdentity);
+               neuron->SetActivationEqn(fOutput);     //zjh
          }
             // hidden layers
             else neuron->SetActivationEqn(fActivation);
@@ -483,7 +509,7 @@ void TMVA::MethodANNBase::PrintNetwork() const
    // print network representation, for debugging
    if (!Debug()) return;
 
-   Log() << Endl;
+   Log() << kINFO << Endl;
    PrintMessage( "printing network " );
    Log() << kINFO << "-------------------------------------------------------------------" << Endl;
 
@@ -596,25 +622,56 @@ const std::vector<Float_t> &TMVA::MethodANNBase::GetRegressionValues()
    return *fRegressionReturnVal;
 }
 
+//_______________________________________________________________________
+const std::vector<Float_t> &TMVA::MethodANNBase::GetMulticlassValues() 
+{
+   // get the multiclass classification values generated by the NN
+   TNeuron* neuron;
+
+   TObjArray* inputLayer = (TObjArray*)fNetwork->At(0);
+
+   const Event * ev = GetEvent();
+
+   for (UInt_t i = 0; i < GetNvar(); i++) {
+      neuron = (TNeuron*)inputLayer->At(i);
+      neuron->ForceValue( ev->GetValue(i) );
+   }
+   ForceNetworkCalculations();
+
+   // check the output of the network
+   TObjArray* outputLayer = (TObjArray*)fNetwork->At( fNetwork->GetEntriesFast()-1 );
+
+   if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
+   fMulticlassReturnVal->clear();
+
+   for (UInt_t itgt = 0, itgtEnd = DataInfo().GetNClasses(); itgt < itgtEnd; itgt++) {
+      fMulticlassReturnVal->push_back( ((TNeuron*)outputLayer->At(itgt))->GetActivationValue() );
+   }
+
+   return *fMulticlassReturnVal;
+}
+
+
 //_______________________________________________________________________
 void TMVA::MethodANNBase::AddWeightsXMLTo( void* parent ) const 
 {
    // create XML description of ANN classifier
    Int_t numLayers = fNetwork->GetEntriesFast();
    void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
-   gTools().xmlengine().NewAttr(wght, 0, "NLayers", gTools().StringFromInt(fNetwork->GetEntriesFast()) );
+   void* xmlLayout = gTools().xmlengine().NewChild(wght, 0, "Layout");
+   gTools().xmlengine().NewAttr(xmlLayout, 0, "NLayers", gTools().StringFromInt(fNetwork->GetEntriesFast()) );
    TString weights = "";
    for (Int_t i = 0; i < numLayers; i++) {
       TObjArray* layer = (TObjArray*)fNetwork->At(i);
       Int_t numNeurons = layer->GetEntriesFast();
-      void* layerxml = gTools().xmlengine().NewChild(wght, 0, "Layer");
+      void* layerxml = gTools().xmlengine().NewChild(xmlLayout, 0, "Layer");
       gTools().xmlengine().NewAttr(layerxml, 0, "Index",    gTools().StringFromInt(i) );
       gTools().xmlengine().NewAttr(layerxml, 0, "NNeurons", gTools().StringFromInt(numNeurons) );
       for (Int_t j = 0; j < numNeurons; j++) {
          TNeuron* neuron = (TNeuron*)layer->At(j);
          Int_t numSynapses = neuron->NumPostLinks();
-         void* neuronxml = gTools().xmlengine().NewChild(layerxml, 0, "Neuron");
-         gTools().xmlengine().NewAttr(neuronxml, 0, "NSynapses", gTools().StringFromInt(numSynapses) );
+         void* neuronxml = gTools().AddChild(layerxml, "Neuron");
+         gTools().AddAttr(neuronxml, "NSynapses", gTools().StringFromInt(numSynapses) );
          if(numSynapses==0) continue;
          stringstream s("");
          s.precision( 16 );
@@ -622,8 +679,42 @@ void TMVA::MethodANNBase::AddWeightsXMLTo( void* parent ) const
             TSynapse* synapse = neuron->PostLinkAt(k);
             s << std::scientific << synapse->GetWeight() << " ";
          }
-         gTools().xmlengine().AddRawLine( neuronxml, s.str().c_str() );
+         gTools().AddRawLine( neuronxml, s.str().c_str() );
+      }
+   }
+
+   // if inverse hessian exists, write inverse hessian to weight file
+   if( fInvHessian.GetNcols()>0 ){
+      void* xmlInvHessian = gTools().xmlengine().NewChild(wght, 0, "InverseHessian");
+
+      // get the matrix dimensions
+      Int_t nElements = fInvHessian.GetNoElements();
+      Int_t nRows     = fInvHessian.GetNrows();
+      Int_t nCols     = fInvHessian.GetNcols();
+      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NElements", gTools().StringFromInt(nElements) );
+      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NRows", gTools().StringFromInt(nRows) );
+      gTools().xmlengine().NewAttr(xmlInvHessian, 0, "NCols", gTools().StringFromInt(nCols) );
+
+      // copy the matrix elements into a plain array
+      Double_t* elements = new Double_t[nElements+10];
+      fInvHessian.GetMatrix2Array( elements );
+
+      // store the matrix elements row-wise
+      Int_t index = 0;
+      for( Int_t row = 0; row < nRows; ++row ){
+         void* xmlRow = gTools().xmlengine().NewChild(xmlInvHessian, 0, "Row");
+         gTools().xmlengine().NewAttr(xmlRow, 0, "Index", gTools().StringFromInt(row) );
+
+         // fill this row with its column entries
+         stringstream s("");
+         s.precision( 16 );
+         for( Int_t col = 0; col < nCols; ++col ){
+            s << std::scientific << (*(elements+index)) << " ";
+            ++index;
+         }
+         gTools().xmlengine().AddRawLine( xmlRow, s.str().c_str() );
       }
+      delete[] elements;
    }
 }
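For illustration, outside the patch: the inverse Hessian is flattened row-wise, each <Row> node carrying one line of whitespace-separated values printed with 16-digit scientific precision, and ReadWeightsFromXML below streams them back in the same order. A standalone sketch of that round trip:

#include <cstddef>
#include <sstream>
#include <string>
#include <vector>

// Sketch only: serialize one matrix row, then parse it back.
std::string RowToString(const std::vector<double>& row)
{
   std::ostringstream s;
   s.precision(16);
   for (std::size_t i = 0; i < row.size(); ++i)
      s << std::scientific << row[i] << " ";
   return s.str();
}

std::vector<double> RowFromString(const std::string& line, int nCols)
{
   std::istringstream s(line);
   std::vector<double> row(nCols);
   for (int c = 0; c < nCols; ++c)
      s >> row[c];   // same order as written
   return row;
}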
 
@@ -637,53 +728,115 @@ void TMVA::MethodANNBase::ReadWeightsFromXML( void* wghtnode )
    Bool_t fromFile = kTRUE;
    vector<Int_t>* layout = new vector<Int_t>();
 
+   void* xmlLayout = NULL;
+   try{
+      xmlLayout = gTools().GetChild(wghtnode, "Layout");
+   }catch( std::logic_error& ){ // old weight files have no "Layout" node
+      xmlLayout = wghtnode;
+   }
+   if( !xmlLayout ){
+      Log() << kFATAL << "xml node of layout is empty" << Endl;
+   }
+   
+
    UInt_t nLayers;
-   gTools().ReadAttr( wghtnode, "NLayers", nLayers );
+   gTools().ReadAttr( xmlLayout, "NLayers", nLayers );
    layout->resize( nLayers );
 
-   void* ch = gTools().xmlengine().GetChild(wghtnode);
+   void* ch = gTools().xmlengine().GetChild(xmlLayout);
    UInt_t index;
    UInt_t nNeurons;
    while (ch) {
       gTools().ReadAttr( ch, "Index",   index   );
       gTools().ReadAttr( ch, "NNeurons", nNeurons );
       layout->at(index) = nNeurons;
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 
    BuildNetwork( layout, NULL, fromFile );
    // fill the weights of the synapses
    UInt_t nSyn;
    Float_t weight;
-   ch = gTools().xmlengine().GetChild(wghtnode);
+   ch = gTools().xmlengine().GetChild(xmlLayout);
    UInt_t iLayer = 0;
    while (ch) {  // layers
       TObjArray* layer = (TObjArray*)fNetwork->At(iLayer);
       gTools().ReadAttr( ch, "Index",   index   );
       gTools().ReadAttr( ch, "NNeurons", nNeurons );
 
-      void* nodeN = gTools().xmlengine().GetChild(ch);
+      void* nodeN = gTools().GetChild(ch);
       UInt_t iNeuron = 0;
       while( nodeN ){ // neurons
          TNeuron *neuron = (TNeuron*)layer->At(iNeuron);
          gTools().ReadAttr( nodeN, "NSynapses", nSyn );
          if( nSyn > 0 ){
-            const char* content = gTools().xmlengine().GetNodeContent(nodeN);
+            const char* content = gTools().GetContent(nodeN);
             std::stringstream s(content);
             for (UInt_t iSyn = 0; iSyn<nSyn; iSyn++) { // synapses
-               
+
                TSynapse* synapse = neuron->PostLinkAt(iSyn);
                s >> weight;
                //Log() << kWARNING << neuron << " " << weight <<  Endl;
                synapse->SetWeight(weight);
             }
          }
-         nodeN = gTools().xmlengine().GetNext(nodeN);
+         nodeN = gTools().GetNextChild(nodeN);
          iNeuron++;
       }
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
       iLayer++;
    }
+
+   void* xmlInvHessian = NULL;
+   try{
+      xmlInvHessian = gTools().GetChild(wghtnode, "InverseHessian");
+   }catch( std::logic_error& ){
+      // no inverse hessian available
+      return;  // ------------------ return from subroutine
+   }
+   if( !xmlInvHessian ){
+      Log() << kINFO << "xml node of inverse hessian is empty" << Endl;
+   }
+
+   fUseRegulator = kTRUE;
+
+   Int_t nElements = 0;
+   Int_t nRows     = 0;
+   Int_t nCols     = 0;
+   gTools().ReadAttr( xmlInvHessian, "NElements", nElements );
+   gTools().ReadAttr( xmlInvHessian, "NRows", nRows );
+   gTools().ReadAttr( xmlInvHessian, "NCols", nCols );
+
+   // adjust the matrix dimensions
+   fInvHessian.ResizeTo( nRows, nCols );
+
+   // prepare an array to read in the values
+   Double_t* elements = new Double_t[nElements+10];
+
+   void* xmlRow = gTools().xmlengine().GetChild(xmlInvHessian);
+   Int_t row = 0;
+   index = 0;
+   while (xmlRow) {  // rows
+      gTools().ReadAttr( xmlRow, "Index",   row   );
+
+      const char* content = gTools().xmlengine().GetNodeContent(xmlRow);
+
+      std::stringstream s(content);
+      for (Int_t iCol = 0; iCol<nCols; iCol++) { // columns
+         s >> (*(elements+index));
+         ++index;
+      }
+      xmlRow = gTools().xmlengine().GetNext(xmlRow);
+      ++row;
+   }
+
+   fInvHessian.SetMatrixArray( elements );
+
+   delete[] elements;
 }
 
 
@@ -826,6 +979,7 @@ void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString&
 
    fout << endl;
    fout << "   double ActivationFnc(double x) const;" << endl;
+   fout << "   double OutputActivationFnc(double x) const;" << endl;     //zjh
    fout << endl;
    fout << "   int fLayers;" << endl;
    fout << "   int fLayerSize["<<numLayers<<"];" << endl;
@@ -910,6 +1064,7 @@ void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString&
       fout << "      }" << endl;
       if (i+1 != numLayers-1) // in the last layer no activation function is applied
          fout << "      fWeights[" << i+1 << "][o] = ActivationFnc(fWeights[" << i+1 << "][o]);" << endl;
+      else	fout << "      fWeights[" << i+1 << "][o] = OutputActivationFnc(fWeights[" << i+1 << "][o]);" << endl; //zjh
       fout << "   }" << endl;
    }
    fout << endl;
@@ -919,6 +1074,8 @@ void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString&
    fout << endl;
    TString fncName = className+"::ActivationFnc";
    fActivation->MakeFunction(fout, fncName);
+   fncName = className+"::OutputActivationFnc";  	//zjh
+   fOutput->MakeFunction(fout, fncName); 			//zjh
 
    fout << "   " << endl;
    fout << "// Clean up" << endl;
@@ -927,3 +1084,9 @@ void TMVA::MethodANNBase::MakeClassSpecific( std::ostream& fout, const TString&
    fout << "   // nothing to clear" << endl;
    fout << "}" << endl;
 }
+//_________________________________________________________________________
+Bool_t TMVA::MethodANNBase::Debug() const 
+{ 
+   // return the global debug flag (fgDEBUG)
+   return fgDEBUG; 
+}
diff --git a/tmva/src/MethodBDT.cxx b/tmva/src/MethodBDT.cxx
index 0553d8a0c222a99448634848865f11730603d3b3..c109f6a22c094ef96c0041b994ba066e3972b89e 100644
--- a/tmva/src/MethodBDT.cxx
+++ b/tmva/src/MethodBDT.cxx
@@ -63,13 +63,19 @@
 //
 // Boosting:
 //
-// The idea behind the boosting is, that signal events from the training
-// sample, that end up in a background node (and vice versa) are given a
-// larger weight than events that are in the correct leave node. This
-// results in a re-weighed training event sample, with which then a new
-// decision tree can be developed. The boosting can be applied several
-// times (typically 100-500 times) and one ends up with a set of decision
-// trees (a forest).
+// The idea behind adaptive boosting (AdaBoost) is that signal events
+// from the training sample that end up in a background node
+// (and vice versa) are given a larger weight than events that are in
+// the correct leaf node. This results in a re-weighted training event
+// sample, with which a new decision tree can then be developed.
+// The boosting can be applied several times (typically 100-500 times)
+// and one ends up with a set of decision trees (a forest).
+// Gradient boosting works more like a function expansion approach, where
+// each tree corresponds to a summand. The parameters of each summand (tree)
+// are determined by minimizing an error function (binomial log-
+// likelihood for classification, Huber loss for regression).
+// A greedy algorithm is used, meaning that only one tree is modified
+// at a time, while the other trees stay fixed.
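As a concrete reading of the function-expansion picture above (outside the patch): the gradient-boosted forest output is the plain sum of the per-tree responses, and for classification that sum is squashed into (-1, 1), matching GetGradBoostMVA further down in this file. A sketch:

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch only: sum the per-tree responses (one summand per tree) and map
// the result to an MVA value in (-1, 1) with a scaled sigmoid.
double GradBoostResponse(const std::vector<double>& treeResponses)
{
   double sum = 0.;
   for (std::size_t i = 0; i < treeResponses.size(); ++i)
      sum += treeResponses[i];
   return 2.0 / (1.0 + std::exp(-2.0 * sum)) - 1.0;
}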
 //
 // Bagging:
 //
@@ -121,6 +127,7 @@
 #include "TMVA/CrossEntropy.h"
 #include "TMVA/MisClassificationError.h"
 #include "TMVA/Results.h"
+#include "TMVA/ResultsMulticlass.h"
 
 using std::vector;
 
@@ -157,13 +164,13 @@ TMVA::MethodBDT::MethodBDT( DataSetInfo& theData,
 //_______________________________________________________________________
 Bool_t TMVA::MethodBDT::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets )
 {
-   // BDT can handle classification with 2 classes and regression with one regression-target
-   if( type == Types::kClassification && numberClasses == 2 ) return kTRUE;
+   // BDT can handle classification with multiple classes and regression with one regression-target
+   if (type == Types::kClassification && numberClasses == 2) return kTRUE;
+   if (type == Types::kMulticlass ) return kTRUE;
    if( type == Types::kRegression && numberTargets == 1 ) return kTRUE;
    return kFALSE;
 }
 
-
 //_______________________________________________________________________
 void TMVA::MethodBDT::DeclareOptions()
 {
@@ -383,6 +390,7 @@ void TMVA::MethodBDT::Init( void )
    fUseNTrainEvents = Data()->GetNTrainingEvents();
    fNNodesMax       = 1000000;
    fShrinkage       = 1.0;
+   fSumOfWeights    = 0.0;
 
    // reference cut value to distinguish signal-like from background-like events
    SetSignalReferenceCut( 0 );
@@ -445,6 +453,7 @@ void TMVA::MethodBDT::InitEventSample( void )
 void TMVA::MethodBDT::Train()
 {
    // BDT training
+   TMVA::DecisionTreeNode::fgIsTraining=true;
 
    // fill the STL Vector with the event sample
    InitEventSample();
@@ -456,11 +465,7 @@ void TMVA::MethodBDT::Train()
 
    Log() << kINFO << "Training "<< fNTrees << " Decision Trees ... patience please" << Endl;
 
-   Results* results = Data()->GetResults(GetMethodName(), Types::kTraining, GetAnalysisType());
-
-   // book monitoring histograms (currently for AdaBost, only)
-
-   
+ 
    // weights applied in boosting
    Int_t nBins;
    Double_t xMin,xMax;
@@ -476,35 +481,42 @@ void TMVA::MethodBDT::Train()
       xMax = 1;
       hname="Boost event weights distribution";
    }
-      
+
+   // book monitoring histograms (for AdaBoost only)
+
    TH1* h = new TH1F("BoostWeight",hname,nBins,xMin,xMax);
-   h->SetXTitle("boost weight");
-   results->Store(h, "BoostWeights");
-
-   // weights applied in boosting vs tree number
-   h = new TH1F("BoostWeightVsTree","Boost weights vs tree",fNTrees,0,fNTrees);
-   h->SetXTitle("#tree");
-   h->SetYTitle("boost weight");
-   results->Store(h, "BoostWeightsVsTree");
-
-   // error fraction vs tree number
-   h = new TH1F("ErrFractHist","error fraction vs tree number",fNTrees,0,fNTrees);
-   h->SetXTitle("#tree");
-   h->SetYTitle("error fraction");
-   results->Store(h, "ErrorFrac");
-
-   // nNodesBeforePruning vs tree number
    TH1* nodesBeforePruningVsTree = new TH1I("NodesBeforePruning","nodes before pruning",fNTrees,0,fNTrees);
-   nodesBeforePruningVsTree->SetXTitle("#tree");
-   nodesBeforePruningVsTree->SetYTitle("#tree nodes");
-   results->Store(nodesBeforePruningVsTree);
-
-   // nNodesAfterPruning vs tree number
    TH1* nodesAfterPruningVsTree = new TH1I("NodesAfterPruning","nodes after pruning",fNTrees,0,fNTrees);
-   nodesAfterPruningVsTree->SetXTitle("#tree");
-   nodesAfterPruningVsTree->SetYTitle("#tree nodes");
-   results->Store(nodesAfterPruningVsTree);
 
+   if(!DoMulticlass()){
+      Results* results = Data()->GetResults(GetMethodName(), Types::kTraining, GetAnalysisType());
+
+      h->SetXTitle("boost weight");
+      results->Store(h, "BoostWeights");
+      
+      // weights applied in boosting vs tree number
+      h = new TH1F("BoostWeightVsTree","Boost weights vs tree",fNTrees,0,fNTrees);
+      h->SetXTitle("#tree");
+      h->SetYTitle("boost weight");
+      results->Store(h, "BoostWeightsVsTree");
+      
+      // error fraction vs tree number
+      h = new TH1F("ErrFractHist","error fraction vs tree number",fNTrees,0,fNTrees);
+      h->SetXTitle("#tree");
+      h->SetYTitle("error fraction");
+      results->Store(h, "ErrorFrac");
+      
+      // nNodesBeforePruning vs tree number
+      nodesBeforePruningVsTree->SetXTitle("#tree");
+      nodesBeforePruningVsTree->SetYTitle("#tree nodes");
+      results->Store(nodesBeforePruningVsTree);
+      
+      // nNodesAfterPruning vs tree number
+      nodesAfterPruningVsTree->SetXTitle("#tree");
+      nodesAfterPruningVsTree->SetYTitle("#tree nodes");
+      results->Store(nodesAfterPruningVsTree);
+   }
+   
    fMonitorNtuple= new TTree("MonitorNtuple","BDT variables");
    fMonitorNtuple->Branch("iTree",&fITree,"iTree/I");
    fMonitorNtuple->Branch("boostWeight",&fBoostWeight,"boostWeight/D");
@@ -527,56 +539,73 @@ void TMVA::MethodBDT::Train()
 
    for (int itree=0; itree<fNTrees; itree++) {
       timer.DrawProgressBar( itree );
-
-      fForest.push_back( new DecisionTree( fSepType, fNodeMinEvents, fNCuts,
-                                           fRandomisedTrees, fUseNvars, fNNodesMax, fMaxDepth,
-                                           itree, fNodePurityLimit, itree));
-      if (fBaggedGradBoost) nNodesBeforePruning = fForest.back()->BuildTree(fSubSample);
-      else                  nNodesBeforePruning = fForest.back()->BuildTree(fEventSample);
-
-      if (fBoostType!="Grad")
-         if (fUseYesNoLeaf && !DoRegression() ){ // remove leaf nodes where both daughter nodes are of same type
-            nNodesBeforePruning = fForest.back()->CleanTree();
+      if(DoMulticlass()){
+         if (fBoostType!="Grad"){
+            Log() << kFATAL << "Multiclass is currently only supported by gradient boost. "
+                  << "Please change boost option accordingly (GradBoost)."
+                  << Endl;
+         }
+         UInt_t nClasses = DataInfo().GetNClasses();
+         for (UInt_t i=0;i<nClasses;i++){
+            fForest.push_back( new DecisionTree( fSepType, fNodeMinEvents, fNCuts, i,
+                                                 fRandomisedTrees, fUseNvars, fNNodesMax, fMaxDepth,
+                                                 itree*nClasses+i, fNodePurityLimit, itree*nClasses+i));
+            if (fBaggedGradBoost) nNodesBeforePruning = fForest.back()->BuildTree(fSubSample);
+            else                  nNodesBeforePruning = fForest.back()->BuildTree(fEventSample);  
+            fBoostWeights.push_back(this->Boost(fEventSample, fForest.back(), itree, i));
          }
-      nNodesBeforePruningCount += nNodesBeforePruning;
-      nodesBeforePruningVsTree->SetBinContent(itree+1,nNodesBeforePruning);
-
-      fForest.back()->SetPruneMethod(fPruneMethod); // set the pruning method for the tree
-      fForest.back()->SetPruneStrength(fPruneStrength); // set the strength parameter
-
-      std::vector<Event*> * validationSample = NULL;
-      if(fAutomatic) validationSample = &fValidationSample;
-
-      if(fBoostType=="Grad"){
-         this->Boost(fEventSample, fForest.back(), itree);
       }
-      else {
-         if(!fPruneBeforeBoost) { // only prune after boosting
-            fBoostWeights.push_back( this->Boost(fEventSample, fForest.back(), itree) );
-            // if fAutomatic == true, pruneStrength will be the optimal pruning strength
-            // determined by the pruning algorithm; otherwise, it is simply the strength parameter
-            // set by the user
-            Double_t pruneStrength = fForest.back()->PruneTree(validationSample);
-            alpha->SetBinContent(itree+1,pruneStrength);
+      else{
+         
+         fForest.push_back( new DecisionTree( fSepType, fNodeMinEvents, fNCuts, 0,
+                                              fRandomisedTrees, fUseNvars, fNNodesMax, fMaxDepth,
+                                              itree, fNodePurityLimit, itree));
+         if (fBaggedGradBoost) nNodesBeforePruning = fForest.back()->BuildTree(fSubSample);
+         else                  nNodesBeforePruning = fForest.back()->BuildTree(fEventSample);
+         
+         if (fBoostType!="Grad")
+            if (fUseYesNoLeaf && !DoRegression() ){ // remove leaf nodes where both daughter nodes are of same type
+               nNodesBeforePruning = fForest.back()->CleanTree();
+            }
+         nNodesBeforePruningCount += nNodesBeforePruning;
+         nodesBeforePruningVsTree->SetBinContent(itree+1,nNodesBeforePruning);
+         
+         fForest.back()->SetPruneMethod(fPruneMethod); // set the pruning method for the tree
+         fForest.back()->SetPruneStrength(fPruneStrength); // set the strength parameter
+         
+         std::vector<Event*> * validationSample = NULL;
+         if(fAutomatic) validationSample = &fValidationSample;
+         
+         if(fBoostType=="Grad"){
+            fBoostWeights.push_back(this->Boost(fEventSample, fForest.back(), itree));
          }
-         else { // prune first, then apply a boosting cycle
-            Double_t pruneStrength = fForest.back()->PruneTree(validationSample);
-            alpha->SetBinContent(itree+1,pruneStrength);
-            fBoostWeights.push_back( this->Boost(fEventSample, fForest.back(), itree) );
+         else {
+            if(!fPruneBeforeBoost) { // only prune after boosting
+               fBoostWeights.push_back( this->Boost(fEventSample, fForest.back(), itree) );
+               // if fAutomatic == true, pruneStrength will be the optimal pruning strength
+               // determined by the pruning algorithm; otherwise, it is simply the strength parameter
+               // set by the user
+               Double_t pruneStrength = fForest.back()->PruneTree(validationSample);
+               alpha->SetBinContent(itree+1,pruneStrength);
+            }
+            else { // prune first, then apply a boosting cycle
+               Double_t pruneStrength = fForest.back()->PruneTree(validationSample);
+               alpha->SetBinContent(itree+1,pruneStrength);
+               fBoostWeights.push_back( this->Boost(fEventSample, fForest.back(), itree) );
          }
-         
-         if (fUseYesNoLeaf && !DoRegression() ){ // remove leaf nodes where both daughter nodes are of same type
-            fForest.back()->CleanTree();
+            
+            if (fUseYesNoLeaf && !DoRegression() ){ // remove leaf nodes where both daughter nodes are of same type
+               fForest.back()->CleanTree();
+            }
          }
+         nNodesAfterPruning = fForest.back()->GetNNodes();
+         nNodesAfterPruningCount += nNodesAfterPruning;
+         nodesAfterPruningVsTree->SetBinContent(itree+1,nNodesAfterPruning);
+         
+         fITree = itree;
+         fMonitorNtuple->Fill();
       }
-      nNodesAfterPruning = fForest.back()->GetNNodes();
-      nNodesAfterPruningCount += nNodesAfterPruning;
-      nodesAfterPruningVsTree->SetBinContent(itree+1,nNodesAfterPruning);
-
-      fITree = itree;
-      fMonitorNtuple->Fill();
    }
-
    alpha->Write();
 
    // get elapsed time
@@ -592,6 +621,7 @@ void TMVA::MethodBDT::Train()
             << nNodesAfterPruningCount/fNTrees
             << Endl;
    }
+   TMVA::DecisionTreeNode::fgIsTraining=false;
 }
 
 //_______________________________________________________________________
@@ -601,9 +631,8 @@ void TMVA::MethodBDT::GetRandomSubSample()
    UInt_t nevents = fEventSample.size();
    UInt_t nfraction = static_cast<UInt_t>(fSampleFraction*Data()->GetNTrainingEvents());
 
-   //for (UInt_t i=0; i<fSubSample.size();i++)
    if (fSubSample.size()!=0) fSubSample.clear();
-   TRandom3 *trandom   = new TRandom3(fForest.size());
+   TRandom3 *trandom   = new TRandom3(fForest.size()+1);
 
    for (UInt_t ievt=0; ievt<nfraction; ievt++) { // recreate new random subsample
       fSubSample.push_back(fEventSample[(static_cast<UInt_t>(trandom->Uniform(nevents)-1))]);
@@ -623,18 +652,36 @@ Double_t TMVA::MethodBDT::GetGradBoostMVA(TMVA::Event& e, UInt_t nTrees)
    return 2.0/(1.0+exp(-2.0*sum))-1; //MVA output between -1 and 1
 }
 
-
 //_______________________________________________________________________
-void TMVA::MethodBDT::UpdateTargets(vector<TMVA::Event*> eventSample)
+void TMVA::MethodBDT::UpdateTargets(vector<TMVA::Event*> eventSample, UInt_t cls)
 {
    //Calculate residua for all events;
-   UInt_t iValue=0;
-   for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
-      fBoostWeights[iValue]+=fForest.back()->CheckEvent(*(*e),kFALSE);
-      Double_t p_sig=1.0/(1.0+exp(-2.0*fBoostWeights[iValue]));
-      Double_t res = ((*e)->IsSignal()?1:0)-p_sig;
-      (*e)->SetTarget(0,res);
-      iValue++;
+
+   if(DoMulticlass()){
+      UInt_t nClasses = DataInfo().GetNClasses();
+      for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
+         fResiduals[*e].at(cls)+=fForest.back()->CheckEvent(*(*e),kFALSE);
+         if(cls == nClasses-1){
+            for(UInt_t i=0;i<nClasses;i++){
+               Double_t norm = 0.0;
+               for(UInt_t j=0;j<nClasses;j++){
+                  if(i!=j)
+                     norm+=exp(fResiduals[*e].at(j)-fResiduals[*e].at(i));
+               }
+               Double_t p_cls = 1.0/(1.0+norm);
+               Double_t res = ((*e)->GetClass()==i)?(1.0-p_cls):(-p_cls);
+               (*e)->SetTarget(i,res);
+            }
+         }
+      }
+   }
+   else{
+      for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
+         fResiduals[*e].at(0)+=fForest.back()->CheckEvent(*(*e),kFALSE);
+         Double_t p_sig=1.0/(1.0+exp(-2.0*fResiduals[*e].at(0)));
+         Double_t res = (DataInfo().IsSignal(*e)?1:0)-p_sig;
+         (*e)->SetTarget(0,res);
+      }
    }   
 }
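For clarity, outside the patch: the multiclass branch above computes, per event, the softmax probability of each class from the accumulated forest responses F_j, written in the numerically convenient form 1/(1 + sum over j != i of exp(F_j - F_i)), and sets the new target to indicator minus probability, i.e. the gradient of the multinomial log-likelihood. A standalone sketch:

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch only: residuals res_i = I(class == i) - p_i with softmax p_i.
std::vector<double> MulticlassResiduals(const std::vector<double>& F, std::size_t trueClass)
{
   const std::size_t n = F.size();
   std::vector<double> res(n);
   for (std::size_t i = 0; i < n; ++i) {
      double norm = 0.;
      for (std::size_t j = 0; j < n; ++j)
         if (i != j) norm += std::exp(F[j] - F[i]);
      const double p = 1.0 / (1.0 + norm);   // softmax probability of class i
      res[i] = (trueClass == i ? 1.0 : 0.0) - p;
   }
   return res;
}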
 
@@ -642,74 +689,72 @@ void TMVA::MethodBDT::UpdateTargets(vector<TMVA::Event*> eventSample)
 void TMVA::MethodBDT::UpdateTargetsRegression(vector<TMVA::Event*> eventSample, Bool_t first)
 {
    //Calculate current residuals for all events and update targets for next iteration
-   vector<Double_t> absResiduals;
-   vector< vector<Double_t> > temp;
+   vector< pair<Double_t, Double_t> > temp;
    UInt_t i=0;
    for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
       if(first){
-         fRegResiduals.push_back((*e)->GetTarget(0)-fBoostWeights[i]);
+         fWeightedResiduals[i].first -= fBoostWeights[i];
       }
       else{
-         fRegResiduals[i]-=fForest.back()->CheckEvent(*(*e),kFALSE);
+         fWeightedResiduals[i].first -= fForest.back()->CheckEvent(*(*e),kFALSE);
       }
-      absResiduals.push_back(fabs(fRegResiduals[i]));
+      temp.push_back(make_pair(fabs(fWeightedResiduals[i].first),fWeightedResiduals[i].second));
       i++;
    }
-   temp.push_back(absResiduals);
-   temp.push_back(fInitialWeights);
-   fTransitionPoint = GetWeightedQuantile(temp,0.9);
+   fTransitionPoint = GetWeightedQuantile(temp,0.7,fSumOfWeights);
    i=0;
    for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
-      if(absResiduals[i]<=fTransitionPoint)
-         (*e)->SetTarget(0,fRegResiduals[i]);
+      if(temp[i].first<=fTransitionPoint)
+         (*e)->SetTarget(0,fWeightedResiduals[i].first);
       else
-         (*e)->SetTarget(0,fTransitionPoint*(fRegResiduals[i]<0?-1.0:1.0));
+         (*e)->SetTarget(0,fTransitionPoint*(fWeightedResiduals[i].first<0?-1.0:1.0));
       i++;
    }
 }
 
 //_______________________________________________________________________
-Double_t TMVA::MethodBDT::GetWeightedQuantile(vector<  vector<Double_t> > &vec, const Double_t quantile, const Double_t SumOfWeights){
-   //calculates the quantile of the distribution in vec[0] weighted with the values in vec[1]
-   gTools().UsefulSortAscending( vec );
-   Double_t norm = fSumOfWeights;
-   if(SumOfWeights!=0.0) norm = SumOfWeights;
+Double_t TMVA::MethodBDT::GetWeightedQuantile(vector<  pair<Double_t, Double_t> > vec, const Double_t quantile, const Double_t norm){
+   //calculates the quantile of the distribution of the first pair entries weighted with the values in the second pair entries
    Double_t temp = 0.0;
-   
+   std::sort(vec.begin(), vec.end());
    Int_t i = 0;
    while(temp <= norm*quantile){
-      temp += vec[1][i];
+      temp += vec[i].second;
       i++;
    }
       
-   return vec[0][i];
+   return vec[i].first;
 }
 
 //_______________________________________________________________________
-Double_t TMVA::MethodBDT::GradBoost( vector<TMVA::Event*> eventSample, DecisionTree *dt )
+Double_t TMVA::MethodBDT::GradBoost( vector<TMVA::Event*> eventSample, DecisionTree *dt, UInt_t cls)
 {
-   //Calculate the desired response value for each region (line search)
+   //Calculate the desired response value for each region
    std::map<TMVA::DecisionTreeNode*,vector<Double_t> > leaves;
    for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
+      Double_t weight = (*e)->GetWeight();
       TMVA::DecisionTreeNode* node = dt->GetEventNode(*(*e));
       if ((leaves[node]).size()==0){
-         (leaves[node]).push_back((*e)->GetTarget(0) * (*e)->GetWeight());
-         (leaves[node]).push_back(fabs((*e)->GetTarget(0))*(1.0-fabs((*e)->GetTarget(0))) * (*e)->GetWeight() * (*e)->GetWeight());
+         (leaves[node]).push_back((*e)->GetTarget(cls)* weight);
+         (leaves[node]).push_back(fabs((*e)->GetTarget(cls))*(1.0-fabs((*e)->GetTarget(cls))) * weight* weight);
       }
       else {
-         (leaves[node])[0]+=((*e)->GetTarget(0) * (*e)->GetWeight());
-         (leaves[node])[1]+=fabs((*e)->GetTarget(0))*(1.0-fabs((*e)->GetTarget(0))) *
-            ((*e)->GetWeight()) * ((*e)->GetWeight());
+         (leaves[node])[0]+=((*e)->GetTarget(cls)* weight);
+         (leaves[node])[1]+=fabs((*e)->GetTarget(cls))*(1.0-fabs((*e)->GetTarget(cls))) * weight* weight;
       }
    }
    for (std::map<TMVA::DecisionTreeNode*,vector<Double_t> >::iterator iLeave=leaves.begin();
         iLeave!=leaves.end();++iLeave){
       if ((iLeave->second)[1]<1e-30) (iLeave->second)[1]=1e-30;
 
-      (iLeave->first)->SetResponse(fShrinkage*0.5*(iLeave->second)[0]/((iLeave->second)[1]));
+      (iLeave->first)->SetResponse(fShrinkage/DataInfo().GetNClasses()*(iLeave->second)[0]/((iLeave->second)[1]));
    }
    //call UpdateTargets before next tree is grown
-   UpdateTargets(eventSample);
+   if(DoMulticlass())
+      UpdateTargets(eventSample, cls);
+   else
+      UpdateTargets(eventSample);
    if (fBaggedGradBoost) GetRandomSubSample();
    return 1; //trees all have the same weight
 }
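For clarity, outside the patch: GetWeightedQuantile above sorts the (value, weight) pairs by value and walks up the cumulative weight until it exceeds quantile * norm. A standalone sketch with an explicit bounds check added; the patched loop assumes it always stops before the end of the vector:

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Sketch only: weighted quantile of the values in pair.first,
// weighted by pair.second.
double WeightedQuantile(std::vector<std::pair<double, double> > vec,
                        double quantile, double totalWeight)
{
   std::sort(vec.begin(), vec.end());   // sorts by .first (the value)
   double cum = 0.;
   for (std::size_t i = 0; i < vec.size(); ++i) {
      cum += vec[i].second;
      if (cum > quantile * totalWeight) return vec[i].first;
   }
   return vec.back().first;             // all weight consumed
}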
@@ -718,30 +763,23 @@ Double_t TMVA::MethodBDT::GradBoost( vector<TMVA::Event*> eventSample, DecisionT
 Double_t TMVA::MethodBDT::GradBoostRegression( vector<TMVA::Event*> eventSample, DecisionTree *dt )
 {
    // Implementation of M_TreeBoost using a Huber loss function as described by Friedman 1999
-   std::map<TMVA::DecisionTreeNode*,vector< vector<Double_t> > > leaves;
+   std::map<TMVA::DecisionTreeNode*,Double_t > leaveWeights;
+   std::map<TMVA::DecisionTreeNode*,vector< pair<Double_t, Double_t> > > leaves;
    UInt_t i =0;
    for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
-      TMVA::DecisionTreeNode* node = dt->GetEventNode(*(*e));
-      if(leaves[node].size()==0){
-         (leaves[node]).push_back(vector<Double_t>());
-         (leaves[node]).push_back(vector<Double_t>());
-      }
-      (leaves[node])[0].push_back(fRegResiduals[i]);
-      (leaves[node])[1].push_back((*e)->GetWeight());
+      TMVA::DecisionTreeNode* node = dt->GetEventNode(*(*e));      
+      (leaves[node]).push_back(make_pair(fWeightedResiduals[i].first,(*e)->GetWeight()));
+      (leaveWeights[node]) += (*e)->GetWeight();
       i++;
    }
 
-   for (std::map<TMVA::DecisionTreeNode*,vector<vector<Double_t> > >::iterator iLeave=leaves.begin();
+   for (std::map<TMVA::DecisionTreeNode*,vector< pair<Double_t, Double_t> > >::iterator iLeave=leaves.begin();
         iLeave!=leaves.end();++iLeave){
-      Double_t LeaveWeight = 0;
-      for(UInt_t j=0;j<((iLeave->second)[0].size());j++){
-         LeaveWeight+=((iLeave->second)[1][j]);
-      }
       Double_t shift=0,diff= 0;
-      Double_t ResidualMedian = GetWeightedQuantile(iLeave->second,0.5,LeaveWeight);
-      for(UInt_t j=0;j<((iLeave->second)[0].size());j++){
-         diff = (iLeave->second)[0][j]-ResidualMedian;
-         shift+=1.0/((iLeave->second)[0].size())*((diff<0)?-1.0:1.0)*TMath::Min(fTransitionPoint,fabs(diff));
+      Double_t ResidualMedian = GetWeightedQuantile(iLeave->second,0.5,leaveWeights[iLeave->first]);
+      for(UInt_t j=0;j<((iLeave->second).size());j++){
+         diff = (iLeave->second)[j].first-ResidualMedian;
+         shift+=1.0/((iLeave->second).size())*((diff<0)?-1.0:1.0)*TMath::Min(fTransitionPoint,fabs(diff));
       }
       (iLeave->first)->SetResponse(fShrinkage*(ResidualMedian+shift));
    }
@@ -755,28 +793,33 @@ void TMVA::MethodBDT::InitGradBoost( vector<TMVA::Event*> eventSample)
    // initialize targets for first tree
    fSepType=NULL; // set fSepType to NULL (regression trees are used for both classification and regression)
    if(DoRegression()){
-
-      vector< vector<Double_t> > weightedTargetValues;
-      vector<Double_t> targets;
-       for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
-         targets.push_back((*e)->GetTarget(0));
-         fInitialWeights.push_back((*e)->GetWeight());
+      for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
+         fWeightedResiduals.push_back(make_pair((*e)->GetTarget(0), (*e)->GetWeight()));
          fSumOfWeights+=(*e)->GetWeight();
       }
-      weightedTargetValues.push_back(targets);
-      weightedTargetValues.push_back(fInitialWeights);
-      Double_t weightedMedian = GetWeightedQuantile(weightedTargetValues,0.5);
+      Double_t weightedMedian = GetWeightedQuantile(fWeightedResiduals,0.5, fSumOfWeights);
  
       for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
          fBoostWeights.push_back(weightedMedian);  
       }
       UpdateTargetsRegression(eventSample,kTRUE);
    }
+   else if(DoMulticlass()){
+      UInt_t nClasses = DataInfo().GetNClasses();
+      for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
+         for (UInt_t i=0;i<nClasses;i++){
+            // Calculate initial residuals, assuming equal probability for all classes
+            Double_t r = (*e)->GetClass()==i?(1-1.0/nClasses):(-1.0/nClasses);
+            (*e)->SetTarget(i,r);
+            fResiduals[*e].push_back(0);
+         }
+      }
+   }
    else{
       for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
-         Double_t r = ((*e)->IsSignal()?1:0)-0.5; //Calculate initial residua
+         Double_t r = (DataInfo().IsSignal(*e)?1:0)-0.5; // Calculate initial residuals
          (*e)->SetTarget(0,r);
-         fBoostWeights.push_back(0);
+         fResiduals[*e].push_back(0);
       }
    }
    if (fBaggedGradBoost) GetRandomSubSample(); 
@@ -790,7 +833,7 @@ Double_t TMVA::MethodBDT::TestTreeQuality( DecisionTree *dt )
    for (UInt_t ievt=0; ievt<fValidationSample.size(); ievt++) {
       Bool_t isSignalType= (dt->CheckEvent(*(fValidationSample[ievt])) > fNodePurityLimit ) ? 1 : 0;
 
-      if (isSignalType == ((fValidationSample[ievt])->IsSignal()) ) {
+      if (isSignalType == (DataInfo().IsSignal(fValidationSample[ievt])) ) {
          ncorrect += fValidationSample[ievt]->GetWeight();
       }
       else{
@@ -802,7 +845,7 @@ Double_t TMVA::MethodBDT::TestTreeQuality( DecisionTree *dt )
 }
 
 //_______________________________________________________________________
-Double_t TMVA::MethodBDT::Boost( vector<TMVA::Event*> eventSample, DecisionTree *dt, Int_t iTree )
+Double_t TMVA::MethodBDT::Boost( vector<TMVA::Event*> eventSample, DecisionTree *dt, Int_t iTree, UInt_t cls )
 {
    // apply the boosting algorithm (the algorithm is selected via the "option" given
    // in the constructor. The return value is the boosting weight
@@ -814,6 +857,8 @@ Double_t TMVA::MethodBDT::Boost( vector<TMVA::Event*> eventSample, DecisionTree
    else if (fBoostType=="Grad"){
       if(DoRegression())
          return this->GradBoostRegression(eventSample, dt);
+      else if(DoMulticlass())
+         return this->GradBoost (eventSample, dt, cls);
       else
          return this->GradBoost (eventSample, dt);
    }
@@ -850,8 +895,8 @@ Double_t TMVA::MethodBDT::AdaBoost( vector<TMVA::Event*> eventSample, DecisionTr
          if (tmpDev > maxDev) maxDev = tmpDev;
       }else{
          Bool_t isSignalType = (dt->CheckEvent(*(*e),fUseYesNoLeaf) > fNodePurityLimit );
-         //       if (!(isSignalType == DataInfo().IsSignal((*e)))) {
-         if (!(isSignalType == (*e)->IsSignal())) {
+
+         if (!(isSignalType == DataInfo().IsSignal(*e))) {
             sumwfalse+= w;
          }
       }
@@ -886,9 +931,18 @@ Double_t TMVA::MethodBDT::AdaBoost( vector<TMVA::Event*> eventSample, DecisionTr
    if (err >= 0.5) { // sanity check ... should never happen as otherwise there is apparently
       // something odd with the assignment of the leaf nodes (rem: you use the training
       // events for this determination of the error rate)
-      Log() << kWARNING << " The error rate in the BDT boosting is > 0.5. ("<< err
-            << ") That should not happen, please check your code (i.e... the BDT code), I "
-            << " set it to 0.5.. just to continue.." <<  Endl;
+      if (dt->GetNNodes() == 1){
+         Log() << kWARNING << " YOUR tree has only 1 node... kind of a funny *tree*. I cannot "
+               << "boost such a thing... if after 1 step the error rate is == 0.5,"
+               << Endl
+               << "please check why this happens, maybe too many events per node were requested?"
+               << Endl;
+      }else{
+         Log() << kWARNING << " The error rate in the BDT boosting is > 0.5 (" << err
+               << "). That should not happen, please check your code (i.e. the BDT code); I "
+               << "set it to 0.5 just to continue..." << Endl;
+      }
       err = 0.5;
    } else if (err < 0) {
       Log() << kWARNING << " The error rate in the BDT boosting is < 0. That can happen"
@@ -907,8 +961,8 @@ Double_t TMVA::MethodBDT::AdaBoost( vector<TMVA::Event*> eventSample, DecisionTr
    Results* results = Data()->GetResults(GetMethodName(),Types::kTraining, Types::kMaxAnalysisType);
 
    for (vector<TMVA::Event*>::iterator e=eventSample.begin(); e!=eventSample.end();e++) {
-      //       if ((!( (dt->CheckEvent(*(*e),fUseYesNoLeaf) > fNodePurityLimit ) == DataInfo().IsSignal((*e)))) || DoRegression()) {
-      if ((!( (dt->CheckEvent(*(*e),fUseYesNoLeaf) > fNodePurityLimit ) == (*e)->IsSignal())) || DoRegression()) {
+ 
+      if ((!( (dt->CheckEvent(*(*e),fUseYesNoLeaf) > fNodePurityLimit ) == DataInfo().IsSignal(*e))) || DoRegression()) {
          Double_t boostfactor = boostWeight;
          if (DoRegression()) boostfactor = TMath::Power(1/boostWeight,(1.-TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) )/maxDev ) );
          if ( (*e)->GetWeight() > 0 ){
@@ -1034,15 +1088,15 @@ Double_t TMVA::MethodBDT::AdaBoostR2( vector<TMVA::Event*> eventSample, Decision
          Float_t newBoostWeight = (*e)->GetBoostWeight() * boostfactor;
          Float_t newWeight = (*e)->GetWeight() * (*e)->GetBoostWeight() * boostfactor;
          if (newWeight == 0) {
-            std::cout << "Weight=    "   <<   (*e)->GetWeight() << std::endl;
-            std::cout << "BoostWeight= " <<   (*e)->GetBoostWeight() << std::endl;
-            std::cout << "boostweight="<<boostWeight << "  err= " <<err << std::endl; 
-            std::cout << "NewBoostWeight= " <<   newBoostWeight << std::endl;
-            std::cout << "boostfactor= " <<  boostfactor << std::endl;
-            std::cout << "maxDev     = " <<  maxDev << std::endl;
-            std::cout << "tmpDev     = " <<  TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) ) << std::endl;
-            std::cout << "target     = " <<  (*e)->GetTarget(0)  << std::endl; 
-            std::cout << "estimate   = " <<  dt->CheckEvent(*(*e),kFALSE)  << std::endl;
+            Log() << kINFO << "Weight=    "   <<   (*e)->GetWeight() << Endl;
+            Log() << kINFO  << "BoostWeight= " <<   (*e)->GetBoostWeight() << Endl;
+            Log() << kINFO  << "boostweight="<<boostWeight << "  err= " <<err << Endl; 
+            Log() << kINFO  << "NewBoostWeight= " <<   newBoostWeight << Endl;
+            Log() << kINFO  << "boostfactor= " <<  boostfactor << Endl;
+            Log() << kINFO  << "maxDev     = " <<  maxDev << Endl;
+            Log() << kINFO  << "tmpDev     = " <<  TMath::Abs(dt->CheckEvent(*(*e),kFALSE) - (*e)->GetTarget(0) ) << Endl;
+            Log() << kINFO  << "target     = " <<  (*e)->GetTarget(0)  << Endl; 
+            Log() << kINFO  << "estimate   = " <<  dt->CheckEvent(*(*e),kFALSE)  << Endl;
          }
          (*e)->SetBoostWeight( newBoostWeight );
          //         (*e)->SetBoostWeight( (*e)->GetBoostWeight() * boostfactor);
@@ -1070,8 +1124,8 @@ Double_t TMVA::MethodBDT::AdaBoostR2( vector<TMVA::Event*> eventSample, Decision
 //_______________________________________________________________________
 void TMVA::MethodBDT::AddWeightsXMLTo( void* parent ) const
 {
-   // write weights to XML 
-   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   // write weights to XML
+   void* wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr( wght, "NTrees", fForest.size() );
    gTools().AddAttr( wght, "TreeType", fForest.back()->GetAnalysisType() );
 
@@ -1098,7 +1152,7 @@ void TMVA::MethodBDT::ReadWeightsFromXML(void* parent) {
    gTools().ReadAttr( parent, "NTrees", ntrees );
    gTools().ReadAttr( parent, "TreeType", analysisType );
 
-   void* ch = gTools().xmlengine().GetChild(parent);
+   void* ch = gTools().GetChild(parent);
    i=0;
    while(ch) {
       fForest.push_back( dynamic_cast<DecisionTree*>( BinaryTree::CreateFromXML(ch, GetTrainingTMVAVersionCode()) ) );
@@ -1106,7 +1160,7 @@ void TMVA::MethodBDT::ReadWeightsFromXML(void* parent) {
       fForest.back()->SetTreeID(i++);
       gTools().ReadAttr(ch,"boostWeight",boostWeight);
       fBoostWeights.push_back(boostWeight);
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 }
 
@@ -1178,8 +1232,44 @@ Double_t TMVA::MethodBDT::GetMvaValue( Double_t* err, UInt_t useNTrees )
          norm  += 1;
       }
    }
-   return myMVA /= norm;
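+   // guard against division by a (near-)zero norm, e.g. for an empty forest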
+   return (norm > std::numeric_limits<double>::epsilon()) ? myMVA/norm : 0;
 }
+
+//_______________________________________________________________________
+const std::vector<Float_t>& TMVA::MethodBDT::GetMulticlassValues()
+{
+   // get the multiclass MVA response for the BDT classifier
+
+   const TMVA::Event& e = *GetEvent();
+   if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
+   fMulticlassReturnVal->clear();
+
+   std::vector<double> temp;
+
+   UInt_t nClasses = DataInfo().GetNClasses();
+   for(UInt_t iClass=0; iClass<nClasses; iClass++){
+      temp.push_back(0.0);
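+      // trees are stored interleaved: one tree per class is grown in each
+      // boosting iteration, so class iClass owns trees iClass, iClass+nClasses, ...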
+      for(UInt_t itree = iClass; itree<fForest.size(); itree+=nClasses){
+         temp[iClass] += fForest[itree]->CheckEvent(e,kFALSE);
+      }
+   }
+
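+   // convert the per-class score sums into probabilities via a numerically
+   // stable softmax: 1/(1+sum_{j!=i} exp(f_j-f_i)) == exp(f_i)/sum_j exp(f_j)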
+   for(UInt_t iClass=0; iClass<nClasses; iClass++){
+      Double_t norm = 0.0;
+      for(UInt_t j=0;j<nClasses;j++){
+         if(iClass!=j)
+            norm+=exp(temp[j]-temp[iClass]);
+      }
+      (*fMulticlassReturnVal).push_back(1.0/(1.0+norm));
+   }
+
+   return *fMulticlassReturnVal;
+}
+
 //_______________________________________________________________________
 const std::vector<Float_t> & TMVA::MethodBDT::GetRegressionValues()
 {
@@ -1251,7 +1341,7 @@ const std::vector<Float_t> & TMVA::MethodBDT::GetRegressionValues()
             norm  += 1;
          }
       }
-      fRegressionReturnVal->push_back( myMVA/norm );
+      fRegressionReturnVal->push_back( (norm > std::numeric_limits<double>::epsilon()) ? myMVA/norm : 0 );
    }
    return *fRegressionReturnVal;
 }
diff --git a/tmva/src/MethodBase.cxx b/tmva/src/MethodBase.cxx
index 05d5fdd08b190e54bc5ab9d5bc9480da87e631a2..24dbe1b4e92267f14673dc43e80185d985313401 100644
--- a/tmva/src/MethodBase.cxx
+++ b/tmva/src/MethodBase.cxx
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss, Kai Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -104,6 +104,7 @@
 #include "TMVA/Tools.h"
 #include "TMVA/ResultsClassification.h"
 #include "TMVA/ResultsRegression.h"
+#include "TMVA/ResultsMulticlass.h"
 
 ClassImp(TMVA::MethodBase)
 
@@ -132,6 +133,7 @@ TMVA::MethodBase::MethodBase( const TString& jobName,
    fTmpEvent                  ( 0 ),
    fAnalysisType              ( Types::kNoAnalysisType ),
    fRegressionReturnVal       ( 0 ),
+   fMulticlassReturnVal       ( 0 ),
    fDisableWriting            ( kFALSE ),
    fDataSetInfo               ( dsi ),
    fSignalReferenceCut        ( 0.5 ),
@@ -329,6 +331,7 @@ void TMVA::MethodBase::InitBase()
       fInputVars->push_back(DataInfo().GetVariableInfo(ivar).GetLabel());
    }
    fRegressionReturnVal = 0;
+   fMulticlassReturnVal = 0;
 
    fEventCollections.resize( 2 );
    fEventCollections.at(0) = 0;
@@ -445,7 +448,86 @@ void TMVA::MethodBase::ProcessBaseOptions()
 //_______________________________________________________________________
 void TMVA::MethodBase::CreateVariableTransforms(const TString& trafoDefinition )
 {
-   if (trafoDefinition != "None") {
+   // create variable transformations
+
+   if (trafoDefinition == "None") // no transformations
+      return;
+
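+   // new-style definition, e.g. (illustrative) "D+N_Signal+G(var1,var2)":
+   // '+' separates transformations, an optional "(...)" selects the input
+   // variables, and an optional "_<ClassName>" restricts the reference class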
+   if( trafoDefinition.Contains("+") || trafoDefinition.Contains("(") ) { // new format
+      TList* trList = gTools().ParseFormatLine( trafoDefinition, "+" );
+      TListIter trIt(trList);
+      while (TObjString* os = (TObjString*)trIt()) {
+	 TString tdef = os->GetString();
+         Int_t idxCls = -1;
+
+	 TString variables = "_V_";
+	 if( tdef.Contains("(") ) { // contains selection of variables
+	    Ssiz_t parStart = tdef.Index( "(" );
+	    Ssiz_t parLen   = tdef.Index( ")", parStart )-parStart+1;
+
+	    variables = tdef(parStart,parLen);
+	    tdef.Remove(parStart,parLen);
+	    variables.Remove(parLen-1,1);
+	    variables.Remove(0,1);
+	 }
+
+         TList* trClsList = gTools().ParseFormatLine( tdef, "_" ); // split entry to get trf-name and class-name
+         TListIter trClsIt(trClsList);
+         const TString& trName = ((TObjString*)trClsList->At(0))->GetString();
+
+         if (trClsList->GetEntries() > 1) {
+            TString trCls = "AllClasses";
+            ClassInfo *ci = NULL;
+            trCls  = ((TObjString*)trClsList->At(1))->GetString();
+            if (trCls != "AllClasses") {
+               ci = DataInfo().GetClassInfo( trCls );
+               if (ci == NULL)
+                  Log() << kFATAL << "Class " << trCls << " not known for variable transformation "
+                        << trName << ", please check." << Endl;
+               else
+                  idxCls = ci->GetNumber();
+            }
+         }
+
+
+	 VariableTransformBase* transformation = NULL;
+         if      (trName == "D" || trName == "Deco" || trName == "Decorrelate"){
+	    if( variables.Length() == 0 )
+	       variables = "_V_";
+	    transformation = new VariableDecorrTransform( DataInfo());
+	 }
+         else if (trName == "P" || trName == "PCA"){
+	    if( variables.Length() == 0 )
+	       variables = "_V_";
+	    transformation = new VariablePCATransform   ( DataInfo());
+	 }
+         else if (trName == "G" || trName == "Gauss"){
+	    if( variables.Length() == 0 )
+	       variables = "_V_,_T_";
+	    transformation = new VariableGaussTransform ( DataInfo());
+	 }
+         else if (trName == "N" || trName == "Norm" || trName == "Normalise" || trName == "Normalize")
+	 {
+	    if( variables.Length() == 0 )
+	       variables = "_V_,_T_";
+	    transformation = new VariableNormalizeTransform( DataInfo());
+	 }
+         else
+            Log() << kFATAL << "<ProcessOptions> Variable transform '"
+                  << trName << "' unknown." << Endl;
+
+	 if( transformation ){
+	    transformation->SelectInput( variables );
+	    GetTransformationHandler().AddTransformation(transformation, idxCls);
+	 }
+      }
+
+      return;
+   }
+
+   if (trafoDefinition != "None") { // old format
       TList* trList = gTools().ParseFormatLine( trafoDefinition, "," );
       TListIter trIt(trList);
       while (TObjString* os = (TObjString*)trIt()) {
@@ -469,17 +551,27 @@ void TMVA::MethodBase::CreateVariableTransforms(const TString& trafoDefinition )
             }
          }
 
+	 VariableTransformBase* transformation = NULL;
+	 TString variables = "_V_";
          if      (trName == "D" || trName == "Deco" || trName == "Decorrelate")
-            GetTransformationHandler().AddTransformation( new VariableDecorrTransform   ( DataInfo()) , idxCls );
+	    transformation = new VariableDecorrTransform( DataInfo());
          else if (trName == "P" || trName == "PCA")
-            GetTransformationHandler().AddTransformation( new VariablePCATransform      ( DataInfo()), idxCls );
+	    transformation = new VariablePCATransform   ( DataInfo());
          else if (trName == "G" || trName == "Gauss")
-            GetTransformationHandler().AddTransformation( new VariableGaussTransform    ( DataInfo()), idxCls );
-         else if (trName == "N" || trName == "Norm" || trName == "Normalise" || trName == "Normalize")
-            GetTransformationHandler().AddTransformation( new VariableNormalizeTransform( DataInfo()), idxCls );
+	    transformation = new VariableGaussTransform ( DataInfo());
+         else if (trName == "N" || trName == "Norm" || trName == "Normalise" || trName == "Normalize"){
+	    variables = "_V_,_T_";
+	    transformation = new VariableNormalizeTransform( DataInfo());
+	 }
          else
             Log() << kFATAL << "<ProcessOptions> Variable transform '"
-                  << trName << "' unknown." << Endl;
+                  << trName << "' unknown." << Endl;
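+         // NOTE: the printout below assumes idxCls names a valid class; for the
+         // default idxCls==-1 ("AllClasses") GetClassInfo(idxCls) may not return
+         // a usable object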
+         Log() << kINFO << "create Transformation " << trName << " with reference class "
+               << DataInfo().GetClassInfo(idxCls)->GetName() << "=(" << idxCls << ")" << Endl;
+
+	 if( transformation ){
+	    transformation->SelectInput( variables );
+	    GetTransformationHandler().AddTransformation(transformation, idxCls);
+	 }
       }
    }
 }
@@ -604,6 +696,45 @@ void TMVA::MethodBase::AddRegressionOutput(Types::ETreeType type)
    regRes->CreateDeviationHistograms( histNamePrefix );
 }
 
+//_______________________________________________________________________
+void TMVA::MethodBase::AddMulticlassOutput(Types::ETreeType type)
+{
+   // prepare tree branch with the method's multiclass response values
+
+   Data()->SetCurrentType(type);
+
+   Log() << kINFO << "Create results for " << (type==Types::kTraining?"training":"testing") << Endl;
+
+   ResultsMulticlass* regMulti = (ResultsMulticlass*)Data()->GetResults(GetMethodName(), type, Types::kMulticlass);
+
+   Long64_t nEvents = Data()->GetNEvents();
+
+   // use timer
+   Timer timer( nEvents, GetName(), kTRUE );
+
+   Log() << kINFO << "Evaluation of " << GetMethodName() << " on "
+         << (type==Types::kTraining?"training":"testing") << " sample" << Endl;
+
+   regMulti->Resize( nEvents );
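+   // store the full vector of per-class MVA responses for every event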
+   for (Int_t ievt=0; ievt<nEvents; ievt++) {
+      Data()->SetCurrentEvent(ievt);
+      std::vector< Float_t > vals = GetMulticlassValues();
+      regMulti->SetValue( vals, ievt );
+      timer.DrawProgressBar( ievt );
+   }
+
+   Log() << kINFO << "Elapsed time for evaluation of " << nEvents <<  " events: "
+         << timer.GetElapsedTime() << "       " << Endl;
+
+   // store time used for testing
+   if (type==Types::kTesting)
+      SetTestTime(timer.ElapsedSeconds());
+
+   TString histNamePrefix(GetTestvarName());
+   histNamePrefix += (type==Types::kTraining?"train":"test");
+//   regMulti->CreateDeviationHistograms( histNamePrefix );
+}
+
 //_______________________________________________________________________
 Double_t TMVA::MethodBase::GetMvaValue( const Event* const ev, Double_t* err ) {
    fTmpEvent = ev;
@@ -793,6 +924,106 @@ void TMVA::MethodBase::TestRegression( Double_t& bias, Double_t& biasT,
    Data()->SetCurrentType(savedType);   
 }
 
+
+//_______________________________________________________________________
+void TMVA::MethodBase::TestMulticlass()
+{
+   // test multiclass classification
+
+   Types::ETreeType savedType = Data()->GetCurrentType();
+   Data()->SetCurrentType(Types::kTesting);
+
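+   // multiclass performance figures are not computed yet; the commented-out
+   // block below is the regression test code, apparently kept as a template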
+//    ResultsMulticlass* mvaRes = dynamic_cast<ResultsMulticlass*>
+//       ( Data()->GetResults(GetMethodName(),Types::kTesting, Types::kMulticlass) );
+
+//    bias = 0; biasT = 0; dev = 0; devT = 0; rms = 0; rmsT = 0;
+//    Double_t sumw = 0;
+//    Double_t m1 = 0, m2 = 0, s1 = 0, s2 = 0, s12 = 0; // for correlation
+//    const Int_t nevt = GetNEvents();
+//    Float_t* rV = new Float_t[nevt];
+//    Float_t* tV = new Float_t[nevt];
+//    Float_t* wV = new Float_t[nevt];
+//    Float_t  xmin = 1e30, xmax = -1e30;
+//     for (Long64_t ievt=0; ievt<nevt; ievt++) {
+      
+//       const Event* ev = Data()->GetEvent(ievt); // NOTE: need untransformed event here !
+//       Float_t t = ev->GetTarget(0);
+//       Float_t w = ev->GetWeight();
+//       Float_t r = GetRegressionValues()[0];
+//       Float_t d = (r-t);
+
+//       // find min/max
+//       xmin = TMath::Min(xmin, TMath::Min(t, r));
+//       xmax = TMath::Max(xmax, TMath::Max(t, r));
+
+//       // store for truncated RMS computation
+//       rV[ievt] = r;
+//       tV[ievt] = t;
+//       wV[ievt] = w;
+      
+//       // compute deviation-squared
+//       sumw += w;
+//       bias += w * d;
+//       dev  += w * TMath::Abs(d);
+//       rms  += w * d * d;
+
+//       // compute correlation between target and regression estimate
+//       m1  += t*w; s1 += t*t*w;
+//       m2  += r*w; s2 += r*r*w;
+//       s12 += t*r;
+//    }
+
+//    // standard quantities
+//    bias /= sumw;
+//    dev  /= sumw;
+//    rms  /= sumw;
+//    rms  = TMath::Sqrt(rms - bias*bias);
+
+//    // correlation
+//    m1   /= sumw; 
+//    m2   /= sumw; 
+//    corr  = s12/sumw - m1*m2;
+//    corr /= TMath::Sqrt( (s1/sumw - m1*m1) * (s2/sumw - m2*m2) );
+
+//    // create histogram required for computeation of mutual information
+//    TH2F* hist  = new TH2F( "hist",  "hist",  150, xmin, xmax, 100, xmin, xmax );
+//    TH2F* histT = new TH2F( "histT", "histT", 150, xmin, xmax, 100, xmin, xmax );
+
+//    // compute truncated RMS and fill histogram
+//    Double_t devMax = bias + 2*rms;
+//    Double_t devMin = bias - 2*rms;
+//    sumw = 0;
+//    int ic=0;
+//    for (Long64_t ievt=0; ievt<nevt; ievt++) {
+//       Float_t d = (rV[ievt] - tV[ievt]);
+//       hist->Fill( rV[ievt], tV[ievt], wV[ievt] );
+//       if (d >= devMin && d <= devMax) {
+//          sumw  += wV[ievt];
+//          biasT += wV[ievt] * d;
+//          devT  += wV[ievt] * TMath::Abs(d);
+//          rmsT  += wV[ievt] * d * d;       
+//          histT->Fill( rV[ievt], tV[ievt], wV[ievt] );
+//          ic++;
+//       }
+//    }   
+//    biasT /= sumw;
+//    devT  /= sumw;
+//    rmsT  /= sumw;
+//    rmsT  = TMath::Sqrt(rmsT - biasT*biasT);
+//    mInf  = gTools().GetMutualInformation( *hist );
+//    mInfT = gTools().GetMutualInformation( *histT );
+
+//    delete hist;
+//    delete histT;
+
+//    delete [] rV;
+//    delete [] tV;
+//    delete [] wV;
+
+   Data()->SetCurrentType(savedType);   
+}
+
+
 //_______________________________________________________________________
 void TMVA::MethodBase::TestClassification()
 {
@@ -957,18 +1188,20 @@ void TMVA::MethodBase::WriteStateToStream( std::ostream& tf ) const
 void TMVA::MethodBase::AddInfoItem( void* gi, const TString& name, const TString& value) const 
 {
    // xml writing
-   void* it = gTools().xmlengine().NewChild(gi,0,"Info");
-   gTools().xmlengine().NewAttr(it,0,"name", name);
-   gTools().xmlengine().NewAttr(it,0,"value", value);
+   void* it = gTools().AddChild(gi,"Info");
+   gTools().AddAttr(it,"name", name);
+   gTools().AddAttr(it,"value", value);
 }
 
 //_______________________________________________________________________
 void TMVA::MethodBase::AddOutput( Types::ETreeType type, Types::EAnalysisType analysisType ) {
    if (analysisType == Types::kRegression) {
       AddRegressionOutput( type );
+   } else if (analysisType == Types::kMulticlass ){
+      AddMulticlassOutput( type );
    } else {
       AddClassifierOutput( type );
-      if (HasMVAPdfs()) 
+      if (HasMVAPdfs())
          AddClassifierOutputProb( type );
    }
 }
@@ -978,12 +1211,12 @@ void TMVA::MethodBase::WriteStateToXML( void* parent ) const
 {
    // general method used in writing the header of the weight files where
    // the used variables, variable transformation type etc. is specified
-   
+
    if (!parent) return;
 
    UserGroup_t* userInfo = gSystem->GetUserInfo();
 
-   void* gi = gTools().xmlengine().NewChild(parent, 0, "GeneralInfo");
+   void* gi = gTools().AddChild(parent, "GeneralInfo");
    AddInfoItem( gi, "TMVA Release", GetTrainingTMVAVersionString() + " [" + gTools().StringFromInt(GetTrainingTMVAVersionCode()) + "]" );
    AddInfoItem( gi, "ROOT Release", GetTrainingROOTVersionString() + " [" + gTools().StringFromInt(GetTrainingROOTVersionCode()) + "]");
    AddInfoItem( gi, "Creator", userInfo->fUser);
@@ -993,7 +1226,9 @@ void TMVA::MethodBase::WriteStateToXML( void* parent ) const
    AddInfoItem( gi, "Training events", gTools().StringFromInt(Data()->GetNTrainingEvents()));
    AddInfoItem( gi, "TrainingTime", gTools().StringFromDouble(const_cast<TMVA::MethodBase*>(this)->GetTrainTime()));
 
-   TString analysisType(((const_cast<TMVA::MethodBase*>(this)->GetAnalysisType()==Types::kRegression) ? "Regression" : "Classification"));
+   Types::EAnalysisType aType = const_cast<TMVA::MethodBase*>(this)->GetAnalysisType();
+   TString analysisType((aType==Types::kRegression) ? "Regression" :
+                        (aType==Types::kMulticlass ? "Multiclass" : "Classification"));
    AddInfoItem( gi, "AnalysisType", analysisType );
    delete userInfo;
 
@@ -1010,16 +1245,15 @@ void TMVA::MethodBase::WriteStateToXML( void* parent ) const
    // write target info if in regression mode
    if(DoRegression())
       AddTargetsXMLTo(parent);
-   
 
    // write transformations
    GetTransformationHandler().AddXMLTo( parent );
-   
+
    // write MVA variable distributions
-   void* pdfs = gTools().xmlengine().NewChild(parent, 0, "MVAPdfs");
+   void* pdfs = gTools().AddChild(parent, "MVAPdfs");
    if (fMVAPdfS) fMVAPdfS->AddXMLTo(pdfs);
    if (fMVAPdfB) fMVAPdfB->AddXMLTo(pdfs);
-   
+
    // write weights
    AddWeightsXMLTo( parent );
 }
@@ -1057,9 +1291,9 @@ void TMVA::MethodBase::WriteStateToFile() const
    Log() << kINFO << "Creating weight file in xml format: "
          << gTools().Color("lightblue") << xmlfname << gTools().Color("reset") << Endl;
    void* doc      = gTools().xmlengine().NewDoc();
-   void* rootnode = gTools().xmlengine().NewChild(0,0,"MethodSetup");
+   void* rootnode = gTools().AddChild(0,"MethodSetup");
    gTools().xmlengine().DocSetRootElement(doc,rootnode);
-   gTools().xmlengine().NewAttr(rootnode,0,"Method", GetMethodTypeName() + "::" + GetMethodName());
+   gTools().AddAttr(rootnode,"Method", GetMethodTypeName() + "::" + GetMethodName());
    WriteStateToXML(rootnode);
    gTools().xmlengine().SaveDoc(doc,xmlfname);
 }
@@ -1077,17 +1311,17 @@ void TMVA::MethodBase::ReadStateFromFile()
          << gTools().Color("lightblue") << tfname << gTools().Color("reset") << Endl;
 
    if (tfname.EndsWith(".xml") ) {
-      void* doc = gTools().xmlengine().ParseFile(tfname); 
+      void* doc = gTools().xmlengine().ParseFile(tfname);
       void* rootnode = gTools().xmlengine().DocGetRootElement(doc); // node "MethodSetup"
       ReadStateFromXML(rootnode);
-   } 
+   }
    else {
       filebuf fb;
       fb.open(tfname.Data(),ios::in);
       if (!fb.is_open()) { // file not found --> Error
          Log() << kFATAL << "<ReadStateFromFile> "
                << "Unable to open input weight file: " << tfname << Endl;
-      }      
+      }
       istream fin(&fb);
       ReadStateFromStream(fin);
       fb.close();
@@ -1101,14 +1335,24 @@ void TMVA::MethodBase::ReadStateFromFile()
       ReadStateFromStream( *rfile );
       rfile->Close();
    }
+}
 
-   // update transformation handler
-   if (GetTransformationHandler().GetCallerName() == "") GetTransformationHandler().SetCallerName( GetName() );
+#if ROOT_SVN_REVISION >= 32259
+//_______________________________________________________________________
+void TMVA::MethodBase::ReadStateFromXMLString( const char* xmlstr ) {
+   // for reading from memory
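+   // (guarded by ROOT_SVN_REVISION above, presumably because
+   // TXMLEngine::ParseString only exists in sufficiently recent ROOT versions)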
+   
+   void* doc = gTools().xmlengine().ParseString(xmlstr);
+
+   void* rootnode = gTools().xmlengine().DocGetRootElement(doc); // node "MethodSetup"
+
+   ReadStateFromXML(rootnode);
 }
+#endif
 
 //_______________________________________________________________________
-void TMVA::MethodBase::ReadStateFromXML( void* methodNode ) 
+void TMVA::MethodBase::ReadStateFromXML( void* methodNode )
 {
    TString fullMethodName;
    gTools().ReadAttr( methodNode, "Method", fullMethodName );
@@ -1119,21 +1363,21 @@ void TMVA::MethodBase::ReadStateFromXML( void* methodNode )
    Log() << kINFO << "Read method \"" << GetMethodName() << "\" of type \"" << GetMethodTypeName() << "\"" << Endl;
 
    TString nodeName("");
-   void* ch = gTools().xmlengine().GetChild(methodNode);
+   void* ch = gTools().GetChild(methodNode);
    while (ch!=0) {
-      nodeName = TString( gTools().xmlengine().GetNodeName(ch) );
+      nodeName = TString( gTools().GetName(ch) );
 
       if (nodeName=="GeneralInfo") {
          // read analysis type
 
          TString name(""),val("");
-         void* antypeNode = gTools().xmlengine().GetChild(ch);
+         void* antypeNode = gTools().GetChild(ch);
          while (antypeNode) {
             gTools().ReadAttr( antypeNode, "name",   name );
-            
-            if (name == "TrainingTime") 
+
+            if (name == "TrainingTime")
                gTools().ReadAttr( antypeNode, "value",  fTrainTime );
-      
+
             if (name == "AnalysisType") {
                gTools().ReadAttr( antypeNode, "value",  val );
                val.ToLower();
@@ -1142,7 +1386,7 @@ void TMVA::MethodBase::ReadStateFromXML( void* methodNode )
                else if (val == "multiclass" )     SetAnalysisType( Types::kMulticlass );
                else Log() << kFATAL << "Analysis type " << val << " is not known." << Endl;
             }
-            
+
             if (name == "TMVA Release" || name == "TMVA" ){
                TString s;
                gTools().ReadAttr( antypeNode, "value", s);
@@ -1156,14 +1400,14 @@ void TMVA::MethodBase::ReadStateFromXML( void* methodNode )
                fROOTTrainingVersion = TString(s(s.Index("[")+1,s.Index("]")-s.Index("[")-1)).Atoi();
                Log() << kINFO << "MVA method was trained with ROOT Version: " << GetTrainingROOTVersionString() << Endl;
             }
-            antypeNode = gTools().xmlengine().GetNext(antypeNode);
+            antypeNode = gTools().GetNextChild(antypeNode);
          }
-      } 
+      }
       else if (nodeName=="Options") {
          ReadOptionsFromXML(ch);
          ParseOptions();
-         
-      } 
+
+      }
       else if (nodeName=="Variables") {
          ReadVariablesFromXML(ch);
       }
@@ -1181,25 +1425,29 @@ void TMVA::MethodBase::ReadStateFromXML( void* methodNode )
          TString pdfname;
          if (fMVAPdfS) delete fMVAPdfS;
          if (fMVAPdfB) delete fMVAPdfB;
-         void* pdfnode = gTools().xmlengine().GetChild(ch);
+         void* pdfnode = gTools().GetChild(ch);
          if (pdfnode) {
             gTools().ReadAttr(pdfnode, "Name", pdfname);
             fMVAPdfS = new PDF(pdfname);
             fMVAPdfS->ReadXML(pdfnode);
-            pdfnode = gTools().xmlengine().GetNext(pdfnode);
+            pdfnode = gTools().GetNextChild(pdfnode);
             gTools().ReadAttr(pdfnode, "Name", pdfname);
             fMVAPdfB = new PDF(pdfname);
             fMVAPdfB->ReadXML(pdfnode);
          }
-      } 
+      }
       else if (nodeName=="Weights") {
          ReadWeightsFromXML(ch);
-      } 
+      }
       else {
          std::cout << "Unparsed: " << nodeName << std::endl;
       }
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
+
    }
+
+   // update transformation handler
+   if (GetTransformationHandler().GetCallerName() == "") GetTransformationHandler().SetCallerName( GetName() );
 }
 
 //_______________________________________________________________________
@@ -1310,6 +1558,10 @@ void TMVA::MethodBase::ReadStateFromStream( std::istream& fin )
    while (!TString(buf).BeginsWith("#WGT")) fin.getline(buf,512);
    fin.getline(buf,512);
    ReadWeightsFromStream( fin );;
+
+   // update transformation handler
+   if (GetTransformationHandler().GetCallerName() == "") GetTransformationHandler().SetCallerName( GetName() );
+
 }
 
 //_______________________________________________________________________
@@ -1367,12 +1619,12 @@ void TMVA::MethodBase::ReadVarsFromStream( std::istream& istr )
 void TMVA::MethodBase::AddVarsXMLTo( void* parent ) const 
 {
    // write variable info to XML 
-   void* vars = gTools().xmlengine().NewChild(parent, 0, "Variables");
-   gTools().xmlengine().NewAttr( vars, 0, "NVar", gTools().StringFromInt(DataInfo().GetNVariables()) );
+   void* vars = gTools().AddChild(parent, "Variables");
+   gTools().AddAttr( vars, "NVar", gTools().StringFromInt(DataInfo().GetNVariables()) );
 
    for (UInt_t idx=0; idx<DataInfo().GetVariableInfos().size(); idx++) {
       VariableInfo& vi = DataInfo().GetVariableInfos()[idx];
-      void* var = gTools().xmlengine().NewChild( vars, 0, "Variable" );
+      void* var = gTools().AddChild( vars, "Variable" );
       gTools().AddAttr( var, "VarIndex", idx );
       vi.AddToXML( var );
    }
@@ -1382,7 +1634,7 @@ void TMVA::MethodBase::AddVarsXMLTo( void* parent ) const
 void TMVA::MethodBase::AddSpectatorsXMLTo( void* parent ) const 
 {
    // write spectator info to XML 
-   void* specs = gTools().xmlengine().NewChild(parent, 0, "Spectators");
+   void* specs = gTools().AddChild(parent, "Spectators");
 
    UInt_t writeIdx=0;
    for (UInt_t idx=0; idx<DataInfo().GetSpectatorInfos().size(); idx++) {
@@ -1398,23 +1650,23 @@ void TMVA::MethodBase::AddSpectatorsXMLTo( void* parent ) const
          if(!vi.GetTitle().BeginsWith(GetMethodName()+":") )
             continue;
       }
-      void* spec = gTools().xmlengine().NewChild( specs, 0, "Spectator" );
+      void* spec = gTools().AddChild( specs, "Spectator" );
       gTools().AddAttr( spec, "SpecIndex", writeIdx++ );
       vi.AddToXML( spec );
    }
-   gTools().xmlengine().NewAttr( specs, 0, "NSpec", gTools().StringFromInt(writeIdx) );
+   gTools().AddAttr( specs, "NSpec", gTools().StringFromInt(writeIdx) );
 }
 
 //_______________________________________________________________________
 void TMVA::MethodBase::AddTargetsXMLTo( void* parent ) const 
 {
    // write target info to XML 
-   void* targets = gTools().xmlengine().NewChild(parent, 0, "Targets");
-   gTools().xmlengine().NewAttr( targets, 0, "NTrgt", gTools().StringFromInt(DataInfo().GetNTargets()) );
+   void* targets = gTools().AddChild(parent, "Targets");
+   gTools().AddAttr( targets, "NTrgt", gTools().StringFromInt(DataInfo().GetNTargets()) );
 
    for (UInt_t idx=0; idx<DataInfo().GetTargetInfos().size(); idx++) {
       VariableInfo& vi = DataInfo().GetTargetInfos()[idx];
-      void* tar = gTools().xmlengine().NewChild( targets, 0, "Target" );
+      void* tar = gTools().AddChild( targets, "Target" );
       gTools().AddAttr( tar, "TargetIndex", idx );
       vi.AddToXML( tar );
    }
@@ -1436,7 +1688,7 @@ void TMVA::MethodBase::ReadVariablesFromXML( void* varnode )
    // we want to make sure all variables are read in the order they are defined
    VariableInfo readVarInfo, existingVarInfo;
    int varIdx = 0;
-   void* ch = gTools().xmlengine().GetChild(varnode);
+   void* ch = gTools().GetChild(varnode);
    while (ch) {
       gTools().ReadAttr( ch, "VarIndex", varIdx);
       existingVarInfo = DataInfo().GetVariableInfos()[varIdx];
@@ -1455,7 +1707,7 @@ void TMVA::MethodBase::ReadVariablesFromXML( void* varnode )
          Log() << kINFO << "   var #" << varIdx <<" declared in file  : " << readVarInfo.GetExpression() << Endl;
          Log() << kFATAL << "The expression declared to the Reader needs to be checked (name or order are wrong)" << Endl;
       }
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 }
 
@@ -1475,7 +1727,7 @@ void TMVA::MethodBase::ReadSpectatorsFromXML( void* specnode )
    // we want to make sure all variables are read in the order they are defined
    VariableInfo readSpecInfo, existingSpecInfo;
    int specIdx = 0;
-   void* ch = gTools().xmlengine().GetChild(specnode);
+   void* ch = gTools().GetChild(specnode);
    while (ch) {
       gTools().ReadAttr( ch, "SpecIndex", specIdx);
       existingSpecInfo = DataInfo().GetSpectatorInfos()[specIdx];
@@ -1494,7 +1746,7 @@ void TMVA::MethodBase::ReadSpectatorsFromXML( void* specnode )
          Log() << kINFO << "   var #" << specIdx <<" declared in file  : " << readSpecInfo.GetExpression() << Endl;
          Log() << kFATAL << "The expression declared to the Reader needs to be checked (name or order are wrong)" << Endl;
       }
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 }
 
@@ -1507,13 +1759,13 @@ void TMVA::MethodBase::ReadTargetsFromXML( void* tarnode )
 
    int tarIdx = 0;
    TString expression;
-   void* ch = gTools().xmlengine().GetChild(tarnode);
+   void* ch = gTools().GetChild(tarnode);
    while (ch) {
       gTools().ReadAttr( ch, "TargetIndex", tarIdx);
       gTools().ReadAttr( ch, "Expression", expression);
       DataInfo().AddTarget(expression,"","",0,0);
      
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 }
 
@@ -1534,7 +1786,7 @@ TDirectory* TMVA::MethodBase::BaseDir() const
    TString defaultDir = GetMethodName();
 
    TObject* o = methodDir->FindObject(defaultDir);
-   if (o!=0 && o->InheritsFrom(TDirectory::Class())) dir = (TDirectory*)o;
+   if (o!=0 && o->InheritsFrom("TDirectory")) dir = (TDirectory*)o;
 
    if (dir != 0) return dir;
 
@@ -1833,7 +2085,7 @@ Double_t TMVA::MethodBase::GetEfficiency( const TString& theString, Types::ETree
       for (UInt_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {
 
          // read the tree
-         Bool_t  isSignal  = GetEvent(ievt)->IsSignal();
+         Bool_t  isSignal  = DataInfo().IsSignal(GetEvent(ievt));
          Float_t theWeight = GetEvent(ievt)->GetWeight();
          Float_t theVal    = (*mvaRes)[ievt];
 
@@ -2720,3 +2972,17 @@ TString TMVA::MethodBase::GetTrainingROOTVersionString() const
 
    return TString(Form("%i.%02i/%02i",a,b,c));
 }
+ 
+//_______________________________________________________________________
+TMVA::MethodBase* TMVA::MethodBase::GetThisBase()
+{
+   // return a pointer to the base class of this method
+   return fgThisBase;
+}
+
+//_______________________________________________________________________
+void TMVA::MethodBase::ResetThisBase()
+{
+   // reset required for RootFinder: point the static fgThisBase at this instance
+   fgThisBase = this;
+}
diff --git a/tmva/src/MethodBoost.cxx b/tmva/src/MethodBoost.cxx
index 69aa8809e94f9d1228b1f22e5ff96fc2305a1bb2..0d6725e079d94e522fb234531218ce7be18d82d9 100644
--- a/tmva/src/MethodBoost.cxx
+++ b/tmva/src/MethodBoost.cxx
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$   
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss,Or Cohen, Eckhard von Toerne 
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss,Or Cohen, Eckhard von Toerne
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -19,10 +19,10 @@
  *      Eckhard v. Toerne  <evt@uni-bonn.de>        - U of Bonn, Germany          *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                    #include "TMVA/Timer.h"                                * 
- *      MPI-K Heidelberg, Germany                                                 * 
- *      U. of Bonn, Germany                                                      *
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
+ *      U. of Bonn, Germany                                                       *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
@@ -52,6 +52,7 @@
 #include "TMVA/MethodCompositeBase.h"
 #include "TMVA/MethodBase.h"
 #include "TMVA/MethodBoost.h"
+#include "TMVA/MethodCategory.h"
 #include "TMVA/Tools.h"
 #include "TMVA/ClassifierFactory.h"
 #include "TMVA/Timer.h"
@@ -71,7 +72,7 @@ TMVA::MethodBoost::MethodBoost( const TString& jobName,
                                 TDirectory* theTargetDir ) :
    TMVA::MethodCompositeBase( jobName, Types::kBoost, methodTitle, theData, theOption, theTargetDir ),
    fBoostedMethodTitle(methodTitle),
-   fBoostedMethodOptions(theOption), 
+   fBoostedMethodOptions(theOption),
    fMonitorHist(0)
 {}
 
@@ -118,7 +119,7 @@ void TMVA::MethodBoost::DeclareOptions()
    
    DeclareOptionRef( fMonitorBoostedMethod = kTRUE, "Boost_MonitorMethod",
                      "Whether to write monitoring histogram for each boosted classifier");
-
+   
    DeclareOptionRef(fBoostType  = "AdaBoost", "Boost_Type", "Boosting type for the classifiers");
    AddPreDefVal(TString("AdaBoost"));
    AddPreDefVal(TString("Bagging"));
@@ -145,7 +146,7 @@ void TMVA::MethodBoost::DeclareOptions()
 }
 
 //_______________________________________________________________________
-Bool_t TMVA::MethodBoost::BookMethod( Types::EMVA theMethod, TString methodTitle, TString theOption ) 
+Bool_t TMVA::MethodBoost::BookMethod( Types::EMVA theMethod, TString methodTitle, TString theOption )
 {
    // just registering the string from which the boosted classifier will be created
    fBoostedMethodName = Types::Instance().GetMethodName( theMethod );
@@ -249,6 +250,23 @@ void TMVA::MethodBoost::Train()
 
          // supressing the rest of the classifier output the right way
          MethodBase *meth = (dynamic_cast<MethodBase*>(method));
+
+	 // set fDataSetManager if MethodCategory (to enable Category to create datasetinfo objects) // DSMTEST
+	 if( meth->GetMethodType() == Types::kCategory ){ // DSMTEST
+	    MethodCategory *methCat = (dynamic_cast<MethodCategory*>(meth)); // DSMTEST
+	    if( !methCat ) // DSMTEST
+	       Log() << kERROR << "Method with type kCategory cannot be cast to MethodCategory. /MethodBoost" << Endl; // DSMTEST
+	    methCat->fDataSetManager = fDataSetManager; // DSMTEST
+	 } // DSMTEST
+
          meth->SetMsgType(kWARNING);
          meth->SetupMethod();
          meth->ParseOptions();
@@ -423,7 +441,7 @@ void TMVA::MethodBoost::TestClassification()
       for (Long64_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {
          Event* ev = Data()->GetEvent(ievt);
          Float_t w = ev->GetWeight();
-         if (ev->IsSignal()) {
+         if (DataInfo().IsSignal(ev)) {
             for (UInt_t imtd=0; imtd<nloop; imtd++) {
                fTestSigMVAHist[imtd]->Fill(fMethods[imtd]->GetMvaValue(),w);
             }
@@ -460,7 +478,7 @@ void TMVA::MethodBoost::WriteEvaluationHistosToFile(Types::ETreeType treetype)
 }
 
 //_______________________________________________________________________
-void TMVA::MethodBoost::ProcessOptions() 
+void TMVA::MethodBoost::ProcessOptions()
 {
    // process user options
 }
@@ -468,7 +486,7 @@ void TMVA::MethodBoost::ProcessOptions()
 //_______________________________________________________________________
 void TMVA::MethodBoost::SingleTrain()
 {
-   // initialization 
+   // initialization
    Data()->SetCurrentType(Types::kTraining);
    MethodBase* meth = dynamic_cast<MethodBase*>(GetLastMethod());
    meth->TrainMethod();
@@ -489,16 +507,16 @@ void TMVA::MethodBoost::FindMVACut()
       Double_t* err=new Double_t[nValBins];
       const Double_t valmin=-1.;
       const Double_t valmax=1.;
-      for (Int_t i=0;i<nValBins;i++) err[i]=0.; 
+      for (Int_t i=0;i<nValBins;i++) err[i]=0.;
       Double_t sum = 0.;
       for (Long64_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {
          Double_t weight = GetEvent(ievt)->GetWeight();
-         sum +=weight; 
-         Double_t val=method->GetMvaValue(); 
+         sum +=weight;
+         Double_t val=method->GetMvaValue();
          Int_t ibin = (Int_t) (((val-valmin)/(valmax-valmin))*nValBins);
          if (ibin>=nValBins) ibin = nValBins-1;
          if (ibin<0) ibin = 0;
-         if (Data()->GetEvent(ievt)->IsSignal()){
+         if (DataInfo().IsSignal(Data()->GetEvent(ievt))){
             for (Int_t i=ibin;i<nValBins;i++) err[i]+=weight;
          }
          else {
@@ -516,7 +534,7 @@ void TMVA::MethodBoost::FindMVACut()
       Double_t sigCutVal = valmin + (valmax-valmin)*minbin/nValBins;
       method->SetSignalReferenceCut(sigCutVal);
       //std::cout << "Setting method cut to " <<method->GetSignalReferenceCut()<< " minerr=" << minerr/sum<<endl;
-      delete err; 
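+      // err was allocated with new[] above, hence delete[] (plain delete was a bug)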
+      delete[] err;
    }
 }
 
@@ -534,7 +552,7 @@ void TMVA::MethodBoost::SingleBoost()
    // finding the wrong events and calculating their total weights
    for (Long64_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {
       ev = Data()->GetEvent(ievt);
-      sig=ev->IsSignal();
+      sig=DataInfo().IsSignal(ev);
       v = method->GetMvaValue();
       w = ev->GetWeight();
       wo = ev->GetOriginalWeight();
@@ -572,8 +590,8 @@ void TMVA::MethodBoost::SingleBoost()
       alphaWeight = -alphaWeight;
    }
    if (fBoostType == "AdaBoost") {
-      // ADA boosting, rescaling the weight of the wrong events according to the error level 
-      // over the entire test sample rescaling all the weights to have the same sum, but without 
+      // ADA boosting, rescaling the weight of the wrong events according to the error level
+      // over the entire test sample rescaling all the weights to have the same sum, but without
       // touching the original weights (changing only the boosted weight of all the events)
       // first reweight
       Double_t Factor=0., FactorOrig=0.;
@@ -584,9 +602,9 @@ void TMVA::MethodBoost::SingleBoost()
          Factor += ev->GetBoostWeight();
       }
       Factor = FactorOrig/Factor;
-      // next normalize the weights 
+      // next normalize the weights
       for (Long64_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {
-         Data()->GetEvent(ievt)->ScaleBoostWeight(Factor); 
+         Data()->GetEvent(ievt)->ScaleBoostWeight(Factor);
       }
 
    }
@@ -598,7 +616,7 @@ void TMVA::MethodBoost::SingleBoost()
          ev->SetBoostWeight(trandom->Rndm());
          sumAll1+=ev->GetWeight();
       }
-      // rescaling all the weights to have the same sum, but without touching the original 
+      // rescaling all the weights to have the same sum, but without touching the original
       // weights (changing only the boosted weight of all the events)
       Double_t Factor=sumAll/sumAll1;
       for (Long64_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {
@@ -611,7 +629,7 @@ void TMVA::MethodBoost::SingleBoost()
    else if (fMethodWeightType == "Average") fMethodWeight.push_back(1.0);
    else                                     fMethodWeight.push_back(0);
 
-   delete WrongDetection;
+   delete[] WrongDetection;
 }
 
 //_______________________________________________________________________
diff --git a/tmva/src/MethodCFMlpANN.cxx b/tmva/src/MethodCFMlpANN.cxx
index 15cd26450fad217bf21ee34b97f1e2bfcf36bba4..52d9621fcca4ee27f7d74b60b0e4b72c1aa0bb43 100644
--- a/tmva/src/MethodCFMlpANN.cxx
+++ b/tmva/src/MethodCFMlpANN.cxx
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$    
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate Data analysis       *
@@ -17,9 +17,9 @@
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
  *      LAPP, Annecy, France                                                      *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
@@ -28,10 +28,10 @@
  **********************************************************************************/
 
 //_______________________________________________________________________
-//                                                                      
+//
 // Begin_Html
 /*
-  Interface to Clermond-Ferrand artificial neural network 
+  Interface to Clermont-Ferrand artificial neural network
 
   <p>
   The CFMlpANN belongs to the class of Multilayer Perceptrons (MLP), which are 
@@ -214,7 +214,7 @@ void TMVA::MethodCFMlpANN::ProcessOptions()
          const Event * ev = GetEvent(ievt);
 
          // identify signal and background events  
-         (*fClass)[ievt] = ev->IsSignal() ? 1 : 2;
+         (*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;
       
          // use normalized input Data
          for (ivar=0; ivar<GetNvar(); ivar++) {
@@ -398,44 +398,44 @@ void TMVA::MethodCFMlpANN::ReadWeightsFromStream( istream & istr )
    // number of output classes must be 2
    if (lclass != 2) // wrong file
       Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of classes" << Endl;
-          
+
    // check that we are not at the end of the file
    if (istr.eof( ))
       Log() << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;
 
    // read extrema of input variables
-   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) 
+   for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
       istr >> fVarn_1.xmax[ivar] >> fVarn_1.xmin[ivar];
-            
+
    // read number of layers (sum of: input + output + hidden)
    istr >> fParam_1.layerm;
-            
+
    if (fYNN != 0) {
       for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
       delete[] fYNN;
       fYNN = 0;
    }
    fYNN = new Double_t*[fParam_1.layerm];
-   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {              
+   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
       // read number of neurons for each layer
       istr >> fNeur_1.neuron[layer];
       fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
    }
-            
+
    // to read dummy lines
    const Int_t nchar( 100 );
    char* dumchar = new char[nchar];
-            
+
    // read weights
    for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
-              
+
       Int_t nq = fNeur_1.neuron[layer]/10;
       Int_t nr = fNeur_1.neuron[layer] - nq*10;
-              
+
       Int_t kk(0);
       if (nr==0) kk = nq;
       else       kk = nq+1;
-              
+
       for (Int_t k=1; k<=kk; k++) {
          Int_t jmin = 10*k - 9;
          Int_t jmax = 10*k;
@@ -454,13 +454,13 @@ void TMVA::MethodCFMlpANN::ReadWeightsFromStream( istream & istr )
    }
 
    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
-              
+
       // skip 2 empty lines
       istr.getline( dumchar, nchar );
       istr.getline( dumchar, nchar );
-              
+
       istr >> fDel_1.temp[layer];
-   }            
+   }
 
    // sanity check
    if ((Int_t)GetNvar() != fNeur_1.neuron[0]) {
@@ -469,7 +469,7 @@ void TMVA::MethodCFMlpANN::ReadWeightsFromStream( istream & istr )
    }
 
    fNlayers = fParam_1.layerm;
-   delete dumchar;
+   delete[] dumchar;
 }
 
 //_______________________________________________________________________
@@ -510,52 +510,52 @@ void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const
 {
    // write weights to xml file
 
-   void *wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   void *wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr(wght,"NVars",fParam_1.nvar);
    gTools().AddAttr(wght,"NClasses",fParam_1.lclass);
    gTools().AddAttr(wght,"NLayers",fParam_1.layerm);
-   void* minmaxnode = gTools().xmlengine().NewChild(wght, 0, "VarMinMax");
+   void* minmaxnode = gTools().AddChild(wght, "VarMinMax");
    stringstream s;
    s.precision( 16 );
    for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++) 
       s << std::scientific << fVarn_1.xmin[ivar] <<  " " << fVarn_1.xmax[ivar] <<  " ";
-   gTools().xmlengine().AddRawLine( minmaxnode, s.str().c_str() );
-   void* neurons = gTools().xmlengine().NewChild(wght, 0, "NNeurons");
+   gTools().AddRawLine( minmaxnode, s.str().c_str() );
+   void* neurons = gTools().AddChild(wght, "NNeurons");
    stringstream n;
    n.precision( 16 );
    for (Int_t layer=0; layer<fParam_1.layerm; layer++)
       n << std::scientific << fNeur_1.neuron[layer] << " ";
-   gTools().xmlengine().AddRawLine( neurons, n.str().c_str() );
+   gTools().AddRawLine( neurons, n.str().c_str() );
    for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
-      void* layernode = gTools().xmlengine().NewChild(wght, 0, "Layer"+gTools().StringFromInt(layer));
+      void* layernode = gTools().AddChild(wght, "Layer"+gTools().StringFromInt(layer));
       gTools().AddAttr(layernode,"NNeurons",fNeur_1.neuron[layer]);
       void* neuronnode=NULL;
       for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
-         neuronnode = gTools().xmlengine().NewChild(layernode,0,"Neuron"+gTools().StringFromInt(neuron));
+         neuronnode = gTools().AddChild(layernode,"Neuron"+gTools().StringFromInt(neuron));
          stringstream weights;
          weights.precision( 16 );         
          weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);
          for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
             weights << " " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
          }
-         gTools().xmlengine().AddRawLine( neuronnode, weights.str().c_str() );
+         gTools().AddRawLine( neuronnode, weights.str().c_str() );
       }
    }
-   void* tempnode = gTools().xmlengine().NewChild(wght, 0, "LayerTemp");
+   void* tempnode = gTools().AddChild(wght, "LayerTemp");
    stringstream temp;
    temp.precision( 16 );
    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {         
        temp << std::scientific << fDel_1.temp[layer] << " ";
    }   
-   gTools().xmlengine().AddRawLine(tempnode, temp.str().c_str() );
+   gTools().AddRawLine(tempnode, temp.str().c_str() );
 }
 //_______________________________________________________________________
 void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
 {
    // read weights from xml file
    gTools().ReadAttr( wghtnode, "NLayers",fParam_1.layerm );
-   void* minmaxnode = gTools().xmlengine().GetChild(wghtnode);
-   const char* minmaxcontent = gTools().xmlengine().GetNodeContent(minmaxnode);
+   void* minmaxnode = gTools().GetChild(wghtnode);
+   const char* minmaxcontent = gTools().GetContent(minmaxnode);
    std::stringstream content(minmaxcontent);
    for (UInt_t ivar=0; ivar<GetNvar(); ivar++) 
       content >> fVarn_1.xmin[ivar] >> fVarn_1.xmax[ivar];
@@ -565,8 +565,8 @@ void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
       fYNN = 0;
    }
    fYNN = new Double_t*[fParam_1.layerm];
-   void *layernode=gTools().xmlengine().GetNext(minmaxnode);
-   const char* neuronscontent = gTools().xmlengine().GetNodeContent(layernode);
+   void *layernode=gTools().GetNextChild(minmaxnode);
+   const char* neuronscontent = gTools().GetContent(layernode);
    stringstream ncontent(neuronscontent);
    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {              
       // read number of neurons for each layer;
@@ -574,21 +574,21 @@ void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
       fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
    }
    for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
-      layernode=gTools().xmlengine().GetNext(layernode);
+      layernode=gTools().GetNextChild(layernode);
       void* neuronnode=NULL;
-      neuronnode = gTools().xmlengine().GetChild(layernode);
+      neuronnode = gTools().GetChild(layernode);
       for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
-         const char* neuronweights = gTools().xmlengine().GetNodeContent(neuronnode);
+         const char* neuronweights = gTools().GetContent(neuronnode);
          stringstream weights(neuronweights);
          weights >> Ww_ref(fNeur_1.ww, layer+1, neuron+1);
          for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
             weights >> W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
          }
-         neuronnode=gTools().xmlengine().GetNext(neuronnode);
+         neuronnode=gTools().GetNextChild(neuronnode);
       }
    } 
-   void* tempnode=gTools().xmlengine().GetNext(layernode);
-   const char* temp = gTools().xmlengine().GetNodeContent(tempnode);
+   void* tempnode=gTools().GetNextChild(layernode);
+   const char* temp = gTools().GetContent(tempnode);
    stringstream t(temp);
    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
       t >> fDel_1.temp[layer];
@@ -657,8 +657,12 @@ void TMVA::MethodCFMlpANN::PrintWeights( std::ostream & o ) const
       o << "Del.temp in layer " << layer << " :  " << fDel_1.temp[layer] << endl;
    }      
 }
-
 //_______________________________________________________________________
+TMVA::MethodCFMlpANN* TMVA::MethodCFMlpANN::This( void ) 
+{ 
+   // static pointer to this object (required for external functions)
+   return fgThis; 
+}  
 void TMVA::MethodCFMlpANN::MakeClassSpecific( std::ostream& fout, const TString& className ) const
 {
    // write specific classifier response
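
The gTools().xmlengine() calls replaced throughout this file (and in every Method*.cxx
below) now go through thin TMVA::Tools wrappers instead of talking to TXMLEngine
directly. A minimal sketch of the wrapper round trip as the call sites above use it;
the exact Tools signatures are inferred from this patch, not quoted from Tools.h:

    // Sketch only: write and read a flat list of weights via the Tools wrappers.
    #include <vector>
    #include "TMVA/Tools.h"   // provides TMVA::gTools()

    void WriteWeights( void* parent, const std::vector<Double_t>& w )
    {
       void* wght = TMVA::gTools().AddChild( parent, "Weights" );  // was xmlengine().NewChild( parent, 0, "Weights" )
       TMVA::gTools().AddAttr( wght, "N", w.size() );
       for (UInt_t i = 0; i < w.size(); i++) {
          void* node = TMVA::gTools().AddChild( wght, "W" );
          TMVA::gTools().AddAttr( node, "Index", i );
          TMVA::gTools().AddAttr( node, "Value", w[i] );
       }
    }

    void ReadWeights( void* wghtnode, std::vector<Double_t>& w )
    {
       UInt_t n; TMVA::gTools().ReadAttr( wghtnode, "N", n );
       w.resize( n );
       void* node = TMVA::gTools().GetChild( wghtnode );           // was xmlengine().GetChild(...)
       while (node) {
          UInt_t i; Double_t v;
          TMVA::gTools().ReadAttr( node, "Index", i );
          TMVA::gTools().ReadAttr( node, "Value", v );
          w[i] = v;
          node = TMVA::gTools().GetNextChild( node );              // was xmlengine().GetNext(...)
       }
    }
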
diff --git a/tmva/src/MethodCategory.cxx b/tmva/src/MethodCategory.cxx
index 94680c55eb642d6db0a9ab68dd1685968197a4cc..374267b7c835ba1e8f56af1ae5cbfed742f1af0f 100644
--- a/tmva/src/MethodCategory.cxx
+++ b/tmva/src/MethodCategory.cxx
@@ -68,8 +68,9 @@ TMVA::MethodCategory::MethodCategory( const TString& jobName,
                                       DataSetInfo& theData,
                                       const TString& theOption,
                                       TDirectory* theTargetDir )
-   :  TMVA::MethodCompositeBase( jobName, Types::kCategory, methodTitle, theData, theOption, theTargetDir ),
-   fCatTree(0)
+ : TMVA::MethodCompositeBase( jobName, Types::kCategory, methodTitle, theData, theOption, theTargetDir ),
+   fCatTree(0),
+   fDataSetManager(NULL)
 {
    // standard constructor
 }
@@ -79,7 +80,8 @@ TMVA::MethodCategory::MethodCategory( DataSetInfo& dsi,
                                       const TString& theWeightFile,
                                       TDirectory* theTargetDir )
    : TMVA::MethodCompositeBase( Types::kCategory, dsi, theWeightFile, theTargetDir ),
-   fCatTree(0)
+     fCatTree(0),
+     fDataSetManager(NULL)
 {
    // constructor from weight file
 }
@@ -183,7 +185,8 @@ TMVA::DataSetInfo& TMVA::MethodCategory::CreateCategoryDSI(const TCut& theCut,
    DataSetInfo* dsi = new DataSetInfo(dsiName);
 
    // register the new dsi
-   DataSetManager::Instance().AddDataSetInfo(*dsi);
+//   DataSetManager::Instance().AddDataSetInfo(*dsi); // DSMTEST replaced by following line
+   fDataSetManager->AddDataSetInfo(*dsi);
 
    // copy the targets and spectators from the old dsi to the new dsi
    std::vector<VariableInfo>::iterator itrVarInfo;
@@ -431,7 +434,7 @@ void TMVA::MethodCategory::ReadWeightsFromXML( void* wghtnode )
    TString theVariables;
    Int_t titleLength;
    gTools().ReadAttr( wghtnode, "NSubMethods",  nSubMethods );
-   void* subMethodNode = gTools().xmlengine().GetChild(wghtnode);
+   void* subMethodNode = gTools().GetChild(wghtnode);
 
    Log() << kINFO << "Recreating sub-classifiers from XML-file " << Endl;
 
@@ -481,7 +484,7 @@ void TMVA::MethodCategory::ReadWeightsFromXML( void* wghtnode )
          }
       }
 
-      subMethodNode = gTools().xmlengine().GetNext(subMethodNode);
+      subMethodNode = gTools().GetNextChild(subMethodNode);
    }
 
    InitCircularTree(DataInfo());
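
Both constructors above gain an fDataSetManager(NULL) initialiser, and
CreateCategoryDSI registers the new DataSetInfo through that pointer rather than
through the DataSetManager::Instance() singleton (the DSMTEST comment marks the
transition). The wiring this implies, sketched with an illustrative setter name;
the patch itself only shows the member and its NULL default:

    class DataSetManager;   // TMVA class, forward-declared for the sketch

    class MethodCategoryLike {
    public:
       MethodCategoryLike() : fDataSetManager(0) {}   // never leave the raw pointer uninitialised
       void SetDataSetManager( DataSetManager* dsm ) { fDataSetManager = dsm; }   // hypothetical injection point
    protected:
       DataSetManager* fDataSetManager;               // not owned; provided by the creator (e.g. the Factory)
    };
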
diff --git a/tmva/src/MethodCommittee.cxx b/tmva/src/MethodCommittee.cxx
index 23d73bf56cac2a8b021bcea39a7c9d588df89605..d3dc95d1dfa087b744ff4ca4c95d41fe2da43e19 100644
--- a/tmva/src/MethodCommittee.cxx
+++ b/tmva/src/MethodCommittee.cxx
@@ -259,7 +259,7 @@ Double_t TMVA::MethodCommittee::AdaBoost( TMVA::MethodBase* method )
       Bool_t isSignalType = mbase->IsSignalLike();
       
       // to prevent code duplication
-      if (isSignalType == ev->IsSignal())
+      if (isSignalType == DataInfo().IsSignal(ev))
          correctSelected.push_back( kTRUE );
       else {
          sumwfalse += ev->GetBoostWeight();
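
The ev->IsSignal() to DataInfo().IsSignal(ev) substitution made here recurs in
MethodDT, MethodFDA, MethodHMatrix, MethodKNN, MethodLD, MethodLikelihood and
MethodMLP below: the event no longer decides whether it is signal, the data-set
description does. A sketch of what such a helper presumably reduces to (the member
name is an assumption, not shown in this patch):

    // An event is "signal" if its class index matches the registered signal class.
    Bool_t TMVA::DataSetInfo::IsSignal( const TMVA::Event* ev ) const
    {
       return (ev->GetClass() == fSignalClass);   // fSignalClass: assumed index of the signal class
    }
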
diff --git a/tmva/src/MethodCompositeBase.cxx b/tmva/src/MethodCompositeBase.cxx
index 407876174850540c494dc2ceb05854c283dc3de4..e11f379a1bb3642a57633db464d0fbd5b847d537 100644
--- a/tmva/src/MethodCompositeBase.cxx
+++ b/tmva/src/MethodCompositeBase.cxx
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$   
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss,Or Cohen 
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss,Or Cohen
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -18,9 +18,9 @@
  *      Or Cohen        <orcohenor@gmail.com>    - Weizmann Inst., Israel         *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
  *      LAPP, Annecy, France                                                      *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
@@ -102,11 +102,11 @@ TMVA::IMethod* TMVA::MethodCompositeBase::GetMethod( const Int_t index ) const
 //_______________________________________________________________________
 void TMVA::MethodCompositeBase::AddWeightsXMLTo( void* parent ) const 
 {
-   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   void* wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr( wght, "NMethods",   fMethods.size()   );
    for (UInt_t i=0; i< fMethods.size(); i++) 
    {
-      void* methxml = gTools().xmlengine().NewChild( wght, 0, "Method" );
+      void* methxml = gTools().AddChild( wght, "Method" );
       MethodBase* method = dynamic_cast<MethodBase*>(fMethods[i]);
       gTools().AddAttr(methxml,"Index",          i ); 
       gTools().AddAttr(methxml,"Weight",         fMethodWeight[i]); 
@@ -142,7 +142,7 @@ void TMVA::MethodCompositeBase::ReadWeightsFromXML( void* wghtnode )
    fMethods.clear();
    fMethodWeight.clear();
    gTools().ReadAttr( wghtnode, "NMethods",  nMethods );
-   void* ch = gTools().xmlengine().GetChild(wghtnode);
+   void* ch = gTools().GetChild(wghtnode);
    for (UInt_t i=0; i< nMethods; i++) {
       Double_t methodWeight, methodSigCut;
       gTools().ReadAttr( ch, "Weight",   methodWeight   );
@@ -163,7 +163,7 @@ void TMVA::MethodCompositeBase::ReadWeightsFromXML( void* wghtnode )
       fMethodWeight.push_back(methodWeight);
       MethodBase* meth = dynamic_cast<MethodBase*>(fMethods.back());
 
-      void* methXML = gTools().xmlengine().GetChild(ch);
+      void* methXML = gTools().GetChild(ch);
       meth->SetupMethod();
       meth->ReadWeightsFromXML(methXML);
       meth->SetMsgType(kWARNING);
@@ -172,7 +172,7 @@ void TMVA::MethodCompositeBase::ReadWeightsFromXML( void* wghtnode )
       meth->CheckSetup();
       meth->SetSignalReferenceCut(methodSigCut);
 
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
    //Log() << kINFO << "Reading methods from XML done " << Endl;
 }
diff --git a/tmva/src/MethodCuts.cxx b/tmva/src/MethodCuts.cxx
index 02fa3ac0ab0383192c735add5958e678a5b8434b..814fec1930d07d594df0d81501e686a54751e059 100644
--- a/tmva/src/MethodCuts.cxx
+++ b/tmva/src/MethodCuts.cxx
@@ -98,7 +98,6 @@ End_Html */
 #include "TGraph.h"
 #include "TSpline.h"
 #include "TRandom3.h"
-#include "TXMLEngine.h"
 
 #include "TMVA/ClassifierFactory.h"
 #include "TMVA/MethodCuts.h"
@@ -1231,11 +1230,11 @@ void TMVA::MethodCuts::AddWeightsXMLTo( void* parent ) const
    std::vector<Double_t> cutsMin;
    std::vector<Double_t> cutsMax;
 
-   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   void* wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr( wght, "OptimisationMethod", (Int_t)fEffMethod);
    gTools().AddAttr( wght, "FitMethod",          (Int_t)fFitMethod );
    gTools().AddAttr( wght, "nbins",              fNbins );
-   gTools().xmlengine().AddComment( wght, Form( "Below are the optimised cuts for %i variables: Format: ibin(hist) effS effB cutMin[ivar=0] cutMax[ivar=0] ... cutMin[ivar=n-1] cutMax[ivar=n-1]", GetNvar() ) );
+   gTools().AddComment( wght, Form( "Below are the optimised cuts for %i variables: Format: ibin(hist) effS effB cutMin[ivar=0] cutMax[ivar=0] ... cutMin[ivar=n-1] cutMax[ivar=n-1]", GetNvar() ) );
 
    // NOTE: The signal efficiency written out into 
    //       the weight file does not correspond to the center of the bin within which the 
@@ -1250,11 +1249,11 @@ void TMVA::MethodCuts::AddWeightsXMLTo( void* parent ) const
       Double_t trueEffS = GetCuts( effS, cutsMin, cutsMax );
       if (TMath::Abs(trueEffS) < 1e-10) trueEffS = 0;
       
-      void* binxml = gTools().xmlengine().NewChild( wght, 0, "Bin" );
+      void* binxml = gTools().AddChild( wght, "Bin" );
       gTools().AddAttr( binxml, "ibin", ibin+1   );
       gTools().AddAttr( binxml, "effS", trueEffS );
       gTools().AddAttr( binxml, "effB", fEffBvsSLocal->GetBinContent( ibin + 1 ) );
-      void* cutsxml = gTools().xmlengine().NewChild( binxml, 0, "Cuts" );
+      void* cutsxml = gTools().AddChild( binxml, "Cuts" );
       for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
          gTools().AddAttr( cutsxml, Form( "cutMin_%i", ivar ), cutsMin[ivar] );
          gTools().AddAttr( cutsxml, Form( "cutMax_%i", ivar ), cutsMax[ivar] );
@@ -1319,12 +1318,12 @@ void TMVA::MethodCuts::ReadWeightsFromXML( void* wghtnode )
    // read efficeincies and cuts
    Int_t   tmpbin;
    Float_t tmpeffS, tmpeffB;
-   void* ch = gTools().xmlengine().GetChild(wghtnode);
+   void* ch = gTools().GetChild(wghtnode,"Bin");
    while (ch) {
-      if (strcmp(gTools().xmlengine().GetNodeName(ch),"Bin") !=0) {
-         ch = gTools().xmlengine().GetNext(ch);
-         continue;
-      }
+//       if (strcmp(gTools().GetName(ch),"Bin") !=0) {
+//          ch = gTools().GetNextChild(ch);
+//          continue;
+//       }
 
       gTools().ReadAttr( ch, "ibin", tmpbin  );
       gTools().ReadAttr( ch, "effS", tmpeffS );
@@ -1336,12 +1335,12 @@ void TMVA::MethodCuts::ReadWeightsFromXML( void* wghtnode )
       }
 
       fEffBvsSLocal->SetBinContent( tmpbin, tmpeffB );
-      void* ct = gTools().xmlengine().GetChild(ch);
+      void* ct = gTools().GetChild(ch);
       for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
          gTools().ReadAttr( ct, Form( "cutMin_%i", ivar ), fCutMin[ivar][tmpbin-1] );
          gTools().ReadAttr( ct, Form( "cutMax_%i", ivar ), fCutMax[ivar][tmpbin-1] );
       }
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch, "Bin");
    }
 }
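
The deleted strcmp loop and the new calls are equivalent: passing a node name to
GetChild/GetNextChild makes the wrapper skip siblings with a different name, which
the old code did by hand. The resulting idiom (wrapper semantics assumed from the
call sites):

    void* ch = gTools().GetChild( wghtnode, "Bin" );   // first child named "Bin", or 0
    while (ch) {
       // ... read ibin/effS/effB and the nested "Cuts" node ...
       ch = gTools().GetNextChild( ch, "Bin" );        // next sibling named "Bin", or 0
    }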
 
diff --git a/tmva/src/MethodDT.cxx b/tmva/src/MethodDT.cxx
index e94647beaf4418f2127498d507b2a857a77cd198..c31656be2304c8af9fd34473ce22364b07f83a99 100644
--- a/tmva/src/MethodDT.cxx
+++ b/tmva/src/MethodDT.cxx
@@ -269,8 +269,9 @@ TMVA::MethodDT::~MethodDT( void )
 //_______________________________________________________________________
 void TMVA::MethodDT::Train( void )
 {
+   TMVA::DecisionTreeNode::fgIsTraining=true;
    SeparationBase *qualitySepType = new GiniIndex();
-   fTree = new DecisionTree( fSepType, fNodeMinEvents, fNCuts, qualitySepType,
+   fTree = new DecisionTree( fSepType, fNodeMinEvents, fNCuts, 0, qualitySepType,
                              fRandomisedTrees, fUseNvars, 0 );
    if (fRandomisedTrees) Log()<<kWARNING<<" randomised Trees do not work yet in this framework," 
                                 << " as I do not know how to give each tree a new random seed, now they"
@@ -278,6 +279,7 @@ void TMVA::MethodDT::Train( void )
    fTree->SetAnalysisType( GetAnalysisType() );
 
    fTree->BuildTree(GetEventCollection(Types::kTraining));
+   TMVA::DecisionTreeNode::fgIsTraining=false;
 }
 
 //_______________________________________________________________________
@@ -450,7 +452,7 @@ Double_t TMVA::MethodDT::TestTreeQuality( DecisionTree *dt )
    for (Long64_t ievt=0; ievt<Data()->GetNEvents(); ievt++)
       {
          Event * ev = Data()->GetEvent(ievt);
-         if ((dt->CheckEvent(*ev) > dt->GetNodePurityLimit() ) == ev->IsSignal()) SumCorrect+=ev->GetWeight();
+         if ((dt->CheckEvent(*ev) > dt->GetNodePurityLimit() ) == DataInfo().IsSignal(ev)) SumCorrect+=ev->GetWeight();
          else SumWrong+=ev->GetWeight();
       }
    Data()->SetCurrentType(Types::kTraining);
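
Train() now brackets the tree building with the static
DecisionTreeNode::fgIsTraining flag. As written, an exception thrown inside
BuildTree() would leave the flag set; a small scope guard (illustrative only, not
part of this patch) would clear it on every exit path:

    // Illustrative RAII guard for the flag set/cleared in Train() above.
    struct DTTrainingFlagGuard {
       DTTrainingFlagGuard()  { TMVA::DecisionTreeNode::fgIsTraining = true;  }
       ~DTTrainingFlagGuard() { TMVA::DecisionTreeNode::fgIsTraining = false; }
    };

    // usage inside MethodDT::Train():
    //    DTTrainingFlagGuard guard;   // flag cleared even if BuildTree() throws
    //    ... create the DecisionTree and call BuildTree(...) ...
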
diff --git a/tmva/src/MethodFDA.cxx b/tmva/src/MethodFDA.cxx
index d1c7d19a48e8af2204d74213fe2eaeee996f282d..69cbd267c5fca34d41a8b7fc3a967e6665a6e3ca 100644
--- a/tmva/src/MethodFDA.cxx
+++ b/tmva/src/MethodFDA.cxx
@@ -1,4 +1,4 @@
-// @(#)root/tmva $Id$    
+// @(#)root/tmva $Id$
 // Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer
 
 /**********************************************************************************
@@ -26,7 +26,7 @@
  **********************************************************************************/
 
 //_______________________________________________________________________
-//                                                                      
+//
 // Function discriminant analysis (FDA). This simple classifier         //
 // fits any user-defined TFormula (via option configuration string) to  //
 // the training data by requiring a formula response of 1 (0) to signal //
@@ -46,6 +46,10 @@
 #include "TMath.h"
 #include <sstream>
 
+#include <algorithm>
+#include <iterator>
+#include <stdexcept>
+
 #include "TMVA/ClassifierFactory.h"
 #include "TMVA/MethodFDA.h"
 #include "TMVA/Tools.h"
@@ -108,6 +112,10 @@ void TMVA::MethodFDA::Init( void )
 
    fFitMethod       = "";
    fConverger       = "";
+
+   if( DoMulticlass() )
+      if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
+
 }
 
 //_______________________________________________________________________
@@ -140,7 +148,7 @@ void TMVA::MethodFDA::DeclareOptions()
 }
 
 //_______________________________________________________________________
-void TMVA::MethodFDA::CreateFormula() 
+void TMVA::MethodFDA::CreateFormula()
 {
    // translate formula string into TFormula, and parameter string into par ranges
 
@@ -150,16 +158,16 @@ void TMVA::MethodFDA::CreateFormula()
    // intepret formula string
 
    // replace the parameters "(i)" by the TFormula style "[i]"
-   for (Int_t ipar=0; ipar<fNPars; ipar++) {
+   for (UInt_t ipar=0; ipar<fNPars; ipar++) {
       fFormulaStringT.ReplaceAll( Form("(%i)",ipar), Form("[%i]",ipar) );
    }
 
    // sanity check, there should be no "(i)", with 'i' a number anymore
    for (Int_t ipar=fNPars; ipar<1000; ipar++) {
       if (fFormulaStringT.Contains( Form("(%i)",ipar) ))
-         Log() << kFATAL 
+         Log() << kFATAL
                  << "<CreateFormula> Formula contains expression: \"" << Form("(%i)",ipar) << "\", "
-                 << "which cannot be attributed to a parameter; " 
+               << "which cannot be attributed to a parameter; "
                  << "it may be that the number of variable ranges given via \"ParRanges\" "
                  << "does not match the number of parameters in the formula expression, please verify!"
                  << Endl;
@@ -173,19 +181,19 @@ void TMVA::MethodFDA::CreateFormula()
    // sanity check, there should be no "xi", with 'i' a number anymore
    for (UInt_t ivar=GetNvar(); ivar<1000; ivar++) {
       if (fFormulaStringT.Contains( Form("x%i",ivar) ))
-         Log() << kFATAL 
+         Log() << kFATAL
                  << "<CreateFormula> Formula contains expression: \"" << Form("x%i",ivar) << "\", "
                  << "which cannot be attributed to an input variable" << Endl;
    }
-   
+
    Log() << "User-defined formula string       : \"" << fFormulaStringP << "\"" << Endl;
    Log() << "TFormula-compatible formula string: \"" << fFormulaStringT << "\"" << Endl;
    Log() << "Creating and compiling formula" << Endl;
-   
+
    // create TF1
    if (fFormula) delete fFormula;
    fFormula = new TFormula( "FDA_Formula", fFormulaStringT );
-   
+
 #if ROOT_VERSION_CODE >= ROOT_VERSION(5,2,0)
    fFormula->Optimize();
 #endif
@@ -195,50 +203,50 @@ void TMVA::MethodFDA::CreateFormula()
       Log() << kFATAL << "<ProcessOptions> Formula expression could not be properly compiled" << Endl;
 
    // other sanity checks
-   if (fFormula->GetNpar() > fNPars + (Int_t)GetNvar())
-      Log() << kFATAL << "<ProcessOptions> Dubious number of parameters in formula expression: " 
+   if (fFormula->GetNpar() > (Int_t)(fNPars + GetNvar()))
+      Log() << kFATAL << "<ProcessOptions> Dubious number of parameters in formula expression: "
               << fFormula->GetNpar() << " - compared to maximum allowed: " << fNPars + GetNvar() << Endl;
 }
 
 //_______________________________________________________________________
-void TMVA::MethodFDA::ProcessOptions() 
+void TMVA::MethodFDA::ProcessOptions()
 {
    // the option string is decoded, for availabel options see "DeclareOptions"
 
    // process transient strings
    fParRangeStringT = fParRangeStringP;
 
-   // interpret parameter string   
+   // interpret parameter string
    fParRangeStringT.ReplaceAll( " ", "" );
    fNPars = fParRangeStringT.CountChar( ')' );
 
    TList* parList = gTools().ParseFormatLine( fParRangeStringT, ";" );
-   if (parList->GetSize() != fNPars) {
-      Log() << kFATAL << "<ProcessOptions> Mismatch in parameter string: " 
-              << "the number of parameters: " << fNPars << " != ranges defined: " 
-              << parList->GetSize() << "; the format of the \"ParRanges\" string "
-              << "must be: \"(-1.2,3.4);(-2.3,4.55);...\", "
-              << "where the numbers in \"(a,b)\" correspond to the a=min, b=max parameter ranges; "
-              << "each parameter defined in the function string must have a corresponding rang."
-              << Endl;
+   if ((UInt_t)parList->GetSize() != fNPars) {
+      Log() << kFATAL << "<ProcessOptions> Mismatch in parameter string: "
+            << "the number of parameters: " << fNPars << " != ranges defined: "
+            << parList->GetSize() << "; the format of the \"ParRanges\" string "
+            << "must be: \"(-1.2,3.4);(-2.3,4.55);...\", "
+            << "where the numbers in \"(a,b)\" correspond to the a=min, b=max parameter ranges; "
+            << "each parameter defined in the function string must have a corresponding range."
+            << Endl;
    }
 
    fParRange.resize( fNPars );
-   for (Int_t ipar=0; ipar<fNPars; ipar++) fParRange[ipar] = 0;
+   for (UInt_t ipar=0; ipar<fNPars; ipar++) fParRange[ipar] = 0;
 
-   for (Int_t ipar=0; ipar<fNPars; ipar++) {
+   for (UInt_t ipar=0; ipar<fNPars; ipar++) {
       // parse (a,b)
       TString str = ((TObjString*)parList->At(ipar))->GetString();
       Ssiz_t istr = str.First( ',' );
       TString pminS(str(1,istr-1));
-      TString pmaxS(str(istr+1,str.Length()-2-istr));      
+      TString pmaxS(str(istr+1,str.Length()-2-istr));
 
-      stringstream stmin; Float_t pmin; stmin << pminS.Data(); stmin >> pmin;       
+      stringstream stmin; Float_t pmin; stmin << pminS.Data(); stmin >> pmin;
       stringstream stmax; Float_t pmax; stmax << pmaxS.Data(); stmax >> pmax;
 
       // sanity check
       if (TMath::Abs(pmax-pmin) < 1.e-30) pmax = pmin;
-      if (pmin > pmax) Log() << kFATAL << "<ProcessOptions> max > min in interval for parameter: [" 
+      if (pmin > pmax) Log() << kFATAL << "<ProcessOptions> max > min in interval for parameter: ["
                                << ipar << "] : [" << pmin  << ", " << pmax << "] " << Endl;
 
       fParRange[ipar] = new Interval( pmin, pmax );
@@ -248,6 +256,21 @@ void TMVA::MethodFDA::ProcessOptions()
    // create formula
    CreateFormula();
 
+
+   // replicate the parameter ranges once per additional output dimension ==================
+   fOutputDimensions = 1;
+   if( DoRegression() )
+      fOutputDimensions = DataInfo().GetNTargets();
+   if( DoMulticlass() )
+      fOutputDimensions = DataInfo().GetNClasses();
+
+   for( Int_t dim = 1; dim < fOutputDimensions; ++dim ){ // start at 1: the first block of ranges already exists
+      for( UInt_t par = 0; par < fNPars; ++par ){
+         fParRange.push_back( fParRange.at(par) );
+      }
+   }
+   // ====================
+
    // create minimiser
    fConvergerFitter = (IFitterTarget*)this;
    if (fConverger == "MINUIT") {
@@ -255,18 +278,18 @@ void TMVA::MethodFDA::ProcessOptions()
       SetOptions(dynamic_cast<Configurable*>(fConvergerFitter)->GetOptions());
    }
 
-   if      (fFitMethod == "MC")     
+   if      (fFitMethod == "MC")
       fFitter = new MCFitter( *fConvergerFitter, Form("%s_Fitter_MC", GetName()), fParRange, GetOptions() );
-   else if (fFitMethod == "GA")     
+   else if (fFitMethod == "GA")
       fFitter = new GeneticFitter( *fConvergerFitter, Form("%s_Fitter_GA", GetName()), fParRange, GetOptions() );
-   else if (fFitMethod == "SA")     
+   else if (fFitMethod == "SA")
       fFitter = new SimulatedAnnealingFitter( *fConvergerFitter, Form("%s_Fitter_SA", GetName()), fParRange, GetOptions() );
-   else if (fFitMethod == "MINUIT") 
+   else if (fFitMethod == "MINUIT")
       fFitter = new MinuitFitter( *fConvergerFitter, Form("%s_Fitter_Minuit", GetName()), fParRange, GetOptions() );
    else {
       Log() << kFATAL << "<Train> Do not understand fit method:" << fFitMethod << Endl;
    }
-   
+
    fFitter->CheckForUnusedOptions();
 }
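
The replication block above establishes the parameter bookkeeping used by the rest
of this file: fParRange (and later fBestPars) hold one block of fNPars entries per
output dimension, all dimensions sharing the same ranges. Worked example: with
fNPars = 3 and two regression targets the vectors hold 6 entries, and dimension 1
owns indices 3..5:

    // Layout assumed by GetRegressionValues()/CalculateMulticlassValues() below:
    //   fBestPars = [ p0 p1 p2 | p0' p1' p2' ]    (fNPars = 3, fOutputDimensions = 2)
    //   dimension 'dim' owns indices [dim*fNPars, (dim+1)*fNPars)
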
 
@@ -278,11 +301,12 @@ TMVA::MethodFDA::~MethodFDA( void )
 }
 
 //_______________________________________________________________________
-Bool_t TMVA::MethodFDA::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t numberTargets )
+Bool_t TMVA::MethodFDA::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ )
 {
    // FDA can handle classification with 2 classes and regression with one regression-target
    if (type == Types::kClassification && numberClasses == 2) return kTRUE;
-   if (type == Types::kRegression     && numberTargets == 1) return kTRUE;
+   if (type == Types::kMulticlass ) return kTRUE;
+   if (type == Types::kRegression ) return kTRUE;
    return kFALSE;
 }
 
@@ -291,7 +315,11 @@ Bool_t TMVA::MethodFDA::HasAnalysisType( Types::EAnalysisType type, UInt_t numbe
 void TMVA::MethodFDA::ClearAll( void )
 {
    // delete and clear all class members
-   for (UInt_t ipar=0; ipar<fParRange.size(); ipar++) {
+   
+   // if there is more than one output dimension, the parameter ranges beyond the first
+   // fNPars entries are copies of the first block; delete each Interval only once
+//   fParRange.erase( fParRange.begin()+(fNPars), fParRange.end() );
+   for (UInt_t ipar=0; ipar<fParRange.size() && ipar<fNPars; ipar++) {
       if (fParRange[ipar] != 0) { delete fParRange[ipar]; fParRange[ipar] = 0; }
    }
    fParRange.clear(); 
@@ -319,7 +347,7 @@ void TMVA::MethodFDA::Train( void )
       Float_t w  = GetTWeight(ev);
 
       if (!DoRegression()) {
-         if (ev->IsSignal()) { fSumOfWeightsSig += w; }
+         if (DataInfo().IsSignal(ev)) { fSumOfWeightsSig += w; }
          else                { fSumOfWeightsBkg += w; }
       }
       fSumOfWeights += w;
@@ -381,40 +409,76 @@ Double_t TMVA::MethodFDA::EstimatorFunction( std::vector<Double_t>& pars )
 
    Double_t result, deviation;
    Double_t desired = 0.0;
-   for (UInt_t ievt=0; ievt<GetNEvents(); ievt++) {
-
-      // read the training event 
-      const Event* ev = GetEvent(ievt);
-
-      // calculate the deviation from the desired value
-
-      if (!DoRegression()) desired = (ev->IsSignal() ? 1.0 : 0.0);
-      else                 desired = ev->GetTarget( 0 );
-
-      result    = InterpretFormula( ev, pars );
-      deviation = TMath::Power(result - desired, 2);
-
-      if (!DoRegression())  estimator[Int_t(desired)] += deviation * ev->GetWeight();
-      else                  estimator[2]              += deviation * ev->GetWeight();
 
+   // calculate the deviation from the desired value
+   if( DoRegression() ){
+      for (UInt_t ievt=0; ievt<GetNEvents(); ievt++) {
+         // read the training event
+         const TMVA::Event* ev = GetEvent(ievt);
+
+         for( Int_t dim = 0; dim < fOutputDimensions; ++dim ){
+            desired   = ev->GetTarget( dim );
+            result    = InterpretFormula( ev, pars.begin(), pars.end() );
+            deviation = TMath::Power(result - desired, 2);
+            estimator[2] += deviation * ev->GetWeight();
+         }
+      }
+      estimator[2] /= sumOfWeights[2];
+      // return value is the event-weight normalised quadratic deviation from the targets
+      return estimator[2];
+
+   }else if( DoMulticlass() ){
+      for (UInt_t ievt=0; ievt<GetNEvents(); ievt++) {
+         // read the training event
+         const TMVA::Event* ev = GetEvent(ievt);
+
+         CalculateMulticlassValues( ev, pars, *fMulticlassReturnVal );
+
+         Double_t crossEntropy = 0.0;
+         for( Int_t dim = 0; dim < fOutputDimensions; ++dim ){
+            Double_t y = fMulticlassReturnVal->at(dim);
+            Double_t t = (ev->GetClass() == static_cast<UInt_t>(dim) ? 1.0 : 0.0 );
+            crossEntropy += t*log(y);
+         }
+         estimator[2] += ev->GetWeight()*crossEntropy;
+      }
+      estimator[2] /= sumOfWeights[2];
+      // return value is the event-weight normalised cross-entropy
+      return estimator[2];
+
+   }else{
+      for (UInt_t ievt=0; ievt<GetNEvents(); ievt++) {
+         // read the training event
+         const TMVA::Event* ev = GetEvent(ievt);
+
+         desired   = (DataInfo().IsSignal(ev) ? 1.0 : 0.0);
+         result    = InterpretFormula( ev, pars.begin(), pars.end() );
+         deviation = TMath::Power(result - desired, 2);
+         estimator[Int_t(desired)] += deviation * ev->GetWeight();
+      }
+      estimator[0] /= sumOfWeights[0];
+      estimator[1] /= sumOfWeights[1];
+      // return value is sum over normalised signal and background contributions
+      return estimator[0] + estimator[1];
    }
-   estimator[0] /= sumOfWeights[0];
-   estimator[1] /= sumOfWeights[1];
-   if (DoRegression()) estimator[2] /= sumOfWeights[2];
-   // return value is sum over normalised signal and background contributions
-
-   if (!DoRegression()) return estimator[0] + estimator[1];
-   else                 return estimator[2];
 }
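
One remark on the multiclass branch above: the textbook cross-entropy loss is the
negative log-likelihood, -sum_c t_c*log(y_c), which a minimiser drives towards
zero; the branch accumulates +t*log(y) (which is <= 0), so the sign convention
relative to the minimising fitters deserves a second look. A standalone sketch
with the conventional sign, assuming strictly positive responses:

    #include <cmath>
    #include <vector>

    // Per-event weighted cross-entropy: negative log-likelihood of the true class.
    double WeightedCrossEntropy( const std::vector<double>& y,   // per-class responses, all > 0
                                 unsigned int trueClass,         // class index of the event
                                 double weight )                 // event weight
    {
       double nll = 0.0;
       for (unsigned int c = 0; c < y.size(); ++c) {
          if (c == trueClass) nll -= std::log( y[c] );   // only the true class contributes (t_c = 1)
       }
       return weight * nll;
    }
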
 
 //_______________________________________________________________________
-Double_t TMVA::MethodFDA::InterpretFormula( const Event* event, std::vector<Double_t>& pars )
+Double_t TMVA::MethodFDA::InterpretFormula( const Event* event, std::vector<Double_t>::iterator parBegin, std::vector<Double_t>::iterator parEnd )
 {
    // formula interpretation
-   for (UInt_t ipar=0; ipar<pars.size(); ipar++) fFormula->SetParameter( ipar, pars[ipar] );
-   for (UInt_t ivar=0;  ivar<GetNvar();  ivar++) fFormula->SetParameter( fNPars+ivar, event->GetValue(ivar) );
+   Int_t ipar = 0;
+//    std::cout << "pars ";
+   for( std::vector<Double_t>::iterator it = parBegin; it != parEnd; ++it ){
+//       std::cout << " i" << ipar << " val" << (*it);
+      fFormula->SetParameter( ipar, (*it) );
+      ++ipar;
+   }
+   for (UInt_t ivar=0;  ivar<GetNvar();  ivar++) fFormula->SetParameter( ivar+ipar, event->GetValue(ivar) );
 
-   return fFormula->Eval( 0 );
+   Double_t result = fFormula->Eval( 0 );
+//    std::cout << "  result " << result << std::endl;
+   return result;
 }
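
InterpretFormula now takes an iterator range instead of the whole parameter
vector, so each output dimension can be evaluated on its own slice of fBestPars
without copying. Typical call, as used in GetRegressionValues() and
CalculateMulticlassValues() below:

    // Evaluate the formula for output dimension 'dim' on its parameter slice.
    Int_t offset = dim * fNPars;
    Double_t value = InterpretFormula( ev,
                                       fBestPars.begin() + offset,
                                       fBestPars.begin() + offset + fNPars );
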
 
 //_______________________________________________________________________
@@ -426,11 +490,11 @@ Double_t TMVA::MethodFDA::GetMvaValue( Double_t* err )
    // cannot determine error
    if (err != 0) *err = -1;
    
-   return InterpretFormula( ev, fBestPars );
+   return InterpretFormula( ev, fBestPars.begin(), fBestPars.end() );
 }
 
 //_______________________________________________________________________
-std::vector<Float_t>& TMVA::MethodFDA::GetRegressionValues()
+const std::vector<Float_t>& TMVA::MethodFDA::GetRegressionValues()
 {
    if (fRegressionReturnVal == NULL) fRegressionReturnVal = new std::vector<Float_t>();
    fRegressionReturnVal->clear();
@@ -438,7 +502,11 @@ std::vector<Float_t>& TMVA::MethodFDA::GetRegressionValues()
    const Event* ev = GetEvent();
 
    Event* evT = new Event(*ev);
-   evT->SetTarget(0,InterpretFormula( ev, fBestPars ));
+
+   for( Int_t dim = 0; dim < fOutputDimensions; ++dim ){
+      Int_t offset = dim*fNPars;
+      evT->SetTarget(dim,InterpretFormula( ev, fBestPars.begin()+offset, fBestPars.begin()+offset+fNPars ) ); 
+   }
    const Event* evT2 = GetTransformationHandler().InverseTransform( evT );
    fRegressionReturnVal->push_back(evT2->GetTarget(0));
 
@@ -447,6 +515,49 @@ std::vector<Float_t>& TMVA::MethodFDA::GetRegressionValues()
    return (*fRegressionReturnVal);
 }
   
+
+//_______________________________________________________________________
+const std::vector<Float_t>& TMVA::MethodFDA::GetMulticlassValues()
+{
+   if (fMulticlassReturnVal == NULL) fMulticlassReturnVal = new std::vector<Float_t>();
+   fMulticlassReturnVal->clear();
+
+   // returns MVA value for given event
+   const TMVA::Event* evt = GetEvent();
+
+   CalculateMulticlassValues( evt, fBestPars, *fMulticlassReturnVal );
+
+   return (*fMulticlassReturnVal);
+}
+
+
+//_______________________________________________________________________
+void TMVA::MethodFDA::CalculateMulticlassValues( const TMVA::Event*& evt, std::vector<Double_t>& parameters, std::vector<Float_t>& values)
+{
+   // calculate the values for multiclass
+   values.clear();
+
+//    std::copy( parameters.begin(), parameters.end(), std::ostream_iterator<double>( std::cout, " " ) );
+//    std::cout << std::endl;
+
+//    char inp;
+//    std::cin >> inp;
+
+   Double_t sum = 0.;   // initialise; only consumed by the (currently disabled) normalisation below
+   for( Int_t dim = 0; dim < fOutputDimensions; ++dim ){ // check for all other dimensions (=classes)
+      Int_t offset = dim*fNPars;
+      Double_t value = InterpretFormula( evt, parameters.begin()+offset, parameters.begin()+offset+fNPars );
+//       std::cout << "dim : " << dim << " value " << value << "    offset " << offset << std::endl;
+      values.push_back( value );
+      sum += value;
+   }
+
+//    // normalize to sum of value (commented out, .. have to think of how to treat negative classifier values)
+//    std::transform( fMulticlassReturnVal.begin(), fMulticlassReturnVal.end(), fMulticlassReturnVal.begin(), bind2nd( std::divides<float>(), sum) );
+}
+
+
+
 //_______________________________________________________________________
 void  TMVA::MethodFDA::ReadWeightsFromStream( istream& istr )
 {
@@ -457,19 +568,20 @@ void  TMVA::MethodFDA::ReadWeightsFromStream( istream& istr )
 
    fBestPars.clear();
    fBestPars.resize( fNPars );
-   for (Int_t ipar=0; ipar<fNPars; ipar++) istr >> fBestPars[ipar];
+   for (UInt_t ipar=0; ipar<fNPars; ipar++) istr >> fBestPars[ipar];
 }
 
 //_______________________________________________________________________
-void TMVA::MethodFDA::AddWeightsXMLTo( void* parent ) const 
+void TMVA::MethodFDA::AddWeightsXMLTo( void* parent ) const
 {
-   // create XML description for LD classification and regression 
+   // create XML description for LD classification and regression
    // (for arbitrary number of output classes/targets)
 
-   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   void* wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr( wght, "NPars",  fNPars );
-   for (Int_t ipar=0; ipar<fNPars; ipar++) {
-      void* coeffxml = gTools().xmlengine().NewChild( wght, 0, "Parameter" );
+   gTools().AddAttr( wght, "NDim",   fOutputDimensions );
+   for (UInt_t ipar=0; ipar<fNPars*fOutputDimensions; ipar++) {
+      void* coeffxml = gTools().AddChild( wght, "Parameter" );
       gTools().AddAttr( coeffxml, "Index", ipar   );
       gTools().AddAttr( coeffxml, "Value", fBestPars[ipar] );
    }
@@ -477,29 +589,36 @@ void TMVA::MethodFDA::AddWeightsXMLTo( void* parent ) const
    // write formula
    gTools().AddAttr( wght, "Formula", fFormulaStringP );
 }
-  
+
 //_______________________________________________________________________
-void TMVA::MethodFDA::ReadWeightsFromXML( void* wghtnode ) 
+void TMVA::MethodFDA::ReadWeightsFromXML( void* wghtnode )
 {
    // read coefficients from xml weight file
    gTools().ReadAttr( wghtnode, "NPars", fNPars );
 
+   try {
+      gTools().ReadAttr( wghtnode, "NDim" , fOutputDimensions );
+   }catch( std::logic_error& excpt ){
+      // attribute could not be read, it probably does not exist because the weight file has been written with an older version
+      fOutputDimensions = 1;
+   }
+
    fBestPars.clear();
-   fBestPars.resize( fNPars );
+   fBestPars.resize( fNPars*fOutputDimensions );
    
-   void* ch = gTools().xmlengine().GetChild(wghtnode);
+   void* ch = gTools().GetChild(wghtnode);
    Double_t par;
-   Int_t    ipar;
+   UInt_t    ipar;
    while (ch) {
       gTools().ReadAttr( ch, "Index", ipar );
       gTools().ReadAttr( ch, "Value", par  );
 
       // sanity check
-      if (ipar >= fNPars) Log() << kFATAL << "<ReadWeightsFromXML> index out of range: "
+      if (ipar >= fNPars*fOutputDimensions) Log() << kFATAL << "<ReadWeightsFromXML> index out of range: "
                                   << ipar << " >= " << fNPars << Endl;
       fBestPars[ipar] = par;
 
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 
    // read formula
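
The try/catch above is a backward-compatibility idiom worth noting: read the new
attribute if present, otherwise fall back to the value that old weight files
implied. In isolation (assuming, as the catch clause above does, that a missing
attribute surfaces as std::logic_error):

    #include <stdexcept>

    Int_t nDim = 1;                                   // value implied by pre-NDim weight files
    try {
       gTools().ReadAttr( wghtnode, "NDim", nDim );   // present only in newer files
    }
    catch (std::logic_error&) {
       nDim = 1;                                      // older file: single output dimension
    }
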
@@ -518,7 +637,7 @@ void TMVA::MethodFDA::MakeClassSpecific( std::ostream& fout, const TString& clas
    fout << "" << endl;
    fout << "inline void " << className << "::Initialize() " << endl;
    fout << "{" << endl;
-   for (Int_t ipar=0; ipar<fNPars; ipar++) {
+   for (UInt_t ipar=0; ipar<fNPars; ipar++) {
       fout << "   fParameter[" << ipar << "] = " << fBestPars[ipar] << ";" << endl;
    }
    fout << "}" << endl;
@@ -529,10 +648,10 @@ void TMVA::MethodFDA::MakeClassSpecific( std::ostream& fout, const TString& clas
 
    // replace parameters
    TString str = fFormulaStringT;
-   for (Int_t ipar=0; ipar<fNPars; ipar++) {
+   for (UInt_t ipar=0; ipar<fNPars; ipar++) {
       str.ReplaceAll( Form("[%i]", ipar), Form("fParameter[%i]", ipar) );
    }
-   
+
    // replace input variables
    for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
       str.ReplaceAll( Form("[%i]", ivar+fNPars), Form("inputValues[%i]", ivar) );
@@ -555,13 +674,13 @@ void TMVA::MethodFDA::GetHelpMessage() const
 {
    // get help message text
    //
-   // typical length of text line: 
+   // typical length of text line:
    //         "|--------------------------------------------------------------|"
    Log() << Endl;
    Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
    Log() << Endl;
    Log() << "The function discriminant analysis (FDA) is a classifier suitable " << Endl;
-   Log() << "to solve linear or simple nonlinear discrimination problems." << Endl; 
+   Log() << "to solve linear or simple nonlinear discrimination problems." << Endl;
    Log() << Endl;
    Log() << "The user provides the desired function with adjustable parameters" << Endl;
    Log() << "via the configuration option string, and FDA fits the parameters to" << Endl;
@@ -575,8 +694,8 @@ void TMVA::MethodFDA::GetHelpMessage() const
    Log() << "Please consult the Users Guide for the format of the formula string" << Endl;
    Log() << "and the allowed parameter ranges:" << Endl;
    if (gConfig().WriteOptionsReference()) {
-      Log() << "<a href=\"http://tmva.sourceforge.net/docu/TMVAUsersGuide.pdf\">" 
-              << "http://tmva.sourceforge.net/docu/TMVAUsersGuide.pdf</a>" << Endl;
+      Log() << "<a href=\"http://tmva.sourceforge.net/docu/TMVAUsersGuide.pdf\">"
+            << "http://tmva.sourceforge.net/docu/TMVAUsersGuide.pdf</a>" << Endl;
    }
    else Log() << "http://tmva.sourceforge.net/docu/TMVAUsersGuide.pdf" << Endl;
    Log() << Endl;
diff --git a/tmva/src/MethodFisher.cxx b/tmva/src/MethodFisher.cxx
index a46fc81531c00b5e9f9fb625e237f526cf9bf03a..2e815dc1dd06a4261176545921c54d2c3aa42e80 100644
--- a/tmva/src/MethodFisher.cxx
+++ b/tmva/src/MethodFisher.cxx
@@ -106,7 +106,6 @@
 
 #include "TMath.h"
 #include "Riostream.h"
-#include "TXMLEngine.h"
 
 #include "TMVA/VariableTransformBase.h"
 #include "TMVA/MethodFisher.h"
@@ -581,11 +580,11 @@ void TMVA::MethodFisher::AddWeightsXMLTo( void* parent ) const
 
    void* wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr( wght, "NCoeff", GetNvar()+1 );
-   void* coeffxml = gTools().xmlengine().NewChild(wght, 0, "Coefficient");
+   void* coeffxml = gTools().AddChild(wght, "Coefficient");
    gTools().AddAttr( coeffxml, "Index", 0   );
    gTools().AddAttr( coeffxml, "Value", fF0 );
    for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
-      coeffxml = gTools().xmlengine().NewChild( wght, 0, "Coefficient" );
+      coeffxml = gTools().AddChild( wght, "Coefficient" );
       gTools().AddAttr( coeffxml, "Index", ivar+1 );
       gTools().AddAttr( coeffxml, "Value", (*fFisherCoeff)[ivar] );
    }
@@ -599,14 +598,14 @@ void TMVA::MethodFisher::ReadWeightsFromXML( void* wghtnode )
    gTools().ReadAttr( wghtnode, "NCoeff", ncoeff );
    fFisherCoeff->resize(ncoeff-1);
 
-   void* ch = gTools().xmlengine().GetChild(wghtnode);
+   void* ch = gTools().GetChild(wghtnode);
    Double_t coeff;
    while (ch) {
       gTools().ReadAttr( ch, "Index", coeffidx );
       gTools().ReadAttr( ch, "Value", coeff    );
       if (coeffidx==0) fF0 = coeff;
       else             (*fFisherCoeff)[coeffidx-1] = coeff;
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 }
 
diff --git a/tmva/src/MethodHMatrix.cxx b/tmva/src/MethodHMatrix.cxx
index 0b33fa88064b0627de4908f33422e8d1a7dea098..f9de182e9dfa1415730246e18728f2f17248cad7 100644
--- a/tmva/src/MethodHMatrix.cxx
+++ b/tmva/src/MethodHMatrix.cxx
@@ -199,7 +199,7 @@ void TMVA::MethodHMatrix::ComputeCovariance( Bool_t isSignal, TMatrixD* mat )
       // in case event with neg weights are to be ignored
       if (IgnoreEventsWithNegWeightsInTraining() && weight <= 0) continue;
 
-      if (ev->IsSignal() != isSignal) continue;
+      if (DataInfo().IsSignal(ev) != isSignal) continue;
 
       // event is of good type
       sumOfWeights += weight;
@@ -312,7 +312,7 @@ Double_t TMVA::MethodHMatrix::GetChi2( Types::ESBType type ) const
 
 //_______________________________________________________________________
 void TMVA::MethodHMatrix::AddWeightsXMLTo( void* parent ) const {
-   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   void* wght = gTools().AddChild(parent, "Weights");
    gTools().WriteTVectorDToXML(wght,"VecMeanS",fVecMeanS); 
    gTools().WriteTVectorDToXML(wght,"VecMeanB", fVecMeanB);
    gTools().WriteTMatrixDToXML(wght,"InvHMatS",fInvHMatrixS); 
@@ -321,13 +321,13 @@ void TMVA::MethodHMatrix::AddWeightsXMLTo( void* parent ) const {
 }
 
 void TMVA::MethodHMatrix::ReadWeightsFromXML( void* wghtnode ){
-   void* descnode = gTools().xmlengine().GetChild(wghtnode);
+   void* descnode = gTools().GetChild(wghtnode);
    gTools().ReadTVectorDFromXML(descnode,"VecMeanS",fVecMeanS);
-   descnode = gTools().xmlengine().GetNext(descnode);
+   descnode = gTools().GetNextChild(descnode);
    gTools().ReadTVectorDFromXML(descnode,"VecMeanB", fVecMeanB);
-   descnode = gTools().xmlengine().GetNext(descnode);
+   descnode = gTools().GetNextChild(descnode);
    gTools().ReadTMatrixDFromXML(descnode,"InvHMatS",fInvHMatrixS); 
-   descnode = gTools().xmlengine().GetNext(descnode);
+   descnode = gTools().GetNextChild(descnode);
    gTools().ReadTMatrixDFromXML(descnode,"InvHMatB",fInvHMatrixB);
 }
 
diff --git a/tmva/src/MethodKNN.cxx b/tmva/src/MethodKNN.cxx
index 3893a021a4de613aba268012b94af82288fceaff..21016e691658a7a62fba053a9bf9d03c16fefd0c 100644
--- a/tmva/src/MethodKNN.cxx
+++ b/tmva/src/MethodKNN.cxx
@@ -229,7 +229,7 @@ void TMVA::MethodKNN::Train()
       
       Short_t event_type = 0;
 
-      if (evt_ -> IsSignal()) { // signal type = 1
+      if (DataInfo().IsSignal(evt_)) { // signal type = 1
          fSumOfWeightsS += weight;
          event_type = 1;
       }
diff --git a/tmva/src/MethodLD.cxx b/tmva/src/MethodLD.cxx
index c7bc57426cc0c171364fd1ed749442e80341b516..aa7f88fc996f61ced3432c49c5ee2f5a9c880b67 100644
--- a/tmva/src/MethodLD.cxx
+++ b/tmva/src/MethodLD.cxx
@@ -32,7 +32,6 @@
 #include "Riostream.h"
 #include "TMatrix.h"
 #include "TMatrixD.h"
-#include "TXMLEngine.h"
 
 #include "TMVA/VariableTransformBase.h"
 #include "TMVA/MethodLD.h"
@@ -254,7 +253,7 @@ void TMVA::MethodLD::GetSumVal( void )
          Double_t val = weight;
 
          if (!DoRegression())
-            val *= ev->IsSignal();
+            val *= DataInfo().IsSignal(ev);
          else //for regression
             val *= ev->GetTarget( ivar ); 
 
@@ -316,12 +315,12 @@ void TMVA::MethodLD::AddWeightsXMLTo( void* parent ) const
    // create XML description for LD classification and regression 
    // (for arbitrary number of output classes/targets)
 
-   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   void* wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr( wght, "NOut",   fNRegOut    );
    gTools().AddAttr( wght, "NCoeff", GetNvar()+1 );
    for (Int_t iout=0; iout<fNRegOut; iout++) {
       for (UInt_t icoeff=0; icoeff<GetNvar()+1; icoeff++) {
-         void* coeffxml = gTools().xmlengine().NewChild( wght, 0, "Coefficient" );
+         void* coeffxml = gTools().AddChild( wght, "Coefficient" );
          gTools().AddAttr( coeffxml, "IndexOut",   iout   );
          gTools().AddAttr( coeffxml, "IndexCoeff", icoeff );
          gTools().AddAttr( coeffxml, "Value",      (*(*fLDCoeff)[iout])[icoeff] );
@@ -350,7 +349,7 @@ void TMVA::MethodLD::ReadWeightsFromXML( void* wghtnode )
    fLDCoeff = new vector< vector< Double_t >* >(fNRegOut);
    for (Int_t ivar = 0; ivar<fNRegOut; ivar++) (*fLDCoeff)[ivar] = new std::vector<Double_t>( ncoeff );
 
-   void* ch = gTools().xmlengine().GetChild(wghtnode);
+   void* ch = gTools().GetChild(wghtnode);
    Double_t coeff;
    Int_t iout, icoeff;
    while (ch) {
@@ -360,7 +359,7 @@ void TMVA::MethodLD::ReadWeightsFromXML( void* wghtnode )
 
       (*(*fLDCoeff)[iout])[icoeff] = coeff;
 
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 }
 
diff --git a/tmva/src/MethodLikelihood.cxx b/tmva/src/MethodLikelihood.cxx
index 96ccbba218066b9e77cda2d5cb3285a08f6da000..7b31ab5fbcbd639ea1fd3da579678344b3b31b81 100644
--- a/tmva/src/MethodLikelihood.cxx
+++ b/tmva/src/MethodLikelihood.cxx
@@ -381,7 +381,7 @@ void TMVA::MethodLikelihood::Train( void )
                  << ", xmax="<<(*fHistSig)[ivar]->GetXaxis()->GetXmax()
                  << Endl;
          }
-         if (ev->IsSignal()) (*fHistSig)[ivar]->Fill( value, weight );
+         if (DataInfo().IsSignal(ev)) (*fHistSig)[ivar]->Fill( value, weight );
          else                (*fHistBgd)[ivar]->Fill( value, weight );
       }
    }
@@ -536,7 +536,7 @@ void TMVA::MethodLikelihood::WriteOptionsToStream( ostream& o, const TString& pr
 void TMVA::MethodLikelihood::AddWeightsXMLTo( void* parent ) const 
 {
    // write weights to XML
-   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   void* wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr(wght, "NVariables", GetNvar());
    gTools().AddAttr(wght, "NClasses", 2);
    void* pdfwrap;
@@ -544,11 +544,11 @@ void TMVA::MethodLikelihood::AddWeightsXMLTo( void* parent ) const
       if ( (*fPDFSig)[ivar]==0 || (*fPDFBgd)[ivar]==0 )
          Log() << kFATAL << "Reference histograms for variable " << ivar 
                << " don't exist, can't write it to weight file" << Endl;
-      pdfwrap = gTools().xmlengine().NewChild(wght, 0, "PDFDescriptor");
+      pdfwrap = gTools().AddChild(wght, "PDFDescriptor");
       gTools().AddAttr(pdfwrap, "VarIndex", ivar);
       gTools().AddAttr(pdfwrap, "ClassIndex", 0);
       (*fPDFSig)[ivar]->AddXMLTo(pdfwrap);
-      pdfwrap = gTools().xmlengine().NewChild(wght, 0, "PDFDescriptor");
+      pdfwrap = gTools().AddChild(wght, "PDFDescriptor");
       gTools().AddAttr(pdfwrap, "VarIndex", ivar);
       gTools().AddAttr(pdfwrap, "ClassIndex", 1);
       (*fPDFBgd)[ivar]->AddXMLTo(pdfwrap);
@@ -584,7 +584,7 @@ const TMVA::Ranking* TMVA::MethodLikelihood::CreateRanking()
 
          Double_t lk = this->GetMvaValue();
          Double_t w  = ev->GetWeight();
-         if (ev->IsSignal()) rS->Fill( lk, w );
+         if (DataInfo().IsSignal(ev)) rS->Fill( lk, w );
          else                rB->Fill( lk, w );
       }
 
@@ -601,7 +601,7 @@ const TMVA::Ranking* TMVA::MethodLikelihood::CreateRanking()
    }
 
    fDropVariable = -1;
-   
+
    return fRanking;
 }
 
@@ -610,7 +610,7 @@ void  TMVA::MethodLikelihood::WriteWeightsToStream( TFile& ) const
 {
    // write reference PDFs to ROOT file
    TString pname = "PDF_";
-   for (UInt_t ivar=0; ivar<GetNvar(); ivar++){ 
+   for (UInt_t ivar=0; ivar<GetNvar(); ivar++){
       (*fPDFSig)[ivar]->Write( pname + GetInputVar( ivar ) + "_S" );
       (*fPDFBgd)[ivar]->Write( pname + GetInputVar( ivar ) + "_B" );
    }
@@ -620,13 +620,13 @@ void  TMVA::MethodLikelihood::ReadWeightsFromXML(void* wghtnode)
 {
    // read weights from XML
    TString pname = "PDF_";
-
+   Bool_t addDirStatus = TH1::AddDirectoryStatus();
    TH1::AddDirectory(0); // this avoids the binding of the hists in TMVA::PDF to the current ROOT file
    UInt_t nvars=0;
    gTools().ReadAttr(wghtnode, "NVariables",nvars);
-   void* descnode = gTools().xmlengine().GetChild(wghtnode);
+   void* descnode = gTools().GetChild(wghtnode);
    for (UInt_t ivar=0; ivar<nvars; ivar++){
-      void* pdfnode = gTools().xmlengine().GetChild(descnode);
+      void* pdfnode = gTools().GetChild(descnode);
       Log() << kINFO << "Reading signal and background PDF for variable: " << GetInputVar( ivar ) << Endl;
       if ((*fPDFSig)[ivar] !=0) delete (*fPDFSig)[ivar];
       if ((*fPDFBgd)[ivar] !=0) delete (*fPDFBgd)[ivar];
@@ -635,12 +635,13 @@ void  TMVA::MethodLikelihood::ReadWeightsFromXML(void* wghtnode)
       (*fPDFSig)[ivar]->SetReadingVersion( GetTrainingTMVAVersionCode() );
       (*fPDFBgd)[ivar]->SetReadingVersion( GetTrainingTMVAVersionCode() );
       (*(*fPDFSig)[ivar]).ReadXML(pdfnode);
-      descnode = gTools().xmlengine().GetNext(descnode);
-      pdfnode  = gTools().xmlengine().GetChild(descnode);
+      descnode = gTools().GetNextChild(descnode);
+      pdfnode  = gTools().GetChild(descnode);
       (*(*fPDFBgd)[ivar]).ReadXML(pdfnode);
-      descnode = gTools().xmlengine().GetNext(descnode);
+      descnode = gTools().GetNextChild(descnode);
    }
-}  
+   TH1::AddDirectory(addDirStatus);
+}
 //_______________________________________________________________________
 void  TMVA::MethodLikelihood::ReadWeightsFromStream( istream & istr )
 {
@@ -649,7 +650,7 @@ void  TMVA::MethodLikelihood::ReadWeightsFromStream( istream & istr )
    TString pname = "PDF_";
    Bool_t addDirStatus = TH1::AddDirectoryStatus();
    TH1::AddDirectory(0); // this avoids the binding of the hists in TMVA::PDF to the current ROOT file
-   for (UInt_t ivar=0; ivar<GetNvar(); ivar++){ 
+   for (UInt_t ivar=0; ivar<GetNvar(); ivar++){
       Log() << kINFO << "Reading signal and background PDF for variable: " << GetInputVar( ivar ) << Endl;
       if ((*fPDFSig)[ivar] !=0) delete (*fPDFSig)[ivar];
       if ((*fPDFBgd)[ivar] !=0) delete (*fPDFBgd)[ivar];
@@ -670,7 +671,7 @@ void  TMVA::MethodLikelihood::ReadWeightsFromStream( TFile& rf )
    TString pname = "PDF_";
    Bool_t addDirStatus = TH1::AddDirectoryStatus();
    TH1::AddDirectory(0); // this avoids the binding of the hists in TMVA::PDF to the current ROOT file
-   for (UInt_t ivar=0; ivar<GetNvar(); ivar++){ 
+   for (UInt_t ivar=0; ivar<GetNvar(); ivar++){
       (*fPDFSig)[ivar] = (TMVA::PDF*)rf.Get( Form( "PDF_%s_S", GetInputVar( ivar ).Data() ) );
       (*fPDFBgd)[ivar] = (TMVA::PDF*)rf.Get( Form( "PDF_%s_B", GetInputVar( ivar ).Data() ) );
    }
@@ -685,7 +686,7 @@ void  TMVA::MethodLikelihood::WriteMonitoringHistosToFile( void ) const
    Log() << kINFO << "Write monitoring histograms to file: " << BaseDir()->GetPath() << Endl;
    BaseDir()->cd();
 
-   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) { 
+   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) {
       (*fHistSig)[ivar]->Write();
       (*fHistBgd)[ivar]->Write();
       if ((*fHistSig_smooth)[ivar] != 0) (*fHistSig_smooth)[ivar]->Write();
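
ReadWeightsFromXML above now saves the global TH1 directory-binding state before
switching it off and restores it afterwards, instead of leaving the global flag
clobbered for the caller. The idiom in isolation:

    #include "TH1.h"

    Bool_t addDirStatus = TH1::AddDirectoryStatus();   // remember the caller's setting
    TH1::AddDirectory( kFALSE );                       // PDFs must not attach hists to the open file
    // ... read / clone the histograms ...
    TH1::AddDirectory( addDirStatus );                 // restore whatever was set before
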
diff --git a/tmva/src/MethodMLP.cxx b/tmva/src/MethodMLP.cxx
index 2fce4958300a1774498037fe2074ba61f96af96f..6bf5d44faeb0a641a4da9381ea5a0e43941e2aae 100644
--- a/tmva/src/MethodMLP.cxx
+++ b/tmva/src/MethodMLP.cxx
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Matt Jachowski, Joerg Stelzer
+// Author: Andreas Hoecker, Matt Jachowski, Peter Speckmayer, Joerg Stelzer
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -18,7 +18,9 @@
  *      Matt Jachowski        <jachowski@stanford.edu> - Stanford University, USA *
  *      Kamil Kraszewski      <kalq@cern.ch>           - IFJ & UJ, Poland         *
  *      Maciej Kruk           <mkruk@cern.ch>          - IFJ & AGH, Poland        *
+ *      Peter Speckmayer      <peter.speckmayer@cern.ch> - CERN, Switzerland      *
  *      Joerg Stelzer         <stelzer@cern.ch>        - DESY, Germany            *
+ *      Jiahang Zhong         <Jiahang.Zhong@cern.ch>  - Academia Sinica, Taipei  *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
  *      CERN, Switzerland                                                         *
@@ -40,6 +42,7 @@
 #include "TFitter.h"
 #include "TMatrixD.h"
 #include "TMath.h"
+#include "TFile.h"
 
 #include "TMVA/ClassifierFactory.h"
 #include "TMVA/Interval.h"
@@ -70,6 +73,7 @@ TMVA::MethodMLP::MethodMLP( const TString& jobName,
                             const TString& theOption,
                             TDirectory* theTargetDir ) 
    : MethodANNBase( jobName, Types::kMLP, methodTitle, theData, theOption, theTargetDir ),
+     fPrior           (0.0),        //zjh
      fSamplingFraction(1.0),
      fSamplingEpoch   (0.0)
 {
@@ -81,6 +85,7 @@ TMVA::MethodMLP::MethodMLP( DataSetInfo& theData,
                             const TString& theWeightFile,
                             TDirectory* theTargetDir ) 
    : MethodANNBase( Types::kMLP, theData, theWeightFile, theTargetDir ),
+     fPrior           (0.0),        //zjh
      fSamplingFraction(1.0),
      fSamplingEpoch(0.0)
 {
@@ -99,7 +104,7 @@ Bool_t TMVA::MethodMLP::HasAnalysisType( Types::EAnalysisType type, UInt_t numbe
 {
    // MLP can handle classification with 2 classes and regression with one regression-target
    if (type == Types::kClassification && numberClasses == 2 ) return kTRUE;
-   //   if (type == Types::kRegression     && numberTargets == 1 ) return kTRUE;
+   if (type == Types::kMulticlass ) return kTRUE;
    if (type == Types::kRegression ) return kTRUE;
 
    return kFALSE;
@@ -112,6 +117,9 @@ void TMVA::MethodMLP::Init()
 
    // the minimum requirement to declare an event signal-like
    SetSignalReferenceCut( 0.5 );
+#ifdef MethodMLP_UseMinuit__
+   fgThis = this;
+#endif
 }
 
 //_______________________________________________________________________
@@ -169,6 +177,10 @@ void TMVA::MethodMLP::DeclareOptions()
    DeclareOptionRef(fSteps=-1, "ConvergenceTests", 
                     "Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)");
 
+   DeclareOptionRef(fUseRegulator=kTRUE, "UseRegulator",
+                    "Use regulator to avoid over-training (Bayesian neural network technique)");   //zjh
+   DeclareOptionRef(fUpdateLimit=10, "UpdateLimit",
+                    "Maximum number of regulator updates before training is stopped");   //zjh
 }
 
 //_______________________________________________________________________
@@ -243,7 +255,8 @@ Double_t TMVA::MethodMLP::CalculateEstimator( Types::ETreeType treeType, Int_t i
    Double_t estimator = 0;
 
    // loop over all training events 
-   Int_t nEvents = GetNEvents();
+   Int_t  nEvents  = GetNEvents();
+   UInt_t nClasses = DataInfo().GetNClasses();
    UInt_t nTgts = DataInfo().GetNTargets();
    for (Int_t i = 0; i < nEvents; i++) {
 
@@ -260,25 +273,43 @@ Double_t TMVA::MethodMLP::CalculateEstimator( Types::ETreeType treeType, Int_t i
             Double_t dt = v - ev->GetTarget( itgt );
             d += (dt*dt);
          }
-         d = TMath::Sqrt(d);
-      }
-      else {
+//         d = TMath::Sqrt(d);
+//         estimator += (d*d)*w;
+         estimator += d*w;
+      } else if (DoMulticlass() ) {
+         UInt_t cls = ev->GetClass();
+         for (UInt_t icls = 0; icls < nClasses; icls++) {
+            v = GetOutputNeuron( icls )->GetActivationValue();
+            Double_t dt = v - ( icls==cls ? 1.0 : 0.0 );
+            d += (dt*dt);
+         }
+//         d = TMath::Sqrt(d);
+         estimator += d*w;   //zjh
+      } else {
          Double_t desired = GetDesiredOutput( ev );
          v = GetOutputNeuron()->GetActivationValue();
-         d = v - desired;
+         if      (fEstimator==kMSE) d = (desired-v)*(desired-v);                                  //zjh
+         else if (fEstimator==kCE)  d = -2*(desired*TMath::Log(v)+(1-desired)*TMath::Log(1-v));   //zjh
+         estimator += d*w;   //zjh
       }      
-      estimator += (d*d)*w;
 
       // fill monitoring histograms
-      if (ev->IsSignal() && histS != 0) histS->Fill( float(v), float(w) );
+      if (DataInfo().IsSignal(ev) && histS != 0) histS->Fill( float(v), float(w) );
       else if              (histB != 0) histB->Fill( float(v), float(w) );
    }
 
    if (histS != 0) fEpochMonHistS.push_back( histS );
    if (histB != 0) fEpochMonHistB.push_back( histB );
 
-   if (DoRegression()) estimator = TMath::Sqrt(estimator/Float_t(nEvents));
-   else                estimator = estimator*0.5/Float_t(nEvents);
+   //if      (DoRegression()) estimator = TMath::Sqrt(estimator/Float_t(nEvents));
+   //else if (DoMulticlass()) estimator = TMath::Sqrt(estimator/Float_t(nEvents));
+   //else                     estimator = estimator*0.5/Float_t(nEvents);
+   if      (DoRegression()) estimator = estimator/Float_t(nEvents);
+   else if (DoMulticlass()) estimator = estimator/Float_t(nEvents);
+   else                     estimator = estimator/Float_t(nEvents);
+
+
+   //if (fUseRegulator) estimator+=fPrior/Float_t(nEvents);  //zjh
 
    Data()->SetCurrentType( saveType );
 
@@ -300,6 +331,21 @@ void TMVA::MethodMLP::Train(Int_t nEpochs)
    Log() << kDEBUG << "reinitalize learning rates" << Endl;
    InitializeLearningRates();
    PrintMessage("Training Network");
+
+   Int_t nEvents   = GetNEvents();
+   Int_t nSynapses = fSynapses->GetEntriesFast();
+   if (nSynapses>nEvents)
+      Log() << kFATAL << "Network too complex: #events=" << nEvents << "\t#synapses=" << nSynapses << Endl;
+
 #ifdef MethodMLP_UseMinuit__  
    if (useMinuit) MinuitMinimize();
 #else
@@ -308,6 +354,16 @@ void TMVA::MethodMLP::Train(Int_t nEpochs)
    else                               BackPropagationMinimize(nEpochs);
 #endif
 
+   //zjh
+   Float_t trainE = CalculateEstimator( Types::kTraining, 0 ); // estimator for training sample
+   Float_t testE  = CalculateEstimator( Types::kTesting,  0 ); // estimator for test sample
+   Log() << kDEBUG << "Finalizing...\ttrainE=" << trainE << "\ttestE=" << testE << Endl;
+   UpdateRegulators();
+   Int_t numSynapses = fSynapses->GetEntriesFast();
+   fInvHessian.ResizeTo(numSynapses,numSynapses);
+   GetApproxInvHessian( fInvHessian, false );
+   //zjh
+
 }
 
 //______________________________________________________________________________
@@ -339,6 +395,9 @@ void TMVA::MethodMLP::BFGSMinimize( Int_t nEpochs )
    TMatrixD Hessian ( nWeights, nWeights );
    TMatrixD Gamma   ( nWeights, 1 );
    TMatrixD Delta   ( nWeights, 1 );
+   Int_t        RegUpdateCD    = 0;   // counts line searches since the last regulator update //zjh
+   Int_t        RegUpdateTimes = 0;   // number of regulator updates performed so far         //zjh
+   Double_t     AccuError      = 0;   // accumulated relative error change
 
    Double_t trainE = -1;
    Double_t testE  = -1;
@@ -373,37 +432,67 @@ void TMVA::MethodMLP::BFGSMinimize( Int_t nEpochs )
       }
       Data()->SetCurrentType( Types::kTraining );
 
+      //zjh
+      if (fUseRegulator) {
+         UpdatePriors();
+         RegUpdateCD++;
+      }
+      //zjh
+
       SetGammaDelta( Gamma, Delta, buffer );
 
-      if (i % fResetStep == 0) {
+      if (i % fResetStep == 0 && i<0.5*nEpochs) { //zjh
          SteepestDir( Dir );
          Hessian.UnitMatrix();
+         RegUpdateCD=0;    //zjh
       }
       else {
          if (GetHessian( Hessian, Gamma, Delta )) {
             SteepestDir( Dir );
             Hessian.UnitMatrix();
+            RegUpdateCD=0;    //zjh
          }
          else SetDir( Hessian, Dir );
       }
 
+      Double_t dError=0;  //zjh
       if (DerivDir( Dir ) > 0) {
          SteepestDir( Dir );
          Hessian.UnitMatrix();
+         RegUpdateCD=0;    //zjh
       }
-      if (LineSearch( Dir, buffer )) {
+      if (LineSearch( Dir, buffer, &dError )) { //zjh
          Hessian.UnitMatrix();
          SteepestDir( Dir );
-         if (LineSearch(Dir, buffer)) {
+         RegUpdateCD=0;    //zjh
+         if (LineSearch(Dir, buffer, &dError)) {  //zjh
             i = nEpochs;
             Log() << kFATAL << "Line search failed! Huge troubles somewhere..." << Endl;
          }
       }
 
+      //zjh+
+      if (dError<0) Log() << kWARNING << "\nnegative dError=" << dError << Endl;
+      AccuError += dError;
+      if (TMath::Abs(dError)>0.0001) RegUpdateCD = 0;   // error still changing: reset stability counter
+
+      // refresh the regulators once the error has been stable for min(0.4*fResetStep,50) line searches
+      if ( fUseRegulator && RegUpdateTimes<fUpdateLimit && RegUpdateCD>=TMath::Min( 0.4*fResetStep, 50. )
+           && i<0.8*nEpochs && AccuError>0.01 ) {
+         Log() << kDEBUG << Endl;
+         Log() << kDEBUG << "\nUpdate regulators " << RegUpdateTimes << " on epoch " << i << "\tdError=" << dError << Endl;
+         UpdateRegulators();
+         Hessian.UnitMatrix();
+         RegUpdateCD = 0;
+         RegUpdateTimes++;
+         AccuError = 0;
+      }
+      //zjh-
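The heuristic above keys off the relative error change reported by LineSearch: any sizeable step resets the stability counter, and the regulators are refreshed only after a run of quiet steps, early enough in the training. A stand-alone illustration of that bookkeeping (a sketch with made-up names, not the member function itself; the caller increments quietSteps once per successful line search):

    #include <algorithm>
    #include <cmath>

    // returns true when the regulators should be refreshed this epoch
    bool shouldUpdateRegulators(double dError, int& quietSteps, double& accuError,
                                int updatesDone, int updateLimit,
                                int resetStep, int epoch, int nEpochs) {
       accuError += dError;
       if (std::fabs(dError) > 1e-4) quietSteps = 0;    // error still moving
       double needed = std::min(0.4*resetStep, 50.0);   // required quiet steps
       return updatesDone < updateLimit && quietSteps >= needed
              && epoch < 0.8*nEpochs && accuError > 0.01;
    }
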
+
       // monitor convergence of training and control sample
       if ((i+1)%fTestRate == 0) {
-         trainE = CalculateEstimator( Types::kTraining, i ); // estimator for training sample
-         testE  = CalculateEstimator( Types::kTesting,  i  );  // estimator for test sample
+         trainE = CalculateEstimator( Types::kTraining, i ); // estimator for training sample  //zjh
+         testE  = CalculateEstimator( Types::kTesting,  i ); // estimator for test sample      //zjh
          fEstimatorHistTrain->Fill( i+1, trainE );
          fEstimatorHistTest ->Fill( i+1, testE );
 
@@ -425,17 +514,22 @@ void TMVA::MethodMLP::BFGSMinimize( Int_t nEpochs )
       }
       
       // draw progress
-      TString convText = Form( "<D^2> (train/test): %.4g/%.4g", trainE, testE );
+      TString convText = Form( "<D^2> (Epoch/train/test): %d/%.4g/%.4g", i, trainE, testE ); //zjh
       if (fSteps > 0) {
          Float_t progress = 0;
          if (Float_t(i)/nEpochs < fSamplingEpoch) 
             progress = Progress()*fSamplingEpoch*fSamplingFraction*100;
          else
             progress = 100.0*(fSamplingEpoch*fSamplingFraction+(1.0-fSamplingFraction*fSamplingEpoch)*Progress());
-         
+         Float_t progress2 = 100.0*RegUpdateTimes/fUpdateLimit;   //zjh
+         if (progress2>progress) progress = progress2;            //zjh
          timer.DrawProgressBar( Int_t(progress), convText );
       }
-      else timer.DrawProgressBar( i, convText );
+      else {
+         Int_t progress = Int_t(nEpochs*RegUpdateTimes/Float_t(fUpdateLimit));   //zjh
+         if (progress<i) progress = i;                                           //zjh
+         timer.DrawProgressBar( progress, convText );                            //zjh
+      }
 
       // some verbose output
       if (fgPRINT_SEQ) {
@@ -493,7 +587,9 @@ void TMVA::MethodMLP::ComputeDEDw()
 
    for (Int_t i=0;i<nSynapses;i++) {
       TSynapse *synapse = (TSynapse*)fSynapses->At(i);
-      synapse->SetDEDw( synapse->GetDEDw() / nEvents );
+      Double_t DEDw = synapse->GetDEDw();          //zjh
+      if (fUseRegulator) DEDw += fPriorDev[i];     // add derivative of the weight prior //zjh
+      synapse->SetDEDw( DEDw / nEvents );          //zjh
    }
 }
 
@@ -512,10 +608,19 @@ void TMVA::MethodMLP::SimulateEvent( const Event* ev )
          Double_t error = ( GetOutputNeuron( itgt )->GetActivationValue() - desired )*eventWeight;
          GetOutputNeuron( itgt )->SetError(error);
       }
-   }
-   else {
+   } else if (DoMulticlass()) {
+      UInt_t nClasses = DataInfo().GetNClasses();
+      UInt_t cls      = ev->GetClass();
+      for (UInt_t icls = 0; icls < nClasses; icls++) {
+         Double_t desired  = ( cls==icls ? 1.0 : 0.0 );
+         Double_t error    = ( GetOutputNeuron( icls )->GetActivationValue() - desired )*eventWeight;
+         GetOutputNeuron( icls )->SetError(error);
+      }
+   } else {
       Double_t desired     = GetDesiredOutput( ev );
-      Double_t error = ( GetOutputNeuron()->GetActivationValue() - desired )*eventWeight;
+      Double_t error = -1;   //zjh
+      if      (fEstimator==kMSE) error = ( GetOutputNeuron()->GetActivationValue() - desired )*eventWeight;     //zjh
+      else if (fEstimator==kCE)  error = -eventWeight/(GetOutputNeuron()->GetActivationValue() - 1 + desired);  //zjh
       GetOutputNeuron()->SetError(error);
    }
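A quick check of the compact cross-entropy error term: for E = -( t ln v + (1-t) ln(1-v) ) the derivative with respect to the output v is

    dE/dv = -t/v + (1-t)/(1-v) = (v - t) / ( v*(1 - v) )

and for binary targets t in {0,1} this collapses to -1/(v - 1 + t): t=1 gives -1/v and t=0 gives 1/(1-v), which is exactly the expression multiplied by the event weight above.
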
 
@@ -590,7 +695,7 @@ Double_t TMVA::MethodMLP::DerivDir( TMatrixD &Dir )
 }
 
 //______________________________________________________________________________
-Bool_t TMVA::MethodMLP::LineSearch(TMatrixD &Dir, std::vector<Double_t> &buffer)
+Bool_t TMVA::MethodMLP::LineSearch(TMatrixD &Dir, std::vector<Double_t> &buffer, Double_t* dError)
 {
    Int_t IDX = 0;
    Int_t nSynapses = fSynapses->GetEntriesFast();
@@ -603,6 +708,7 @@ Bool_t TMVA::MethodMLP::LineSearch(TMatrixD &Dir, std::vector<Double_t> &buffer)
    }
 
    Double_t err1 = GetError();
+   Double_t errOrigin = err1;   //zjh
    Double_t alpha1 = 0.;
    Double_t alpha2 = fLastAlpha;
 
@@ -656,6 +762,7 @@ Bool_t TMVA::MethodMLP::LineSearch(TMatrixD &Dir, std::vector<Double_t> &buffer)
       }
       if (!bingo) {
          SetDirWeights(Origin, Dir, 0.);
+         Log() << kWARNING << "Line search failed even in the direction opposite to the steepest descent" << Endl;
          fLastAlpha = 0.05;
          return kTRUE;
       }
@@ -690,6 +797,9 @@ Bool_t TMVA::MethodMLP::LineSearch(TMatrixD &Dir, std::vector<Double_t> &buffer)
       buffer[IDX] = synapse->GetWeight() - Origin[IDX];
       IDX++;
    }
+
+   if (dError) (*dError) = (errOrigin-finalError)/finalError; //zjh
+
    return kFALSE;
 }
 
@@ -704,6 +814,7 @@ void TMVA::MethodMLP::SetDirWeights( std::vector<Double_t> &Origin, TMatrixD &Di
       synapse->SetWeight( Origin[IDX] + Dir[IDX][0] * alpha );
       IDX++;
    }
+   if (fUseRegulator) UpdatePriors();   //zjh
 }
 
 
@@ -722,27 +833,50 @@ Double_t TMVA::MethodMLP::GetError()
       Double_t error = 0.;
       if (DoRegression()) {
          for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
-            error += GetSqrErr( ev, itgt );
+            error += GetMSEErr( ev, itgt );   //zjh
          }
+      } else if ( DoMulticlass() ){
+         for( UInt_t icls = 0, iclsEnd = DataInfo().GetNClasses(); icls < iclsEnd; icls++ ){
+            error += GetMSEErr( ev, icls );
+         }
+      } else {
+         if      (fEstimator==kMSE) error = GetMSEErr( ev );   //zjh
+         else if (fEstimator==kCE)  error = GetCEErr( ev );    //zjh
       }
-      else {
-         error = GetSqrErr( ev );
-      }
-      Result += error * ev->GetWeight();   
+      Result += error * ev->GetWeight();
    }
+   if (fUseRegulator) Result += fPrior;  // add the Bayesian weight prior //zjh
+   if (Result<0) Log() << kWARNING << "\nNegative error estimate: " << Result-fPrior << " + prior " << fPrior << Endl;
    return Result;
 }
 
 //______________________________________________________________________________
-Double_t TMVA::MethodMLP::GetSqrErr( const Event* ev, UInt_t index )
+Double_t TMVA::MethodMLP::GetMSEErr( const Event* ev, UInt_t index )
 {
    Double_t error = 0;
    Double_t output = GetOutputNeuron( index )->GetActivationValue();
    Double_t target = 0;
-   if (DoRegression()) target = ev->GetTarget( index );
-   else                target = GetDesiredOutput( ev );  
+   if      (DoRegression()) target = ev->GetTarget( index );
+   else if (DoMulticlass()) target = (ev->GetClass() == index ? 1.0 : 0.0 );
+   else                     target = GetDesiredOutput( ev );  
 
-   error = (output-target)*(output-target);
+   error = 0.5*(output-target)*(output-target); //zjh
+
+   return error;
+}
+
+//______________________________________________________________________________
+Double_t TMVA::MethodMLP::GetCEErr( const Event* ev, UInt_t index )  //zjh
+{
+   Double_t error = 0;
+   Double_t output = GetOutputNeuron( index )->GetActivationValue();
+   Double_t target = 0;
+   if      (DoRegression()) target = ev->GetTarget( index );
+   else if (DoMulticlass()) target = (ev->GetClass() == index ? 1.0 : 0.0 );
+   else                     target = GetDesiredOutput( ev );
+
+   error = -(target*TMath::Log(output)+(1-target)*TMath::Log(1-output));
 
    return error;
 }
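Note that GetCEErr diverges when the activation saturates at exactly 0 or 1. Should that become a problem in practice, a clamped variant is a one-line guard; a sketch only, not part of this patch:

    #include <algorithm>
    #include <cmath>

    // cross-entropy of one event with the output clamped away from 0 and 1
    double safeCE(double output, double target, double eps = 1e-12) {
       double v = std::min(std::max(output, eps), 1.0 - eps);   // keeps log() finite
       return -( target*std::log(v) + (1.0 - target)*std::log(1.0 - v) );
    }
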
@@ -930,8 +1064,8 @@ void TMVA::MethodMLP::TrainOneEventFast(Int_t ievt, Float_t*& branchVar, Int_t&
    
    // get the desired output of this event
    Double_t desired;
-   if (type == 0) desired = fActivation->GetMin();  // background
-   else           desired = fActivation->GetMax();  // signal
+   if (type == 0) desired = fOutput->GetMin();  // background //zjh
+   else           desired = fOutput->GetMax();  // signal     //zjh
 
    // force the value for each input neuron
    Double_t x;
@@ -964,6 +1098,7 @@ void TMVA::MethodMLP::TrainOneEvent(Int_t ievt)
    ForceNetworkInputs( ev );
    ForceNetworkCalculations();
-   if (DoRegression()) UpdateNetwork( ev->GetTargets(),       eventWeight );
-   else                UpdateNetwork( GetDesiredOutput( ev ), eventWeight );
+   if      (DoRegression()) UpdateNetwork( ev->GetTargets(),       eventWeight );
+   else if (DoMulticlass()) UpdateNetwork( *DataInfo().GetTargetsForMulticlass( ev ), eventWeight );
+   else                     UpdateNetwork( GetDesiredOutput( ev ), eventWeight );
 }
 
@@ -971,7 +1106,7 @@ void TMVA::MethodMLP::TrainOneEvent(Int_t ievt)
 Double_t TMVA::MethodMLP::GetDesiredOutput( const Event* ev )
 {
    // get the desired output of this event
-   return DataInfo().IsSignal(ev)?fActivation->GetMax():fActivation->GetMin();
+   return DataInfo().IsSignal(ev)?fOutput->GetMax():fOutput->GetMin(); //zjh
 }
 
 
@@ -981,6 +1116,9 @@ void TMVA::MethodMLP::UpdateNetwork(Double_t desired, Double_t eventWeight)
    // update the network based on how closely
    // the output matched the desired output
    Double_t error = GetOutputNeuron()->GetActivationValue() - desired;
+   if      (fEstimator==kMSE) error = GetOutputNeuron()->GetActivationValue() - desired;             //zjh
+   else if (fEstimator==kCE)  error = -1./(GetOutputNeuron()->GetActivationValue() - 1 + desired);   //zjh
+   else                       Log() << kFATAL << "Estimator type unspecified!" << Endl;              //zjh
    error *= eventWeight;
    GetOutputNeuron()->SetError(error);
    CalculateNeuronDeltas();
@@ -992,7 +1130,7 @@ void TMVA::MethodMLP::UpdateNetwork(std::vector<Float_t>& desired, Double_t even
 {
    // update the network based on how closely
    // the output matched the desired output
-   for (UInt_t i = 0; i < DataInfo().GetNTargets(); i++) {
+   for (UInt_t i = 0; i < desired.size(); i++) {
       Double_t error = GetOutputNeuron( i )->GetActivationValue() - desired.at(i);
       error *= eventWeight;
       GetOutputNeuron( i )->SetError(error);
@@ -1078,6 +1216,7 @@ Double_t TMVA::MethodMLP::ComputeEstimator( std::vector<Double_t>& parameters)
       synapse = (TSynapse*)fSynapses->At(i);
       synapse->SetWeight(parameters.at(i));
    }
+   if (fUseRegulator) UpdatePriors();   //zjh
 
    Double_t estimator = CalculateEstimator();
 
@@ -1127,6 +1266,147 @@ void TMVA::MethodMLP::AdjustSynapseWeights()
    }
 }
 
+//_______________________________________________________________________
+void TMVA::MethodMLP::UpdatePriors()  //zjh
+{
+   // recompute the Gaussian weight prior P = 0.5*sum_i alpha_i*w_i^2 and its derivatives
+   fPrior = 0;
+   fPriorDev.clear();
+   Int_t nSynapses = fSynapses->GetEntriesFast();
+   for (Int_t i=0;i<nSynapses;i++) {
+      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
+      fPrior += 0.5*fRegulators[fRegulatorIdx[i]]*(synapse->GetWeight())*(synapse->GetWeight());
+      fPriorDev.push_back(fRegulators[fRegulatorIdx[i]]*(synapse->GetWeight()));
+   }
+}
+
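In formulas, with one regulator alpha_c per weight class c(i), UpdatePriors caches the Gaussian weight prior and its gradient:

    P       = 1/2 * sum_i alpha_{c(i)} * w_i^2
    dP/dw_i =       alpha_{c(i)} * w_i

which are exactly fPrior and fPriorDev[i] as filled above; ComputeDEDw adds the latter to each synapse gradient.
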
+//_______________________________________________________________________
+void TMVA::MethodMLP::UpdateRegulators()  //zjh
+{
+   // update the regulator strengths with the evidence approximation
+   TMatrixD InvH(0,0);
+   GetApproxInvHessian(InvH);
+   Int_t numSynapses   = fSynapses->GetEntriesFast();
+   Int_t numRegulators = fRegulators.size();
+   Float_t gamma    = 0,
+           variance = 1.;   // Gaussian noise
+   std::vector<Int_t>    nWDP(numRegulators);
+   std::vector<Double_t> trace(numRegulators), weightSum(numRegulators);
+   for (int i=0;i<numSynapses;i++) {
+      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
+      Int_t idx = fRegulatorIdx[i];
+      nWDP[idx]++;
+      trace[idx]     += InvH[i][i];
+      gamma          += 1-fRegulators[idx]*InvH[i][i];
+      weightSum[idx] += (synapse->GetWeight())*(synapse->GetWeight());
+   }
+   if (fEstimator==kMSE) {
+      if (GetNEvents()>gamma) variance = CalculateEstimator( Types::kTraining, 0 )/(1-(gamma/GetNEvents()));
+      else                    variance = CalculateEstimator( Types::kTraining, 0 );
+   }
+
+   for (int i=0;i<numRegulators;i++) {
+      fRegulators[i] = variance*nWDP[i]/(weightSum[i]+variance*trace[i]);
+      if (fRegulators[i]<0) fRegulators[i] = 0;
+      Log() << kDEBUG << "R" << i << ":" << fRegulators[i] << "\t";
+   }
+   Float_t trainE = CalculateEstimator( Types::kTraining, 0 ); // estimator for training sample  //zjh
+   Float_t testE  = CalculateEstimator( Types::kTesting,  0 ); // estimator for test sample      //zjh
+
+   Log() << kDEBUG << "\n" << "trainE:" << trainE << "\ttestE:" << testE << "\tvariance:" << variance << "\tgamma:" << gamma << Endl;
+}
+
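This is the evidence-approximation update familiar from Bayesian neural networks (MacKay-style): gamma = sum_i ( 1 - alpha_{c(i)} * H^-1_ii ) estimates the number of well-determined weights, the noise variance sigma^2 is taken from the training error, and each regulator moves to

    alpha_c  <-  sigma^2 * N_c / ( sum_{i in c} w_i^2 + sigma^2 * Tr_c(H^-1) )

with N_c the number of weights in class c and Tr_c(H^-1) the corresponding partial trace, accumulated above in nWDP, weightSum and trace.
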
+//_______________________________________________________________________
+void TMVA::MethodMLP::GetApproxInvHessian(TMatrixD& InvHessian, bool regulate)  //zjh
+{
+   // approximate the Hessian by summing, over all training events, the outer
+   // product of the output gradient w.r.t. the weights, then invert in place
+   Int_t numSynapses = fSynapses->GetEntriesFast();
+   InvHessian.ResizeTo( numSynapses, numSynapses );
+   InvHessian = 0;
+   TMatrixD sens(numSynapses,1);
+   TMatrixD sensT(1,numSynapses);
+   Int_t nEvents = GetNEvents();
+   for (Int_t i=0;i<nEvents;i++) {
+      GetEvent(i);
+      Double_t outputValue = GetMvaValue();   // force calculation
+      GetOutputNeuron()->SetError(1./fOutput->EvalDerivative(GetOutputNeuron()->GetValue()));
+      CalculateNeuronDeltas();
+      for (Int_t j = 0; j < numSynapses; j++){
+         TSynapse* synapse = (TSynapse*)fSynapses->At(j);
+         synapse->InitDelta();
+         synapse->CalculateDelta();
+         sens[j][0] = sensT[0][j] = synapse->GetDelta();
+      }
+      if      (fEstimator==kMSE) InvHessian += sens*sensT;
+      else if (fEstimator==kCE)  InvHessian += (outputValue*(1-outputValue))*sens*sensT;
+   }
+
+   if (regulate) {
+      for (Int_t i = 0; i < numSynapses; i++){
+         InvHessian[i][i] += fRegulators[fRegulatorIdx[i]];
+      }
+   }
+   else {
+      for (Int_t i = 0; i < numSynapses; i++){
+         InvHessian[i][i] += 1e-6;   // avoid precision problems that would destroy positive definiteness
+      }
+   }
+
+   InvHessian.Invert();
+}
+
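The matrix built here is the outer-product (Gauss-Newton) approximation H = sum_n beta_n * g_n * g_n^T, where g_n is the gradient of the network output for event n and beta_n is 1 for MSE or v(1-v) for cross entropy, plus a diagonal term: the regulators when regulate is true, a small jitter otherwise. A tiny ROOT snippet showing the same construction on two dummy gradients (assumes only TMatrixD):

    #include "TMatrixD.h"

    // outer-product Hessian approximation for two fake 3-dimensional gradients
    TMatrixD approxHessian() {
       const Int_t n = 3;
       Double_t g1[3] = { 0.2, -0.5,  1.0 };
       Double_t g2[3] = { 1.1,  0.3, -0.4 };
       TMatrixD H(n, n);
       for (Int_t a = 0; a < n; a++)
          for (Int_t b = 0; b < n; b++)
             H[a][b] = g1[a]*g1[b] + g2[a]*g2[b];
       for (Int_t a = 0; a < n; a++) H[a][a] += 1e-6;   // jitter keeps H invertible
       return H;
    }
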
+// zjh =>_______________________________________________________________________
+Double_t TMVA::MethodMLP::GetMvaValues( Double_t& errUpper, Double_t& errLower ) //zjh
+{
+   // return the MVA value with asymmetric errors propagated from the
+   // approximate inverse Hessian of the network weights
+   Double_t MvaValue = GetMvaValue();   // contains back propagation
+   if (fInvHessian.GetNcols()==0) {
+      Log() << kFATAL << "No inverse Hessian matrix available: GetMvaValues( Double_t& errUpper, Double_t& errLower ) cannot be used." << Endl;
+   }
+   Int_t numSynapses = fSynapses->GetEntriesFast();
+   if (fInvHessian.GetNcols()!=numSynapses) {
+      Log() << kWARNING << "Inconsistent dimension: " << fInvHessian.GetNcols() << " vs " << numSynapses << Endl;
+   }
+   TMatrixD sens(numSynapses,1);
+   TMatrixD sensT(1,numSynapses);
+   GetOutputNeuron()->SetError(1./fOutput->EvalDerivative(GetOutputNeuron()->GetValue()));
+   CalculateNeuronDeltas();
+   for (Int_t i = 0; i < numSynapses; i++){
+      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
+      synapse->InitDelta();
+      synapse->CalculateDelta();
+      sensT[0][i] = synapse->GetDelta();
+   }
+   sens.Transpose(sensT);
+   TMatrixD sig = sensT*fInvHessian*sens;
+   Double_t variance = sig[0][0];
+   Double_t median   = GetOutputNeuron()->GetValue();
+
+   // upper error
+   Double_t MvaUpper = fOutput->Eval(median+variance);
+   errUpper = MvaUpper-MvaValue;
+
+   // lower error
+   Double_t MvaLower = fOutput->Eval(median-variance);
+   errLower = MvaValue-MvaLower;
+
+   if (variance<0) {
+      Log() << kWARNING << "median=" << median << "\tvariance=" << variance
+            << "\tMvaUpper=" << MvaUpper << "\terrUpper=" << errUpper
+            << "\tMvaLower=" << MvaLower << "\terrLower=" << errLower << Endl;
+   }
+
+   return MvaValue;
+}
+//<= zjh
+
+
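Error propagation on the output neuron follows sigma^2 = g^T * H^-1 * g on the pre-activation, mapped through the output function separately upward and downward, hence the asymmetric errUpper/errLower. From analysis code the intended call pattern would look roughly like this (hypothetical helper; assumes a trained MethodMLP pointer):

    #include <iostream>
    #include "TMVA/MethodMLP.h"

    // print an MVA value together with its asymmetric error band
    void printMvaWithErrors( TMVA::MethodMLP* mlp ) {
       Double_t errUp = 0, errLo = 0;                       // filled by the method
       Double_t mva   = mlp->GetMvaValues( errUp, errLo );  // value plus errors
       std::cout << "MVA = " << mva << " +" << errUp << " -" << errLo << std::endl;
    }
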
 #ifdef MethodMLP_UseMinuit__
 
 //______________________________________________________________________________
@@ -1218,8 +1498,16 @@ void TMVA::MethodMLP::FCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t*
    Log() << kDEBUG << "***** New estimator: " << f << "  min: " << minf << " --> ncalls: " << nc << Endl;
 }
 
+//_______________________________________________________________________
+TMVA::MethodMLP* TMVA::MethodMLP::GetThisPtr()
+{
+   // global "this" pointer to be used in minuit
+   return fgThis;
+}
+
 #endif
 
+
 //_______________________________________________________________________
 void TMVA::MethodMLP::MakeClassSpecific( std::ostream& fout, const TString& className ) const
 {
diff --git a/tmva/src/MethodPDEFoam.cxx b/tmva/src/MethodPDEFoam.cxx
index 4fcbf82b4fe7606e137176836309022baba5cefb..7de2f3bc70321480737d8c4436827c1851c482f9 100644
--- a/tmva/src/MethodPDEFoam.cxx
+++ b/tmva/src/MethodPDEFoam.cxx
@@ -15,13 +15,13 @@
  *      Dominik Dannheim - CERN, Switzerland                                      *
  *      Alexander Voigt  - CERN, Switzerland                                      *
  *      Peter Speckmayer - CERN, Switzerland                                      *
- *                                                                                * 
+ *                                                                                *
  * Original author of the TFoam implementation:                                   *
  *      S. Jadach - Institute of Nuclear Physics, Cracow, Poland                  *
  *                                                                                *
  * Copyright (c) 2008:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      MPI-K Heidelberg, Germany                                                 *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
@@ -52,7 +52,7 @@ ClassImp(TMVA::MethodPDEFoam)
 //_______________________________________________________________________
 TMVA::MethodPDEFoam::MethodPDEFoam( const TString& jobName,
                                     const TString& methodTitle,
-                                    DataSetInfo& dsi, 
+                                    DataSetInfo& dsi,
                                     const TString& theOption,
                                     TDirectory* theTargetDir ) :
    MethodBase( jobName, Types::kPDEFoam, methodTitle, dsi, theOption, theTargetDir )
@@ -62,7 +62,7 @@ TMVA::MethodPDEFoam::MethodPDEFoam( const TString& jobName,
 
 //_______________________________________________________________________
 TMVA::MethodPDEFoam::MethodPDEFoam( DataSetInfo& dsi,
-                                    const TString& theWeightFile,  
+                                    const TString& theWeightFile,
                                     TDirectory* theTargetDir ) :
    MethodBase( Types::kPDEFoam, dsi, theWeightFile, theTargetDir )
 {
@@ -94,11 +94,11 @@ void TMVA::MethodPDEFoam::Init( void )
    fnSampl         = 2000;
    fnBin           = 5;
    fEvPerBin       = 10000;
-   fCutNmin        = true; 
+   fCutNmin        = true;
    fNmin           = 100;
    fCutRMSmin      = false;   // default TFoam method
    fRMSmin         = 0.01;
-   
+
    fKernel         = kNone; // default: use no kernel
    fTargetSelection= kMean; // default: use mean for target selection (only multi target regression!)
 
@@ -111,7 +111,7 @@ void TMVA::MethodPDEFoam::Init( void )
 }
 
 //_______________________________________________________________________
-void TMVA::MethodPDEFoam::DeclareOptions() 
+void TMVA::MethodPDEFoam::DeclareOptions()
 {
    //
    // Declare MethodPDEFoam options
@@ -126,7 +126,7 @@ void TMVA::MethodPDEFoam::DeclareOptions()
    DeclareOptionRef( fMultiTargetRegression = kFALSE,     "MultiTargetRegression", "Do regression with multiple targets");
    DeclareOptionRef( fCutNmin = true,         "CutNmin",  "Requirement for minimal number of events in cell");
    DeclareOptionRef( fNmin = 100,             "Nmin",     "Number of events in cell required to split cell");
-   
+
    DeclareOptionRef( fKernelStr = "None",     "Kernel",   "Kernel type used");
    AddPreDefVal(TString("None"));
    AddPreDefVal(TString("Gauss"));
@@ -343,7 +343,7 @@ void TMVA::MethodPDEFoam::TrainSeparatedClassification()
       // insert event to BinarySearchTree
       for (Long64_t k=0; k<GetNEvents(); k++) {
          const Event* ev = GetEvent(k);
-         if ((i==0 && ev->IsSignal()) || (i==1 && !ev->IsSignal()))
+         if ((i==0 && DataInfo().IsSignal(ev)) || (i==1 && !DataInfo().IsSignal(ev)))
             foam[i]->FillBinarySearchTree(ev, IgnoreEventsWithNegWeightsInTraining());
       }
 
@@ -360,7 +360,7 @@ void TMVA::MethodPDEFoam::TrainSeparatedClassification()
       // loop over all events -> fill foam cells
       for (Long64_t k=0; k<GetNEvents(); k++) {
          const Event* ev = GetEvent(k); 
-         if ((i==0 && ev->IsSignal()) || (i==1 && !ev->IsSignal()))
+         if ((i==0 && DataInfo().IsSignal(ev)) || (i==1 && !DataInfo().IsSignal(ev)))
             foam[i]->FillFoamCells(ev, IgnoreEventsWithNegWeightsInTraining());
       }
 
@@ -689,7 +689,7 @@ void TMVA::MethodPDEFoam::AddWeightsXMLTo( void* parent ) const
 {
    // create XML output of PDEFoam method variables
 
-   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   void* wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr( wght, "SigBgSeparated",  fSigBgSeparated );
    gTools().AddAttr( wght, "Frac",            fFrac );
    gTools().AddAttr( wght, "DiscrErrCut",     fDiscrErrCut );
@@ -710,13 +710,13 @@ void TMVA::MethodPDEFoam::AddWeightsXMLTo( void* parent ) const
    // save foam borders Xmin[i], Xmax[i]
    void *xmin_wrap;
    for (UInt_t i=0; i<Xmin.size(); i++){
-      xmin_wrap = gTools().xmlengine().NewChild( wght, 0, "Xmin" );
+      xmin_wrap = gTools().AddChild( wght, "Xmin" );
       gTools().AddAttr( xmin_wrap, "Index", i );
       gTools().AddAttr( xmin_wrap, "Value", Xmin.at(i) );
    }
    void *xmax_wrap;
    for (UInt_t i=0; i<Xmin.size(); i++){
-      xmax_wrap = gTools().xmlengine().NewChild( wght, 0, "Xmax" );
+      xmax_wrap = gTools().AddChild( wght, "Xmax" );
       gTools().AddAttr( xmax_wrap, "Index", i );
       gTools().AddAttr( xmax_wrap, "Value", Xmax.at(i) );
    }
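Throughout this patch, direct gTools().xmlengine() calls give way to the gTools() wrappers (AddChild, GetChild, GetNextChild), decoupling the method code from the underlying TXMLEngine. The write/read pattern in brief, a sketch using only the wrapper calls that appear here (parent, i and xmin are assumed to exist):

    // write: one child node per value, attributes carrying index and payload
    void* node = gTools().AddChild( parent, "Xmin" );
    gTools().AddAttr( node, "Index", i );
    gTools().AddAttr( node, "Value", xmin );

    // read: walk the children back in document order
    void* it = gTools().GetChild( parent );
    while (it) {
       gTools().ReadAttr( it, "Index", i );
       gTools().ReadAttr( it, "Value", xmin );
       it = gTools().GetNextChild( it );
    }
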
@@ -848,14 +848,14 @@ void TMVA::MethodPDEFoam::ReadWeightsFromXML( void* wghtnode )
    }
 
    // read foam range
-   void *xmin_wrap = gTools().xmlengine().GetChild( wghtnode );
+   void *xmin_wrap = gTools().GetChild( wghtnode );
    for (UInt_t counter=0; counter<kDim; counter++) {
       UInt_t i=0;
       gTools().ReadAttr( xmin_wrap , "Index", i );
       if (i>=kDim)
          Log() << kFATAL << "dimension index out of range:" << i << Endl;
       gTools().ReadAttr( xmin_wrap , "Value", Xmin.at(i) );
-      xmin_wrap = gTools().xmlengine().GetNext( xmin_wrap );
+      xmin_wrap = gTools().GetNextChild( xmin_wrap );
    }
 
    void *xmax_wrap = xmin_wrap;
@@ -865,7 +865,7 @@ void TMVA::MethodPDEFoam::ReadWeightsFromXML( void* wghtnode )
       if (i>=kDim)
          Log() << kFATAL << "dimension index out of range:" << i << Endl;
       gTools().ReadAttr( xmax_wrap , "Value", Xmax.at(i) );
-      xmax_wrap = gTools().xmlengine().GetNext( xmax_wrap );
+      xmax_wrap = gTools().GetNextChild( xmax_wrap );
    }
 
    // if foams exist, delete them
diff --git a/tmva/src/MethodPDERS.cxx b/tmva/src/MethodPDERS.cxx
index 8a67b97602ff819a45b157d6ce67f54cc485b864..e66919610074074c0640516c47231a675c8d427b 100644
--- a/tmva/src/MethodPDERS.cxx
+++ b/tmva/src/MethodPDERS.cxx
@@ -1029,7 +1029,7 @@ Float_t TMVA::MethodPDERS::GetError( Float_t countS, Float_t countB,
 void TMVA::MethodPDERS::AddWeightsXMLTo( void* parent ) const 
 {
    // write weights to xml file
-   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   void* wght = gTools().AddChild(parent, "Weights");
    if (fBinaryTree)
       fBinaryTree->AddXMLTo(wght);
    else
@@ -1041,7 +1041,7 @@ void TMVA::MethodPDERS::AddWeightsXMLTo( void* parent ) const
 void TMVA::MethodPDERS::ReadWeightsFromXML( void* wghtnode)
 {
    if (NULL != fBinaryTree) delete fBinaryTree; 
-   void* treenode = gTools().xmlengine().GetChild(wghtnode);
+   void* treenode = gTools().GetChild(wghtnode);
    fBinaryTree = dynamic_cast<BinarySearchTree*>(TMVA::BinaryTree::CreateFromXML(treenode));
    fBinaryTree->SetPeriode( GetNvar() );
    fBinaryTree->CalcStatistics();
@@ -1097,6 +1097,19 @@ void TMVA::MethodPDERS::ReadWeightsFromStream( TFile& /*rf*/ )
    // read training sample from file
 }
 
+//_______________________________________________________________________
+TMVA::MethodPDERS* TMVA::MethodPDERS::ThisPDERS( void )
+{
+   // static pointer to this object
+   return fgThisPDERS;
+}
+//_______________________________________________________________________
+void TMVA::MethodPDERS::UpdateThis( void )
+{
+   // update static this pointer
+   fgThisPDERS = this;
+}
+
 //_______________________________________________________________________
 void TMVA::MethodPDERS::MakeClassSpecific( std::ostream& fout, const TString& className ) const
 {
diff --git a/tmva/src/MethodRuleFit.cxx b/tmva/src/MethodRuleFit.cxx
index e288160f341936fc6dfdd7d8882936ba80313a72..2e3c52c21a24dbbb267f5bb46017ae5440441827 100644
--- a/tmva/src/MethodRuleFit.cxx
+++ b/tmva/src/MethodRuleFit.cxx
@@ -349,6 +349,7 @@ void TMVA::MethodRuleFit::InitEventSample( void )
 //_______________________________________________________________________
 void TMVA::MethodRuleFit::Train( void )
 {
    // training of rules
+   TMVA::DecisionTreeNode::fgIsTraining=true;
 
    InitMonitorNtuple();
@@ -363,6 +364,7 @@ void TMVA::MethodRuleFit::Train( void )
       TrainTMVARuleFit();
    }
    fRuleFit.GetRuleEnsemblePtr()->ClearRuleMap();
+   TMVA::DecisionTreeNode::fgIsTraining=false;
 }
 
 //_______________________________________________________________________
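Setting DecisionTreeNode::fgIsTraining by hand works, but leaves the flag stuck at true if Train() ever exits early. A scope guard would make the reset automatic; a sketch of the idea (hypothetical helper, not part of this patch):

    // RAII guard: raises the static training flag, restores it on scope exit
    struct TrainingFlagGuard {
       TrainingFlagGuard()  { TMVA::DecisionTreeNode::fgIsTraining = true;  }
       ~TrainingFlagGuard() { TMVA::DecisionTreeNode::fgIsTraining = false; }
    };
    // usage inside Train(): construct one guard at the top instead of the two
    // explicit assignments, so the flag is cleared on every return path
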
diff --git a/tmva/src/MethodSVM.cxx b/tmva/src/MethodSVM.cxx
index 357370f78b76244e33c54b579393b89f303fcaa7..706bd61551ae1b38a70a5a1a769a12078ce049a9 100644
--- a/tmva/src/MethodSVM.cxx
+++ b/tmva/src/MethodSVM.cxx
@@ -207,7 +207,7 @@ void TMVA::MethodSVM::Train()
 void TMVA::MethodSVM::AddWeightsXMLTo( void* parent ) const 
 {
    // write configuration to xml file
-   void* wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
+   void* wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr(wght,"fBparm",fBparm);
    gTools().AddAttr(wght,"fGamma",fGamma);
    gTools().AddAttr(wght,"NSupVec",fSupportVectors->size());
@@ -224,10 +224,10 @@ void TMVA::MethodSVM::AddWeightsXMLTo( void* parent ) const
       gTools().WriteTVectorDToXML(wght,"SupportVector",&temp);
    }
    // write max/min data values
-   void* maxnode = gTools().xmlengine().NewChild(wght, 0, "Maxima");
+   void* maxnode = gTools().AddChild(wght, "Maxima");
    for (UInt_t ivar = 0; ivar < GetNvar(); ivar++)
       gTools().AddAttr(maxnode, "Var"+gTools().StringFromInt(ivar), GetXmax(ivar));
-   void* minnode = gTools().xmlengine().NewChild(wght, 0, "Minima");
+   void* minnode = gTools().AddChild(wght, "Minima");
    for (UInt_t ivar = 0; ivar < GetNvar(); ivar++)
       gTools().AddAttr(minnode, "Var"+gTools().StringFromInt(ivar), GetXmin(ivar));
 }
@@ -257,7 +257,7 @@ void TMVA::MethodSVM::ReadWeightsFromXML( void* wghtnode )
       delete fSupportVectors;
    }
    fSupportVectors = new std::vector<TMVA::SVEvent*>(0);
-   void* supportvectornode = gTools().xmlengine().GetChild(wghtnode);
+   void* supportvectornode = gTools().GetChild(wghtnode);
    for (UInt_t ievt = 0; ievt < fNsupv; ievt++) {
       TVectorD temp(GetNvar()+4);
       gTools().ReadTVectorDFromXML(supportvectornode,"SupportVector",&temp);
@@ -269,13 +269,13 @@ void TMVA::MethodSVM::ReadWeightsFromXML( void* wghtnode )
          (*svector)[ivar]=temp[ivar+4];
          
       fSupportVectors->push_back(new SVEvent(svector,alpha,alpha_p,typeFlag));
-      supportvectornode = gTools().xmlengine().GetNext(supportvectornode);
+      supportvectornode = gTools().GetNextChild(supportvectornode);
    }
    
    void* maxminnode = supportvectornode;
    for (UInt_t ivar = 0; ivar < GetNvar(); ivar++)
       gTools().ReadAttr( maxminnode,"Var"+gTools().StringFromInt(ivar),(*fMaxVars)[ivar]);
-   maxminnode = gTools().xmlengine().GetNext(maxminnode);
+   maxminnode = gTools().GetNextChild(maxminnode);
    for (UInt_t ivar = 0; ivar < GetNvar(); ivar++)
       gTools().ReadAttr( maxminnode,"Var"+gTools().StringFromInt(ivar),(*fMinVars)[ivar]);
    if (fSVKernelFunction!=0) delete fSVKernelFunction;
diff --git a/tmva/src/MethodSeedDistance.cxx b/tmva/src/MethodSeedDistance.cxx
deleted file mode 100644
index 7af189b38949a05d618ddac1c0eb36101d40d633..0000000000000000000000000000000000000000
--- a/tmva/src/MethodSeedDistance.cxx
+++ /dev/null
@@ -1,613 +0,0 @@
-// @(#)root/tmva $Id$    
-// Author: Andreas Hoecker, Peter Speckmayer
-
-/**********************************************************************************
- * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
- * Package: TMVA                                                                  *
- * Class  : MethodSeedDistance                                                    *
- * Web    : http://tmva.sourceforge.net                                           *
- *                                                                                *
- * Description:                                                                   *
- *      Implementation                                                            *
- *                                                                                *
- * Authors (alphabetical):                                                        *
- *      Peter Speckmayer <speckmay@mail.cern.ch>  - CERN, Switzerland             *
- *                                                                                *
- * Copyright (c) 2005-2006:                                                       *
- *      CERN, Switzerland                                                         *
- *      MPI-K Heidelberg, Germany                                                 *
- *                                                                                *
- * Redistribution and use in source and binary forms, with or without             *
- * modification, are permitted according to the terms listed in LICENSE           *
- * (http://tmva.sourceforge.net/LICENSE)                                          *
- **********************************************************************************/
-
-//_______________________________________________________________________
-/* Begin_Html
-   This method is experimental only. It does not show any improvements
-   compared to any of the traditional methods.
-End_Html */
-//_______________________________________________________________________
-
-#include <sstream>
-
-#include "TList.h"
-#include "TFormula.h"
-#include "TString.h"
-#include "TObjString.h"
-#include "TRandom3.h"
-#include "TMath.h"
-
-#include "TMVA/ClassifierFactory.h"
-#include "TMVA/MethodSeedDistance.h"
-#include "TMVA/Tools.h"
-#include "TMVA/Interval.h"
-#include "TMVA/Timer.h"
-#include "TMVA/GeneticFitter.h"
-#include "TMVA/SimulatedAnnealingFitter.h"
-#include "TMVA/MinuitFitter.h"
-#include "TMVA/MCFitter.h"
-#include "TMVA/MetricEuler.h"
-#include "TMVA/MetricManhattan.h"
-#include "TMVA/SeedDistance.h"
-
-REGISTER_METHOD(SeedDistance)
-
-ClassImp(TMVA::MethodSeedDistance)
-
-//_______________________________________________________________________
-TMVA::MethodSeedDistance::MethodSeedDistance( const TString& jobName,
-                                              const TString& methodTitle,
-                                              DataSetInfo& theData, 
-                                              const TString& theOption,
-                                              TDirectory* theTargetDir ) :
-   TMVA::MethodBase( jobName, Types::kSeedDistance, methodTitle, theData, theOption, theTargetDir ), 
-   IFitterTarget(),
-   fSeedRangeStringP(""),
-   fSeedRangeStringT(""),
-   fScalingFactor(1),
-   fMetric(0),
-   fSeedDistance(0),
-   fSeeds(),
-   fMetricPars(),
-   fPars(),
-   fDataSeeds(0),
-   fBackSeeds(0),
-   fMetricType(""),
-   fPow2Estimator(kTRUE),
-   fNPars(0),
-   fParRange(),
-   fFitMethod(""),
-   fConverger(""),
-   fFitter(0),
-   fIntermediateFitter(0),
-   fEventsSig(),
-   fEventsBkg(),
-   fSumOfWeightsSig(0),
-   fSumOfWeightsBkg(0)
-{
-   // standard constructor
-}
-
-//_______________________________________________________________________
-TMVA::MethodSeedDistance::MethodSeedDistance( DataSetInfo& theData, 
-                                              const TString& theWeightFile,  
-                                              TDirectory* theTargetDir ) :
-   TMVA::MethodBase( Types::kSeedDistance, theData, theWeightFile, theTargetDir ),
-   IFitterTarget(),
-   fSeedRangeStringP(""),
-   fSeedRangeStringT(""),
-   fScalingFactor(1),
-   fMetric(0),
-   fSeedDistance(0),
-   fSeeds(),
-   fMetricPars(),
-   fPars(),
-   fDataSeeds(0),
-   fBackSeeds(0),
-   fMetricType(""),
-   fPow2Estimator(kTRUE),
-   fNPars(0),
-   fParRange(),
-   fFitMethod(""),
-   fConverger(""),
-   fFitter(0),
-   fIntermediateFitter(0),
-   fEventsSig(),
-   fEventsBkg(),
-   fSumOfWeightsSig(0),
-   fSumOfWeightsBkg(0)
-{
-   // constructor from weight file
-}
-
-//_______________________________________________________________________
-Bool_t TMVA::MethodSeedDistance::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ )
-{
-   // SeedDistance can handle classification with 2 classes
-   if( type == Types::kClassification && numberClasses == 2 ) return kTRUE;
-   return kFALSE;
-}
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::Init( void )
-{
-   // default initialisation
-}
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::DeclareOptions() 
-{
-   // define the options (their key words) that can be set in the option string 
-   //
-   // format of function string:
-   //    "x0*(0)+((1)/x1)**(2)..."
-   // where "[i]" are the parameters, and "xi" the input variables
-   //
-   // format of parameter string:
-   //    "(-1.2,3.4);(-2.3,4.55);..."
-   // where the numbers in "(a,b)" correspond to the a=min, b=max parameter ranges;
-   // each parameter defined in the function string must have a corresponding range
-   //
-   DeclareOptionRef( fSeedRangeStringP = "", "SeedRanges", "Range intervals confining the variables for the seeds" );
-   DeclareOptionRef( fDataSeeds = 1, "DataSeeds", "Number of used data seeds" );
-   DeclareOptionRef( fBackSeeds = 1, "BackSeeds", "Number of used background seeds" );
-   DeclareOptionRef( fMetricType = "Euler", "Metric", "Type of metric used (Euler, Manhattan)" );
-   AddPreDefVal(TString("Euler"));
-   AddPreDefVal(TString("Manhattan"));
-
-   DeclareOptionRef( fPow2Estimator = false, "Pow2Estimator", "Squared deviation from desired result (true) or number of correct classifications (false) as estimator" );
-   DeclareOptionRef( fScalingFactor = true, "Scaling", "Produces an additional free parameter for a Seed which scales the calculated distance" );
-
-   // fitter
-   DeclareOptionRef( fFitMethod = "MINUIT", "FitMethod", "Optimisation Method");
-   AddPreDefVal(TString("MC"));
-   AddPreDefVal(TString("GA"));
-   AddPreDefVal(TString("SA"));
-   AddPreDefVal(TString("MINUIT"));
-
-   DeclareOptionRef( fConverger = "None", "Converger", "FitMethod uses Converger to improve result");
-   AddPreDefVal(TString("None"));
-   AddPreDefVal(TString("MINUIT"));
-}
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::ProcessOptions() 
-{
-   // the option string is decoded, for availabel options see "DeclareOptions"
-   // clean up first
-   ClearAll();
-
-   if (IgnoreEventsWithNegWeightsInTraining()) {
-      Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
-            << GetMethodTypeName() 
-            << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
-            << Endl;
-   }
-
-   // process transient strings
-   //   fFormulaStringT  = fFormulaStringP;
-   fSeedRangeStringT = fSeedRangeStringP;
-
-   // interpret parameter string   
-   fSeedRangeStringT.ReplaceAll( " ", "" );
-   fNPars = fSeedRangeStringT.CountChar( ')' );
-   //   fNPars = 4;
-
-   //   Log() << kINFO << "rangestring " << fSeedRangeStringT << Endl;
-   //   Log() << kINFO << "rangestring number ) " << fNPars << Endl;
-
-   TList* parList = gTools().ParseFormatLine( fSeedRangeStringT, ";" );
-   //   if (parList->GetSize()*2 != fNPars) {
-   //      Log() << kFATAL << "<ProcessOptions> Mismatch in parameter string: " 
-   //              << "the number of parameters: " << fNPars << " != ranges defined: " 
-   //              << parList->GetSize() << "; the format of the \"SeedRanges\" string "
-   //              << "must be: \"(-1.2,3.4);(-2.3,4.55);...\", "
-   //              << "where the numbers in \"(a,b)\" correspond to the a=min, b=max parameter ranges; "
-   //              << "each parameter defined in the function string must have a corresponding rang."
-   //              << Endl;
-   //   }
-
-   fParRange.resize( fNPars );
-   for (Int_t ipar=0; ipar<fNPars; ipar++) fParRange[ipar] = 0;
-
-   for (Int_t ipar=0; ipar<fNPars; ipar++) {
-      // parse (a,b)
-      TString str = ((TObjString*)parList->At(ipar))->GetString();
-      Ssiz_t istr = str.First( ',' );
-      TString pminS(str(1,istr-1));
-      TString pmaxS(str(istr+1,str.Length()-2-istr));
-      std::stringstream st;
-      st.precision( 16 );
-      st << std::scientific << pminS.Data();
-      Float_t pmin;
-      st >> pmin;
-      st << std::scientific << pmaxS.Data();
-      Float_t pmax;
-      st >> pmax;
-
-      // sanity check
-      if (pmin > pmax) Log() << kFATAL << "<ProcessOptions> max > min in interval for parameter: [" 
-                               << ipar << "] : [" << pmin  << ", " << pmax << "] " << Endl;
-
-      fParRange[ipar] = new Interval( pmin, pmax );
-   }
-
-   delete parList;
-
-   if( fScalingFactor ){
-      fParRange.push_back( new Interval( 0.0, 1.0 ) );
-   }
-   
-   
-   for( Int_t i = 0; i< fDataSeeds+fBackSeeds; i++ ){
-      fSeeds.push_back( std::vector< Double_t >() );
-      for(std::vector<TMVA::Interval*>::const_iterator parIt = fParRange.begin(); parIt != fParRange.end(); parIt++) {
-         fSeeds[i].push_back( (*parIt)->GetMean() );
-      }
-   }
-
-   std::vector<Interval*>::iterator maxpos;
-   for( Int_t i = 1; i< fDataSeeds+fBackSeeds; i++ ){
-      maxpos = fParRange.begin();
-      for( Int_t j=0; j< fNPars; j++ ){
-         maxpos++;
-      }
-      if( fScalingFactor ){
-         maxpos++;
-      }
-      fParRange.insert( fParRange.end(), fParRange.begin(), maxpos );
-   }
-
-   for( Int_t i = 0; i < fNPars; i++) {
-      fMetricPars.push_back( 0.5 );
-      fParRange.push_back( new Interval( 0.0, 1.0 ) );
-   }
-   
-   if( fMetricType == "Euler" )     fMetric = new MetricEuler();
-   if( fMetricType == "Manhattan" ) fMetric = new MetricManhattan();
-
-   fMetric->SetParameters( &fMetricPars );
-   fSeedDistance = new SeedDistance( *fMetric, fSeeds );
-
-   fIntermediateFitter = (TMVA::IFitterTarget*)this;
-   if (fConverger == "MINUIT")
-      fIntermediateFitter = new TMVA::MinuitFitter( *this, Form("%s_MINUIT", GetName()), fParRange, GetOptions() );
-   if      (fFitMethod == "MC")     fFitter = new TMVA::MCFitter                ( *fIntermediateFitter, Form("%sFitter_MC", GetName()), fParRange, GetOptions() );
-   else if (fFitMethod == "GA")     fFitter = new TMVA::GeneticFitter           ( *fIntermediateFitter, Form("%sFitter_GA", GetName()), fParRange, GetOptions() );
-   else if (fFitMethod == "SA")     fFitter = new TMVA::SimulatedAnnealingFitter( *fIntermediateFitter, Form("%sFitter_SA", GetName()), fParRange, GetOptions() );
-   else if (fFitMethod == "MINUIT") fFitter = new TMVA::MinuitFitter            ( *fIntermediateFitter, Form("%sFitter_MINUIT", GetName()), fParRange, GetOptions() );
-   else {
-      Log() << kFATAL << "<Train> Do not understand fit method: " << fFitMethod << Endl;
-   }
-   
-   fFitter->CheckForUnusedOptions();
-   
-}
-
-//_______________________________________________________________________
-TMVA::MethodSeedDistance::~MethodSeedDistance( void )
-{
-   // destructor
-   ClearAll();
-}
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::ClearAll( void )
-{
-   // reset all parameters of the method
-   std::map< Interval*, Int_t > delmap;
-    
-   for (UInt_t ipar=0; ipar<fParRange.size(); ipar++) {
-      delmap[fParRange[ipar]] = ipar;
-      fParRange[ipar] = 0;
-   }
-   for( std::map< Interval*, Int_t >::iterator it = delmap.begin(); it != delmap.end(); it++ ){
-      delete it->first;
-   }
-   fParRange.clear(); 
-
-   fMetricPars.clear();
-
-   fPars.clear();
-}
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::Train( void )
-{
-   // FDA training 
-
-   // cache training events
-   fSumOfWeightsSig = 0;
-   fSumOfWeightsBkg = 0;
-
-   for (Int_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {
-
-      const Event*  ev = Data()->GetEvent(ievt);
-      Float_t w  = ev->GetWeight();
-
-      if (ev->IsSignal()) { fEventsSig.push_back( ev ); fSumOfWeightsSig += w; }
-      else                { fEventsBkg.push_back( ev ); fSumOfWeightsBkg += w; }
-   }
-
-   // sanity check
-   if (fSumOfWeightsSig <= 0 || fSumOfWeightsBkg <= 0) {
-      Log() << kFATAL << "<Train> Troubles in sum of weights: " 
-              << fSumOfWeightsSig << " (S) : " << fSumOfWeightsBkg << " (B)" << Endl;
-   }
-
-   // starting values (not used by all fitters)
-   fPars.clear();
-
-   MakeListFromStructure( fPars, fSeeds, fMetricPars );
-
-   // execute the fit
-//   Double_t estimator = fFitter->Run( fPars );
-   Double_t estimator = fFitter->Run( fPars );
-
-   MakeStructureFromList( fPars, fSeeds, fMetricPars );
-
-   // print results
-   PrintResults( fFitMethod, fPars, estimator );
-
-   // free cache 
-   std::vector<const Event*>::const_iterator itev;
-   for (itev = fEventsSig.begin(); itev != fEventsSig.end(); itev++) delete *itev;
-   for (itev = fEventsBkg.begin(); itev != fEventsBkg.end(); itev++) delete *itev;
-
-   fEventsSig.clear();
-   fEventsBkg.clear();
-
-   if (fConverger == "MINUIT") delete fIntermediateFitter;
-   delete fFitter; fFitter = 0;
-}
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::PrintResults( const TString& fitter, std::vector<Double_t>& , const Double_t estimator ) const
-{
-   //MakeStructureFromList( pars, fSeeds, fMetricPars );
-
-   // display fit parameters
-   // check maximum length of variable name
-   Log() << kINFO;
-   Log() << "Results for distance to seed method using fitter: \"" << fitter << Endl;
-   Log() << "Value of estimator at minimum: " << estimator << Endl;
-
-   // print seeds
-   Log() << kINFO << "Number of Seeds: " << fSeeds.size() << Endl;
-   for( Int_t i = 0; i< (Int_t)fSeeds.size(); i++ ){
-      if( i < fDataSeeds ){
-         Log() << kINFO << "Seed " << i << " -- DATA" << Endl;
-      }else{
-         Log() << kINFO << "Seed " << i << " -- BACKGROUND" << Endl;
-      }
-      for( Int_t j = 0; j< (Int_t)fSeeds[i].size(); j++ ){
-         if( fScalingFactor && j >= (Int_t)fSeeds[i].size()-1 ){
-            Log() << kINFO << "   scaling factor " << ": " << fSeeds[i][j] << Endl;
-         }else{
-            Log() << kINFO << "   dimension " << j << ": " << fSeeds[i][j] << Endl;
-         }
-      }
-   }
-   
-   // print metric parameters
-   Log() << kINFO << Endl;
-   Log() << kINFO << "Metric: " << fMetricType << " with " << fMetricPars.size() << " parameters" << Endl;
-   for( Int_t i = 0; i< (Int_t)fMetricPars.size(); i++ ){
-      Log() << kINFO << "   par " << i << ": " << fMetricPars[i] << Endl;
-   }
-
-}
-
-//_______________________________________________________________________
-Double_t TMVA::MethodSeedDistance::EstimatorFunction( std::vector<Double_t>& pars )
-{
-   // compute estimator for given parameter set (to be minimised)
-
-   MakeStructureFromList( pars, fSeeds, fMetricPars );
-   std::vector< Double_t > point;
-   Double_t looksLike = 0.0;
-   
-   // species-specific stuff
-   const std::vector<const Event*>* eventVecs[] = { &fEventsSig, &fEventsBkg };
-   const Double_t sumOfWeights[]                = { fSumOfWeightsSig, fSumOfWeightsBkg };
-   const Double_t desiredVal[]                  = { 1, 0 };
-   Double_t estimator[]                         = { 0, 0 };
-   std::vector<const Event*>::const_iterator itev;
-
-   Double_t distData;
-   Double_t distBack;
-   Double_t deviation;
-   
-   // loop over species
-   for (Int_t itype=0; itype<2; itype++) {
-
-      // loop over specific events
-      for (itev = eventVecs[itype]->begin(); itev != eventVecs[itype]->end(); itev++) {
-         point.clear();
-         for (UInt_t ivar=0;  ivar<GetNvar();   ivar++) point.push_back( (**itev).GetValue(ivar) );
-
-         std::vector< Double_t >& distances = fSeedDistance->GetDistances( point );
-         
-         distData = distances[0];
-         for( Int_t i=1; i< fDataSeeds; i++ ){
-            distData = TMath::Min( distData, distances[i] );
-         }
-         distBack = distances[fDataSeeds];
-         for( Int_t i=fDataSeeds; i< fDataSeeds+fBackSeeds; i++ ){
-            distBack = TMath::Min( distBack, distances[i] );
-         }
-         
-         if( !fPow2Estimator ){
-            if( distData < distBack ){ 
-               deviation = 1-desiredVal[itype];
-            }else{
-               deviation = desiredVal[itype];
-            }
-         }else{
-            looksLike = distBack/(distData+distBack);
-            deviation = (looksLike - desiredVal[itype])*(looksLike - desiredVal[itype]);
-         }
-
-         estimator[itype] += deviation * (*itev)->GetWeight();
-      }
-      estimator[itype] /= sumOfWeights[itype];
-   }
-
-   // return value is sum over normalised signal and background contributions
-   return estimator[0] + estimator[1];
-}
-
-//_______________________________________________________________________
-Double_t TMVA::MethodSeedDistance::GetMvaValue( Double_t* err )
-{
-   // returns MVA value for given event
-   std::vector< Double_t > point;
-   const Event* ev = GetEvent();
-
-   // cannot determine error
-   if (err != 0) *err = -1;
-
-   Double_t distData;
-   Double_t distBack;
-
-   point.clear();
-   for (UInt_t ivar=0;  ivar<GetNvar();   ivar++) point.push_back( ev->GetValue(ivar) );
-
-   std::vector< Double_t >& distances = fSeedDistance->GetDistances( point );
-
-   distData = distances[0];
-   for( Int_t i=1; i< fDataSeeds; i++ ){
-      distData = TMath::Min( distData, distances[i] );
-   }
-   distBack = distances[fDataSeeds];
-   for( Int_t i=fDataSeeds; i< fDataSeeds+fBackSeeds; i++ ){
-      distBack = TMath::Min( distBack, distances[i] );
-   }
-   
-
-   if( distData+distBack == 0 ){
-      Log() << kINFO << "backgroundseed=dataseed";
-      return 0.0;
-   }
-   Double_t looksLike = distBack/(distData+distBack);
-
-   return looksLike;
-}
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::AddWeightsXMLTo( void* /*parent*/ ) const 
-{
-   Log() << kFATAL << "Please implement writing of weights as XML" << Endl;
-}
- 
-//_______________________________________________________________________
-void  TMVA::MethodSeedDistance::ReadWeightsFromStream( istream& istr )
-{
-   // read back the training results from a file (stream)
-
-   Int_t size;
-   Double_t val;
-   istr >> size;
-//   Log() << kINFO << size << " ";
-   fSeeds.clear();
-   for( Int_t i = 0; i<size; i++ ){
-      fSeeds.push_back( std::vector< Double_t >() );
-      Int_t subSize;
-      istr >> subSize;
-//      Log() << kINFO << subSize << " ";
-      for( Int_t j = 0; j<subSize; j++ ){
-         istr >> val;
-//         Log() << kINFO << val << " ";
-         fSeeds[i].push_back( val );
-      }
-   }
-
-   istr >> fDataSeeds;
-   istr >> fBackSeeds;
-   istr >> fScalingFactor;
-
-   istr >> fMetricType;
-   istr >> size;
-//   Log() << kINFO << size << " ";
-   fMetricPars.clear();
-   for( Int_t i = 0; i<size; i++ ){
-      istr >> val;
-//      Log() << kINFO << val << " ";
-      fMetricPars.push_back( val );
-   }
-
-   if( fMetricType == "Euler" ) fMetric = new MetricEuler();
-   else if( fMetricType == "Manhattan" ) fMetric = new MetricManhattan();
-   else{
-      Log() << kFATAL << "unknown metric" << Endl;
-   }
-   fMetric->SetParameters( &fMetricPars );
-   fSeedDistance = new SeedDistance( *fMetric, fSeeds );
-}
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::MakeClassSpecific( std::ostream& fout, const TString& /*className*/ ) const
-{
-   //    fout << "Bool_t                 fScalingFactor = " << fScalingFactor << ";" << endl;
-   //    fout << "IMetric*               fMetric = new Metric" << fMetricType << "();" << endl;
-   //    fout << "SeedDistance*          fSeedDistance;" << endl;
-   //    fout << "std::vector< std::vector< Double_t > > fSeeds;" << endl;
-   //    fout << "std::vector<Double_t>  fMetricPars;" << endl;
-   //    fout << "Int_t                  fDataSeeds = " << fDataSeeds << ";" << endl;
-   //    fout << "Int_t                  fBackSeeds = " << fBackSeeds << ";" << endl;
-   //    fout << "TString                fMetricType = \"" << fMetricType << "\";" << endl;
-   //    fout << "Int_t                  fNPars = " << fNPars << ";" << endl;
-   fout << "not implemented for class" << std::endl;
-}
-
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::MakeListFromStructure( std::vector<Double_t>& linear, 
-                                  std::vector< std::vector< Double_t > >& seeds,
-                                  std::vector<Double_t>& metricParams )
-{
-   // linear: / /-seed1-//-seed-2//...//-seed n-/ /metricParams/ /
-   linear.clear();
-   for( std::vector< std::vector< Double_t > >::iterator itSeed = seeds.begin(); itSeed != seeds.end(); itSeed++ ){
-      linear.insert( linear.end(), (*itSeed).begin(), (*itSeed).end() );
-   }
-   linear.insert( linear.end(), metricParams.begin(), metricParams.end() );
-}
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::MakeStructureFromList( std::vector<Double_t>& linear, 
-                                  std::vector< std::vector< Double_t > >& seeds,
-                                  std::vector<Double_t>& metricParams )
-{
-   // makes the structure from the list
-   std::vector<Double_t>::iterator loc = linear.begin();
-   for( std::vector< std::vector<Double_t> >::iterator itSeed = seeds.begin(); itSeed != seeds.end(); itSeed++ ){
-      for( std::vector<Double_t>::iterator it = (*itSeed).begin(); it != (*itSeed).end(); it++ ){
-         (*it) = (*loc);
-         loc++;
-      }
-   }
-   for( std::vector<Double_t>::iterator it = metricParams.begin(); it != metricParams.end(); it++ ){
-      (*it) = (*loc);
-      loc++;
-   }
-}
-
-
-//_______________________________________________________________________
-void TMVA::MethodSeedDistance::GetHelpMessage() const
-{
-   // get help message text
-   //
-   // typical length of text line: 
-   //         "|--------------------------------------------------------------|"
-   Log() << Endl;
-   Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
-   Log() << Endl;
-   Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
-   Log() << Endl;
-   Log() << Endl;
-   Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
-   Log() << Endl;
-}
diff --git a/tmva/src/MethodTMlpANN.cxx b/tmva/src/MethodTMlpANN.cxx
index 26583f1b18099f0af3018017368bfac2afe017f2..7f1123f2cb19044e52c847a17dfa965312933f23 100644
--- a/tmva/src/MethodTMlpANN.cxx
+++ b/tmva/src/MethodTMlpANN.cxx
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$ 
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
  * Package: TMVA                                                                  *
@@ -15,9 +15,9 @@
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
@@ -27,12 +27,12 @@
 //_______________________________________________________________________
 /* Begin_Html
 
-  This is the TMVA TMultiLayerPerceptron interface class. It provides the 
+  This is the TMVA TMultiLayerPerceptron interface class. It provides
   training and testing of the ROOT-internal MLP class within the TMVA framework.<br>
 
   Available learning methods:<br>
   <ul>
-  <li>Stochastic      </li> 
+  <li>Stochastic      </li>
   <li>Batch           </li>
   <li>SteepestDescent </li>
   <li>RibierePolak    </li>
@@ -81,19 +81,19 @@ ClassImp(TMVA::MethodTMlpANN)
 //_______________________________________________________________________
 TMVA::MethodTMlpANN::MethodTMlpANN( const TString& jobName,
                                     const TString& methodTitle,
-                                    DataSetInfo& theData, 
+                                    DataSetInfo& theData,
                                     const TString& theOption,
                                     TDirectory* theTargetDir) :
    TMVA::MethodBase( jobName, Types::kTMlpANN, methodTitle, theData, theOption, theTargetDir ),
    fMLP(0),
    fLearningMethod( "" )
 {
-   // standard constructor 
+   // standard constructor
 }
 
 //_______________________________________________________________________
-TMVA::MethodTMlpANN::MethodTMlpANN( DataSetInfo& theData, 
-                                    const TString& theWeightFile,  
+TMVA::MethodTMlpANN::MethodTMlpANN( DataSetInfo& theData,
+                                    const TString& theWeightFile,
                                     TDirectory* theTargetDir ) :
    TMVA::MethodBase( Types::kTMlpANN, theData, theWeightFile, theTargetDir ),
    fMLP(0),
@@ -103,10 +103,10 @@ TMVA::MethodTMlpANN::MethodTMlpANN( DataSetInfo& theData,
 }
 
 //_______________________________________________________________________
-Bool_t TMVA::MethodTMlpANN::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, 
+Bool_t TMVA::MethodTMlpANN::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses,
                                              UInt_t /*numberTargets*/ )
 {
-   // TMlpANN can handle classification with 2 classes 
+   // TMlpANN can handle classification with 2 classes
    if (type == Types::kClassification && numberClasses == 2) return kTRUE;
    return kFALSE;
 }
@@ -123,7 +123,7 @@ TMVA::MethodTMlpANN::~MethodTMlpANN( void )
 {
    // destructor
 }
- 
+
 //_______________________________________________________________________
 void TMVA::MethodTMlpANN::CreateMLPOptions( TString layerSpec )
 {
@@ -136,7 +136,7 @@ void TMVA::MethodTMlpANN::CreateMLPOptions( TString layerSpec )
       if (layerSpec.First(',')<0) {
          sToAdd = layerSpec;
          layerSpec = "";
-      } 
+      }
       else {
          sToAdd = layerSpec(0,layerSpec.First(','));
          layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
@@ -164,26 +164,26 @@ void TMVA::MethodTMlpANN::CreateMLPOptions( TString layerSpec )
    fMLPBuildOptions += "type";
 
    Log() << kINFO << "Use " << fNcycles << " training cycles" << Endl;
-   Log() << kINFO << "Use configuration (nodes per hidden layer): " << fHiddenLayer << Endl;  
+   Log() << kINFO << "Use configuration (nodes per hidden layer): " << fHiddenLayer << Endl;
 }
 
 //_______________________________________________________________________
-void TMVA::MethodTMlpANN::DeclareOptions() 
+void TMVA::MethodTMlpANN::DeclareOptions()
 {
-   // define the options (their key words) that can be set in the option string 
+   // define the options (their key words) that can be set in the option string
    // known options:
-   // NCycles       <integer>    Number of training cycles (too many cycles could overtrain the network) 
+   // NCycles       <integer>    Number of training cycles (too many cycles could overtrain the network)
    // HiddenLayers  <string>     Layout of the hidden layers (nodes per layer)
    //   * specifications for each hidden layer are separated by commas
    //   * for each layer the number of nodes can be either absolute (simply a number)
    //        or relative to the number of input nodes to the neural net (N)
-   //   * there is always a single node in the output layer 
-   //   example: a net with 6 input nodes and "Hiddenlayers=N-1,N-2" has 6,5,4,1 nodes in the 
-   //   layers 1,2,3,4, repectively 
+   //   * there is always a single node in the output layer
+   //   example: a net with 6 input nodes and "HiddenLayers=N-1,N-2" has 6,5,4,1 nodes in
+   //   layers 1,2,3,4, respectively
    DeclareOptionRef( fNcycles    = 200,       "NCycles",      "Number of training cycles" );
    DeclareOptionRef( fLayerSpec  = "N,N-1",   "HiddenLayers", "Specification of hidden layer architecture (N stands for number of variables; any integers may also be used)" );
-   
-   DeclareOptionRef( fValidationFraction = 0.5, "ValidationFraction", 
+
+   DeclareOptionRef( fValidationFraction = 0.5, "ValidationFraction",
                      "Fraction of events in training tree used for cross validation" );
 
    DeclareOptionRef( fLearningMethod = "Stochastic", "LearningMethod", "Learning method" );
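
As a usage sketch, these options are passed through the booking string; this assumes a
TMVA::Factory* factory set up in the usual way, with method title and option values chosen
for illustration:

   factory->BookMethod( TMVA::Types::kTMlpANN, "TMlpANN",
                        "NCycles=200:HiddenLayers=N,N-1:LearningMethod=BFGS:ValidationFraction=0.5" );
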
@@ -196,14 +196,14 @@ void TMVA::MethodTMlpANN::DeclareOptions()
 }
 
 //_______________________________________________________________________
-void TMVA::MethodTMlpANN::ProcessOptions() 
+void TMVA::MethodTMlpANN::ProcessOptions()
 {
    // builds the neural network as specified by the user
    CreateMLPOptions(fLayerSpec);
 
    if (IgnoreEventsWithNegWeightsInTraining()) {
       Log() << kFATAL << "Mechanism to ignore events with negative weights in training not available for method"
-            << GetMethodTypeName() 
+            << GetMethodTypeName()
             << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
             << Endl;
    }
@@ -212,7 +212,7 @@ void TMVA::MethodTMlpANN::ProcessOptions()
 //_______________________________________________________________________
 Double_t TMVA::MethodTMlpANN::GetMvaValue( Double_t* err )
 {
-   // calculate the value of the neural net for the current event 
+   // calculate the value of the neural net for the current event
    const Event* ev = GetEvent();
    static Double_t* d = new Double_t[Data()->GetNVariables()];
    for (UInt_t ivar = 0; ivar<Data()->GetNVariables(); ivar++) {
@@ -232,12 +232,12 @@ void TMVA::MethodTMlpANN::Train( void )
    // performs TMlpANN training
    // available learning methods:
    //
-   //       TMultiLayerPerceptron::kStochastic      
-   //       TMultiLayerPerceptron::kBatch           
-   //       TMultiLayerPerceptron::kSteepestDescent 
-   //       TMultiLayerPerceptron::kRibierePolak    
-   //       TMultiLayerPerceptron::kFletcherReeves  
-   //       TMultiLayerPerceptron::kBFGS            
+   //       TMultiLayerPerceptron::kStochastic
+   //       TMultiLayerPerceptron::kBatch
+   //       TMultiLayerPerceptron::kSteepestDescent
+   //       TMultiLayerPerceptron::kRibierePolak
+   //       TMultiLayerPerceptron::kFletcherReeves
+   //       TMultiLayerPerceptron::kBFGS
    //
    // TMultiLayerPerceptron wants test and training tree at once
    // so merge the training and testing trees from the MVA factory first:
@@ -287,9 +287,9 @@ void TMVA::MethodTMlpANN::Train( void )
 
    // localTrainingTree->Print();
 
-   // create NN 
+   // create NN
    if (fMLP != 0) { delete fMLP; fMLP = 0; }
-   fMLP = new TMultiLayerPerceptron( fMLPBuildOptions.Data(), 
+   fMLP = new TMultiLayerPerceptron( fMLPBuildOptions.Data(),
                                      localTrainingTree,
                                      trainList,
                                      testList );
@@ -297,9 +297,9 @@ void TMVA::MethodTMlpANN::Train( void )
 
    // set learning method
 #if ROOT_VERSION_CODE > ROOT_VERSION(5,13,06)
-   TMultiLayerPerceptron::ELearningMethod learningMethod = TMultiLayerPerceptron::kStochastic; 
+   TMultiLayerPerceptron::ELearningMethod learningMethod = TMultiLayerPerceptron::kStochastic;
 #else
-   TMultiLayerPerceptron::LearningMethod  learningMethod = TMultiLayerPerceptron::kStochastic; 
+   TMultiLayerPerceptron::LearningMethod  learningMethod = TMultiLayerPerceptron::kStochastic;
 #endif
 
    fLearningMethod.ToLower();
@@ -323,15 +323,15 @@ void TMVA::MethodTMlpANN::Train( void )
 
 }
 
- 
+
 //_______________________________________________________________________
-void TMVA::MethodTMlpANN::AddWeightsXMLTo( void* parent ) const 
+void TMVA::MethodTMlpANN::AddWeightsXMLTo( void* parent ) const
 {
    // write weights to xml file
-   
+
    // first the architecture
-   void *wght = gTools().xmlengine().NewChild(parent, 0, "Weights");
-   void* arch = gTools().xmlengine().NewChild( wght, 0, "Architecture" );
+   void *wght = gTools().AddChild(parent, "Weights");
+   void* arch = gTools().AddChild( wght, "Architecture" );
    gTools().AddAttr( arch, "BuildOptions", fMLPBuildOptions.Data() );
 
    // dump weights first in temporary txt file, read from there into xml
@@ -342,17 +342,18 @@ void TMVA::MethodTMlpANN::AddWeightsXMLTo( void* parent ) const
    void *ch=NULL;
    while (inf.getline(temp,256)) {
       TString dummy(temp);
       if (dummy.BeginsWith('#')) {
-         if (ch!=0) gTools().xmlengine().AddRawLine( ch, data.Data() );
+         if (ch!=0) gTools().AddRawLine( ch, data.Data() );
          dummy = dummy.Strip(TString::kLeading, '#');
          dummy = dummy(0,dummy.First(' '));
-         ch = gTools().xmlengine().NewChild(wght, 0, dummy);
+         ch = gTools().AddChild(wght, dummy);
          data.Resize(0);
          continue;
       }
       data += (dummy + " ");
    }
-   if (ch != 0) gTools().xmlengine().AddRawLine( ch, data.Data() );
+   if (ch != 0) gTools().AddRawLine( ch, data.Data() );
+
    inf.close();
 }
 
@@ -361,41 +363,41 @@ void  TMVA::MethodTMlpANN::ReadWeightsFromXML( void* wghtnode )
 {
    // rebuild temporary textfile from xml weightfile and load this
    // file into MLP
-   void* ch = gTools().xmlengine().GetChild(wghtnode);
+   void* ch = gTools().GetChild(wghtnode);
    gTools().ReadAttr( ch, "BuildOptions", fMLPBuildOptions );
 
-   ch = gTools().xmlengine().GetNext(ch);
+   ch = gTools().GetNextChild(ch);
    const char* fname = "weights/TMlp.nn.weights.temp";
    std::ofstream fout( fname );
    double temp1=0,temp2=0;
    while (ch) {
-      const char* nodecontent = gTools().xmlengine().GetNodeContent(ch);
+      const char* nodecontent = gTools().GetContent(ch);
       std::stringstream content(nodecontent);
-      if (strcmp(gTools().xmlengine().GetNodeName(ch),"input")==0) {
-         fout << "#input normalization" << std::endl;         
+      if (strcmp(gTools().GetName(ch),"input")==0) {
+         fout << "#input normalization" << std::endl;
          while ((content >> temp1) &&(content >> temp2)) {
             fout << temp1 << " " << temp2 << std::endl;
          }
       }
-      if (strcmp(gTools().xmlengine().GetNodeName(ch),"output")==0) {
-         fout << "#output normalization" << std::endl;         
+      if (strcmp(gTools().GetName(ch),"output")==0) {
+         fout << "#output normalization" << std::endl;
          while ((content >> temp1) &&(content >> temp2)) {
             fout << temp1 << " " << temp2 << std::endl;
          }
       }
-      if (strcmp(gTools().xmlengine().GetNodeName(ch),"neurons")==0) {
+      if (strcmp(gTools().GetName(ch),"neurons")==0) {
          fout << "#neurons weights" << std::endl;         
          while (content >> temp1) {
             fout << temp1 << std::endl;
          }
       }
-      if (strcmp(gTools().xmlengine().GetNodeName(ch),"synapses")==0) {
+      if (strcmp(gTools().GetName(ch),"synapses")==0) {
          fout << "#synapses weights" ;         
          while (content >> temp1) {
             fout << std::endl << temp1 ;                
          }
       }
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
    fout.close();
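
The XML accessor changes in this file follow one substitution pattern, applied throughout
the patch: direct TXMLEngine calls are replaced by the corresponding gTools() wrappers.
Schematically:

   gTools().xmlengine().NewChild(parent, 0, name)  ->  gTools().AddChild(parent, name)
   gTools().xmlengine().AddRawLine(node, data)     ->  gTools().AddRawLine(node, data)
   gTools().xmlengine().GetChild(node)             ->  gTools().GetChild(node)
   gTools().xmlengine().GetNext(node)              ->  gTools().GetNextChild(node)
   gTools().xmlengine().GetNodeContent(node)       ->  gTools().GetContent(node)
   gTools().xmlengine().GetNodeName(node)          ->  gTools().GetName(node)
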
 
diff --git a/tmva/src/MetricEuler.cxx b/tmva/src/MetricEuler.cxx
deleted file mode 100644
index 7566a7fe60a461fbee70e841e636b6c920e0166f..0000000000000000000000000000000000000000
--- a/tmva/src/MetricEuler.cxx
+++ /dev/null
@@ -1,85 +0,0 @@
-// @(#)root/tmva $Id$ 
-// Author: Andreas Hoecker, Peter Speckmayer
-
-/**********************************************************************************
- * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
- * Package: TMVA                                                                  *
- * Class  : MetricEuler                                                         *
- * Web    : http://tmva.sourceforge.net                                           *
- *                                                                                *
- * Description:                                                                   *
- *      Implementation                                                            *
- *                                                                                *
- * Authors (alphabetical):                                                        *
- *      Peter Speckmayer <speckmay@mail.cern.ch> - CERN, Switzerland              *
- *                                                                                *
- * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
- *                                                                                *
- * Redistribution and use in source and binary forms, with or without             *
- * modification, are permitted according to the terms listed in LICENSE           *
- * (http://tmva.sourceforge.net/LICENSE)                                          *
- **********************************************************************************/
-
-//_______________________________________________________________________
-//                                                                      
-// interface for a metric
-//
-//_______________________________________________________________________
-
-#include "TMVA/MetricEuler.h"
-#include "TMath.h"
-
-ClassImp(TMVA::MetricEuler)
-
-//_______________________________________________________________________
-TMVA::MetricEuler::MetricEuler() 
-   : IMetric()
-{
-   // constructor
-}            
-
-
-//_______________________________________________________________________
-Double_t TMVA::MetricEuler::Distance( std::vector<Double_t>& pointA, std::vector<Double_t>& pointB )
-{
-   // the Euler distance between point A and B
-   Double_t distance = 0.0;
-   Double_t val = 0.0;
-   std::vector<Double_t>::iterator itA;
-   std::vector<Double_t>::iterator itB;
-   if( fParameters == NULL ){
-      itA = pointA.begin();
-      for( itB = pointB.begin(); itB != pointB.end(); itB++ ){
-         if( itA == pointA.end() ){
-            break;
-         }
-         val = (*itA)-(*itB);
-         distance += pow( val, 2 );
-         itA++;
-      }
-   }else{
-      std::vector<Double_t>::iterator itPar;
-      itA   = pointA.begin();
-      itPar = fParameters->begin();
-      for( itB = pointB.begin(); itB != pointB.end(); itB++ ){
-         if( itA == pointA.end() ){
-            break;
-         }
-         if( itPar == fParameters->end() ){
-            break;
-         }
-         val = (*itPar)*( (*itA)-(*itB) );
-         distance += pow( val, 2 );
-         itA++;
-         itPar++;
-      }
-      if( itA != pointA.end() ){
-         distance *= pow( (*itA),2 );
-      }
-   }
-   return sqrt( distance );
-}
-
-
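
The removed class computed an optionally parameter-weighted Euclidean distance. Its core,
ignoring the special-casing for parameter lists shorter than the points, can be sketched in
standalone C++ (illustrative, not TMVA API):

   #include <cmath>
   #include <cstddef>
   #include <vector>

   double EuclideanDistance( const std::vector<double>& a,
                             const std::vector<double>& b,
                             const std::vector<double>* weights = 0 )
   {
      double d2 = 0.0;
      const std::size_t n = (a.size() < b.size()) ? a.size() : b.size();
      for (std::size_t i = 0; i < n; ++i) {
         double diff = a[i] - b[i];
         if (weights && i < weights->size()) diff *= (*weights)[i];  // per-component weight
         d2 += diff*diff;
      }
      return std::sqrt( d2 );
   }
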
diff --git a/tmva/src/MetricManhattan.cxx b/tmva/src/MetricManhattan.cxx
deleted file mode 100644
index 9bbfa9ff09c7ce6e8db90341092063f9f3da8991..0000000000000000000000000000000000000000
--- a/tmva/src/MetricManhattan.cxx
+++ /dev/null
@@ -1,86 +0,0 @@
-// @(#)root/tmva $Id$ 
-// Author: Andreas Hoecker, Peter Speckmayer
-
-/**********************************************************************************
- * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
- * Package: TMVA                                                                  *
- * Class  : MetricManhattan                                                       *
- * Web    : http://tmva.sourceforge.net                                           *
- *                                                                                *
- * Description:                                                                   *
- *      Implementation                                                            *
- *                                                                                *
- * Authors (alphabetical):                                                        *
- *      Peter Speckmayer <speckmay@mail.cern.ch> - CERN, Switzerland              *
- *                                                                                *
- * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
- *                                                                                *
- * Redistribution and use in source and binary forms, with or without             *
- * modification, are permitted according to the terms listed in LICENSE           *
- * (http://tmva.sourceforge.net/LICENSE)                                          *
- **********************************************************************************/
-
-//_______________________________________________________________________
-//                                                                      
-// interface for a metric
-//
-//_______________________________________________________________________
-
-#include "TMath.h"
-
-#include "TMVA/MetricManhattan.h"
-
-ClassImp(TMVA::MetricManhattan)
-
-//_______________________________________________________________________
-TMVA::MetricManhattan::MetricManhattan() 
-   : IMetric()
-{
-   // constructor
-}            
-
-
-//_______________________________________________________________________
-Double_t TMVA::MetricManhattan::Distance( std::vector<Double_t>& pointA, std::vector<Double_t>& pointB )
-{
-   // the Manhatten distance between point A and B:
-   // linear sum of differences between A in B in all variables
-   Double_t distance = 0.0;
-   Double_t val = 0.0;
-   std::vector<Double_t>::iterator itA;
-   std::vector<Double_t>::iterator itB;
-   if( fParameters == NULL ){
-      itA = pointA.begin();
-      for( itB = pointB.begin(); itB != pointB.end(); itB++ ){
-         if( itA == pointA.end() ){
-            break;
-         }
-         val = (*itA)-(*itB);
-         distance += TMath::Abs( val );
-         itA++;
-      }
-   }else{
-      std::vector<Double_t>::iterator itPar;
-      itA   = pointA.begin();
-      itPar = fParameters->begin();
-      for( itB = pointB.begin(); itB != pointB.end(); itB++ ){
-         if( itA == pointA.end() ){
-            break;
-         }
-         if( itPar == fParameters->end() ){
-            break;
-         }
-         val = (*itPar)*( (*itA)-(*itB) );
-         distance += TMath::Abs( val );
-         itA++;
-         itPar++;
-      }
-      if( itA != pointA.end() ){
-         distance *= (*itA);
-      }
-   }
-   return distance;
-}
-
diff --git a/tmva/src/MsgLogger.cxx b/tmva/src/MsgLogger.cxx
index 67053914fb2770e1d52caf45aba9b6fb40e49874..b21d01b48d70fbe11c31ab8d6019452db31d6387 100644
--- a/tmva/src/MsgLogger.cxx
+++ b/tmva/src/MsgLogger.cxx
@@ -31,11 +31,6 @@
 
 #include <cstdlib>
 
-// this is the hardcoded prefix
-#define PREFIX "--- "
-// this is the hardcoded suffix
-#define SUFFIX ": "
-
 // ROOT include(s):
 
 ClassImp(TMVA::MsgLogger)
@@ -43,15 +38,18 @@ ClassImp(TMVA::MsgLogger)
 // this is the hard-coded maximum length of the source names
 UInt_t TMVA::MsgLogger::fgMaxSourceSize = 25;
 Bool_t TMVA::MsgLogger::fgInhibitOutput = kFALSE;
+
+const std::string TMVA::MsgLogger::fgPrefix="--- ";
+const std::string TMVA::MsgLogger::fgSuffix=": ";
+std::map<TMVA::EMsgType, std::string> TMVA::MsgLogger::fgTypeMap=std::map<TMVA::EMsgType, std::string>();
+std::map<TMVA::EMsgType, std::string> TMVA::MsgLogger::fgColorMap=std::map<TMVA::EMsgType, std::string>();
+
 void   TMVA::MsgLogger::InhibitOutput() { fgInhibitOutput = kTRUE;  }
 void   TMVA::MsgLogger::EnableOutput()  { fgInhibitOutput = kFALSE; }
-
 //_______________________________________________________________________
 TMVA::MsgLogger::MsgLogger( const TObject* source, EMsgType minType )
    : fObjSource ( source ), 
      fStrSource ( "" ), 
-     fPrefix    ( PREFIX ), 
-     fSuffix    ( SUFFIX ), 
      fActiveType( kINFO ), 
      fMinType   ( minType )
 {
@@ -63,8 +61,6 @@ TMVA::MsgLogger::MsgLogger( const TObject* source, EMsgType minType )
 TMVA::MsgLogger::MsgLogger( const std::string& source, EMsgType minType )
    : fObjSource ( 0 ),
      fStrSource ( source ), 
-     fPrefix    ( PREFIX ), 
-     fSuffix    ( SUFFIX ), 
      fActiveType( kINFO ), 
      fMinType   ( minType )
 {
@@ -76,8 +72,6 @@ TMVA::MsgLogger::MsgLogger( const std::string& source, EMsgType minType )
 TMVA::MsgLogger::MsgLogger( EMsgType minType )
    : fObjSource ( 0 ), 
      fStrSource ( "Unknown" ), 
-     fPrefix    ( PREFIX ), 
-     fSuffix    ( SUFFIX ), 
      fActiveType( kINFO ), 
      fMinType   ( minType )
 {
@@ -89,9 +83,7 @@ TMVA::MsgLogger::MsgLogger( EMsgType minType )
 TMVA::MsgLogger::MsgLogger( const MsgLogger& parent )
    : std::basic_ios<MsgLogger::char_type, MsgLogger::traits_type>(),
      std::ostringstream(),
-     TObject(),
-     fPrefix( PREFIX ), 
-     fSuffix( SUFFIX )
+     TObject()
 {
    // copy constructor
    InitMaps();
@@ -142,7 +134,7 @@ std::string TMVA::MsgLogger::GetPrintedSource() const
    if (source_name.size() < fgMaxSourceSize) 
       for (std::string::size_type i=source_name.size(); i<fgMaxSourceSize; i++) source_name.push_back( ' ' );
 
-   return fPrefix + source_name + fSuffix; 
+   return fgPrefix + source_name + fgSuffix;
 }
 
 //_______________________________________________________________________
@@ -165,7 +157,7 @@ void TMVA::MsgLogger::Send()
       // must call the modifiers like this, otherwise g++ gets confused with the operators...
       message_to_send.setf( std::ios::adjustfield, std::ios::left );
       message_to_send.width( fgMaxSourceSize );
-      message_to_send << source_name << fSuffix << line;
+      message_to_send << source_name << fgSuffix << line;
       this->WriteMsg( fActiveType, message_to_send.str() );
 
       if (current_pos == message.npos) break;
@@ -187,23 +179,23 @@ void TMVA::MsgLogger::WriteMsg( EMsgType type, const std::string& line ) const
    if (type < fMinType || (fgInhibitOutput && type!=kFATAL)) return; // no output
 
    std::map<EMsgType, std::string>::const_iterator stype;
-   if ((stype = fTypeMap.find( type )) == fTypeMap.end()) return;
+   if ((stype = fgTypeMap.find( type )) == fgTypeMap.end()) return;
    if (!gConfig().IsSilent() || type==kFATAL) {
       if (gConfig().UseColor()) {
          // no text for INFO or VERBOSE
-         if (type == kINFO || type == kVERBOSE) 
-            std::cout << fPrefix << line << std::endl; // no color for info
-         else               
-            std::cout << fColorMap.find( type )->second << fPrefix << "<" 
+         if (type == kINFO || type == kVERBOSE)
+            std::cout << fgPrefix << line << std::endl; // no color for info
+         else
+            std::cout << fgColorMap.find( type )->second << fgPrefix << "<"
                       << stype->second << "> " << line  << "\033[0m" << std::endl;
-      } 
+      }
       else {
-         if (type == kINFO) std::cout << fPrefix << line << std::endl;
-         else               std::cout << fPrefix << "<" << stype->second << "> " << line << std::endl;
+         if (type == kINFO) std::cout << fgPrefix << line << std::endl;
+         else               std::cout << fgPrefix << "<" << stype->second << "> " << line << std::endl;
       }
    }
    // take decision to stop if fatal error
-   if (type == kFATAL) { 
+   if (type == kFATAL) {
       std::cout << "***> abort program execution" << std::endl;
       std::exit(1);
    }
@@ -220,20 +212,22 @@ TMVA::MsgLogger& TMVA::MsgLogger::Endmsg( MsgLogger& logger )
 //_______________________________________________________________________
 void TMVA::MsgLogger::InitMaps()
 {
+   if (fgTypeMap.size()>0 && fgColorMap.size()>0 ) return;
+
    // fill maps that assign a string and a color to echo message level
-   fTypeMap[kVERBOSE]  = std::string("VERBOSE");
-   fTypeMap[kDEBUG]    = std::string("DEBUG");
-   fTypeMap[kINFO]     = std::string("INFO");
-   fTypeMap[kWARNING]  = std::string("WARNING");
-   fTypeMap[kERROR]    = std::string("ERROR");
-   fTypeMap[kFATAL]    = std::string("FATAL");
-   fTypeMap[kSILENT]   = std::string("SILENT");
-
-   fColorMap[kVERBOSE] = std::string("");
-   fColorMap[kDEBUG]   = std::string("\033[34m");
-   fColorMap[kINFO]    = std::string("");
-   fColorMap[kWARNING] = std::string("\033[1;31m");
-   fColorMap[kERROR]   = std::string("\033[31m");
-   fColorMap[kFATAL]   = std::string("\033[37;41;1m");
-   fColorMap[kSILENT]  = std::string("\033[30m");
+   fgTypeMap[kVERBOSE]  = std::string("VERBOSE");
+   fgTypeMap[kDEBUG]    = std::string("DEBUG");
+   fgTypeMap[kINFO]     = std::string("INFO");
+   fgTypeMap[kWARNING]  = std::string("WARNING");
+   fgTypeMap[kERROR]    = std::string("ERROR");
+   fgTypeMap[kFATAL]    = std::string("FATAL");
+   fgTypeMap[kSILENT]   = std::string("SILENT");
+
+   fgColorMap[kVERBOSE] = std::string("");
+   fgColorMap[kDEBUG]   = std::string("\033[34m");
+   fgColorMap[kINFO]    = std::string("");
+   fgColorMap[kWARNING] = std::string("\033[1;31m");
+   fgColorMap[kERROR]   = std::string("\033[31m");
+   fgColorMap[kFATAL]   = std::string("\033[37;41;1m");
+   fgColorMap[kSILENT]  = std::string("\033[30m");
 }
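
The prefix/suffix strings and the type/color maps are now class-static and shared by all
logger instances, so InitMaps() guards against refilling them on every construction. The
idiom in isolation (a minimal standalone analogue, not the TMVA code itself):

   #include <map>
   #include <string>

   enum EMsgType { kINFO, kWARNING };

   static std::map<EMsgType, std::string> gTypeMap;   // shared, filled once

   void InitMaps()
   {
      if (!gTypeMap.empty()) return;   // already initialised by an earlier instance
      gTypeMap[kINFO]    = "INFO";
      gTypeMap[kWARNING] = "WARNING";
   }
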
diff --git a/tmva/src/Node.cxx b/tmva/src/Node.cxx
index e69953f53606fbc6f186569d2e0d6007d5edff59..b42ac0f80e1727ace542e1a243103c48e77721f8 100644
--- a/tmva/src/Node.cxx
+++ b/tmva/src/Node.cxx
@@ -99,6 +99,13 @@ TMVA::Node::~Node()
    fgCount--;
 }
 
+//_______________________________________________________________________
+int TMVA::Node::GetCount()
+{
+   // returns the global number of instantiated nodes
+   return fgCount;
+}
+
 //_______________________________________________________________________
 Int_t TMVA::Node::CountMeAndAllDaughters() const 
 {
@@ -149,7 +156,7 @@ void TMVA::Node::ReadXML( void* node,  UInt_t tmva_Version_Code )
 {
    // read attributes from XML
    ReadAttributes(node, tmva_Version_Code);
-   const char* content = gTools().xmlengine().GetNodeContent(node);
+   const char* content = gTools().GetContent(node);
    if (content) {
       std::stringstream s(content);
       ReadContent(s);
@@ -157,15 +164,15 @@ void TMVA::Node::ReadXML( void* node,  UInt_t tmva_Version_Code )
    gTools().ReadAttr( node, "pos",   fPos );
    gTools().ReadAttr( node, "depth", fDepth );
 
-   void* ch = gTools().xmlengine().GetChild(node);
+   void* ch = gTools().GetChild(node);
    while (ch) {
       Node* n = CreateNode();
       n->ReadXML(ch, tmva_Version_Code);
       if (n->GetPos()=='l') { fLeft  = n; }
       else if(n->GetPos()=='r') { fRight = n; }
       else { 
          std::cout << "neither left nor right" << std::endl;
       }
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 }
diff --git a/tmva/src/PDEFoam.cxx b/tmva/src/PDEFoam.cxx
index d2b863227ff3eb7f84409afa50cdfff66fcebacb..526d9c1272b90442020262060d237459ea93c413 100644
--- a/tmva/src/PDEFoam.cxx
+++ b/tmva/src/PDEFoam.cxx
@@ -1134,7 +1134,7 @@ void TMVA::PDEFoam::FillFoamCells(const Event* ev, Bool_t NoNegWeights)
    } else if (ft == kDiscr){
       // 0. Element: Number of signal events
       // 1. Element: Number of background events times normalization
-      if (ev->IsSignal())
+      if (ev->GetClass() == 0)
          SetCellElement(cell, 0, GetCellElement(cell, 0) + weight);
       else
          SetCellElement(cell, 1, GetCellElement(cell, 1) + weight);
@@ -2518,7 +2518,7 @@ void TMVA::PDEFoam::PrintStream( ostream & ostr ) const
 void TMVA::PDEFoam::AddXMLTo( void* parent ){
    // write foam variables to xml
 
-   void *variables = gTools().xmlengine().NewChild( parent, 0, "Variables" );
+   void *variables = gTools().AddChild( parent, "Variables" );
    gTools().AddAttr( variables, "LastCe",           fLastCe );
    gTools().AddAttr( variables, "nCells",           fNCells );
    gTools().AddAttr( variables, "Dim",              fDim );
@@ -2526,14 +2526,14 @@ void TMVA::PDEFoam::AddXMLTo( void* parent ){
 
    void *xmin_wrap;
    for (Int_t i=0; i<GetTotDim(); i++){
-      xmin_wrap = gTools().xmlengine().NewChild( variables, 0, "Xmin" );
+      xmin_wrap = gTools().AddChild( variables, "Xmin" );
       gTools().AddAttr( xmin_wrap, "Index", i );
       gTools().AddAttr( xmin_wrap, "Value", fXmin[i] );
    }
 
    void *xmax_wrap;
    for (Int_t i=0; i<GetTotDim(); i++){
-      xmax_wrap = gTools().xmlengine().NewChild( variables, 0, "Xmax" );
+      xmax_wrap = gTools().AddChild( variables, "Xmax" );
       gTools().AddAttr( xmax_wrap, "Index", i );
       gTools().AddAttr( xmax_wrap, "Value", fXmax[i] );
    }
@@ -2541,7 +2541,7 @@ void TMVA::PDEFoam::AddXMLTo( void* parent ){
 
 //_____________________________________________________________________
 void TMVA::PDEFoam::ReadXML( void* parent ) {
-   void *variables = gTools().xmlengine().GetChild( parent );
+   void *variables = gTools().GetChild( parent );
    gTools().ReadAttr( variables, "LastCe",         fLastCe );
    gTools().ReadAttr( variables, "nCells",         fNCells );
    gTools().ReadAttr( variables, "Dim",            fDim );
@@ -2554,14 +2554,14 @@ void TMVA::PDEFoam::ReadXML( void* parent ) {
    fXmin = new Double_t[GetTotDim()];
    fXmax = new Double_t[GetTotDim()];
 
-   void *xmin_wrap = gTools().xmlengine().GetChild( variables );
+   void *xmin_wrap = gTools().GetChild( variables );
    for (Int_t counter=0; counter<fDim; counter++) {
       Int_t i=0;
       gTools().ReadAttr( xmin_wrap , "Index", i );
       if (i >= GetTotDim() || i<0)
          Log() << kFATAL << "dimension index out of range:" << i << Endl;
       gTools().ReadAttr( xmin_wrap , "Value", fXmin[i] );
-      xmin_wrap = gTools().xmlengine().GetNext( xmin_wrap );
+      xmin_wrap = gTools().GetNextChild( xmin_wrap );
    }
 
    void *xmax_wrap = xmin_wrap; //gTools().xmlengine().GetChild( variables );
@@ -2571,6 +2571,6 @@ void TMVA::PDEFoam::ReadXML( void* parent ) {
       if (i >= GetTotDim() || i<0)
          Log() << kFATAL << "dimension index out of range:" << i << Endl;
       gTools().ReadAttr( xmax_wrap , "Value", fXmax[i] );
-      xmax_wrap = gTools().xmlengine().GetNext( xmax_wrap );
+      xmax_wrap = gTools().GetNextChild( xmax_wrap );
    }
 }
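
With the wrapper calls above, AddXMLTo() emits a single "Variables" node whose "Xmin" and
"Xmax" children carry an Index and a Value attribute per dimension, roughly as follows
(values illustrative, further attributes elided):

   <Variables LastCe="..." nCells="..." Dim="2" ...>
     <Xmin Index="0" Value="0"/>
     <Xmin Index="1" Value="0"/>
     <Xmax Index="0" Value="1"/>
     <Xmax Index="1" Value="1"/>
   </Variables>

ReadXML() walks the same children in document order via GetChild()/GetNextChild(), which is
why the Xmax loop continues from the node where the Xmin loop stopped.
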
diff --git a/tmva/src/PDEFoamDistr.cxx b/tmva/src/PDEFoamDistr.cxx
index b513f99aeb173dc13f4b9bb0c6dd2ea3feb90ce0..3f23688af0601f4487210d9722232fd89c441fad 100644
--- a/tmva/src/PDEFoamDistr.cxx
+++ b/tmva/src/PDEFoamDistr.cxx
@@ -37,7 +37,7 @@
 #include "TMVA/PDEFoamDistr.h"
 #endif
 
-ClassImp(TMVA::PDEFoamDistr);
+ClassImp(TMVA::PDEFoamDistr)
 
 //_____________________________________________________________________
 TMVA::PDEFoamDistr::PDEFoamDistr() 
@@ -72,7 +72,8 @@ TMVA::PDEFoamDistr::PDEFoamDistr(const PDEFoamDistr &distr)
      fBst             (distr.fBst),
      fDensityCalc     (kEVENT_DENSITY), // default: fill event density to BinarySearchTree
      fSignalClass     (distr.fSignalClass),
-     fBackgroundClass (distr.fBackgroundClass)
+     fBackgroundClass (distr.fBackgroundClass),
+     fLogger( new MsgLogger("PDEFoamDistr"))
 {
    // Copy constructor
    Log() << kFATAL << "COPY CONSTRUCTOR NOT IMPLEMENTED" << Endl;
@@ -123,11 +124,10 @@ void TMVA::PDEFoamDistr::FillBinarySearchTree( const Event* ev, EFoamType ft, Bo
       return;
 
    TMVA::Event *event = new TMVA::Event(*ev);
-   event->SetSignalClass( fSignalClass );
-
+
    // set event class and normalization
    if (ft==kSeparate || ft==kDiscr){
-      event->SetClass(ev->IsSignal() ? fSignalClass : fBackgroundClass);
+      event->SetClass(ev->GetClass()==fSignalClass ? fSignalClass : fBackgroundClass);
    } else if (ft==kMultiTarget){
       // since in multi target regression targets are handled like
       // variables, remove targets and add them to the event variabels
diff --git a/tmva/src/PDEFoamVect.cxx b/tmva/src/PDEFoamVect.cxx
index 61f3cc36f6b2509ea197017564e27a85eead2135..9de145a8d35ede84b05e833e8e2bcff5913db614 100644
--- a/tmva/src/PDEFoamVect.cxx
+++ b/tmva/src/PDEFoamVect.cxx
@@ -32,24 +32,25 @@
 
 #define SW2 std::setprecision(7) << std::setw(12)
 
-ClassImp(TMVA::PDEFoamVect);
+ClassImp(TMVA::PDEFoamVect)
+
+TMVA::MsgLogger* TMVA::PDEFoamVect::fgLogger = 0;
 
 //_____________________________________________________________________
 TMVA::PDEFoamVect::PDEFoamVect()
    : TObject(),
      fDim(0),
-     fCoords(0),
-     fLogger( new MsgLogger("PDEFoamVect") )
+     fCoords(0)
 {
    // Default constructor for streamer
+   if (!fgLogger) fgLogger = new MsgLogger("PDEFoamVect");
 }
 
 //_____________________________________________________________________
 TMVA::PDEFoamVect::PDEFoamVect(Int_t n)
    : TObject(),
      fDim(n),
-     fCoords(0),
-     fLogger( new MsgLogger("PDEFoamVect") )
+     fCoords(0)
 {
    // User constructor creating n-dimensional vector
    // and allocating dynamically array of components
@@ -58,6 +59,9 @@ TMVA::PDEFoamVect::PDEFoamVect(Int_t n)
       fCoords = new Double_t[fDim];
       for (Int_t i=0; i<n; i++) *(fCoords+i)=0.0;
    }
+
+   if (!fgLogger) fgLogger = new MsgLogger("PDEFoamVect");
+
 }
 
 //_____________________________________________________________________
@@ -76,7 +80,6 @@ TMVA::PDEFoamVect::~PDEFoamVect()
    // Destructor
    delete [] fCoords; //  free(fCoords)
    fCoords=0;
-   delete fLogger;
 }
 
 //////////////////////////////////////////////////////////////////////////////
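
PDEFoamVect objects are created in large numbers inside the foam, so the logger becomes a
single lazily created class-static shared by all instances, and the destructor deliberately
no longer deletes it. A minimal standalone analogue of the pattern:

   #include <string>

   struct Logger { std::string fName; Logger( const char* n ) : fName(n) {} };

   class Vect {
   public:
      Vect()  { if (!fgLogger) fgLogger = new Logger("Vect"); }  // created once, shared
      ~Vect() {}   // intentionally does not delete the shared fgLogger
   private:
      static Logger* fgLogger;
   };
   Logger* Vect::fgLogger = 0;
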
diff --git a/tmva/src/PDF.cxx b/tmva/src/PDF.cxx
index e63174bd405ccfbed3987aeb24b8675901441e7a..7df53087f1d5b37b55ef4e3fc2c745507a89471e 100644
--- a/tmva/src/PDF.cxx
+++ b/tmva/src/PDF.cxx
@@ -31,7 +31,6 @@
 #include <cstdlib>
 
 #include "TMath.h"
-#include "TXMLEngine.h"
 #include "TF1.h"
 #include "TH1F.h"
 #include "TVectorD.h"
@@ -495,15 +494,8 @@ void TMVA::PDF::SmoothHistogram()
 //_______________________________________________________________________
 void TMVA::PDF::FillHistToGraph()
 {
+   // Simple conversion
    fGraph=new TGraph(fHist);
-   return;
-   Int_t PointNum = fHist->GetXaxis()->GetNbins();
-   Double_t Factor=PointNum/(fHist->GetBinLowEdge(PointNum)+fHist->GetBinWidth(PointNum)-fHist->GetBinLowEdge(1));
-   fGraph = new TGraph(PointNum+2);
-   fGraph->SetPoint(0,fHist->GetBinLowEdge(1),0);
-   for (Int_t i=0;i<PointNum;i++)
-      fGraph->SetPoint(i+1,fHist->GetBinCenter(i+1), fHist->GetBinContent(i+1) / (fHist->GetBinWidth(i+1) * Factor));
-   fGraph->SetPoint(PointNum+1,fHist->GetBinLowEdge(PointNum)+fHist->GetBinWidth(PointNum),0);
 }
 
 //_______________________________________________________________________
@@ -627,11 +619,11 @@ Double_t TMVA::PDF::GetIntegral() const
 }
 
 //_______________________________________________________________________
-Double_t TMVA::PDF::IGetVal( Double_t* x, Double_t* ) 
-{
-   // static external auxiliary function (integrand)
-   return ThisPDF()->GetVal( x[0] );
-}
+// Double_t TMVA::PDF::IGetVal( Double_t* x, Double_t* ) 
+// {
+//    // static external auxiliary function (integrand)
+//    return ThisPDF()->GetVal( x[0] );
+// }
 
 //_______________________________________________________________________
 Double_t TMVA::PDF::GetIntegral( Double_t xmin, Double_t xmax ) 
@@ -827,7 +819,7 @@ void TMVA::PDF::ProcessOptions()
 void TMVA::PDF::AddXMLTo( void* parent ) 
 {
    // XML file writing
-   void* pdfxml = gTools().xmlengine().NewChild(parent, 0, "PDF");
+   void* pdfxml = gTools().AddChild(parent, "PDF");
    gTools().AddAttr(pdfxml, "Name",           fPDFName );
    gTools().AddAttr(pdfxml, "MinNSmooth",     fMinNsmooth );
    gTools().AddAttr(pdfxml, "MaxNSmooth",     fMaxNsmooth );
@@ -836,7 +828,7 @@ void TMVA::PDF::AddXMLTo( void* parent )
    gTools().AddAttr(pdfxml, "KDE_iter",       fKDEiter );
    gTools().AddAttr(pdfxml, "KDE_border",     fKDEborder );
    gTools().AddAttr(pdfxml, "KDE_finefactor", fFineFactor );
-   void* pdfhist = gTools().xmlengine().NewChild(pdfxml,0,"Histogram" );
+   void* pdfhist = gTools().AddChild(pdfxml,"Histogram" );
    TH1*  histToWrite = GetOriginalHist();
    Bool_t hasEquidistantBinning = gTools().HistoHasEquidistantBins(*histToWrite);
    gTools().AddAttr(pdfhist, "Name",  histToWrite->GetName() );
@@ -850,17 +842,17 @@ void TMVA::PDF::AddXMLTo( void* parent )
       bincontent += gTools().StringFromDouble(histToWrite->GetBinContent(i+1));
       bincontent += " ";
    }
-   gTools().xmlengine().AddRawLine(pdfhist, bincontent );
+   gTools().AddRawLine(pdfhist, bincontent );
    
    if (!hasEquidistantBinning) {
-      void* pdfhistbins = gTools().xmlengine().NewChild(pdfxml,0,"HistogramBinning" );
+      void* pdfhistbins = gTools().AddChild(pdfxml,"HistogramBinning" );
       gTools().AddAttr(pdfhistbins, "NBins", histToWrite->GetNbinsX() );
       TString binns("");
       for (Int_t i=1; i<=histToWrite->GetNbinsX()+1; i++) {
          binns += gTools().StringFromDouble(histToWrite->GetXaxis()->GetBinLowEdge(i));
          binns += " ";
       }
-      gTools().xmlengine().AddRawLine(pdfhistbins, binns );      
+      gTools().AddRawLine(pdfhistbins, binns );
    }
 }
 
@@ -883,7 +875,7 @@ void TMVA::PDF::ReadXML( void* pdfnode )
    Double_t xmin, xmax;
    Bool_t hasEquidistantBinning;
 
-   void* histch = gTools().xmlengine().GetChild(pdfnode);
+   void* histch = gTools().GetChild(pdfnode);
    gTools().ReadAttr( histch, "Name",  hname );
    gTools().ReadAttr( histch, "NBins", nbins );
    gTools().ReadAttr( histch, "XMin",  xmin );
@@ -895,7 +887,7 @@ void TMVA::PDF::ReadXML( void* pdfnode )
    if (hasEquidistantBinning) {
       newhist = new TH1F( hname, hname, nbins, xmin, xmax );
       newhist->SetDirectory(0);
-      const char* content = gTools().xmlengine().GetNodeContent(histch);
+      const char* content = gTools().GetContent(histch);
       std::stringstream s(content);
       Double_t val;
       for (UInt_t i=0; i<nbins; i++) {
@@ -904,7 +896,7 @@ void TMVA::PDF::ReadXML( void* pdfnode )
       }
    }
    else{
-      const char* content = gTools().xmlengine().GetNodeContent(histch);
+      const char* content = gTools().GetContent(histch);
       std::stringstream s(content);
       Double_t val;
       void* binch = gTools().GetNextChild(histch);
@@ -914,7 +906,7 @@ void TMVA::PDF::ReadXML( void* pdfnode )
       if (nbinning != nbins) {
          Log() << kFATAL << "Number of bins in content and binning array differs"<<Endl;
       } 
-      const char* binString = gTools().xmlengine().GetNodeContent(binch);
+      const char* binString = gTools().GetContent(binch);
       std::stringstream sb(binString);
       for (UInt_t i=0; i<=nbins; i++) sb >> binns[i];
       newhist =  new TH1F( hname, hname, nbins, binns.GetMatrixArray() );
@@ -1032,3 +1024,9 @@ istream& TMVA::operator>> ( istream& istr, PDF& pdf )
 
    return istr;
 }
+
+TMVA::PDF* TMVA::PDF::ThisPDF( void )
+{
+   // return global "this" pointer of PDF
+   return fgThisPDF;
+}
diff --git a/tmva/src/Reader.cxx b/tmva/src/Reader.cxx
index 8b409743424c73a995d6a2ea3aea97ec4a2701d0..8949a4ec6b21eb37be0f6ce291424f87f0f681b2 100644
--- a/tmva/src/Reader.cxx
+++ b/tmva/src/Reader.cxx
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Eckhard von Toerne
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -96,6 +96,7 @@
 #include "TKey.h"
 #include "TVector.h"
 #include "TXMLEngine.h"
+
 #include <cstdlib>
 
 #include <string>
@@ -110,25 +111,30 @@
 #include "TMVA/ClassifierFactory.h"
 #include "TMVA/IMethod.h"
 #include "TMVA/MethodCuts.h"
+#include "TMVA/DataSetManager.h"
 
 ClassImp(TMVA::Reader)
 
 //_______________________________________________________________________
 TMVA::Reader::Reader( const TString& theOption, Bool_t verbose )
    : Configurable( theOption ),
+     fDataSetManager( NULL ), // DSMTEST
      fDataSetInfo(),
      fVerbose( verbose ),
      fSilent ( kFALSE ),
      fColor  ( kFALSE ),
      fMvaEventError( -1 ),
+     fMvaEventError2( -1 ),   //zjh
      fLogger ( 0 )
 {
    // constructor
 
    fLogger = new MsgLogger(this);
 
-   DataSetManager::CreateInstance(fDataInputHandler);
-   DataSetManager::Instance().AddDataSetInfo(fDataSetInfo);
+//    DataSetManager::CreateInstance(fDataInputHandler); // DSMTEST removed
+//    DataSetManager::Instance().AddDataSetInfo(fDataSetInfo); // DSMTEST removed
+   fDataSetManager = new DataSetManager( fDataInputHandler ); // DSMTEST
+   fDataSetManager->AddDataSetInfo(fDataSetInfo); // DSMTEST
 
    SetConfigName( GetName() );
    DeclareOptions();
@@ -145,6 +153,7 @@ TMVA::Reader::Reader( std::vector<TString>& inputVars, const TString& theOption,
      fSilent ( kFALSE ),
      fColor  ( kFALSE ),
      fMvaEventError( -1 ),
+     fMvaEventError2( -1 ),   //zjh
      fLogger ( 0 )
 {
    // constructor
@@ -170,6 +179,7 @@ TMVA::Reader::Reader( std::vector<std::string>& inputVars, const TString& theOpt
      fSilent ( kFALSE ),
      fColor  ( kFALSE ),
      fMvaEventError( -1 ),
+     fMvaEventError2( -1 ),   //zjh
      fLogger ( 0 )
 {
    // constructor
@@ -195,6 +205,7 @@ TMVA::Reader::Reader( const std::string& varNames, const TString& theOption, Boo
      fSilent ( kFALSE ),
      fColor  ( kFALSE ),
      fMvaEventError( -1 ),
+     fMvaEventError2( -1 ),   //zjh
      fLogger ( 0 )
 {
    // constructor
@@ -217,6 +228,7 @@ TMVA::Reader::Reader( const TString& varNames, const TString& theOption, Bool_t
      fSilent ( kFALSE ),
      fColor  ( kFALSE ),
      fMvaEventError( -1 ),
+     fMvaEventError2( -1 ),   //zjh
      fLogger ( 0 )
 {
    // constructor
@@ -246,6 +258,9 @@ void TMVA::Reader::DeclareOptions()
 TMVA::Reader::~Reader( void )
 {
    // destructor
+
+   delete fDataSetManager; // DSMTEST
+
    delete fLogger;
 }
 
@@ -270,7 +285,8 @@ void TMVA::Reader::AddVariable( const TString& expression, Float_t* datalink )
 //_______________________________________________________________________
 void TMVA::Reader::AddVariable( const TString& expression, Int_t* datalink )
 {
    // Add an integer variable or expression to the reader
+   Log() << kFATAL << "Reader::AddVariable( const TString& expression, Int_t* datalink ), this function is deprecated, please provide all variables to the reader as floats" << Endl;
    DataInfo().AddVariable(expression, "", "", 0, 0, 'I', kFALSE, (void*)datalink ); // <= should this be F or rather T?
 }
 
@@ -344,9 +361,11 @@ TMVA::IMethod* TMVA::Reader::BookMVA( TMVA::Types::EMVA methodType, const TStrin
    // books MVA method from weightfile
    IMethod* im = ClassifierFactory::Instance().Create(std::string(Types::Instance().GetMethodName( methodType )),
                                                       DataInfo(), weightfile );
-   
+
    MethodBase *method = (dynamic_cast<MethodBase*>(im));
 
+   if (method==0) return im;
+
    method->SetupMethod();
 
    // when reading older weight files, they could include options
@@ -365,22 +384,67 @@ TMVA::IMethod* TMVA::Reader::BookMVA( TMVA::Types::EMVA methodType, const TStrin
    return method;
 }
 
+#if ROOT_SVN_REVISION >= 32259
+//_______________________________________________________________________
+TMVA::IMethod* TMVA::Reader::BookMVA( TMVA::Types::EMVA methodType, const char* xmlstr )
+{
+   // books MVA method from weightfile
+   IMethod* im = ClassifierFactory::Instance().Create(std::string(Types::Instance().GetMethodName( methodType )),
+                                                      DataInfo(), "" );
+   
+   MethodBase *method = (dynamic_cast<MethodBase*>(im));
+
+   method->SetupMethod();
+
+   // when reading older weight files, they could include options
+   // that are not supported any longer
+   method->DeclareCompatibilityOptions();
+
+   // read weight file
+   method->ReadStateFromXMLString( xmlstr );
+
+   // check for unused options
+   method->CheckSetup();
+   
+   Log() << kINFO << "Booked classifier \"" << method->GetMethodName()
+         << "\" of type: \"" << method->GetMethodTypeName() << "\"" << Endl;
+   
+   return method;
+}
+#endif
+
 //_______________________________________________________________________
-Double_t TMVA::Reader::EvaluateMVA( const std::vector<Float_t>& /*inputVec*/, const TString& methodTag, Double_t aux )
+Double_t TMVA::Reader::EvaluateMVA( const std::vector<Float_t>& inputVec, const TString& methodTag, Double_t aux )
 {
    // Evaluate a vector<float> of input data for a given method
    // The parameter aux is obligatory for the cuts method where it represents the efficiency cutoff
 
-   return EvaluateMVA( methodTag, aux );
+   // create a temporary event from the vector.
+   Event* tmpEvent=new Event(inputVec, 2); // ToDo resolve magic 2 issue
+   IMethod* imeth = FindMVA( methodTag );
+   MethodBase* meth = dynamic_cast<TMVA::MethodBase*>(imeth);
+   if (meth==0) Log() << kFATAL << methodTag << " is not a method" << Endl;
+   if (meth->GetMethodType() == TMVA::Types::kCuts)
+      dynamic_cast<TMVA::MethodCuts*>(meth)->SetTestSignalEfficiency( aux );
+   Double_t val = meth->GetMvaValue( tmpEvent, &fMvaEventError);
+   delete tmpEvent;
+   return val;
 }
 
 //_______________________________________________________________________
-Double_t TMVA::Reader::EvaluateMVA( const std::vector<Double_t>& /*inputVec*/, const TString& methodTag, Double_t aux )
+Double_t TMVA::Reader::EvaluateMVA( const std::vector<Double_t>& inputVec, const TString& methodTag, Double_t aux )
 {
    // Evaluate a vector<double> of input data for a given method
    // The parameter aux is obligatory for the cuts method where it represents the efficiency cutoff
 
-   return EvaluateMVA( methodTag, aux );
+   // performs a copy to float values which are internally used by all methods
+   if(fTmpEvalVec.size() != inputVec.size())
+      fTmpEvalVec.resize(inputVec.size());
+
+   for (UInt_t idx=0; idx!=inputVec.size(); idx++ ) 
+      fTmpEvalVec[idx]=inputVec[idx];
+
+   return EvaluateMVA( fTmpEvalVec, methodTag, aux );
 }
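
A usage sketch for the overloads filled in above (reader setup and method tag are
illustrative); the values must be supplied in the same order as the AddVariable() calls:

   std::vector<Float_t> input;
   input.push_back( var1 );   // same order as declared to the Reader
   input.push_back( var2 );
   Double_t mva = reader->EvaluateMVA( input, "BDT" );
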
 
 //_______________________________________________________________________
@@ -399,7 +462,12 @@ Double_t TMVA::Reader::EvaluateMVA( const TString& methodTag, Double_t aux )
 
    else method = it->second;
 
-   return this->EvaluateMVA( dynamic_cast<TMVA::MethodBase*>(method), aux );
+   MethodBase * kl = dynamic_cast<TMVA::MethodBase*>(method);
+
+   if(kl==0)
+      Log() << kFATAL << methodTag << " is not a method" << Endl;
+
+   return this->EvaluateMVA( kl, aux );
 }
 
 //_______________________________________________________________________
@@ -410,8 +478,8 @@ Double_t TMVA::Reader::EvaluateMVA( MethodBase* method, Double_t aux )
    // the aux value is only needed for MethodCuts: it sets the required signal efficiency
    if (method->GetMethodType() == TMVA::Types::kCuts)
       dynamic_cast<TMVA::MethodCuts*>(method)->SetTestSignalEfficiency( aux );
-
-   return method->GetMvaValue( &fMvaEventError ); // attributed MVA response and error
+   if (method->GetMethodType() == TMVA::Types::kMLP) return method->GetMvaValues( fMvaEventError, fMvaEventError2 ); //zjh
+   else  return method->GetMvaValue( &fMvaEventError ); // attributed MVA response and error
 }
 
 //_______________________________________________________________________
@@ -427,9 +495,14 @@ const std::vector< Float_t >& TMVA::Reader::EvaluateRegression( const TString& m
       for (it = fMethodMap.begin(); it!=fMethodMap.end(); it++) Log() << " --> " << it->first << Endl;
       Log() << "Check calling string" << kFATAL << Endl;
    }
-
    else method = it->second;
-   return this->EvaluateRegression( dynamic_cast<TMVA::MethodBase*>(method), aux );
+
+   MethodBase * kl = dynamic_cast<TMVA::MethodBase*>(method);
+
+   if(kl==0)
+      Log() << kFATAL << methodTag << " is not a method" << Endl;
+
+   return this->EvaluateRegression( kl, aux );
 }
 
 //_______________________________________________________________________
@@ -453,6 +526,53 @@ Float_t TMVA::Reader::EvaluateRegression( UInt_t tgtNumber, const TString& metho
    }
 }
 
+
+
+//_______________________________________________________________________
+const std::vector< Float_t >& TMVA::Reader::EvaluateMulticlass( const TString& methodTag, Double_t aux )
+{
+   // evaluates MVA for given set of input variables
+   IMethod* method = 0;
+
+   std::map<TString, IMethod*>::iterator it = fMethodMap.find( methodTag );
+   if (it == fMethodMap.end()) {
+      Log() << kINFO << "<EvaluateMulticlass> unknown method in map; "
+              << "you looked for \"" << methodTag << "\" within available methods: " << Endl;
+      for (it = fMethodMap.begin(); it!=fMethodMap.end(); it++) Log() << " --> " << it->first << Endl;
+      Log() << "Check calling string" << kFATAL << Endl;
+   }
+   else method = it->second;
+
+   MethodBase * kl = dynamic_cast<TMVA::MethodBase*>(method);
+
+   if(kl==0)
+      Log() << kFATAL << methodTag << " is not a method" << Endl;
+
+   return this->EvaluateMulticlass( kl, aux );
+}
+
+//_______________________________________________________________________
+const std::vector< Float_t >& TMVA::Reader::EvaluateMulticlass( MethodBase* method, Double_t /*aux*/ )
+{
+   // evaluates the multiclass MVA
+   return method->GetMulticlassValues();
+}
+
+
+//_______________________________________________________________________
+Float_t TMVA::Reader::EvaluateMulticlass( UInt_t clsNumber, const TString& methodTag, Double_t aux )
+{ 
+   // evaluates the multiclass MVA
+   try {
+      return EvaluateMulticlass(methodTag, aux).at(clsNumber);
+   }
+   catch (std::out_of_range& e) {
+      Log() << kWARNING << "Multiclass could not be evaluated for class-number " << clsNumber << Endl;
+      return 0;
+   }
+}
+
+
 //_______________________________________________________________________
 TMVA::IMethod* TMVA::Reader::FindMVA( const TString& methodTag )
 {
@@ -466,7 +586,7 @@ TMVA::IMethod* TMVA::Reader::FindMVA( const TString& methodTag )
 //_______________________________________________________________________
 TMVA::MethodCuts* TMVA::Reader::FindCutsMVA( const TString& methodTag )
 {
-   // special function for Cuts to avoid dynamic_casts in ROOT macros, 
+   // special function for Cuts to avoid dynamic_casts in ROOT macros,
    // which are not properly handled by CINT
    return dynamic_cast<MethodCuts*>(FindMVA(methodTag));
 }
@@ -485,6 +605,8 @@ Double_t TMVA::Reader::GetProba( const TString& methodTag,  Double_t ap_sig, Dou
    else method = it->second;
 
    MethodBase* kl = dynamic_cast<MethodBase*>(method);
+   if(kl==0) return -1;
+
    if (mvaVal == -9999999) mvaVal = kl->GetMvaValue();
 
    return kl->GetProba( mvaVal, ap_sig );
@@ -504,6 +626,8 @@ Double_t TMVA::Reader::GetRarity( const TString& methodTag, Double_t mvaVal )
    else method = it->second;
 
    MethodBase* kl = dynamic_cast<MethodBase*>(method);
+   if(kl==0) return -1;
+
    if (mvaVal == -9999999) mvaVal = kl->GetMvaValue();
 
    return kl->GetRarity( mvaVal );
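
A corresponding sketch for the new multiclass interface (method tag and class index are
illustrative; aux is assumed to carry a default in the header, as for the other Evaluate
calls):

   // full per-class response vector ...
   const std::vector<Float_t>& resp = reader->EvaluateMulticlass( "MLP" );
   // ... or a single class output by index, with a warning on out-of-range indices
   Float_t cls0 = reader->EvaluateMulticlass( 0, "MLP" );
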
diff --git a/tmva/src/Results.cxx b/tmva/src/Results.cxx
index fca1a3e0b81299fd15c7a07ceadc111d12e8d67e..aa0fc8723302a7868a7f1313ec5b070fbf1504a3 100644
--- a/tmva/src/Results.cxx
+++ b/tmva/src/Results.cxx
@@ -71,7 +71,7 @@ void TMVA::Results::Store( TObject* obj, const char* alias )
       // alias exists
       *fLogger << kFATAL << "Alias " << as << " already exists in results storage" << Endl;
    }
-   if( obj->InheritsFrom(TH1::Class()) ) {
+   if( obj->InheritsFrom("TH1") ) {
       ((TH1*)obj)->SetDirectory(0);
    }
    fStorage->Add( obj );
diff --git a/tmva/src/ResultsMulticlass.cxx b/tmva/src/ResultsMulticlass.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..a019aac3ee448137d5d59057302365d3e8810235
--- /dev/null
+++ b/tmva/src/ResultsMulticlass.cxx
@@ -0,0 +1,99 @@
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
+
+/**********************************************************************************
+ * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
+ * Package: TMVA                                                                  *
+ * Class  : ResultsMulticlass                                                     *
+ * Web    : http://tmva.sourceforge.net                                           *
+ *                                                                                *
+ * Description:                                                                   *
+ *      Implementation (see header for description)                               *
+ *                                                                                *
+ * Authors (alphabetical):                                                        *
+ *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch>  - CERN, Switzerland          *
+ *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
+ *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
+ *                                                                                *
+ * Copyright (c) 2006:                                                            *
+ *      CERN, Switzerland                                                         *
+ *      MPI-K Heidelberg, Germany                                                 *
+ *                                                                                *
+ * Redistribution and use in source and binary forms, with or without             *
+ * modification, are permitted according to the terms listed in LICENSE           *
+ * (http://tmva.sourceforge.net/LICENSE)                                          *
+ **********************************************************************************/
+
+#include <vector>
+
+#include "TMVA/ResultsMulticlass.h"
+#include "TMVA/MsgLogger.h"
+#include "TMVA/DataSet.h"
+
+//_______________________________________________________________________
+TMVA::ResultsMulticlass::ResultsMulticlass( const DataSetInfo* dsi ) 
+   : Results( dsi ),
+     fLogger( new MsgLogger("ResultsMulticlass", kINFO) )
+{
+   // constructor
+}
+
+//_______________________________________________________________________
+TMVA::ResultsMulticlass::~ResultsMulticlass() 
+{
+   // destructor
+   delete fLogger;
+}
+
+//_______________________________________________________________________
+void TMVA::ResultsMulticlass::SetValue( std::vector<Float_t>& value, Int_t ievt )
+{
+   if (ievt >= (Int_t)fMultiClassValues.size()) fMultiClassValues.resize( ievt+1 );
+   fMultiClassValues[ievt] = value; 
+}
+
+
+
+
+//_______________________________________________________________________
+void  TMVA::ResultsMulticlass::MakeHistograms()
+{
+//    DataSet* ds = GetDataSet();
+//    ds->SetCurrentType( GetTreeType() );
+//    const DataSetInfo* dsi = GetDataSetInfo();
+
+//    TString name( Form("tgt_%d",tgtNum) );
+
+//    VariableInfo vinf = dsi->GetTargetInfo(tgtNum);
+//    Float_t xmin=0., xmax=0.;
+//    if (truncate){
+//      xmax = truncvalue;
+//    }
+//    else{
+//      for (Int_t ievt=0; ievt<ds->GetNEvents(); ievt++) {
+//        Event* ev = ds->GetEvent(ievt);
+//        std::vector<Float_t> regVal = fMulticlassValues.at(ievt);
+//        Float_t val = regVal.at( tgtNum ) - ev->GetTarget( tgtNum );
+//        val *= val;
+//        xmax = val> xmax? val: xmax;
+//      } 
+//    }
+//    xmax *= 1.1;
+//    Int_t nbins = 500;
+//    TH1F* h = new TH1F( name, name, nbins, xmin, xmax);
+//    h->SetDirectory(0);
+//    h->GetXaxis()->SetTitle("Quadratic Deviation");
+//    h->GetYaxis()->SetTitle("Weighted Entries");
+
+//    for (Int_t ievt=0; ievt<ds->GetNEvents(); ievt++) {
+//       Event* ev = ds->GetEvent(ievt);
+//       std::vector<Float_t> regVal = fMulticlassValues.at(ievt);
+//       Float_t val = regVal.at( tgtNum ) - ev->GetTarget( tgtNum );
+//       val *= val;
+//       Float_t weight = ev->GetWeight();
+//       if (!truncate || val<=truncvalue ) h->Fill( val, weight);
+//    } 
+//    return h;
+}
+
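
The new `ResultsMulticlass::SetValue` grows its event-indexed store on demand, so events can be filled in any order. A self-contained sketch of the pattern (names here are illustrative, not TMVA's):

```cpp
#include <vector>

// Per-event storage of multiclass responses: one value per class, one
// vector per event, resized lazily so events may arrive out of order.
class MulticlassStore {
public:
   void SetValue(const std::vector<float>& value, int ievt)
   {
      if (ievt >= (int)fValues.size()) fValues.resize(ievt + 1);
      fValues[ievt] = value;
   }
   const std::vector<float>& GetValue(int ievt) const { return fValues[ievt]; }

private:
   std::vector<std::vector<float> > fValues;
};
```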
diff --git a/tmva/src/Rule.cxx b/tmva/src/Rule.cxx
index 0793ee0644cd075a4c250032f2c7d6f044eb5704..d0bd21ee01bffaa99f249c6a7fbe4f39785e5986 100644
--- a/tmva/src/Rule.cxx
+++ b/tmva/src/Rule.cxx
@@ -394,7 +394,7 @@ void* TMVA::Rule::AddXMLTo( void* parent ) const
 void TMVA::Rule::ReadFromXML( void* wghtnode )
 {
    // read rule from XML
-   TString nodeName = TString( gTools().xmlengine().GetNodeName(wghtnode) );
+   TString nodeName = TString( gTools().GetName(wghtnode) );
    if (nodeName != "Rule") Log() << kFATAL << "<ReadFromXML> Unexpected node name: " << nodeName << Endl;
 
    gTools().ReadAttr( wghtnode, "Importance", fImportance    );
@@ -405,22 +405,22 @@ void TMVA::Rule::ReadFromXML( void* wghtnode )
    gTools().ReadAttr( wghtnode, "Norm",       fNorm          );
    gTools().ReadAttr( wghtnode, "SSB",        fSSB           );
    gTools().ReadAttr( wghtnode, "SSBNeve",    fSSBNeve       );
-   
+
    UInt_t nvars;
    gTools().ReadAttr( wghtnode, "Nvars",      nvars          );
    if (fCut) delete fCut;
    fCut = new RuleCut();
    fCut->SetNvars( nvars );
-   
+
    // read Cut
-   void*    ch = gTools().xmlengine().GetChild( wghtnode );
+   void*    ch = gTools().GetChild( wghtnode );
    UInt_t   i = 0;
    UInt_t   ui;
    Double_t d;
    Char_t   c;
    while (ch) {
       gTools().ReadAttr( ch, "Selector", ui );
-      fCut->SetSelector( i, ui ); 
+      fCut->SetSelector( i, ui );
       gTools().ReadAttr( ch, "Min",      d );
       fCut->SetCutMin  ( i, d );
       gTools().ReadAttr( ch, "Max",      d );
@@ -429,10 +429,10 @@ void TMVA::Rule::ReadFromXML( void* wghtnode )
       fCut->SetCutDoMin( i, (c == 'T' ? kTRUE : kFALSE ) );
       gTools().ReadAttr( ch, "DoMax",    c );
       fCut->SetCutDoMax( i, (c == 'T' ? kTRUE : kFALSE ) );
-      
+
       i++;
-      ch = gTools().xmlengine().GetNext(ch);
-   }   
+      ch = gTools().GetNextChild(ch);
+   }
 
    // sanity check
    if (i != nvars) Log() << kFATAL << "<ReadFromXML> Mismatch in number of cuts: " << i << " != " << nvars << Endl;
diff --git a/tmva/src/RuleEnsemble.cxx b/tmva/src/RuleEnsemble.cxx
index d0caaf2d2ae9fb9a385e5b4f0ef38210f185c191..b0679b5b04a2b3280068a091585a1d3ed6672a28 100644
--- a/tmva/src/RuleEnsemble.cxx
+++ b/tmva/src/RuleEnsemble.cxx
@@ -17,9 +17,9 @@
  *      Helge Voss         <Helge.Voss@cern.ch>         - MPI-KP Heidelberg, GER  *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
+ *      CERN, Switzerland                                                         *
  *      Iowa State U.                                                             *
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      MPI-K Heidelberg, Germany                                                 *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
  * modification, are permitted according to the terms listed in LICENSE           *
@@ -345,7 +345,7 @@ void TMVA::RuleEnsemble::CalcRuleSupport()
             if ((*itrRule)->EvalEvent( *(*itrEvent) )) {
                ew = (*itrEvent)->GetWeight();
                s += ew;
-               if ((*itrEvent)->IsSignal()) ssig += ew;
+               if (GetMethodRuleFit()->DataInfo().IsSignal(*itrEvent)) ssig += ew;
                else                         sbkg += ew;
             }
          }
@@ -818,7 +818,7 @@ void TMVA::RuleEnsemble::RuleResponseStats()
          sigTag = (tagged && sigRule);        // it's tagged as a signal
          bkgTag = (tagged && (!sigRule));     // ... as bkg
          noTag = !(sigTag || bkgTag);         // ... not tagged
-         sigTrue = eveData->IsSignal();       // true if event is true signal
+         sigTrue = (eveData->GetClass() == 0);       // true if event is true signal
          if (tagged) {
             ntag++;
             if (sigTag && sigTrue)  nss++;
@@ -1080,13 +1080,13 @@ void TMVA::RuleEnsemble::ReadFromXML( void* wghtnode )
 
    UInt_t i = 0;
    fRules.resize( nrules  );
-   void* ch = gTools().xmlengine().GetChild( wghtnode );
+   void* ch = gTools().GetChild( wghtnode );
    for (i=0; i<nrules; i++) {
       fRules[i] = new Rule();
       fRules[i]->SetRuleEnsemble( this );
       fRules[i]->ReadFromXML( ch );
 
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 
    // read linear classifier (Fisher)
@@ -1109,7 +1109,7 @@ void TMVA::RuleEnsemble::ReadFromXML( void* wghtnode )
       gTools().ReadAttr( ch, "Importance", fLinImportance[i]    );
 
       i++;
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 }
 
diff --git a/tmva/src/RuleFit.cxx b/tmva/src/RuleFit.cxx
index 0cb96953a884e4108f479a2a82b4fa8edb6431e3..2532f5126c66c429963f411a8f305bf5b36b8c5d 100644
--- a/tmva/src/RuleFit.cxx
+++ b/tmva/src/RuleFit.cxx
@@ -209,7 +209,7 @@ void TMVA::RuleFit::MakeForest()
       nsig=0;
       nbkg=0;
       for (UInt_t ie = 0; ie<fNTreeSample; ie++) {
-         if (fTrainingEventsRndm[ie]->IsSignal()) nsig++; // ignore weights here
+         if (fMethodBase->DataInfo().IsSignal(fTrainingEventsRndm[ie])) nsig++; // ignore weights here
          else nbkg++;
       }
       fsig = Double_t(nsig)/Double_t(nsig+nbkg);
@@ -224,7 +224,7 @@ void TMVA::RuleFit::MakeForest()
       while (tryAgain) {
          Double_t frnd = rndGen.Uniform( fMethodRuleFit->GetMinFracNEve(), fMethodRuleFit->GetMaxFracNEve() );
          nminRnd = Int_t(frnd*static_cast<Double_t>(fNTreeSample));
-         dt = new DecisionTree( fMethodRuleFit->GetSeparationBase(), nminRnd, fMethodRuleFit->GetNCuts(), qualitySepType );
+         dt = new DecisionTree( fMethodRuleFit->GetSeparationBase(), nminRnd, fMethodRuleFit->GetNCuts(), 0, qualitySepType );
          BuildTree(dt); // reads fNTreeSample events from fTrainingEventsRndm
          if (dt->GetNNodes()<3) {
             delete dt;
@@ -303,7 +303,7 @@ void TMVA::RuleFit::Boost( DecisionTree *dt )
       Double_t w = (*e)->GetWeight();
       sumw += w;
       // 
-      if (isSignalType == (*e)->IsSignal()) { // correctly classified
+      if (isSignalType == fMethodBase->DataInfo().IsSignal(*e)) { // correctly classified
          correctSelected.push_back(kTRUE);
       } 
      else {                                // misclassified
@@ -780,7 +780,7 @@ void TMVA::RuleFit::MakeVisHists()
    while ((key = (TKey*)next())) {
       // make sure, that we only look at histograms
       TClass *cl = gROOT->GetClass(key->GetClassName());
-      if (!cl->InheritsFrom(TH1F::Class())) continue;
+      if (!cl->InheritsFrom("TH1F")) continue;
       TH1F *sig = (TH1F*)key->ReadObj();
       TString hname= sig->GetName();
       Log() << kDEBUG << "Got histogram : " << hname << Endl;
@@ -807,7 +807,7 @@ void TMVA::RuleFit::MakeVisHists()
    while ((key = (TKey*)nextCorr())) {
       // make sure, that we only look at histograms
       TClass *cl = gROOT->GetClass(key->GetClassName());
-      if (!cl->InheritsFrom(TH2F::Class())) continue;
+      if (!cl->InheritsFrom("TH2F")) continue;
       TH2F *sig = (TH2F*)key->ReadObj();
       TString hname= sig->GetName();
 
diff --git a/tmva/src/RuleFitAPI.cxx b/tmva/src/RuleFitAPI.cxx
index 908311ee6949173e7414e1fd6ffbf1ef658ff05e..b157a45fedb482a28e4507af33fec500488e9d29 100644
--- a/tmva/src/RuleFitAPI.cxx
+++ b/tmva/src/RuleFitAPI.cxx
@@ -376,7 +376,7 @@ Bool_t TMVA::RuleFitAPI::WriteTrain()
          WriteFloat(fx,&x,1);
          if (ivar==0) {
             w = ev->GetWeight();
-            y = ev->IsSignal() ? 1.0 : -1.0;
+            y = fMethodRuleFit->DataInfo().IsSignal(ev)? 1.0 : -1.0;
             WriteFloat(fy,&y,1);
             WriteFloat(fw,&w,1);
          }
diff --git a/tmva/src/RuleFitParams.cxx b/tmva/src/RuleFitParams.cxx
index 6392401ab5cf7306d85a6fd453cdecf074e18413..ab6a7a6c1a1235c307d62de3d518c7a26aa481db 100644
--- a/tmva/src/RuleFitParams.cxx
+++ b/tmva/src/RuleFitParams.cxx
@@ -254,7 +254,7 @@ Double_t TMVA::RuleFitParams::LossFunction( const Event& e ) const
    // Implementation of squared-error ramp loss function (eq 39,40 in ref 1)
    // This is used for binary Classifications where y = {+1,-1} for (sig,bkg)
    Double_t h = TMath::Max( -1.0, TMath::Min(1.0,fRuleEnsemble->EvalEvent( e )) );
-   Double_t diff = (e.IsSignal()?1:-1) - h;
+   Double_t diff = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(&e)?1:-1) - h;
    //
    return diff*diff*e.GetWeight();
 }
@@ -265,7 +265,7 @@ Double_t TMVA::RuleFitParams::LossFunction( UInt_t evtidx ) const
    // Implementation of squared-error ramp loss function (eq 39,40 in ref 1)
    // This is used for binary Classifications where y = {+1,-1} for (sig,bkg)
    Double_t h = TMath::Max( -1.0, TMath::Min(1.0,fRuleEnsemble->EvalEvent( evtidx )) );
-   Double_t diff = (fRuleEnsemble->GetRuleMapEvent( evtidx )->IsSignal()?1:-1) - h;
+   Double_t diff = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(fRuleEnsemble->GetRuleMapEvent( evtidx ))?1:-1) - h;
    //
    return diff*diff*fRuleFit->GetTrainingEventWeight(evtidx);
 }
@@ -277,7 +277,7 @@ Double_t TMVA::RuleFitParams::LossFunction( UInt_t evtidx, UInt_t itau ) const
    // This is used for binary Classifications where y = {+1,-1} for (sig,bkg)
    Double_t e = fRuleEnsemble->EvalEvent( evtidx , fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau]);
    Double_t h = TMath::Max( -1.0, TMath::Min(1.0,e) );
-   Double_t diff = (fRuleEnsemble->GetRuleMapEvent( evtidx )->IsSignal()?1:-1) - h;
+   Double_t diff = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(fRuleEnsemble->GetRuleMapEvent( evtidx ))?1:-1) - h;
    //
    return diff*diff*fRuleFit->GetTrainingEventWeight(evtidx);
 }
@@ -910,7 +910,7 @@ Double_t TMVA::RuleFitParams::Optimism()
    for (UInt_t i=fPerfIdx1; i<fPerfIdx2+1; i++) {
       const Event& e = *(*events)[i];
       yhat = fRuleEnsemble->EvalEvent(i);         // evaluated using the model
-      y    = (e.IsSignal() ? 1.0:-1.0);           // the truth
+      y    = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(&e) ? 1.0:-1.0);           // the truth
       w    = fRuleFit->GetTrainingEventWeight(i)/fNEveEffPerf; // the weight, reweighted such that sum=1
       sumy     += w*y;
       sumyhat  += w*yhat;
@@ -994,7 +994,7 @@ Double_t TMVA::RuleFitParams::ErrorRateBin()
       //      Double_t sFstar = fRuleEnsemble->FStar(e); // THIS CAN BE CALCULATED ONCE!
       signF = (sF>0 ? +1:-1);
       //      signy = (sFStar>0 ? +1:-1);
-      signy = (e.IsSignal() ? +1:-1);
+      signy = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(&e) ? +1:-1);
       sumdfbin += TMath::Abs(Double_t(signF-signy))*0.5;
    }
    Double_t f = sumdfbin/dneve;
@@ -1096,7 +1096,7 @@ Double_t TMVA::RuleFitParams::ErrorRateRoc()
    for (UInt_t i=fPerfIdx1; i<fPerfIdx2+1; i++) {
       const Event& e = *(*events)[i];
       sF = fRuleEnsemble->EvalEvent(i);// * fRuleFit->GetTrainingEventWeight(i);
-      if (e.IsSignal()) {
+      if (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(&e)) {
          sFsig.push_back(sF);
          sumfsig  +=sF;
          sumf2sig +=sF*sF;
@@ -1148,7 +1148,7 @@ void TMVA::RuleFitParams::ErrorRateRocTst()
          //         if (itau==0) sF = fRuleEnsemble->EvalEvent( *(*events)[i], fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau] );
          //         else         sF = fRuleEnsemble->EvalEvent(                fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau] );
          sF = fRuleEnsemble->EvalEvent( i, fGDOfsTst[itau], fGDCoefTst[itau], fGDCoefLinTst[itau] );
-         if ((*events)[i]->IsSignal()) {
+         if (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal((*events)[i])) {
             sFsig[itau].push_back(sF);
          } 
          else {
@@ -1276,7 +1276,7 @@ void TMVA::RuleFitParams::MakeTstGradientVector()
             if (TMath::Abs(sF)<1.0) {
                nsfok++;
                r = 0;
-               y = (e->IsSignal()?1.0:-1.0);
+               y = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(e)?1.0:-1.0);
                r = norm*(y - sF) * fRuleFit->GetTrainingEventWeight(i);
                // rule gradient vector
                for (UInt_t ir=0; ir<nrules; ir++) {
@@ -1390,7 +1390,7 @@ void TMVA::RuleFitParams::MakeGradientVector()
             eventRuleMap = &(fRuleEnsemble->GetEventRuleMap(i));
             nrules = (*eventRuleMap).size();
          }
-         y = (e->IsSignal()?1.0:-1.0);
+         y = (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(e)?1.0:-1.0);
          r = norm*(y - sF) * fRuleFit->GetTrainingEventWeight(i);
          // rule gradient vector
          for (UInt_t ir=0; ir<nrules; ir++) {
@@ -1508,15 +1508,22 @@ Double_t TMVA::RuleFitParams::CalcAverageTruth()
    const std::vector<Event *> *events = &(fRuleFit->GetTrainingEvents());
    for (UInt_t i=fPathIdx1; i<fPathIdx2+1; i++) {
       Double_t ew = fRuleFit->GetTrainingEventWeight(i);
-      if ((*events)[i]->IsSignal()) ensig += ew;
+      if (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal((*events)[i])) ensig += ew;
       else                          enbkg += ew;
-      sum += ew*((*events)[i]->IsSignal()?1.0:-1.0);
+      sum += ew*(fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal((*events)[i])?1.0:-1.0);
    }
    Log() << kVERBOSE << "Effective number of signal / background = " << ensig << " / " << enbkg << Endl;
 
    return sum/fNEveEffPath;
 }
 
+//_______________________________________________________________________
+
+Int_t  TMVA::RuleFitParams::Type( const Event * e ) const { 
+   return (fRuleFit->GetMethodRuleFit()->DataInfo().IsSignal(e) ? 1:-1);
+}
+
+
 //_______________________________________________________________________
 void TMVA::RuleFitParams::SetMsgType( EMsgType t ) {
    fLogger->SetMinType(t);
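
All the `IsSignal` rewrites in this file follow one pattern: the event no longer answers "am I signal?" itself; `DataSetInfo` decides, based on the event's class index (signal being class 0, as the `RuleResponseStats` hunk above makes explicit). A simplified sketch of the pattern, with assumed minimal signatures rather than the real TMVA headers:

```cpp
// Illustrative reimplementation of the refactoring pattern, not TMVA code.
class Event {
public:
   explicit Event(unsigned int cls) : fClass(cls) {}
   unsigned int GetClass() const { return fClass; }
private:
   unsigned int fClass;
};

class DataSetInfo {
public:
   // by convention the signal class has index 0
   bool IsSignal(const Event* ev) const { return ev->GetClass() == 0; }
};

// before: if (ev->IsSignal())        { ... }
// after:  if (dataInfo.IsSignal(ev)) { ... }
```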
diff --git a/tmva/src/SeedDistance.cxx b/tmva/src/SeedDistance.cxx
deleted file mode 100644
index dbe680bbaf4f33d832d0276d97a1b367c8dd9d6d..0000000000000000000000000000000000000000
--- a/tmva/src/SeedDistance.cxx
+++ /dev/null
@@ -1,58 +0,0 @@
-// @(#)root/tmva $Id$ 
-// Author: Andreas Hoecker, Peter Speckmayer
-
-/**********************************************************************************
- * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
- * Package: TMVA                                                                  *
- * Class  : SeedDistance                                                         *
- * Web    : http://tmva.sourceforge.net                                           *
- *                                                                                *
- * Description:                                                                   *
- *      Implementation                                                            *
- *                                                                                *
- * Authors (alphabetical):                                                        *
- *      Peter Speckmayer <speckmay@mail.cern.ch> - CERN, Switzerland              *
- *                                                                                *
- * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      MPI-K Heidelberg, Germany                                                 * 
- *                                                                                *
- * Redistribution and use in source and binary forms, with or without             *
- * modification, are permitted according to the terms listed in LICENSE           *
- * (http://tmva.sourceforge.net/LICENSE)                                          *
- **********************************************************************************/
-
-//_______________________________________________________________________
-//                                                                      
-// SeedDistance
-//
-//_______________________________________________________________________
-
-#include "TMVA/SeedDistance.h"
-
-ClassImp(TMVA::SeedDistance)
-
-//_______________________________________________________________________
-TMVA::SeedDistance::SeedDistance( IMetric& metric, std::vector< std::vector<Double_t> >& seeds ) 
-   : fSeeds( seeds ),
-     fMetric( metric )
-{
-   // constructor
-}            
-
-
-
-//_______________________________________________________________________
-std::vector<Double_t>& TMVA::SeedDistance::GetDistances( std::vector<Double_t>& point )
-{
-   // calculates distances of all seeds to a point and stores the result in a distance vector
-   fDistances.clear();
-   Double_t val = 0.0;
-   for( std::vector< std::vector<Double_t> >::iterator itSeed = fSeeds.begin(); itSeed != fSeeds.end(); itSeed++ ){
-      val = fMetric.Distance( (*itSeed), point );
-      fDistances.push_back( val );
-   }
-   return fDistances;
-}
-
-
diff --git a/tmva/src/SimulatedAnnealing.cxx b/tmva/src/SimulatedAnnealing.cxx
index 2a08107202f074d2e5a91228a90bb92744f1a43b..7d3d4855b0f8a3939d85146253887d4aa7c6dc18 100644
--- a/tmva/src/SimulatedAnnealing.cxx
+++ b/tmva/src/SimulatedAnnealing.cxx
@@ -26,8 +26,8 @@
  **********************************************************************************/
 
 //_______________________________________________________________________
-//                                                                      
-// Implementation of Simulated Annealing fitter  
+//
+// Implementation of Simulated Annealing fitter
 //_______________________________________________________________________
 #include "TMVA/SimulatedAnnealing.h"
 
@@ -44,7 +44,8 @@ ClassImp(TMVA::SimulatedAnnealing)
 
 //_______________________________________________________________________
 TMVA::SimulatedAnnealing::SimulatedAnnealing( IFitterTarget& target, const std::vector<Interval*>& ranges )
-   : fFitterTarget          ( target ),
+   : fKernelTemperature     (kIncreasingAdaptive),
+     fFitterTarget          ( target ),
      fRandom                ( new TRandom3(100) ),
      fRanges                ( ranges ),
      fMaxCalls              ( 500000 ),
@@ -52,11 +53,14 @@ TMVA::SimulatedAnnealing::SimulatedAnnealing( IFitterTarget& target, const std::
      fMinTemperature        ( 0 ),
      fEps                   ( 1e-10 ),
      fTemperatureScale      ( 0.06 ),
+     fAdaptiveSpeed         ( 1.0 ),
+     fTemperatureAdaptiveStep( 0.0 ),
      fUseDefaultScale       ( kFALSE ),
-     fLogger( new MsgLogger("SimulatedAnnealing") )
+     fUseDefaultTemperature ( kFALSE ),
+     fLogger( new MsgLogger("SimulatedAnnealing") ),
+     fProgress(0.0)
 {
    // constructor
-   fAdaptiveSpeed = 1.0;
    fKernelTemperature = kIncreasingAdaptive;
 }
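
The constructor hunk above moves member setup into the initializer list (the body assignment `fKernelTemperature = kIncreasingAdaptive;` it leaves behind is now redundant but harmless). C++ initializes members in declaration order, so listing them in that same order, as the patch does, avoids `-Wreorder` warnings and reads of not-yet-initialized members. A small sketch of the rule:

```cpp
// Members are initialized in DECLARATION order, not in the order written
// in the initializer list; keeping both orders aligned avoids surprises.
class Annealer {
public:
   Annealer()
      : fKernel(1),            // declared first, initialized first
        fAdaptiveSpeed(1.0),   // then this
        fProgress(0.0)         // then this
   {}

private:
   int    fKernel;
   double fAdaptiveSpeed;
   double fProgress;
};
```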
 
@@ -315,10 +319,10 @@ Double_t TMVA::SimulatedAnnealing::Minimize( std::vector<Double_t>& parameters )
          currentTemperature = fInitialTemperature;
       FillWithRandomValues( parameters ); 
    }
-   
+
    if (fUseDefaultScale) SetDefaultScale();
 
-   Log() << kINFO 
+   Log() << kINFO
            << "Temperatur scale = "      << fTemperatureScale  
            << ", current temperature = " << currentTemperature  << Endl;
 
diff --git a/tmva/src/TNeuron.cxx b/tmva/src/TNeuron.cxx
index fd09eb12af48404fc2014fc1ad2c5a5970f63eb5..c65d9d42d776be56f190a9ebdbb5feb8faa2e25e 100644
--- a/tmva/src/TNeuron.cxx
+++ b/tmva/src/TNeuron.cxx
@@ -51,12 +51,13 @@ using std::vector;
 
 ClassImp(TMVA::TNeuron)
 
+TMVA::MsgLogger* TMVA::TNeuron::fgLogger = 0;
+
 //______________________________________________________________________________
 TMVA::TNeuron::TNeuron()
-   : fLogger( 0 )
 {
    // standard constructor
-   fLogger = new MsgLogger(this, kDEBUG);
+   if (!fgLogger) fgLogger = new MsgLogger("TNeuron",kDEBUG);
    InitNeuron();
 }
 
@@ -65,7 +66,6 @@ TMVA::TNeuron::~TNeuron()
    // destructor
    if (fLinksIn != NULL)  delete fLinksIn;
    if (fLinksOut != NULL) delete fLinksOut;
-   delete fLogger;
 }
 
 void TMVA::TNeuron::InitNeuron()
diff --git a/tmva/src/TSynapse.cxx b/tmva/src/TSynapse.cxx
index dac456ade2cc83434ce5b42bd62690cf8dd3a2f8..da3c0a84646bcb867dab493f77e0e579cd115b99 100644
--- a/tmva/src/TSynapse.cxx
+++ b/tmva/src/TSynapse.cxx
@@ -40,6 +40,8 @@ static const Int_t fgUNINITIALIZED = -1;
 
 ClassImp(TMVA::TSynapse);
 
+TMVA::MsgLogger* TMVA::TSynapse::fgLogger = 0;
+
 //______________________________________________________________________________
 TMVA::TSynapse::TSynapse()
   : fWeight( 0 ),
@@ -48,11 +50,11 @@ TMVA::TSynapse::TSynapse()
     fDEDw( 0 ),
     fCount( 0 ),
     fPreNeuron( NULL ),
-    fPostNeuron( NULL ),
-    fLogger( new MsgLogger("TSynapse") )
+    fPostNeuron( NULL )
 {
    // constructor
    fWeight     = fgUNINITIALIZED;
+   if (!fgLogger) fgLogger = new MsgLogger("TSynapse");
 }
 
 
@@ -60,7 +62,6 @@ TMVA::TSynapse::TSynapse()
 TMVA::TSynapse::~TSynapse()
 {
    // destructor
-   delete fLogger;
 }
 
 //______________________________________________________________________________
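
Both `TNeuron` and `TSynapse` trade their per-instance `MsgLogger` for a lazily created class-wide static, saving one heap allocation (and one deletion) per object in networks with many neurons and synapses. The shape of the pattern, sketched with illustrative names:

```cpp
class MsgLogger { /* logging facility, details omitted */ };

class Node {
public:
   Node()  { if (!fgLogger) fgLogger = new MsgLogger(); } // created once, on first use
   ~Node() {} // the shared logger is intentionally not deleted per instance

private:
   static MsgLogger* fgLogger;   // one logger shared by all instances
};

MsgLogger* Node::fgLogger = 0;   // out-of-line definition, as in the .cxx above
```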
diff --git a/tmva/src/Tools.cxx b/tmva/src/Tools.cxx
index 626aab75f6be5f3baafd84c73ce0b5cac9e8be88..b1b2ace6ce3911259fb679e5a5a574453c5dec87 100644
--- a/tmva/src/Tools.cxx
+++ b/tmva/src/Tools.cxx
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$   
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -12,6 +12,7 @@
  *                                                                                *
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch> - CERN, Switzerland           *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
@@ -44,6 +45,7 @@
 #include "TTreeFormula.h"
 #include "TXMLEngine.h"
 #include "TROOT.h"
+#include "TMatrixDSymEigen.h"
 
 #ifndef ROOT_TMVA_Tools
 #include "TMVA/Tools.h"
@@ -77,11 +79,11 @@ TMVA::Tools::Tools() :
    fLogger(new MsgLogger("Tools")),
    fXMLEngine(new TXMLEngine())
 {
-   // constructor   
+   // constructor
 }
 
 //_______________________________________________________________________
-TMVA::Tools::~Tools() 
+TMVA::Tools::~Tools()
 {
    // destructor
    delete fLogger;
@@ -101,7 +103,7 @@ Double_t TMVA::Tools::GetSeparation( TH1* S, TH1* B ) const
    // compute "separation" defined as
    // <s2> = (1/2) Int_-oo..+oo { (S^2(x) - B^2(x))/(S(x) + B(x)) dx }
    Double_t separation = 0;
-   
+
    // sanity checks
    // signal and background histograms must have same number of bins and 
    // same limits
@@ -974,7 +976,7 @@ TString TMVA::Tools::GetYTitleWithUnit( const TH1& h, const TString& unit, Bool_
 }
 
 //_______________________________________________________________________
-void TMVA::Tools::WriteFloatArbitraryPrecision( Float_t val, ostream& os ) 
+void TMVA::Tools::WriteFloatArbitraryPrecision( Float_t val, ostream& os )
 {
    // writes a float value with the available precision to a stream
    os << val << " :: ";
@@ -988,7 +990,7 @@ void TMVA::Tools::WriteFloatArbitraryPrecision( Float_t val, ostream& os )
 }
 
 //_______________________________________________________________________
-void TMVA::Tools::ReadFloatArbitraryPrecision( Float_t& val, istream& is ) 
+void TMVA::Tools::ReadFloatArbitraryPrecision( Float_t& val, istream& is )
 {
    // reads a float value with the available precision from a stream
    Float_t a = 0;
@@ -1005,14 +1007,44 @@ void TMVA::Tools::ReadFloatArbitraryPrecision( Float_t& val, istream& is )
    val = a;
 }
 
+
+// XML file reading/writing helper functions
+
+//_______________________________________________________________________
+Bool_t TMVA::Tools::HasAttr( void* node, const char* attrname )
+{
+   // check whether the xml node has the given attribute
+   return xmlengine().HasAttr(node, attrname);
+}
+
+//_______________________________________________________________________
+void TMVA::Tools::ReadAttr( void* node, const char* attrname, TString& value )
+{
+   // read an attribute from the xml node into a TString
+   const char* val = xmlengine().GetAttr(node, attrname);
+   value = TString(val);
+}
+
+//_______________________________________________________________________
+void TMVA::Tools::AddAttr( void* node, const char* attrname, const char* value )
+{
+   // add attribute to node
+   gTools().xmlengine().NewAttr(node, 0, attrname, value );
+}
+
 //_______________________________________________________________________
 void* TMVA::Tools::AddChild( void* parent, const char* childname, const char* content ) {
    return gTools().xmlengine().NewChild(parent, 0, childname, content);
 }
 
+//_______________________________________________________________________
+Bool_t TMVA::Tools::AddComment( void* node, const char* comment ) {
+   return gTools().xmlengine().AddComment(node, comment);
+}
+
 
 //_______________________________________________________________________
-void* TMVA::Tools::GetChild( void* parent, const char* childname ) 
+void* TMVA::Tools::GetChild( void* parent, const char* childname )
 {
    void* ch = xmlengine().GetChild(parent);
    if (childname != 0) {
@@ -1022,7 +1054,7 @@ void* TMVA::Tools::GetChild( void* parent, const char* childname )
 }
 
 //_______________________________________________________________________
-void* TMVA::Tools::GetNextChild( void* prevchild, const char* childname ) 
+void* TMVA::Tools::GetNextChild( void* prevchild, const char* childname )
 {
    // XML helpers
    void* ch = xmlengine().GetNext(prevchild);
@@ -1033,12 +1065,26 @@ void* TMVA::Tools::GetNextChild( void* prevchild, const char* childname )
 }
 
 //_______________________________________________________________________
-const char* TMVA::Tools::GetContent( void* node ) 
+const char* TMVA::Tools::GetContent( void* node )
 {
    // XML helpers
    return xmlengine().GetNodeContent(node);
 }
 
+//_______________________________________________________________________
+const char* TMVA::Tools::GetName( void* node )
+{
+   // XML helpers
+   return xmlengine().GetNodeName(node);
+}
+
+//_______________________________________________________________________
+Bool_t TMVA::Tools::AddRawLine( void* node, const char * raw )
+{
+   // XML helpers
+   return xmlengine().AddRawLine( node, raw );
+}
+
 //_______________________________________________________________________
 std::vector<TString> TMVA::Tools::SplitString(const TString& theOpt, const char separator ) const
 {
@@ -1052,7 +1098,7 @@ std::vector<TString> TMVA::Tools::SplitString(const TString& theOpt, const char
       if ( !splitOpt.Contains(separator) ) {
          splitV.push_back(splitOpt);
          break;
-      } 
+      }
       else {
          TString toSave = splitOpt(0,splitOpt.First(separator));
          splitV.push_back(toSave);
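
With the helpers added above, client code walks weight-file XML exclusively through `TMVA::Tools`, keeping `TXMLEngine` an implementation detail behind `xmlengine()`. A hedged usage sketch of the wrapper style the rest of this patch converts to (node handles are opaque `void*`, as in the real API):

```cpp
#include "TMVA/Tools.h"

// Count the children of an XML node using only the Tools wrappers.
int CountChildren(void* parent)
{
   int n = 0;
   void* ch = TMVA::gTools().GetChild(parent);        // first child (any name)
   while (ch) {
      const char* name = TMVA::gTools().GetName(ch);  // node name, e.g. for dispatch
      (void)name;                                     // unused in this sketch
      ++n;
      ch = TMVA::gTools().GetNextChild(ch);           // next sibling
   }
   return n;
}
```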
diff --git a/tmva/src/TransformationHandler.cxx b/tmva/src/TransformationHandler.cxx
index dcf936621312f6e1c8ea87351f04a6502081d025..6b75b628b4ad122a1dbfe856588a4851803710b8 100644
--- a/tmva/src/TransformationHandler.cxx
+++ b/tmva/src/TransformationHandler.cxx
@@ -35,7 +35,6 @@
 #include "TH2.h"
 #include "TAxis.h"
 #include "TProfile.h"
-#include "TXMLEngine.h"
 
 #ifndef ROOT_TMVA_Config
 #include "TMVA/Config.h"
@@ -816,7 +815,7 @@ void TMVA::TransformationHandler::AddXMLTo( void* parent ) const
    // XML node describing the transformation
    //   return;
    if(!parent) return;
-   void* trfs = gTools().xmlengine().NewChild(parent, 0, "Transformations");
+   void* trfs = gTools().AddChild(parent, "Transformations");
    gTools().AddAttr( trfs, "NTransformations", fTransformations.GetSize() );
    TListIter trIt(&fTransformations);
    while (VariableTransformBase *trf = (VariableTransformBase*) trIt()) trf->AttachXMLTo(trfs);
@@ -834,7 +833,7 @@ void TMVA::TransformationHandler::ReadFromStream( std::istream& )
 //_______________________________________________________________________
 void TMVA::TransformationHandler::ReadFromXML( void* trfsnode ) 
 {
-   void* ch = gTools().xmlengine().GetChild( trfsnode );
+   void* ch = gTools().GetChild( trfsnode );
    while(ch) {
       Int_t idxCls = -1;
       TString trfname;
@@ -860,7 +859,7 @@ void TMVA::TransformationHandler::ReadFromXML( void* trfsnode )
       }
       newtrf->ReadFromXML( ch );
       AddTransformation( newtrf, idxCls );
-      ch = gTools().xmlengine().GetNext(ch);      
+      ch = gTools().GetNextChild(ch);      
    }
 }
 
diff --git a/tmva/src/Types.cxx b/tmva/src/Types.cxx
index f9a9f6953bfeff5e19a83f2e1f19004c2294cfa0..6d8d060b38bde2300fd90e67501e84c0ef193490 100644
--- a/tmva/src/Types.cxx
+++ b/tmva/src/Types.cxx
@@ -46,6 +46,20 @@ TMVA::Types::~Types()
    delete fLogger;
 }
 
+//_______________________________________________________________________
+TMVA::Types& TMVA::Types::Instance() 
+{ 
+   // return the single instance of "Types" if it exists already, or create it (Singleton)
+   return fgTypesPtr ? *fgTypesPtr : *(fgTypesPtr = new Types()); 
+}
+//_______________________________________________________________________
+void   TMVA::Types::DestroyInstance() 
+{ 
+   // "destructor" of the single instance
+   if (fgTypesPtr != 0) { delete fgTypesPtr; fgTypesPtr = 0; } 
+}
+
+
 //_______________________________________________________________________
 Bool_t TMVA::Types::AddTypeMapping( Types::EMVA method, const TString& methodname ) 
 {
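
`Instance()` is a create-on-first-use singleton; `DestroyInstance()` gives callers an explicit teardown hook, after which a later `Instance()` call would transparently re-create the object. An illustrative standalone version of the mechanism:

```cpp
// Sketch of the singleton added above (reimplemented for illustration).
class Types {
public:
   static Types& Instance()
   {
      return fgTypesPtr ? *fgTypesPtr : *(fgTypesPtr = new Types());
   }
   static void DestroyInstance()
   {
      if (fgTypesPtr != 0) { delete fgTypesPtr; fgTypesPtr = 0; }
   }

private:
   Types() {}                 // constructible only via Instance()
   static Types* fgTypesPtr;  // the single instance, created lazily
};

Types* Types::fgTypesPtr = 0;
```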
diff --git a/tmva/src/VariableDecorrTransform.cxx b/tmva/src/VariableDecorrTransform.cxx
index 37c34d71d17bb9c5ceefd3238dd9d658ff1a7867..1b64730c3d9dd66a3e1d132e5bca009406980736 100644
--- a/tmva/src/VariableDecorrTransform.cxx
+++ b/tmva/src/VariableDecorrTransform.cxx
@@ -32,7 +32,6 @@
 #include "TVectorD.h"
 #include "TMatrixD.h"
 #include "TMatrixDBase.h"
-#include "TXMLEngine.h"
 
 #ifndef ROOT_TMVA_MsgLogger
 #include "TMVA/MsgLogger.h"
@@ -180,7 +179,6 @@ const TMVA::Event* TMVA::VariableDecorrTransform::Transform( const TMVA::Event*
    fTransformedEvent->SetWeight     ( ev->GetWeight() );
    fTransformedEvent->SetBoostWeight( ev->GetBoostWeight() );
    fTransformedEvent->SetClass      ( ev->GetClass() );
-   fTransformedEvent->SetSignalClass( ev->GetSignalClass() );
    return fTransformedEvent;
 }
 
@@ -386,8 +384,8 @@ void TMVA::VariableDecorrTransform::WriteTransformationToStream( std::ostream& o
 void TMVA::VariableDecorrTransform::AttachXMLTo(void* parent) 
 {
    // node attachment to parent
-   void* trf = gTools().xmlengine().NewChild(parent, 0, "Transform");
-   gTools().xmlengine().NewAttr(trf,0,"Name", "Decorrelation");
+   void* trf = gTools().AddChild(parent, "Transform");
+   gTools().AddAttr(trf,"Name", "Decorrelation");
 
    for (std::vector<TMatrixD*>::const_iterator itm = fDecorrMatrices.begin(); itm != fDecorrMatrices.end(); itm++) {
       TMatrixD* mat = (*itm);
@@ -416,13 +414,13 @@ void TMVA::VariableDecorrTransform::ReadFromXML( void* trfnode )
       if( (*it) != 0 ) delete (*it);
    fDecorrMatrices.clear();
 
-   void* ch = gTools().xmlengine().GetChild(trfnode);
+   void* ch = gTools().GetChild(trfnode);
    while(ch!=0) {
       Int_t nrows, ncols;
       gTools().ReadAttr(ch, "Rows", nrows);
       gTools().ReadAttr(ch, "Columns", ncols);
       TMatrixD* mat = new TMatrixD(nrows,ncols);
-      const char* content = gTools().xmlengine().GetNodeContent(ch);
+      const char* content = gTools().GetContent(ch);
       std::stringstream s(content);
       for (Int_t row = 0; row<nrows; row++) {
          for (Int_t col = 0; col<ncols; col++) {
@@ -430,7 +428,7 @@ void TMVA::VariableDecorrTransform::ReadFromXML( void* trfnode )
          }
       }
       fDecorrMatrices.push_back(mat);
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
    SetCreated();
 }
diff --git a/tmva/src/VariableGaussTransform.cxx b/tmva/src/VariableGaussTransform.cxx
index 223f61c9067bf0c0408646fdf3a308a60eef5861..8d1452174edcedb1ab5d93c269035a447fafea64 100644
--- a/tmva/src/VariableGaussTransform.cxx
+++ b/tmva/src/VariableGaussTransform.cxx
@@ -40,7 +40,6 @@
 #include "TVectorD.h"
 #include "TMath.h"
 #include "TCanvas.h"
-#include "TXMLEngine.h"
 
 #include "TMVA/VariableGaussTransform.h"
 #ifndef ROOT_TMVA_MsgLogger
@@ -163,7 +162,6 @@ const TMVA::Event* TMVA::VariableGaussTransform::Transform(const Event* const ev
    fTransformedEvent->SetWeight     ( ev->GetWeight() );
    fTransformedEvent->SetBoostWeight( ev->GetBoostWeight() );
    fTransformedEvent->SetClass      ( ev->GetClass() );
-   fTransformedEvent->SetSignalClass( ev->GetSignalClass() );
 
    return fTransformedEvent;
 }
@@ -411,12 +409,12 @@ void TMVA::VariableGaussTransform::CleanUpCumulativeArrays(TString opt) {
 //_______________________________________________________________________
 void TMVA::VariableGaussTransform::AttachXMLTo(void* parent) {
    // create XML description of Gauss transformation
-   void* trfxml = gTools().xmlengine().NewChild(parent, 0, "Transform");
+   void* trfxml = gTools().AddChild(parent, "Transform");
    gTools().AddAttr(trfxml, "Name",        "Gauss");
    gTools().AddAttr(trfxml, "FlatOrGauss", (fFlatNotGaussD?"Flat":"Gauss") );
 
    for (UInt_t ivar=0; ivar<GetNVariables(); ivar++) {
-      void* varxml = gTools().xmlengine().NewChild( trfxml, 0, "Variable");
+      void* varxml = gTools().AddChild( trfxml, "Variable");
       gTools().AddAttr( varxml, "Name",     Variables()[ivar].GetLabel() );
       gTools().AddAttr( varxml, "VarIndex", ivar );
          
@@ -424,7 +422,7 @@ void TMVA::VariableGaussTransform::AttachXMLTo(void* parent) {
          Log() << kFATAL << "Cumulative histograms for variable " << ivar << " don't exist, can't write it to weight file" << Endl;
       
       for (UInt_t icls=0; icls<fCumulativePDF[ivar].size(); icls++){
-         void* pdfxml = gTools().xmlengine().NewChild( varxml, 0, Form("CumulativePDF_cls%d",icls));
+         void* pdfxml = gTools().AddChild( varxml, Form("CumulativePDF_cls%d",icls));
          (fCumulativePDF[ivar][icls])->AddXMLTo(pdfxml);
       }
    }
@@ -440,7 +438,7 @@ void TMVA::VariableGaussTransform::ReadFromXML( void* trfnode ) {
    gTools().ReadAttr(trfnode, "FlatOrGauss", fFlatNotGaussD );
 
    // Read the cumulative distribution
-   void* varnode = gTools().xmlengine().GetChild( trfnode );
+   void* varnode = gTools().GetChild( trfnode );
 
    TString varname, histname, classname;
    UInt_t ivar;
@@ -448,19 +446,19 @@ void TMVA::VariableGaussTransform::ReadFromXML( void* trfnode ) {
       gTools().ReadAttr(varnode, "Name", varname);
       gTools().ReadAttr(varnode, "VarIndex", ivar);
       
-      void* clsnode = gTools().xmlengine().GetChild( varnode);
+      void* clsnode = gTools().GetChild( varnode);
 
       while(clsnode) {
-         void* pdfnode = gTools().xmlengine().GetChild( clsnode);
+         void* pdfnode = gTools().GetChild( clsnode);
          PDF* pdfToRead = new PDF(TString("tempName"),kFALSE);
          pdfToRead->ReadXML(pdfnode); // pdfnode
          // push_back PDF
          fCumulativePDF.resize( ivar+1 );
          fCumulativePDF[ivar].push_back(pdfToRead);
-         clsnode = gTools().xmlengine().GetNext(clsnode);
+         clsnode = gTools().GetNextChild(clsnode);
       }
       
-      varnode = gTools().xmlengine().GetNext(varnode);    
+      varnode = gTools().GetNextChild(varnode);    
    }
    SetCreated();
 }
diff --git a/tmva/src/VariableInfo.cxx b/tmva/src/VariableInfo.cxx
index 3b1cbb4379351681a8863244b5b1b37736b4e808..8b0096a3a52e58ae76585aa9cdfbe76755683917 100644
--- a/tmva/src/VariableInfo.cxx
+++ b/tmva/src/VariableInfo.cxx
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$   
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -16,9 +16,9 @@
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
  *      LAPP, Annecy, France                                                      *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
@@ -35,10 +35,10 @@
 #include "TMath.h"
 
 //_______________________________________________________________________
-TMVA::VariableInfo::VariableInfo( const TString& expression, const TString& title, const TString& unit, 
-                                  Int_t varCounter, 
+TMVA::VariableInfo::VariableInfo( const TString& expression, const TString& title, const TString& unit,
+                                  Int_t varCounter,
                                   char varType, void* external,
-                                  Double_t min, Double_t max, Bool_t normalized ) 
+                                  Double_t min, Double_t max, Bool_t normalized )
    : fExpression  ( expression ),
      fTitle       ( title ),
      fUnit        ( unit ),
@@ -166,17 +166,17 @@ void TMVA::VariableInfo::ReadFromStream( std::istream& istr )
 void TMVA::VariableInfo::AddToXML( void* varnode )
 {
    // write class to XML
-   gTools().xmlengine().NewAttr( varnode, 0, "Expression", GetExpression() );
-   gTools().xmlengine().NewAttr( varnode, 0, "Label",      GetLabel() );
-   gTools().xmlengine().NewAttr( varnode, 0, "Title",      GetTitle() );
-   gTools().xmlengine().NewAttr( varnode, 0, "Unit",       GetUnit() );
-   gTools().xmlengine().NewAttr( varnode, 0, "Internal",   GetInternalName() );
+   gTools().AddAttr( varnode, "Expression", GetExpression() );
+   gTools().AddAttr( varnode, "Label",      GetLabel() );
+   gTools().AddAttr( varnode, "Title",      GetTitle() );
+   gTools().AddAttr( varnode, "Unit",       GetUnit() );
+   gTools().AddAttr( varnode, "Internal",   GetInternalName() );
 
    TString typeStr(" ");
    typeStr[0] = GetVarType();
-   gTools().xmlengine().NewAttr( varnode, 0, "Type", typeStr );
-   gTools().xmlengine().NewAttr( varnode, 0, "Min", gTools().StringFromDouble(GetMin()) );
-   gTools().xmlengine().NewAttr( varnode, 0, "Max", gTools().StringFromDouble(GetMax()) );
+   gTools().AddAttr( varnode, "Type", typeStr );
+   gTools().AddAttr( varnode, "Min", gTools().StringFromDouble(GetMin()) );
+   gTools().AddAttr( varnode, "Max", gTools().StringFromDouble(GetMax()) );
 }
 
 //_______________________________________________________________________
diff --git a/tmva/src/VariableNormalizeTransform.cxx b/tmva/src/VariableNormalizeTransform.cxx
index c7a4f78b637688f848f319c94f43cdb9ba78df78..fe4aa71303da37bf9013d5147f5ddf620fef12c0 100644
--- a/tmva/src/VariableNormalizeTransform.cxx
+++ b/tmva/src/VariableNormalizeTransform.cxx
@@ -13,7 +13,7 @@
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker  <Andreas.Hocker@cern.ch>   - CERN, Switzerland           *
  *      Joerg Stelzer    <Joerg.Stelzer@cern.ch>    - CERN, Switzerland           *
- *      Peter Speckmayer <Peter:Speckmayer@cern.ch> - CERN, Switzerland           *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch> - CERN, Switzerland           *
  *      Helge Voss       <Helge.Voss@cern.ch>       - MPI-K Heidelberg, Germany   *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
@@ -27,12 +27,12 @@
 
 #include <iostream>
 #include <iomanip>
+#include <stdexcept>
 
 #include "TVectorF.h"
 #include "TVectorD.h"
 #include "TMatrixD.h"
 #include "TMatrixDBase.h"
-#include "TXMLEngine.h"
 
 #ifndef ROOT_TMVA_MsgLogger
 #include "TMVA/MsgLogger.h"
@@ -65,18 +65,18 @@ void TMVA::VariableNormalizeTransform::Initialize()
 {
    // initialization of the normalization transformation
 
-   UInt_t nvar = Variables().size();
-   UInt_t ntgts = Targets().size();
+   UInt_t inputSize = fGet.size();
+
    Int_t numC = GetNClasses()+1;
    if (GetNClasses() <= 1 ) numC = 1;
 
    fMin.resize( numC ); 
    fMax.resize( numC ); 
    for (Int_t i=0; i<numC; i++) {
-      fMin.at(i).resize(nvar+ntgts);
-      fMax.at(i).resize(nvar+ntgts);
-      fMin.at(i).assign(nvar+ntgts, 0);
-      fMax.at(i).assign(nvar+ntgts, 0);
+      fMin.at(i).resize(inputSize);
+      fMax.at(i).resize(inputSize);
+      fMin.at(i).assign(inputSize, 0);
+      fMax.at(i).assign(inputSize, 0);
    }
 }
 
@@ -101,7 +101,7 @@ Bool_t TMVA::VariableNormalizeTransform::PrepareTransformation( const std::vecto
 const TMVA::Event* TMVA::VariableNormalizeTransform::Transform( const TMVA::Event* const ev, Int_t cls ) const
 {
 
-   // apply the decorrelation transformation
+   // apply the normalization transformation
    if (!IsCreated()) Log() << kFATAL << "Transformation not yet created" << Endl;
 
    // if cls (the class chosen by the user) not existing, 
@@ -110,40 +110,36 @@ const TMVA::Event* TMVA::VariableNormalizeTransform::Transform( const TMVA::Even
       if (GetNClasses() > 1 ) cls = GetNClasses();
       else cls = (fMin.size()==1?0:2);
    }
-   const UInt_t nvars = GetNVariables();
-   const UInt_t ntgts = ev->GetNTargets();
-   if (nvars != ev->GetNVariables()) {
-      Log() << kFATAL << "Transformation defined for a different number of variables " << GetNVariables() 
-            << "  " << ev->GetNVariables() << Endl;
-   }
+
+   FloatVector input; // will be filled with the selected variables, targets, (spectators)
+   FloatVector output; // will be filled with the selected variables, targets, (spectators)
+   GetInput( ev, input );
 
    if (fTransformedEvent==0) fTransformedEvent = new Event();
 
    Float_t min,max;
-   for (Int_t ivar=nvars-1; ivar>=0; ivar--) {
-      min = fMin.at(cls).at(ivar); 
-      max = fMax.at(cls).at(ivar);
-      Float_t offset = min;
-      Float_t scale  = 1.0/(max-min);
 
-      Float_t valnorm = (ev->GetValue(ivar)-offset)*scale * 2 - 1;
-      fTransformedEvent->SetVal(ivar,valnorm);  
-   }
-   for (Int_t itgt=ntgts-1; itgt>=0; itgt--) {
-      min = fMin.at(cls).at(nvars+itgt); 
-      max = fMax.at(cls).at(nvars+itgt);
+   const FloatVector& minVector = fMin.at(cls); 
+   const FloatVector& maxVector = fMax.at(cls);
+   
+   UInt_t iidx = 0;          
+   for ( std::vector<Float_t>::iterator itInp = input.begin(), itInpEnd = input.end(); itInp != itInpEnd; ++itInp) { // loop over input variables
+      Float_t val = (*itInp);
+
+      min = minVector.at(iidx); 
+      max = maxVector.at(iidx);
+
       Float_t offset = min;
       Float_t scale  = 1.0/(max-min);
 
-      Float_t original = ev->GetTarget(itgt);
-      Float_t valnorm = (original-offset)*scale * 2 - 1;
-      fTransformedEvent->SetTarget(itgt,valnorm);
+      Float_t valnorm = (val-offset)*scale * 2 - 1;
+      output.push_back( valnorm );
+
+      ++iidx;
    }
-   
-   fTransformedEvent->SetWeight     ( ev->GetWeight() );
-   fTransformedEvent->SetBoostWeight( ev->GetBoostWeight() );
-   fTransformedEvent->SetClass      ( ev->GetClass() );
-   fTransformedEvent->SetSignalClass( ev->GetSignalClass() );
+
+   SetOutput( fTransformedEvent, output, ev );
+
    return fTransformedEvent;
 }
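
The rewritten `Transform`/`InverseTransform` pair applies the same affine map to every selected input: the forward map sends `[min,max]` to `[-1,+1]`, and the inverse undoes it exactly. A standalone check of the arithmetic, mirroring the expressions in the hunks (illustrative, not TMVA code):

```cpp
#include <cassert>

float Normalize(float val, float min, float max)
{
   float scale = 1.0f / (max - min);
   return (val - min) * scale * 2 - 1;        // min -> -1, max -> +1
}

float Denormalize(float valnorm, float min, float max)
{
   float scale = 1.0f / (max - min);
   return min + (valnorm + 1) / (scale * 2);  // exact inverse of the above
}

int main()
{
   float back = Denormalize(Normalize(7.5f, 2.0f, 12.0f), 2.0f, 12.0f);
   assert(back > 7.499f && back < 7.501f);    // round-trips up to rounding
   return 0;
}
```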
 
@@ -154,43 +150,41 @@ const TMVA::Event* TMVA::VariableNormalizeTransform::InverseTransform( const TMV
    if (!IsCreated()) Log() << kFATAL << "Transformation not yet created" << Endl;
 
    // if cls (the class chosen by the user) not existing, 
-   // assume that user wants to have the matrix for all classes together. 
+   // assume that user wants to have the transformation for all classes together. 
    if (cls < 0 || cls > GetNClasses()) {
       if (GetNClasses() > 1 ) cls = GetNClasses();
       else cls = 0;
    }
 
-   const UInt_t nvars = GetNVariables();
-   const UInt_t ntgts = GetNTargets();
-   if (nvars != ev->GetNVariables()) {
-      Log() << kFATAL << "Transformation defined for a different number of variables " << GetNVariables() << "  " << ev->GetNVariables() 
-            << Endl;
-   }
+   FloatVector input;  // will be filled with the selected variables, targets, (spectators)
+   FloatVector output; // will be filled with the output
+   GetInput( ev, input );
 
    if (fBackTransformedEvent==0) fBackTransformedEvent = new Event( *ev );
 
    Float_t min,max;
-   for (Int_t ivar=nvars-1; ivar>=0; ivar--) {
-      min = fMin.at(cls).at(ivar); 
-      max = fMax.at(cls).at(ivar);
-      Float_t offset = min;
-      Float_t scale  = 1.0/(max-min);
 
-      Float_t valnorm = offset+((ev->GetValue(ivar)+1)/(scale * 2));
-      fBackTransformedEvent->SetVal(ivar,valnorm);
-   }
+   const FloatVector& minVector = fMin.at(cls); 
+   const FloatVector& maxVector = fMax.at(cls);
+   
+   UInt_t iidx = 0;          
+   for ( std::vector<Float_t>::iterator itInp = input.begin(), itInpEnd = input.end(); itInp != itInpEnd; ++itInp) { // loop over input variables
+      Float_t val = (*itInp);
+
+      min = minVector.at(iidx); 
+      max = maxVector.at(iidx);
 
-   for (Int_t itgt=ntgts-1; itgt>=0; itgt--) {
-      min = fMin.at(cls).at(nvars+itgt); 
-      max = fMax.at(cls).at(nvars+itgt);
       Float_t offset = min;
       Float_t scale  = 1.0/(max-min);
 
-      Float_t original = ev->GetTarget(itgt);
-      Float_t valnorm = offset+((original+1.0)/(scale * 2));
-      fBackTransformedEvent->SetTarget(itgt,valnorm);
+      Float_t valnorm = offset+((val+1)/(scale * 2));
+      output.push_back( valnorm );
+
+      ++iidx;
    }
 
+   SetOutput( fBackTransformedEvent, output, ev );
+
    return fBackTransformedEvent;
 }
 
@@ -200,49 +194,57 @@ void TMVA::VariableNormalizeTransform::CalcNormalizationParams( const std::vecto
    // compute offset and scale from min and max
    if (events.size() <= 1) 
       Log() << kFATAL << "Not enough events (found " << events.size() << ") to calculate the normalization" << Endl;
-   
-   UInt_t nvars = GetNVariables();
-   UInt_t ntgts = GetNTargets();
 
-   Int_t numC = GetNClasses()+1;
-   if (GetNClasses() <= 1 ) numC = 1;
+   FloatVector input; // will be filled with the selected variables, targets, (spectators)
+
+   UInt_t inputSize = fGet.size(); // number of input variables
 
-   for (UInt_t ivar=0; ivar<nvars+ntgts; ivar++) {
+   const UInt_t nCls = GetNClasses();
+   Int_t numC = nCls+1;   // prepare the min and max values for each of the classes and additionally for all classes (if more than one)
+   Int_t all = nCls; // at idx the min and max values for "all" classes are stored
+   if (nCls <= 1 ) {
+      numC = 1;
+      all = 0;
+   }
+
+   for (UInt_t iinp=0; iinp<inputSize; ++iinp) {
       for (Int_t ic = 0; ic < numC; ic++) {
-         fMin.at(ic).at(ivar) = FLT_MAX;
-         fMax.at(ic).at(ivar) = -FLT_MAX;
+         fMin.at(ic).at(iinp) = FLT_MAX;
+         fMax.at(ic).at(iinp) = -FLT_MAX;
       }
    }
 
-   const Int_t all = GetNClasses();
    std::vector<Event*>::const_iterator evIt = events.begin();
-   for (;evIt!=events.end();evIt++) {
-      for (UInt_t ivar=0; ivar<nvars; ivar++) {
-         Float_t val = (*evIt)->GetValue(ivar);
-         UInt_t cls = (*evIt)->GetClass();
+   for (;evIt!=events.end();evIt++) { // loop over all events
+      TMVA::Event* event = (*evIt);   // get the event
 
-         if (fMin.at(cls).at(ivar) > val) fMin.at(cls).at(ivar) = val;
-         if (fMax.at(cls).at(ivar) < val) fMax.at(cls).at(ivar) = val;
+      UInt_t cls = (*evIt)->GetClass(); // get the class of this event
+      
+      FloatVector& minVector = fMin.at(cls); 
+      FloatVector& maxVector = fMax.at(cls);
 
-         if (GetNClasses() != 1) {
-            if (fMin.at(all).at(ivar) > val) fMin.at(all).at(ivar) = val;
-            if (fMax.at(all).at(ivar) < val) fMax.at(all).at(ivar) = val;
-         }
-      }
-      for (UInt_t itgt=0; itgt<ntgts; itgt++) {
-         Float_t val = (*evIt)->GetTarget(itgt);
-         UInt_t cls = (*evIt)->GetClass();
+      FloatVector& minVectorAll = fMin.at(all);
+      FloatVector& maxVectorAll = fMax.at(all);
 
-         if (fMin.at(cls).at(nvars+itgt) > val) fMin.at(cls).at(nvars+itgt) = val;
-         if (fMax.at(cls).at(nvars+itgt) < val) fMax.at(cls).at(nvars+itgt) = val;
+      GetInput(event,input);    // select the input variables for the transformation and get them from the event
+      UInt_t iidx = 0;          
+      for ( std::vector<Float_t>::iterator itInp = input.begin(), itInpEnd = input.end(); itInp != itInpEnd; ++itInp) { // loop over input variables
+         Float_t val = (*itInp);
 
-         if (GetNClasses() != 1) {
-            if (fMin.at(all).at(nvars+itgt) > val) fMin.at(all).at(nvars+itgt) = val;
-            if (fMax.at(all).at(nvars+itgt) < val) fMax.at(all).at(nvars+itgt) = val;
+	 if( minVector.at(iidx) > val ) minVector.at(iidx) = val;
+	 if( maxVector.at(iidx) < val ) maxVector.at(iidx) = val;
+
+	 if (nCls != 1) { // in case more than one class exists, compute min and max as well for all classes together
+            if (minVectorAll.at(iidx) > val) minVectorAll.at(iidx) = val;
+            if (maxVectorAll.at(iidx) < val) maxVectorAll.at(iidx) = val;
          }
+
+	 ++iidx;
       }
    }
 
+   PrintTransformation( std::cout );
+
    return;
 }
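
`CalcNormalizationParams` now keeps one `(min,max)` pair per selected input and per class, with an extra slot at index `nCls` accumulating the range over all classes whenever more than one class exists. The accumulation step, sketched in isolation with assumed simple types:

```cpp
#include <vector>

// Update per-class and combined min/max for one event's input vector.
// 'all' is the extra slot (index nCls), used only when nCls > 1.
void Accumulate(std::vector<std::vector<float> >& mins,
                std::vector<std::vector<float> >& maxs,
                unsigned int cls, unsigned int all, unsigned int nCls,
                const std::vector<float>& input)
{
   for (unsigned int i = 0; i < input.size(); ++i) {
      const float val = input[i];
      if (mins[cls][i] > val) mins[cls][i] = val;
      if (maxs[cls][i] < val) maxs[cls][i] = val;
      if (nCls != 1) {                          // keep the combined range too
         if (mins[all][i] > val) mins[all][i] = val;
         if (maxs[all][i] < val) maxs[all][i] = val;
      }
   }
}
```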
 
@@ -255,21 +257,43 @@ std::vector<TString>* TMVA::VariableNormalizeTransform::GetTransformationStrings
    // have the matrix for all classes together. 
    if (cls < 0 || cls > GetNClasses()) cls = GetNClasses();
 
-   const UInt_t nvar = GetNVariables();
-   std::vector<TString>* strVec = new std::vector<TString>(nvar);
-
    Float_t min, max;
-   for (Int_t ivar=nvar-1; ivar>=0; ivar--) {
-      min = fMin.at(cls).at(ivar); 
-      max = fMax.at(cls).at(ivar);
+
+   const UInt_t size = fGet.size();
+   std::vector<TString>* strVec = new std::vector<TString>(size);
+
+   UInt_t iinp = 0;
+   for( ItVarTypeIdxConst itGet = fGet.begin(), itGetEnd = fGet.end(); itGet != itGetEnd; ++itGet ) {
+      min = fMin.at(cls).at(iinp);
+      max = fMax.at(cls).at(iinp);
+
+      Char_t type = (*itGet).first;
+      UInt_t idx  = (*itGet).second;
+
       Float_t offset = min;
       Float_t scale  = 1.0/(max-min);      
       TString str("");
-      if (offset < 0) str = Form( "2*%g*([%s] + %g) - 1", scale, Variables()[ivar].GetLabel().Data(), -offset );
-      else            str = Form( "2*%g*([%s] - %g) - 1", scale, Variables()[ivar].GetLabel().Data(),  offset );
-      (*strVec)[ivar] = str;
+      VariableInfo& varInfo = (type=='v'?fDsi.GetVariableInfo(idx):(type=='t'?fDsi.GetTargetInfo(idx):fDsi.GetSpectatorInfo(idx)));
+
+      if (offset < 0) str = Form( "2*%g*([%s] + %g) - 1", scale, varInfo.GetLabel().Data(), -offset );
+      else            str = Form( "2*%g*([%s] - %g) - 1", scale, varInfo.GetLabel().Data(),  offset );
+      (*strVec)[iinp] = str;
+
+      ++iinp;
    }
 
+//    Float_t min, max;
+//    for (Int_t ivar=nvar-1; ivar>=0; ivar--) {
+//       min = fMin.at(cls).at(ivar); 
+//       max = fMax.at(cls).at(ivar);
+//       Float_t offset = min;
+//       Float_t scale  = 1.0/(max-min);      
+//       TString str("");
+//       if (offset < 0) str = Form( "2*%g*([%s] + %g) - 1", scale, Variables()[ivar].GetLabel().Data(), -offset );
+//       else            str = Form( "2*%g*([%s] - %g) - 1", scale, Variables()[ivar].GetLabel().Data(),  offset );
+//       (*strVec)[ivar] = str;
+//    }
+
    return strVec;
 }
 
@@ -301,31 +325,25 @@ void TMVA::VariableNormalizeTransform::WriteTransformationToStream( std::ostream
 void TMVA::VariableNormalizeTransform::AttachXMLTo(void* parent) 
 {
    // create XML description of Normalize transformation
+   void* trfxml = gTools().AddChild(parent, "Transform");
+   gTools().AddAttr(trfxml, "Name", "Normalize");
+
+   VariableTransformBase::AttachXMLTo( trfxml );
+
    Int_t numC = (GetNClasses()<= 1)?1:GetNClasses()+1;
-   UInt_t nvars = GetNVariables();
-   UInt_t ntgts = GetNTargets();
 
-   void* trfxml = gTools().xmlengine().NewChild(parent, 0, "Transform");
-   gTools().AddAttr(trfxml, "Name", "Normalize");
-   gTools().AddAttr(trfxml, "NVariables", nvars);
-   gTools().AddAttr(trfxml, "NTargets",   ntgts);
 
    for( Int_t icls=0; icls<numC; icls++ ) {
-      void* clsxml = gTools().xmlengine().NewChild(trfxml, 0, "Class");
+      void* clsxml = gTools().AddChild(trfxml, "Class");
       gTools().AddAttr(clsxml, "ClassIndex", icls);
-      void* varsxml = gTools().xmlengine().NewChild(clsxml, 0, "Variables");
-      for (UInt_t ivar=0; ivar<nvars; ivar++) {
-         void* varxml = gTools().xmlengine().NewChild(varsxml, 0, "Variable");
-         gTools().AddAttr(varxml, "VarIndex", ivar);
-         gTools().AddAttr(varxml, "Min",      fMin.at(icls).at(ivar) );
-         gTools().AddAttr(varxml, "Max",      fMax.at(icls).at(ivar) );
-      }
-      void* tgtsxml = gTools().xmlengine().NewChild(clsxml, 0, "Targets");
-      for (UInt_t itgt=0; itgt<ntgts; itgt++) {
-         void* tgtxml = gTools().xmlengine().NewChild(tgtsxml, 0, "Target");
-         gTools().AddAttr(tgtxml, "TargetIndex", itgt);
-         gTools().AddAttr(tgtxml, "Min",         fMin.at(icls).at(nvars+itgt) );
-         gTools().AddAttr(tgtxml, "Max",         fMax.at(icls).at(nvars+itgt) );
+      void* inpxml = gTools().AddChild(clsxml, "Ranges");
+      UInt_t iinp = 0;
+      for( ItVarTypeIdx itGet = fGet.begin(), itGetEnd = fGet.end(); itGet != itGetEnd; ++itGet ) {
+         void* mmxml = gTools().AddChild(inpxml, "Range");
+         gTools().AddAttr(mmxml, "Index", iinp);
+         gTools().AddAttr(mmxml, "Min", fMin.at(icls).at(iinp) );
+         gTools().AddAttr(mmxml, "Max", fMax.at(icls).at(iinp) );
+	 ++iinp;
       }
    }
 }
@@ -334,12 +352,73 @@ void TMVA::VariableNormalizeTransform::AttachXMLTo(void* parent)
 void TMVA::VariableNormalizeTransform::ReadFromXML( void* trfnode ) 
 {
    // Read the transformation matrices from the xml node
+
+   Bool_t newFormat = kFALSE;
+
+   void* inpnode = NULL;
+   try{
+      inpnode = gTools().GetChild(trfnode, "Input"); // new xml format
+      newFormat = kTRUE;
+   }catch( std::logic_error& excpt ){
+      newFormat = kFALSE; // old xml format
+   }
+   if( newFormat ){
+      // ------------- new format --------------------
+      // read input
+      VariableTransformBase::ReadFromXML( inpnode );
+
+      // read transformation information
+      
+      UInt_t size = fGet.size();
+      UInt_t classindex, idx;
+
+      void* ch = gTools().GetChild( trfnode );
+      while(ch) {
+         // only <Class> nodes carry range information; skip the <Input> block read above
+         if( TString(gTools().GetName(ch)) != "Class" ) {
+            ch = gTools().GetNextChild( ch );
+            continue;
+         }
+
+         Int_t ci = 0;
+         gTools().ReadAttr(ch, "ClassIndex", ci);
+         classindex = UInt_t(ci);
+
+         fMin.resize(classindex+1);
+         fMax.resize(classindex+1);
+
+         fMin[classindex].resize(size,Float_t(0));
+         fMax[classindex].resize(size,Float_t(0));
+
+         void* clch = gTools().GetChild( ch );
+         while(clch) {
+            TString nodeName(gTools().GetName(clch));
+            if(nodeName=="Ranges") {
+               void* varch = gTools().GetChild( clch );
+               while(varch) {
+                  gTools().ReadAttr(varch, "Index", idx);
+                  gTools().ReadAttr(varch, "Min",   fMin[classindex][idx]);
+                  gTools().ReadAttr(varch, "Max",   fMax[classindex][idx]);
+                  varch = gTools().GetNextChild( varch );
+               }
+            }
+            clch = gTools().GetNextChild( clch );
+         }
+         ch = gTools().GetNextChild( ch );
+      }
+
+      SetCreated();
+      return;
+   }
+   
+   // ------------- old format --------------------
    UInt_t classindex, varindex, tgtindex, nvars, ntgts;
 
    gTools().ReadAttr(trfnode, "NVariables", nvars);
    gTools().ReadAttr(trfnode, "NTargets",   ntgts);
 
-   void* ch = gTools().xmlengine().GetChild( trfnode );
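+   // old format: reconstruct the input selection (fGet) as variables first, then targets, matching the nvars+itgt indexing used below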
+   for( UInt_t ivar = 0; ivar < nvars; ++ivar ){
+      fGet.push_back(std::make_pair<Char_t,UInt_t>('v',ivar));
+   }
+   for( UInt_t itgt = 0; itgt < ntgts; ++itgt ){
+      fGet.push_back(std::make_pair<Char_t,UInt_t>('t',itgt));
+   }
+
+   void* ch = gTools().GetChild( trfnode );
    while(ch) {
       gTools().ReadAttr(ch, "ClassIndex", classindex);
 
@@ -348,29 +427,29 @@ void TMVA::VariableNormalizeTransform::ReadFromXML( void* trfnode )
       fMin[classindex].resize(nvars+ntgts,Float_t(0));
       fMax[classindex].resize(nvars+ntgts,Float_t(0));
 
-      void* clch = gTools().xmlengine().GetChild( ch );
+      void* clch = gTools().GetChild( ch );
       while(clch) {
-         TString nodeName(gTools().xmlengine().GetNodeName(clch));
+         TString nodeName(gTools().GetName(clch));
          if(nodeName=="Variables") {
-            void* varch = gTools().xmlengine().GetChild( clch );
+            void* varch = gTools().GetChild( clch );
             while(varch) {
                gTools().ReadAttr(varch, "VarIndex", varindex);
                gTools().ReadAttr(varch, "Min",      fMin[classindex][varindex]);
                gTools().ReadAttr(varch, "Max",      fMax[classindex][varindex]);
-               varch = gTools().xmlengine().GetNext( varch );
+               varch = gTools().GetNextChild( varch );
             }
          } else if (nodeName=="Targets") {
-            void* tgtch = gTools().xmlengine().GetChild( clch );
+            void* tgtch = gTools().GetChild( clch );
             while(tgtch) {
                gTools().ReadAttr(tgtch, "TargetIndex", tgtindex);
                gTools().ReadAttr(tgtch, "Min",      fMin[classindex][nvars+tgtindex]);
                gTools().ReadAttr(tgtch, "Max",      fMax[classindex][nvars+tgtindex]);
-               tgtch = gTools().xmlengine().GetNext( tgtch );
+               tgtch = gTools().GetNextChild( tgtch );
             }
          }
-         clch = gTools().xmlengine().GetNext( clch );
+         clch = gTools().GetNextChild( clch );
       }
-      ch = gTools().xmlengine().GetNext( ch );
+      ch = gTools().GetNextChild( ch );
    }
    SetCreated();
 }
@@ -378,7 +457,7 @@ void TMVA::VariableNormalizeTransform::ReadFromXML( void* trfnode )
 //_______________________________________________________________________
 void
 TMVA::VariableNormalizeTransform::BuildTransformationFromVarInfo( const std::vector<TMVA::VariableInfo>& var ) {
-   // this method is only used when building a normalization transformation 
+   // this method is only used when building a normalization transformation
    // from old text files
    // in this case regression didn't exist and there were no targets
 
@@ -389,8 +468,8 @@ TMVA::VariableNormalizeTransform::BuildTransformationFromVarInfo( const std::vec
             << " since the number of variables disagree" << Endl;
 
    UInt_t numC = (GetNClasses()<=1)?1:GetNClasses()+1;
-   fMin.clear();fMin.resize( numC ); 
-   fMax.clear();fMax.resize( numC ); 
+   fMin.clear();fMin.resize( numC );
+   fMax.clear();fMax.resize( numC );
 
 
    for(UInt_t cls=0; cls<numC; ++cls) {
@@ -412,6 +491,14 @@ void TMVA::VariableNormalizeTransform::ReadTransformationFromStream( std::istrea
 
    UInt_t nvars = GetNVariables();
    UInt_t ntgts = GetNTargets();
+
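+   // reconstruct the input selection: variables first, then targets (the old text format contains no spectators)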
+   for( UInt_t ivar = 0; ivar < nvars; ++ivar ){
+      fGet.push_back(std::make_pair<Char_t,UInt_t>('v',ivar));
+   }
+   for( UInt_t itgt = 0; itgt < ntgts; ++itgt ){
+      fGet.push_back(std::make_pair<Char_t,UInt_t>('t',itgt));
+   }
+
    char buf[512];
    char buf2[512];
    istr.getline(buf,512);
@@ -443,23 +530,26 @@ void TMVA::VariableNormalizeTransform::ReadTransformationFromStream( std::istrea
 }
 
 //_______________________________________________________________________
-void TMVA::VariableNormalizeTransform::PrintTransformation( ostream& o ) 
+void TMVA::VariableNormalizeTransform::PrintTransformation( ostream& ) 
 {
    // prints the transformation ranges
 
    Int_t numC = GetNClasses()+1;
    if (GetNClasses() <= 1 ) numC = 1;
 
-   UInt_t nvars = GetNVariables();
-   UInt_t ntgts = GetNTargets();
    for (Int_t icls = 0; icls < numC; icls++ ) {
-      Log() << kINFO << "Transformation for class " << icls << " based on these ranges:" << Endl;
-      Log() << kINFO << "Variables:" << Endl;
-      for (UInt_t ivar=0; ivar<nvars; ivar++)
-         o << std::setw(20) << fMin[icls][ivar] << std::setw(20) << fMax[icls][ivar] << std::endl;
-      Log() << kINFO << "Targets:" << Endl;
-      for (UInt_t itgt=0; itgt<ntgts; itgt++)
-         o << std::setw(20) << fMin[icls][nvars+itgt] << std::setw(20) << fMax[icls][nvars+itgt] << std::endl;
+      Log() << "Transformation for class " << icls << " based on these ranges:" << Endl;
+      
+      UInt_t iinp = 0;
+      for( ItVarTypeIdxConst itGet = fGet.begin(), itGetEnd = fGet.end(); itGet != itGetEnd; ++itGet ){
+         Char_t type = (*itGet).first;
+
+         TString typeString = (type=='v'?"Variable : ": (type=='t'?"Target   : ":"Spectator: ") );
+         // ranges are stored per input position (iinp), not per type-local index
+         Log() << typeString.Data() << std::setw(20) << fMin[icls][iinp] << std::setw(20) << fMax[icls][iinp] << Endl;
+
+         ++iinp;
+      }
    }
 }
 
@@ -468,12 +558,12 @@ void TMVA::VariableNormalizeTransform::MakeFunction( std::ostream& fout, const T
                                                      Int_t part, UInt_t trCounter, Int_t ) 
 {
    // creates a normalizing function
-   // TODO include target-transformation into makefunction
+
    UInt_t numC = fMin.size();
    if (part==1) {
       fout << std::endl;
-      fout << "   double fMin_"<<trCounter<<"["<<numC<<"]["<<GetNVariables()<<"];" << std::endl;
-      fout << "   double fMax_"<<trCounter<<"["<<numC<<"]["<<GetNVariables()<<"];" << std::endl;
+      fout << "   double fMin_"<<trCounter<<"["<<numC<<"]["<<fGet.size()<<"];" << std::endl;
+      fout << "   double fMax_"<<trCounter<<"["<<numC<<"]["<<fGet.size()<<"];" << std::endl;
    }
 
    if (part==2) {
@@ -481,6 +571,7 @@ void TMVA::VariableNormalizeTransform::MakeFunction( std::ostream& fout, const T
       fout << "//_______________________________________________________________________" << std::endl;
       fout << "inline void " << fcncName << "::InitTransform_"<<trCounter<<"()" << std::endl;
       fout << "{" << std::endl;
+      
       for (UInt_t ivar=0; ivar<GetNVariables(); ivar++) {
          Float_t min = FLT_MAX;
          Float_t max = -FLT_MAX;
diff --git a/tmva/src/VariablePCATransform.cxx b/tmva/src/VariablePCATransform.cxx
index 0384abab033d912602aa3f4cfc1fb995770a115e..a2ea4977d29b2d1870a0d1a800be758b4ab60177 100644
--- a/tmva/src/VariablePCATransform.cxx
+++ b/tmva/src/VariablePCATransform.cxx
@@ -12,6 +12,7 @@
  *                                                                                *
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch> - CERN, Switzerland           *
  *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *                                                                                *
@@ -31,7 +32,6 @@
 #include "TVectorD.h"
 #include "TMatrixD.h"
 #include "TMatrixDBase.h"
-#include "TXMLEngine.h"
 
 #include "TMVA/VariablePCATransform.h"
 
@@ -80,15 +80,17 @@ Bool_t TMVA::VariablePCATransform::PrepareTransformation( const std::vector<Even
 
    Log() << kINFO << "Preparing the Principle Component (PCA) transformation..." << Endl;
 
-   SetNVariables(events[0]->GetNVariables());
+   UInt_t inputSize = fGet.size();
+
+   SetNVariables(inputSize);
 
    // TPrincipal doesn't support PCA transformation for 1 or less variables
-   if (GetNVariables() <= 1) {
+   if (inputSize <= 1) {
       Log() << kINFO << "Cannot perform PCA transformation for " << GetNVariables() << " variable only" << Endl;
       return kFALSE;
    }
 
-   if (GetNVariables() > 200) { 
+   if (inputSize > 200) { 
       Log() << kINFO << "----------------------------------------------------------------------------" 
             << Endl;
       Log() << kINFO 
@@ -111,40 +113,41 @@ const TMVA::Event* TMVA::VariablePCATransform::Transform( const Event* const ev,
    // apply the principal component analysis
    if (!IsCreated()) return 0;
 
-   const Int_t nvar = ev->GetNVariables();
+   const Int_t inputSize = fGet.size();
+   const UInt_t nCls = GetNClasses();
+   //UInt_t evCls = ev->GetClass();
+
    // if we have more than one class, take the last PCA analysis where all classes are combined if 
    // the cls parameter is outside the defined classes
    // If there is only one class, then no extra class for all events of all classes has to be created
-   if (cls < 0 || cls > GetNClasses()) cls = (fMeanValues.size()==1?0:2);//( GetNClasses() == 1 ? 0 : 1 );  ;
+   if (cls < 0 || UInt_t(cls) > nCls) cls = (fMeanValues.size()==1?0:2);
    // Perform PCA and put it into PCAed events tree
 
-   if (fTransformedEvent==0 || fTransformedEvent->GetNVariables()!=ev->GetNVariables()) {
-      if(fTransformedEvent!=0) delete fTransformedEvent;
+   if (fTransformedEvent==0 ) {
       fTransformedEvent = new Event();
    }
 
+   std::vector<Float_t> input;
+   std::vector<Float_t> principalComponents;
+
    // set the variable values
    const std::vector<UInt_t>* varArrange = ev->GetVariableArrangement();
    if(!varArrange) {
-      std::vector<Float_t> rv = X2P( ev->GetValues(), cls );
-      for (Int_t ivar=0; ivar<nvar; ++ivar)
-         fTransformedEvent->SetVal(ivar, rv[ivar]);
+
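+      // fetch the selected inputs, transform, and write the principal components back into the event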
+      GetInput( ev, input );
+      X2P( principalComponents, input, cls );
+      SetOutput( fTransformedEvent, principalComponents, ev );
+
    } else {
-      std::vector<Float_t> rv(nvar);
-      for (Int_t ivar=0; ivar<nvar; ++ivar)
+      // TODO: check what has to be done here
+      std::vector<Float_t> rv(inputSize);
+      for (Int_t ivar=0; ivar<inputSize; ++ivar)
          rv[ivar] = ev->GetValue(ivar);
-      rv = X2P( rv, cls );
-      for (Int_t ivar=0; ivar<nvar; ++ivar)
-         fTransformedEvent->SetVal(ivar, rv[ivar]);
+      X2P( principalComponents, rv, cls );
+      for (Int_t ivar=0; ivar<inputSize; ++ivar)
+         fTransformedEvent->SetVal(ivar, principalComponents[ivar]);
    }
-   // set the targets
-   for (UInt_t itgt=0; itgt<ev->GetNTargets(); itgt++) 
-      fTransformedEvent->SetTarget( itgt, ev->GetTarget(itgt) );
-   // and the rest
-   fTransformedEvent->SetWeight     ( ev->GetWeight() );
-   fTransformedEvent->SetBoostWeight( ev->GetBoostWeight() );
-   fTransformedEvent->SetClass      ( ev->GetClass() );
-   fTransformedEvent->SetSignalClass( ev->GetSignalClass() );
+
    return fTransformedEvent;
 }
 
@@ -153,28 +156,43 @@ const TMVA::Event* TMVA::VariablePCATransform::InverseTransform( const Event* co
 {
    // apply the principal component analysis
    // TODO: implementation of inverse transformation
-   Log() << kFATAL << "Inverse transformation for PCA transformation not yet implemented. Hence, this transformation cannot be applied together with regression. Please contact the authors if necessary." << Endl;
+//    Log() << kFATAL << "Inverse transformation for PCA transformation not yet implemented. Hence, this transformation cannot be applied together with regression. Please contact the authors if necessary." << Endl;
 
    if (!IsCreated()) return 0;
-   const Int_t nvar = ev->GetNVariables();
+
+
+   const Int_t inputSize = fGet.size();
+   const UInt_t nCls = GetNClasses();
+   //UInt_t evCls = ev->GetClass();
 
    // if we have more than one class, take the last PCA analysis where all classes are combined if 
    // the cls parameter is outside the defined classes
    // If there is only one class, then no extra class for all events of all classes has to be created
-   if (cls < 0 || cls > GetNClasses()) cls = ( GetNClasses() == 1 ? 0 : 1 );  
+   if (cls < 0 || UInt_t(cls) > nCls) cls = (fMeanValues.size()==1?0:2);
+   // perform the back-transformation from the principal components and put the result into the event
 
+   if (fBackTransformedEvent==0 ) fBackTransformedEvent = new Event();
 
-   // Perform PCA and put it into PCAed events tree
-   std::vector<Float_t> rv = X2P( ev->GetValues(), cls );
+   std::vector<Float_t> principalComponents;
+   std::vector<Float_t> output;
+
+   // set the variable values
+   const std::vector<UInt_t>* varArrange = ev->GetVariableArrangement();
+   if(!varArrange) {
 
-   if (fBackTransformedEvent==0 || fBackTransformedEvent->GetNVariables()!=ev->GetNVariables()) {
-      if(fBackTransformedEvent!=0) delete fBackTransformedEvent;
-      fBackTransformedEvent = new Event( *ev );
+      GetInput( ev, principalComponents );
+      P2X( output, principalComponents, cls );
+      SetOutput( fBackTransformedEvent, output, ev );
+
+   } else {
+      // TODO: check what has to be done here
+      std::vector<Float_t> rv(inputSize);
+      for (Int_t ivar=0; ivar<inputSize; ++ivar)
+         rv[ivar] = ev->GetValue(ivar);
+      P2X( output, rv, cls );
+      for (Int_t ivar=0; ivar<inputSize; ++ivar)
+         fBackTransformedEvent->SetVal(ivar, output[ivar]);
    }
-   for (Int_t ivar=0; ivar<nvar; ivar++) fBackTransformedEvent->SetVal(ivar, rv[ivar]);
-   fBackTransformedEvent->SetClass      ( ev->GetClass() );
-   fBackTransformedEvent->SetWeight     ( ev->GetWeight() );
-   fBackTransformedEvent->SetBoostWeight( ev->GetBoostWeight() );
 
    return fBackTransformedEvent;
 }
@@ -185,25 +203,45 @@ void TMVA::VariablePCATransform::CalculatePrincipalComponents( const std::vector
    // calculate the principal components for the signal and the background data
    // it uses the MakePrincipal method of ROOT's TPrincipal class
 
-   const Int_t nvar = GetNVariables();
+   UInt_t nvars = 0, ntgts = 0, nspcts = 0;
+   CountVariableTypes( nvars, ntgts, nspcts );
+   if( nvars>0  && ntgts>0 )
+      Log() << kFATAL << "Variables and targets cannot be mixed in PCA transformation." << Endl;
+
+   const Int_t inputSize = fGet.size();
 
    // if we have more than one class, add another PCA analysis which combines all classes
-   const UInt_t maxPCA = (GetNClasses()<=1) ? GetNClasses() : GetNClasses()+1;
+   const UInt_t nCls = GetNClasses();
+   const UInt_t maxPCA = (nCls<=1) ? nCls : nCls+1;
 
    // PCA [signal/background/class x/class y/... /all classes]
    std::vector<TPrincipal*> pca(maxPCA);
-   for (UInt_t i=0; i<maxPCA; i++) pca[i] = new TPrincipal(nvar,"");
+   for (UInt_t i=0; i<maxPCA; i++) pca[i] = new TPrincipal(inputSize,"");
 
-   // !! Not normalizing and not storing input data, for performance reasons. Should perhaps restore normalization.
+   // !! Not normalizing and not storing input data, for performance reasons. Should perhaps restore normalization. 
+   // But this can be done afterwards by adding a normalisation transformation (user defined)
 
    Long64_t ievt, entries = events.size();
-   Double_t *dvec = new Double_t[nvar];
+   Double_t *dvec = new Double_t[inputSize];
 
+
+   std::vector<Float_t> input;
    for (ievt=0; ievt<entries; ievt++) {
       Event* ev = events[ievt];
-      for (Int_t i = 0; i < nvar; i++) dvec[i] = (Double_t) ev->GetValue(i);
-      pca.at(ev->GetClass())->AddRow( dvec );
-      if (GetNClasses() > 1) pca.at(maxPCA-1)->AddRow( dvec );
+      UInt_t cls = ev->GetClass();
+
+      GetInput( ev, input );
+
+      UInt_t iinp = 0;
+      for( std::vector<Float_t>::iterator itInp = input.begin(), itInpEnd = input.end(); itInp != itInpEnd; ++itInp )
+      {
+         dvec[iinp] = (Double_t)(*itInp);
+         ++iinp;
+      }
+
+      pca.at(cls)->AddRow( dvec );
+      if (nCls > 1) pca.at(maxPCA-1)->AddRow( dvec );
    }
 
    // delete possible leftovers
@@ -225,22 +263,39 @@ void TMVA::VariablePCATransform::CalculatePrincipalComponents( const std::vector
 }
 
 //_______________________________________________________________________
-std::vector<Float_t> TMVA::VariablePCATransform::X2P( const std::vector<Float_t>& x, Int_t cls ) const
+void TMVA::VariablePCATransform::X2P( std::vector<Float_t>& pc, const std::vector<Float_t>& x, Int_t cls ) const
 {
    // Calculate the principal components from the original data vector
-   // x, and return it in p (function extracted from TPrincipal::X2P)
-   // It's the users responsibility to make sure that both x and p are
-   // of the right size (i.e., memory must be allocated for p)
+   // x, and return them in pc (function extracted from TPrincipal::X2P)
+   // It's the user's responsibility to make sure that both x and pc are
+   // of the right size (i.e., memory must be allocated for pc)
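+   // computed as: pc_i = sum_j (x_j - mean_j) * E(j,i)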
-   const Int_t nvar = x.size();
-   std::vector<Float_t> p(nvar,0);
+   const Int_t nInput = x.size();
+   pc.assign(nInput,0);
 
-   for (Int_t i = 0; i < nvar; i++) {
+   for (Int_t i = 0; i < nInput; i++) {
       Double_t pv = 0;
-      for (Int_t j = 0; j < nvar; j++)
+      for (Int_t j = 0; j < nInput; j++)
          pv += (((Double_t)x.at(j)) - (*fMeanValues.at(cls))(j)) * (*fEigenVectors.at(cls))(j,i);
-      p[i] = pv;
+      pc[i] = pv;
+   }
+}
+
+//_______________________________________________________________________
+void TMVA::VariablePCATransform::P2X( std::vector<Float_t>& x, const std::vector<Float_t>& pc, Int_t cls ) const
+{
+   // Perform the back-transformation from the principal components
+   // pc, and return the result in x
+   // It's the user's responsibility to make sure that both x and pc are
+   // of the right size (i.e., memory must be allocated for x)
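+   // computed as: x_i = sum_j pc_j * E(i,j) + mean_i  (the inverse of X2P)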
+   const Int_t nInput = pc.size();
+   x.assign(nInput,0);
+
+   for (Int_t i = 0; i < nInput; i++) {
+      Double_t xv = 0;
+      for (Int_t j = 0; j < nInput; j++)
+         xv += ((Double_t)pc.at(j)) * (*fEigenVectors.at(cls))(i,j);
+      // the mean is added once per output component, not once per term of the sum
+      x[i] = xv + (*fMeanValues.at(cls))(i);
    }
-   return p;
 }
 
 //_______________________________________________________________________
@@ -277,12 +332,12 @@ void TMVA::VariablePCATransform::WriteTransformationToStream( std::ostream& o )
 void TMVA::VariablePCATransform::AttachXMLTo(void* parent) {
    // create XML description of PCA transformation
 
-   void* trfxml = gTools().xmlengine().NewChild(parent, 0, "Transform");
-   gTools().xmlengine().NewAttr(trfxml, 0, "Name", "PCA");
+   void* trfxml = gTools().AddChild(parent, "Transform");
+   gTools().AddAttr(trfxml, "Name", "PCA");
 
    // write mean values to stream
    for (UInt_t sbType=0; sbType<fMeanValues.size(); sbType++) {
-      void* meanxml = gTools().xmlengine().NewChild( trfxml, 0, "Statistics");
+      void* meanxml = gTools().AddChild( trfxml, "Statistics");
       const TVectorD* means = fMeanValues[sbType];
       gTools().AddAttr( meanxml, "Class",     (sbType==0 ? "Signal" :(sbType==1 ? "Background":"Combined")) );
       gTools().AddAttr( meanxml, "ClassIndex", sbType );
@@ -290,12 +345,12 @@ void TMVA::VariablePCATransform::AttachXMLTo(void* parent) {
       TString meansdef = "";
       for (Int_t row = 0; row<means->GetNrows(); row++)
          meansdef += gTools().StringFromDouble((*means)[row]) + " ";
-      gTools().xmlengine().AddRawLine( meanxml, meansdef );      
+      gTools().AddRawLine( meanxml, meansdef );      
    }
 
    // write eigenvectors to stream
    for (UInt_t sbType=0; sbType<fEigenVectors.size(); sbType++) {
-      void* evxml = gTools().xmlengine().NewChild( trfxml, 0, "Eigenvectors");
+      void* evxml = gTools().AddChild( trfxml, "Eigenvectors");
       const TMatrixD* mat = fEigenVectors[sbType];
       gTools().AddAttr( evxml, "Class",      (sbType==0 ? "Signal" :(sbType==1 ? "Background":"Combined") ) );
       gTools().AddAttr( evxml, "ClassIndex", sbType );
@@ -305,7 +360,7 @@ void TMVA::VariablePCATransform::AttachXMLTo(void* parent) {
       for (Int_t row = 0; row<mat->GetNrows(); row++)
          for (Int_t col = 0; col<mat->GetNcols(); col++)
             evdef += gTools().StringFromDouble((*mat)[row][col]) + " ";
-      gTools().xmlengine().AddRawLine( evxml, evdef );
+      gTools().AddRawLine( evxml, evdef );
    }
 }
 
@@ -319,9 +374,9 @@ void TMVA::VariablePCATransform::ReadFromXML( void* trfnode )
    TString classtype;
    TString nodeName;
 
-   void* ch = gTools().xmlengine().GetChild(trfnode);
+   void* ch = gTools().GetChild(trfnode);
    while (ch) {
-      nodeName = gTools().xmlengine().GetNodeName(ch);
+      nodeName = gTools().GetName(ch);
       if (nodeName == "Statistics") {
          // read mean values
          gTools().ReadAttr(ch, "Class",      classtype);
@@ -334,7 +389,7 @@ void TMVA::VariablePCATransform::ReadFromXML( void* trfnode )
          fMeanValues[clsIdx]->ResizeTo( nrows );
          
          // now read vector entries
-         std::stringstream s(gTools().xmlengine().GetNodeContent(ch));
+         std::stringstream s(gTools().GetContent(ch));
          for (Int_t row = 0; row<nrows; row++) s >> (*fMeanValues[clsIdx])(row);
       } 
       else if ( nodeName == "Eigenvectors" ) {
@@ -349,12 +404,12 @@ void TMVA::VariablePCATransform::ReadFromXML( void* trfnode )
          fEigenVectors[clsIdx]->ResizeTo( nrows, ncols );
 
          // now read matrix entries
-         std::stringstream s(gTools().xmlengine().GetNodeContent(ch));
+         std::stringstream s(gTools().GetContent(ch));
          for (Int_t row = 0; row<nrows; row++)
             for (Int_t col = 0; col<ncols; col++)
                s >> (*fEigenVectors[clsIdx])[row][col];
       } // done reading eigenvectors
-      ch = gTools().xmlengine().GetNext(ch);
+      ch = gTools().GetNextChild(ch);
    }
 
    SetCreated();
@@ -377,7 +432,7 @@ void TMVA::VariablePCATransform::ReadTransformationFromStream( std::istream& ist
    fMeanValues.resize(3);
    fEigenVectors.resize(3);
 
-   std::cout << "VariablePCATransform::ReadTransformationFromStream(): " << std::endl;
+   Log() << kINFO << "VariablePCATransform::ReadTransformationFromStream(): " << Endl;
 
    while (!(buf[0]=='#'&& buf[1]=='#')) { // if line starts with ## return
       char* p = buf;
diff --git a/tmva/src/VariableTransformBase.cxx b/tmva/src/VariableTransformBase.cxx
index 4d89ae1b18351f01f391d2c55ca94eadf032c5b2..d2b252319178f3e9a3e55d0422140c4260a2c2b7 100644
--- a/tmva/src/VariableTransformBase.cxx
+++ b/tmva/src/VariableTransformBase.cxx
@@ -1,5 +1,5 @@
 // @(#)root/tmva $Id$
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
+// Author: Andreas Hoecker, Peter Speckmayer, Joerg Stelzer, Helge Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -12,6 +12,7 @@
  *                                                                                *
  * Authors (alphabetical):                                                        *
  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
+ *      Peter Speckmayer <Peter.Speckmayer@cern.ch>  - CERN, Switzerland          *
  *      Joerg Stelzer   <Joerg.Stelzer@cern.ch>  - CERN, Switzerland              *
  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
  *                                                                                *
@@ -25,6 +26,9 @@
  **********************************************************************************/
 
 #include <iomanip>
+#include <algorithm>
+#include <exception>
+#include <stdexcept>
 
 #include "TMath.h"
 #include "TVectorD.h"
@@ -79,6 +83,214 @@ TMVA::VariableTransformBase::~VariableTransformBase()
    delete fLogger;
 }
 
+//_______________________________________________________________________
+void TMVA::VariableTransformBase::SelectInput( const TString& _inputVariables  )
+{
+   // select the variables/targets/spectators which serve as input to the transformation
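+   // The selection string is a comma-separated list. The keywords _V_, _T_ and _S_
+   // select all variables, targets or spectators, respectively; an index may be
+   // appended to pick a single entry (e.g. _V3_ for variable 3). Any other token
+   // is matched against the variable/target/spectator labels (e.g. "_V_,_T_", the default).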
+   TString inputVariables = _inputVariables;
+
+
+   // unselect all variables first
+   fGet.clear();       
+
+   UInt_t nvars  = GetNVariables();
+   UInt_t ntgts  = GetNTargets();
+   UInt_t nspcts = GetNSpectators();
+
+   if (inputVariables == "") // default is all variables and all targets 
+   {                         //   (the default can be changed by decorating this member function in the implementations)
+      inputVariables = "_V_,_T_";
+   }
+
+   TList* inList = gTools().ParseFormatLine( inputVariables, "," );
+   TListIter inIt(inList);
+   while (TObjString* os = (TObjString*)inIt()) {
+      TString variables = os->GetString();
+      
+      if( variables.BeginsWith("_") && variables.EndsWith("_") ) { // special symbol (keyword)
+         variables.Remove( 0,1 );                    // remove first "_"
+         variables.Remove( variables.Length()-1,1 ); // remove last "_"
+
+         if( variables.BeginsWith("V") ) {           // variables
+            variables.Remove(0,1); // remove "V"
+            if( variables.Length() == 0 ){           // "_V_" selects all variables
+               for( UInt_t ivar = 0; ivar < nvars; ++ivar ) {
+                  fGet.push_back( std::make_pair<Char_t,UInt_t>('v',ivar) );
+               }
+            } else {                                 // "_V<n>_" selects variable n
+               UInt_t idx = variables.Atoi();
+               if( idx >= nvars )
+                  Log() << kFATAL << "You selected variable with index " << idx << ", but only " << nvars << " variables are available." << Endl;
+               fGet.push_back( std::make_pair<Char_t,UInt_t>('v',idx) );
+            }
+         }else if( variables.BeginsWith("T") ) {     // targets
+            variables.Remove(0,1); // remove "T"
+            if( variables.Length() == 0 ){           // "_T_" selects all targets
+               for( UInt_t itgt = 0; itgt < ntgts; ++itgt ) {
+                  fGet.push_back( std::make_pair<Char_t,UInt_t>('t',itgt) );
+               }
+            } else {                                 // "_T<n>_" selects target n
+               UInt_t idx = variables.Atoi();
+               if( idx >= ntgts )
+                  Log() << kFATAL << "You selected target with index " << idx << ", but only " << ntgts << " targets are available." << Endl;
+               fGet.push_back( std::make_pair<Char_t,UInt_t>('t',idx) );
+            }
+         }else if( variables.BeginsWith("S") ) {     // spectators
+            variables.Remove(0,1); // remove "S"
+            if( variables.Length() == 0 ){           // "_S_" selects all spectators
+               for( UInt_t ispct = 0; ispct < nspcts; ++ispct ) {
+                  fGet.push_back( std::make_pair<Char_t,UInt_t>('s',ispct) );
+               }
+            } else {                                 // "_S<n>_" selects spectator n
+               UInt_t idx = variables.Atoi();
+               if( idx >= nspcts )
+                  Log() << kFATAL << "You selected spectator with index " << idx << ", but only " << nspcts << " spectators are available." << Endl;
+               fGet.push_back( std::make_pair<Char_t,UInt_t>('s',idx) );
+            }
+         }
+      }else{ // no keyword: match the token against the labels
+         for( UInt_t ivar = 0; ivar < nvars; ++ivar ) { // search all variables
+            if( fDsi.GetVariableInfo( ivar ).GetLabel() == variables ) {
+               fGet.push_back( std::make_pair<Char_t,UInt_t>('v',ivar) );
+               break;
+            }
+         }
+         for( UInt_t itgt = 0; itgt < ntgts; ++itgt ) { // search all targets
+            if( fDsi.GetTargetInfo( itgt ).GetLabel() == variables ) {
+               fGet.push_back( std::make_pair<Char_t,UInt_t>('t',itgt) );
+               break;
+            }
+         }
+         for( UInt_t ispct = 0; ispct < nspcts; ++ispct ) { // search all spectators
+            if( fDsi.GetSpectatorInfo( ispct ).GetLabel() == variables ) {
+               fGet.push_back( std::make_pair<Char_t,UInt_t>('s',ispct) );
+               break;
+            }
+         }
+      }
+
+   }
+
+   Log() << kINFO << "Transformation, Variable selection : " << Endl;
+
+   ItVarTypeIdx itGet = fGet.begin(), itGetEnd = fGet.end();
+   for( ; itGet != itGetEnd; ++itGet ) {
+      TString inputType  = "?";
+
+      Char_t type = (*itGet).first;
+      Int_t inputIdx  = (*itGet).second;
+
+      TString label = "NOT FOUND";
+      if( type == 'v' ) {
+	 label = fDsi.GetVariableInfo( inputIdx ).GetLabel();
+	 inputType = "variable";
+      }
+      else if( type == 't' ){
+	 label = fDsi.GetTargetInfo( inputIdx ).GetLabel();
+	 inputType = "target";
+      }
+      else if( type == 's' ){
+	 label = fDsi.GetSpectatorInfo( inputIdx ).GetLabel();
+	 inputType = "spectator";
+      }
+
+      Log() << kINFO << "Input : " << inputType.Data() << " '" << label.Data() << "' (index=" << inputIdx << ")." << Endl;
+   }
+
+}
+
+
+//_______________________________________________________________________
+void TMVA::VariableTransformBase::GetInput( const Event* event, std::vector<Float_t>& input ) const
+{
+   // select the values from the event
+   input.clear();
+   for( ItVarTypeIdxConst itEntry = fGet.begin(), itEntryEnd = fGet.end(); itEntry != itEntryEnd; ++itEntry ) {
+      Char_t type = (*itEntry).first;
+      Int_t  idx  = (*itEntry).second;
+
+      switch( type ) {
+      case 'v':
+         input.push_back( event->GetValue(idx) );
+         break;
+      case 't':
+         input.push_back( event->GetTarget(idx) );
+         break;
+      case 's':
+         input.push_back( event->GetSpectator(idx) );
+         break;
+      default:
+         Log() << kFATAL << "VariableTransformBase/GetInput : unknown type '" << type << "'." << Endl;
+      }
+   }
+}
+
+//_______________________________________________________________________
+void TMVA::VariableTransformBase::SetOutput( Event* event, std::vector<Float_t>& output, const Event* oldEvent ) const
+{
+   // write the transformed output values back into the event (untouched values are copied from oldEvent)
+   
+   std::vector<Float_t>::iterator itOutput = output.begin();
+
+   if( oldEvent )
+      event->CopyVarValues( *oldEvent );
+
+   try {
+
+      for( ItVarTypeIdxConst itEntry = fGet.begin(), itEntryEnd = fGet.end(); itEntry != itEntryEnd; ++itEntry ) {
+         Char_t type = (*itEntry).first;
+         Int_t  idx  = (*itEntry).second;
+
+         Float_t value = (*itOutput);
+
+         switch( type ) {
+         case 'v':
+            event->SetVal( idx, value );
+            break;
+         case 't':
+            event->SetTarget( idx, value );
+            break;
+         case 's':
+            event->SetSpectator( idx, value );
+            break;
+         default:
+            Log() << kFATAL << "VariableTransformBase/SetOutput : unknown type '" << type << "'." << Endl;
+         }
+         ++itOutput;
+      }
+   }catch( std::exception& except ){
+      Log() << kFATAL << "VariableTransformBase/SetOutput : exception/" << except.what() << Endl;
+      throw;
+   }
+}
+
+
+//_______________________________________________________________________
+void TMVA::VariableTransformBase::CountVariableTypes( UInt_t& nvars, UInt_t& ntgts, UInt_t& nspcts )
+{
+   // count variables, targets and spectators
+   nvars = ntgts = nspcts = 0;
+
+   for( ItVarTypeIdxConst itEntry = fGet.begin(), itEntryEnd = fGet.end(); itEntry != itEntryEnd; ++itEntry ) {
+      Char_t type = (*itEntry).first;
+
+      switch( type ) {
+      case 'v':
+         nvars++;
+         break;
+      case 't':
+         ntgts++;
+         break;
+      case 's':
+         nspcts++;
+         break;
+      default:
+         Log() << kFATAL << "VariableTransformBase/CountVariableTypes : unknown type '" << type << "'." << Endl;
+      }
+   }
+}
+
+
 //_______________________________________________________________________
 void TMVA::VariableTransformBase::CalcNorm( const std::vector<Event*>& events ) 
 {
@@ -127,15 +339,33 @@ void TMVA::VariableTransformBase::CalcNorm( const std::vector<Event*>& events )
       }
    }
 
+   if (sumOfWeights <= 0) {
+      Log() << kFATAL << " the sum of event weights calcualted for your input is == 0"
+            << " or exactly: " << sumOfWeights << " there is obviously some problem..."<< Endl;
+   } 
+
    // set Mean and RMS
    for (UInt_t ivar=0; ivar<nvars; ivar++) {
       Double_t mean = x0(ivar)/sumOfWeights;
+      
       Variables().at(ivar).SetMean( mean ); 
+      if (x2(ivar)/sumOfWeights - mean*mean < 0) {
+         Log() << kFATAL << " the RMS of your input variable " << ivar 
+               << " evaluates to an imaginary number: sqrt("<< x2(ivar)/sumOfWeights - mean*mean
+               <<") .. sometimes related to a problem with outliers and negative event weights"
+               << Endl;
+      }
       Variables().at(ivar).SetRMS( TMath::Sqrt( x2(ivar)/sumOfWeights - mean*mean) );
    }
    for (UInt_t itgt=0; itgt<ntgts; itgt++) {
       Double_t mean = x0(nvars+itgt)/sumOfWeights;
       Targets().at(itgt).SetMean( mean ); 
+      if (x2(nvars+itgt)/sumOfWeights - mean*mean < 0) {
+         Log() << kFATAL << " the RMS of your target variable " << itgt 
+               << " evaluates to an imaginary number: sqrt(" << x2(nvars+itgt)/sumOfWeights - mean*mean
+               <<") .. sometimes related to a problem with outliers and negative event weights"
+               << Endl;
+      }
       Targets().at(itgt).SetRMS( TMath::Sqrt( x2(nvars+itgt)/sumOfWeights - mean*mean) );
    }
 
@@ -179,3 +409,128 @@ void TMVA::VariableTransformBase::UpdateNorm ( Int_t ivar,  Double_t x )
    }
 }
 
+
+
+//_______________________________________________________________________
+void TMVA::VariableTransformBase::AttachXMLTo(void* parent) 
+{
+   // create XML description the transformation (write out info of selected variables)
+
+   void* inpxml = gTools().AddChild(parent, "Input");
+   gTools().AddAttr(inpxml, "NInputs", fGet.size() );
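+   // one <Input> child per selected entry, identified by its type (Variable/Target/Spectator) and label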
+
+   for( ItVarTypeIdx itGet = fGet.begin(), itGetEnd = fGet.end(); itGet != itGetEnd; ++itGet ) {
+      UInt_t idx  = (*itGet).second;
+      Char_t type = (*itGet).first;
+
+      TString label = "";
+      TString typeString = "";
+      switch( type ){
+      case 'v':
+         typeString = "Variable";
+         label = fDsi.GetVariableInfo( idx ).GetLabel();
+         break;
+      case 't':
+         typeString = "Target";
+         label = fDsi.GetTargetInfo( idx ).GetLabel();
+         break;
+      case 's':
+         typeString = "Spectator";
+         label = fDsi.GetSpectatorInfo( idx ).GetLabel();
+         break;
+      default:
+         Log() << kFATAL << "VariableTransformBase/AttachXMLTo unknown variable type '" << type << "'." << Endl;
+      }
+
+      void* idxxml = gTools().AddChild(inpxml, "Input");
+//      gTools().AddAttr(idxxml, "Index", idx);
+      gTools().AddAttr(idxxml, "Type",  typeString);
+      gTools().AddAttr(idxxml, "Label", label);
+   }
+}
+
+//_______________________________________________________________________
+void TMVA::VariableTransformBase::ReadFromXML( void* inpnode ) 
+{
+   // Read the input variables from the XML node
+   fGet.clear();       
+
+   UInt_t nvars  = GetNVariables();
+   UInt_t ntgts  = GetNTargets();
+   UInt_t nspcts = GetNSpectators();
+
+   UInt_t nInputs = 0;
+   gTools().ReadAttr(inpnode, "NInputs", nInputs);
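+   // entries are matched by type and label below; nInputs itself is not used further here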
+
+   void* ch = gTools().GetChild( inpnode );
+   while(ch) {
+      TString typeString = "";
+      TString label      = "";
+
+      gTools().ReadAttr(ch, "Type",  typeString);
+      gTools().ReadAttr(ch, "Label", label);
+   
+      if( typeString == "Variable"  ){
+	 for( UInt_t ivar = 0; ivar < nvars; ++ivar ) { // search all variables
+	    if( fDsi.GetVariableInfo( ivar ).GetLabel() == label ) {
+	       fGet.push_back( std::make_pair<Char_t,UInt_t>('v',ivar) );
+	       break;
+	    }
+	 }
+      }else if( typeString == "Target"    ){
+	 for( UInt_t itgt = 0; itgt < ntgts; ++itgt ) { // search all targets
+	    if( fDsi.GetTargetInfo( itgt ).GetLabel() == label ) {
+	       fGet.push_back( std::make_pair<Char_t,UInt_t>('t',itgt) );
+	       break;
+	    }
+	 }
+      }else if( typeString == "Spectator" ){
+	 for( UInt_t ispct = 0; ispct < nspcts; ++ispct ) { // search all spectators
+	    if( fDsi.GetSpectatorInfo( ispct ).GetLabel() == label ) {
+	       fGet.push_back( std::make_pair<Char_t,UInt_t>('s',ispct) );
+	       break;
+	    }
+	 }
+      }else{
+	 Log() << kFATAL << "VariableTransformationBase/ReadFromXML : unknown type '" << typeString << "'." << Endl;
+      }
+      ch = gTools().GetNextChild( ch );
+   }
+}
+
+
+//_______________________________________________________________________
+void TMVA::VariableTransformBase::MakeFunction( std::ostream& /*fout*/, const TString& /*fncName*/, Int_t part,
+						UInt_t /*trCounter*/, Int_t /*cls*/ )
+{
+   // getinput and setoutput equivalent
+   if( part == 0 ){ // getinput equivalent
+//       fout << std::endl;
+//       fout << "   std::vector<double> input; " << std::endl;
+//    // select the values from the event
+//    input.clear();
+//    for( ItVarTypeIdxConst itEntry = fGet.begin(), itEntryEnd = fGet.end(); itEntry != itEntryEnd; ++itEntry ) {
+//       Char_t type = (*itEntry).first;
+//       Int_t  idx  = (*itEntry).second;
+
+//       switch( type ) {
+//       case 'v':
+// 	 input.push_back( event->GetValue(idx) );
+// 	 break;
+//       case 't':
+// 	 input.push_back( event->GetTarget(idx) );
+// 	 break;
+//       case 's':
+// 	 input.push_back( event->GetSpectator(idx) );
+// 	 break;
+//       default:
+// 	 Log() << kFATAL << "VariableTransformBase/GetInput : unknown type '" << type << "'." << Endl;
+//       }
+//    }
+
+
+   }else if( part == 1){ // setoutput equivalent
+   }
+}
+
diff --git a/tmva/test/BDT_Reg.C b/tmva/test/BDT_Reg.C
index 50f5202a8cc2ba58b6c0d345540246b7c127d9ae..6af8e2f514f9559a4889618439bf4ab63a941288 100644
--- a/tmva/test/BDT_Reg.C
+++ b/tmva/test/BDT_Reg.C
@@ -403,10 +403,10 @@ void StatDialogBDT::DrawTree( Int_t itree )
    signalleaf->SetBorderSize(1);
    signalleaf->SetFillStyle(1);
    signalleaf->SetFillColor( kSigColorF );
-   signalleaf->AddText("Signal Leaf Nodes");
+   signalleaf->AddText("Leaf Nodes");
    signalleaf->SetTextColor( kSigColorT );
    signalleaf->Draw();
-
+/*
    ydown = ydown - ystep/2.5 -dy;
    yup   = yup - ystep/2.5 -dy;
    TPaveText *backgroundleaf = new TPaveText(0.02,ydown,0.15,yup, "NDC");
@@ -417,7 +417,7 @@ void StatDialogBDT::DrawTree( Int_t itree )
    backgroundleaf->AddText("Backgr. Leaf Nodes");
    backgroundleaf->SetTextColor( kBkgColorT );
    backgroundleaf->Draw();
-
+*/
    fCanvas->Update();
    TString fname = Form("plots/%s_%i", fMethName.Data(), itree );
    cout << "--- Creating image: " << fname << endl;
diff --git a/tmva/test/Boost.C b/tmva/test/Boost.C
index 5ba6161725bb291186f30998013e4888e39e4162..bf0ad2adbab1080e254df5e08ad785b725661a97 100644
--- a/tmva/test/Boost.C
+++ b/tmva/test/Boost.C
@@ -37,7 +37,7 @@ void Boost(){
       input = TFile::Open( fname );
    }
    else {
-      gROOT->LoadMacro( "../development/createData.C");
+      gROOT->LoadMacro( "./createData.C");
       create_circ(20000);
       cout << " created data.root with data and circle arranged in half circles"<<endl;
       input = TFile::Open( fname );
diff --git a/tmva/test/Boost2.C b/tmva/test/Boost2.C
index 65757d932c82e9aa7fe7269bfe1dba458a61b0a0..29afbfc26a6aae6ccd365003d63bd66e034463b6 100644
--- a/tmva/test/Boost2.C
+++ b/tmva/test/Boost2.C
@@ -37,7 +37,7 @@ void Boost2(){
       input = TFile::Open( fname );
    }
    else {
-      gROOT->LoadMacro( "../development/createData.C");
+      gROOT->LoadMacro( "./createData.C");
       create_fullcirc(20000);
       cout << " created circledata.root with data and circle arranged in circles"<<endl;
       input = TFile::Open( fname );
diff --git a/tmva/test/Makefile b/tmva/test/Makefile
index aab492f71c1de0560d980d99849b6e6d652890d5..38a1a8d15f97c024756198c7f78e49e50ab64731 100644
--- a/tmva/test/Makefile
+++ b/tmva/test/Makefile
@@ -18,7 +18,7 @@ PUBLISH = ../www
 
 include nightlyClassifiers.make
 
-all: $(BINS)
+all: $(TMVALIB) $(BINS)
 
 run: TMVA.root 
 
@@ -79,7 +79,7 @@ $(PUBLISH)/uptodate: install TMVApp.root ../test/CompareHistsTrainAndApplied.C
 
 
 
-$(BINS): % : %.cxx $(TMVALIB)
+$(BINS): % : %.cxx ../inc/*.h
 	@echo -n "Building $@ ... "
 	$(CXX) $(CCFLAGS) $< $(INCLUDE) $(LIBS) -g -o $@
 	@echo "Done"
diff --git a/tmva/test/TMVAClassification.C b/tmva/test/TMVAClassification.C
index 6e62a34b5dd479d9f022b38ec5c3df3b18c7d9a3..b862e6dd9e70428b9e40af8b5535e4c132460e17 100644
--- a/tmva/test/TMVAClassification.C
+++ b/tmva/test/TMVAClassification.C
@@ -24,7 +24,7 @@
  **********************************************************************************/
 
 #include <cstdlib>
-#include <iostream> 
+#include <iostream>
 #include <map>
 #include <string>
 
@@ -47,11 +47,11 @@
 
 // read input data file with ascii format (otherwise ROOT) ?
 Bool_t ReadDataFromAsciiIFormat = kFALSE;
-   
-void TMVAClassification( TString myMethodList = "" ) 
+
+void TMVAClassification( TString myMethodList = "" )
 {
    // The explicit loading of the shared libTMVA is done in TMVAlogon.C, defined in .rootrc
-   // if you use your private .rootrc, or run from a different directory, please copy the 
+   // if you use your private .rootrc, or run from a different directory, please copy the
    // corresponding lines from .rootrc
 
    // methods to be processed can be given as an argument; use format:
@@ -59,7 +59,7 @@ void TMVAClassification( TString myMethodList = "" )
    // mylinux~> root -l TMVAClassification.C\(\"myMethod1,myMethod2,myMethod3\"\)
    //
    // if you like to use a method via the plugin mechanism, we recommend using
-   // 
+   //
    // mylinux~> root -l TMVAClassification.C\(\"P_myMethod\"\)
    // (an example is given for using the BDT as plugin (see below),
    // but of course the real application is when you write your own
@@ -108,7 +108,7 @@ void TMVAClassification( TString myMethodList = "" )
    Use["MLP"]             = 1; // this is the recommended ANN
    Use["MLPBFGS"]         = 1; // recommended ANN with optional training method
    Use["CFMlpANN"]        = 1; // *** missing
-   Use["TMlpANN"]         = 1; 
+   Use["TMlpANN"]         = 1;
    // ---
    Use["SVM"]             = 1;
    // ---
@@ -151,15 +151,15 @@ void TMVAClassification( TString myMethodList = "" )
    // then run the performance analysis for you.
    //
    // The first argument is the base of the name of all the
-   // weightfiles in the directory weight/ 
+   // weightfiles in the directory weight/
    //
    // The second argument is the output file for the training results
-   // All TMVA output can be suppressed by removing the "!" (not) in 
+   // All TMVA output can be suppressed by removing the "!" (not) in
    // front of the "Silent" argument in the option string
-   TMVA::Factory *factory = new TMVA::Factory( "TMVAClassification", outputFile, 
-                                               "!V:!Silent:Color:DrawProgressBar:Transformations=I;D;P;G,D" );
+   TMVA::Factory *factory = new TMVA::Factory( "TMVAClassification", outputFile,
+                                               "!V:!Silent:Color:DrawProgressBar:Transformations=I;D;P;G,D:AnalysisType=Classification" );
 
-   // If you wish to modify default settings 
+   // If you wish to modify default settings
    // (please check "src/Config.h" to see all available global options)
    //    (TMVA::gConfig().GetVariablePlotting()).fTimesRMS = 8.0;
    //    (TMVA::gConfig().GetIONames()).fWeightFileDir = "myWeightDirectory";
@@ -172,8 +172,8 @@ void TMVAClassification( TString myMethodList = "" )
    factory->AddVariable( "var3",                "Variable 3", "units", 'F' );
    factory->AddVariable( "var4",                "Variable 4", "units", 'F' );
 
-   // You can add so-called "Spectator variables", which are not used in the MVA training, 
-   // but will appear in the final "TestTree" produced by TMVA. This TestTree will contain the 
+   // You can add so-called "Spectator variables", which are not used in the MVA training,
+   // but will appear in the final "TestTree" produced by TMVA. This TestTree will contain the
    // input variables, the response values of all trained MVAs, and the spectator variables
    factory->AddSpectator( "spec1:=var1*2",  "Spectator 1", "units", 'F' );
    factory->AddSpectator( "spec2:=var1*3",  "Spectator 2", "units", 'F' );
@@ -193,19 +193,13 @@ void TMVAClassification( TString myMethodList = "" )
    }
    else {
       // load the signal and background event samples from ROOT trees
-      TFile *input(0);
-      TString fname = "../macros/tmva_example.root";
-      if (!gSystem->AccessPathName( fname )) {
-         input = TFile::Open( fname ); // check if file in local directory exists
-      } 
-      else { 
-         input = TFile::Open( "http://root.cern.ch/files/tmva_class_example.root" ); // if not: download from ROOT server
-      }
+      TString fname = "./tmva_class_example.root";
+
+      if (gSystem->AccessPathName( fname ))  // file does not exist in local directory
+         gSystem->Exec("wget http://root.cern.ch/files/tmva_class_example.root");
+
+      TFile *input = TFile::Open( fname );
 
-      if (!input) {
-         std::cout << "ERROR: could not open data file" << std::endl;
-         exit(1);
-      }
       std::cout << "--- TMVAClassification       : Using input file: " << input->GetName() << std::endl;
 
       TTree *signal     = (TTree*)input->Get("TreeS");
@@ -226,11 +220,11 @@ void TMVAClassification( TString myMethodList = "" )
       //    factory->AddSignalTree( signalTrainingTree, signalTrainWeight, "Training" );
       //    factory->AddSignalTree( signalTestTree,     signalTestWeight,  "Test" );
 
-      // Use the following code instead of the above two or four lines to add signal and background 
+      // Use the following code instead of the above two or four lines to add signal and background
       // training and test events "by hand"
-      // NOTE that in this case one should not give expressions (such as "var1+var2") in the input 
+      // NOTE that in this case one should not give expressions (such as "var1+var2") in the input
       //      variable definition, but simply compute the expression before adding the event
-      // 
+      //
       //    // --- begin ----------------------------------------------------------
       //    std::vector<Double_t> vars( 4 ); // vector has size of number of input variables
       //    Float_t  treevars[4];
@@ -239,26 +233,26 @@ void TMVAClassification( TString myMethodList = "" )
       //       signal->GetEntry(i);
       //       for (Int_t ivar=0; ivar<4; ivar++) vars[ivar] = treevars[ivar];
       //       // add training and test events; here: first half is training, second is testing
-      //       // note that the weight can also be event-wise	
-      //       if (i < signal->GetEntries()/2) factory->AddSignalTrainingEvent( vars, signalWeight ); 
-      //       else                            factory->AddSignalTestEvent    ( vars, signalWeight ); 
+      //       // note that the weight can also be event-wise
+      //       if (i < signal->GetEntries()/2) factory->AddSignalTrainingEvent( vars, signalWeight );
+      //       else                            factory->AddSignalTestEvent    ( vars, signalWeight );
       //    }
       //
       //    for (Int_t ivar=0; ivar<4; ivar++) background->SetBranchAddress( Form( "var%i", ivar+1 ), &(treevars[ivar]) );
       //    for (Int_t i=0; i<background->GetEntries(); i++) {
-      //       background->GetEntry(i); 
+      //       background->GetEntry(i);
       //       for (Int_t ivar=0; ivar<4; ivar++) vars[ivar] = treevars[ivar];
       //       // add training and test events; here: first half is training, second is testing
-      //       // note that the weight can also be event-wise	
-      //       if (i < background->GetEntries()/2) factory->AddBackgroundTrainingEvent( vars, backgroundWeight ); 
-      //       else                                factory->AddBackgroundTestEvent    ( vars, backgroundWeight ); 
+      //       // note that the weight can also be event-wise
+      //       if (i < background->GetEntries()/2) factory->AddBackgroundTrainingEvent( vars, backgroundWeight );
+      //       else                                factory->AddBackgroundTestEvent    ( vars, backgroundWeight );
       //    }
       //    // --- end ------------------------------------------------------------
       //
       // ====== end of register trees ==============================================
    }
-   
-   // This would set individual event weights (the variables defined in the 
+
+   // This would set individual event weights (the variables defined in the
    // expression need to exist in the original TTree)
    //    for signal    : factory->SetSignalWeightExpression("weight1*weight2");
    //    for background: factory->SetBackgroundWeightExpression("weight1*weight2");
@@ -272,12 +266,12 @@ void TMVAClassification( TString myMethodList = "" )
    factory->PrepareTrainingAndTestTree( mycuts, mycutb,
                                         "nTrain_Signal=0:nTrain_Background=0:SplitMode=Random:NormMode=NumEvents:!V" );
 
-   // If no numbers of events are given, half of the events in the tree are used for training, and 
+   // If no numbers of events are given, half of the events in the tree are used for training, and
    // the other half for testing:
-   //    factory->PrepareTrainingAndTestTree( mycut, "SplitMode=random:!V" );  
+   //    factory->PrepareTrainingAndTestTree( mycut, "SplitMode=random:!V" );
    // To also specify the number of testing events, use:
-   //    factory->PrepareTrainingAndTestTree( mycut, 
-   //                                         "NSigTrain=3000:NBkgTrain=3000:NSigTest=3000:NBkgTest=3000:SplitMode=Random:!V" );  
+   //    factory->PrepareTrainingAndTestTree( mycut,
+   //                                         "NSigTrain=3000:NBkgTrain=3000:NSigTest=3000:NBkgTest=3000:SplitMode=Random:!V" );
 
    // ---- Book MVA methods
    //
@@ -288,83 +282,83 @@ void TMVAClassification( TString myMethodList = "" )
 
    // Cut optimisation
    if (Use["Cuts"])
-      factory->BookMethod( TMVA::Types::kCuts, "Cuts", 
+      factory->BookMethod( TMVA::Types::kCuts, "Cuts",
                            "!H:!V:FitMethod=MC:EffSel:SampleSize=200000:VarProp=FSmart" );
 
    if (Use["CutsD"])
-      factory->BookMethod( TMVA::Types::kCuts, "CutsD", 
+      factory->BookMethod( TMVA::Types::kCuts, "CutsD",
                            "!H:!V:FitMethod=MC:EffSel:SampleSize=200000:VarProp=FSmart:VarTransform=Decorrelate" );
 
    if (Use["CutsPCA"])
-      factory->BookMethod( TMVA::Types::kCuts, "CutsPCA", 
+      factory->BookMethod( TMVA::Types::kCuts, "CutsPCA",
                            "!H:!V:FitMethod=MC:EffSel:SampleSize=200000:VarProp=FSmart:VarTransform=PCA" );
 
    if (Use["CutsGA"])
       factory->BookMethod( TMVA::Types::kCuts, "CutsGA",
                            "H:!V:FitMethod=GA:CutRangeMin[0]=-10:CutRangeMax[0]=10:VarProp[1]=FMax:EffSel:Steps=30:Cycles=3:PopSize=400:SC_steps=10:SC_rate=5:SC_factor=0.95" );
-   
+
    if (Use["CutsSA"])
       factory->BookMethod( TMVA::Types::kCuts, "CutsSA",
                            "!H:!V:FitMethod=SA:EffSel:MaxCalls=150000:KernelTemp=IncAdaptive:InitialTemp=1e+6:MinTemp=1e-6:Eps=1e-10:UseDefaultScale" );
-   
+
    // Likelihood
    if (Use["Likelihood"])
-      factory->BookMethod( TMVA::Types::kLikelihood, "Likelihood", 
-                           "H:!V:!TransformOutput:PDFInterpol=Spline2:NSmoothSig[0]=20:NSmoothBkg[0]=20:NSmoothBkg[1]=10:NSmooth=1:NAvEvtPerBin=50" ); 
+      factory->BookMethod( TMVA::Types::kLikelihood, "Likelihood",
+                           "H:!V:!TransformOutput:PDFInterpol=Spline2:NSmoothSig[0]=20:NSmoothBkg[0]=20:NSmoothBkg[1]=10:NSmooth=1:NAvEvtPerBin=50" );
 
    // test the decorrelated likelihood
    if (Use["LikelihoodD"])
-      factory->BookMethod( TMVA::Types::kLikelihood, "LikelihoodD", 
-                           "!H:!V:!TransformOutput:PDFInterpol=Spline2:NSmoothSig[0]=20:NSmoothBkg[0]=20:NSmooth=5:NAvEvtPerBin=50:VarTransform=Decorrelate" ); 
+      factory->BookMethod( TMVA::Types::kLikelihood, "LikelihoodD",
+                           "!H:!V:!TransformOutput:PDFInterpol=Spline2:NSmoothSig[0]=20:NSmoothBkg[0]=20:NSmooth=5:NAvEvtPerBin=50:VarTransform=Decorrelate" );
 
    if (Use["LikelihoodPCA"])
-      factory->BookMethod( TMVA::Types::kLikelihood, "LikelihoodPCA", 
+      factory->BookMethod( TMVA::Types::kLikelihood, "LikelihoodPCA",
                            "!H:!V:!TransformOutput:PDFInterpol=Spline2:NSmoothSig[0]=20:NSmoothBkg[0]=20:NSmooth=5:NAvEvtPerBin=50:VarTransform=PCA" ); 
- 
+
    // test the new kernel density estimator
    if (Use["LikelihoodKDE"])
-      factory->BookMethod( TMVA::Types::kLikelihood, "LikelihoodKDE", 
+      factory->BookMethod( TMVA::Types::kLikelihood, "LikelihoodKDE",
                            "!H:!V:!TransformOutput:PDFInterpol=KDE:KDEtype=Gauss:KDEiter=Adaptive:KDEFineFactor=0.3:KDEborder=None:NAvEvtPerBin=50" ); 
 
    // test the mixed splines and kernel density estimator (depending on which variable)
    if (Use["LikelihoodMIX"])
-      factory->BookMethod( TMVA::Types::kLikelihood, "LikelihoodMIX", 
+      factory->BookMethod( TMVA::Types::kLikelihood, "LikelihoodMIX",
                            "!H:!V:!TransformOutput:PDFInterpolSig[0]=KDE:PDFInterpolBkg[0]=KDE:PDFInterpolSig[1]=KDE:PDFInterpolBkg[1]=KDE:PDFInterpolSig[2]=Spline2:PDFInterpolBkg[2]=Spline2:PDFInterpolSig[3]=Spline2:PDFInterpolBkg[3]=Spline2:KDEtype=Gauss:KDEiter=Nonadaptive:KDEborder=None:NAvEvtPerBin=50" ); 
 
    // test the multi-dimensional probability density estimator
    // here are the options strings for the MinMax and RMS methods, respectively:
-   //      "!H:!V:VolumeRangeMode=MinMax:DeltaFrac=0.2:KernelEstimator=Gauss:GaussSigma=0.3" );   
-   //      "!H:!V:VolumeRangeMode=RMS:DeltaFrac=3:KernelEstimator=Gauss:GaussSigma=0.3" );   
+   //      "!H:!V:VolumeRangeMode=MinMax:DeltaFrac=0.2:KernelEstimator=Gauss:GaussSigma=0.3" );
+   //      "!H:!V:VolumeRangeMode=RMS:DeltaFrac=3:KernelEstimator=Gauss:GaussSigma=0.3" );
    if (Use["PDERS"])
-      factory->BookMethod( TMVA::Types::kPDERS, "PDERS", 
+      factory->BookMethod( TMVA::Types::kPDERS, "PDERS",
                            "!H:!V:NormTree=T:VolumeRangeMode=Adaptive:KernelEstimator=Gauss:GaussSigma=0.3:NEventsMin=400:NEventsMax=600" );
 
    if (Use["PDERSkNN"])
-      factory->BookMethod( TMVA::Types::kPDERS, "PDERSkNN", 
+      factory->BookMethod( TMVA::Types::kPDERS, "PDERSkNN",
                            "!H:!V:VolumeRangeMode=kNN:KernelEstimator=Gauss:GaussSigma=0.3:NEventsMin=400:NEventsMax=600" );
 
    if (Use["PDERSD"])
-      factory->BookMethod( TMVA::Types::kPDERS, "PDERSD", 
+      factory->BookMethod( TMVA::Types::kPDERS, "PDERSD",
                            "!H:!V:VolumeRangeMode=Adaptive:KernelEstimator=Gauss:GaussSigma=0.3:NEventsMin=400:NEventsMax=600:VarTransform=Decorrelate" );
 
    if (Use["PDERSPCA"])
-      factory->BookMethod( TMVA::Types::kPDERS, "PDERSPCA", 
+      factory->BookMethod( TMVA::Types::kPDERS, "PDERSPCA",
                            "!H:!V:VolumeRangeMode=Adaptive:KernelEstimator=Gauss:GaussSigma=0.3:NEventsMin=400:NEventsMax=600:VarTransform=PCA" );
 
    // Multi-dimensional likelihood estimator using self-adapting phase-space binning
    if (Use["PDEFoam"])
-      factory->BookMethod( TMVA::Types::kPDEFoam, "PDEFoam", 
+      factory->BookMethod( TMVA::Types::kPDEFoam, "PDEFoam",
                            "H:!V:SigBgSeparate=F:TailCut=0.001:VolFrac=0.0333:nActiveCells=500:nSampl=2000:nBin=5:CutNmin=T:Nmin=100:Kernel=None:Compress=T" );
 
    // K-Nearest Neighbour classifier (KNN)
    if (Use["KNN"])
-      factory->BookMethod( TMVA::Types::kKNN, "KNN", 
+      factory->BookMethod( TMVA::Types::kKNN, "KNN",
                            "H:nkNN=20:ScaleFrac=0.8:SigmaFact=1.0:Kernel=Gaus:UseKernel=F:UseWeight=T:!Trim" );
    // H-Matrix (chi2-squared) method
    if (Use["HMatrix"])
-      factory->BookMethod( TMVA::Types::kHMatrix, "HMatrix", "!H:!V" ); 
+      factory->BookMethod( TMVA::Types::kHMatrix, "HMatrix", "!H:!V" );
 
-   // Fisher discriminant   
+   // Fisher discriminant
    if (Use["Fisher"])
       factory->BookMethod( TMVA::Types::kFisher, "Fisher", "H:!V:Fisher:CreateMVAPdfs:PDFInterpolMVAPdf=Spline2:NbinsMVAPdf=60:NsmoothMVAPdf=10" );
 
@@ -380,11 +374,11 @@ void TMVAClassification( TString myMethodList = "" )
    if (Use["LD"])
       factory->BookMethod( TMVA::Types::kLD, "LD", "H:!V:VarTransform=None" );
 
-	// Function discrimination analysis (FDA) -- test of various fitters - the recommended one is Minuit (or GA or SA)
+   // Function discrimination analysis (FDA) -- test of various fitters - the recommended one is Minuit (or GA or SA)
    if (Use["FDA_MC"])
       factory->BookMethod( TMVA::Types::kFDA, "FDA_MC",
                            "H:!V:Formula=(0)+(1)*x0+(2)*x1+(3)*x2+(4)*x3:ParRanges=(-1,1);(-10,10);(-10,10);(-10,10);(-10,10):FitMethod=MC:SampleSize=100000:Sigma=0.1" );
-   
+
    if (Use["FDA_GA"]) // can also use Simulated Annealing (SA) algorithm (see Cuts_SA options])
       factory->BookMethod( TMVA::Types::kFDA, "FDA_GA",
                            "H:!V:Formula=(0)+(1)*x0+(2)*x1+(3)*x2+(4)*x3:ParRanges=(-1,1);(-10,10);(-10,10);(-10,10);(-10,10):FitMethod=GA:PopSize=300:Cycles=3:Steps=20:Trim=True:SaveBestGen=1" );
@@ -407,7 +401,7 @@ void TMVAClassification( TString myMethodList = "" )
 
    // TMVA ANN: MLP (recommended ANN) -- all ANNs in TMVA are Multilayer Perceptrons
    if (Use["MLP"])
-      factory->BookMethod( TMVA::Types::kMLP, "MLP", "H:!V:NeuronType=tanh:VarTransform=N:NCycles=500:HiddenLayers=N+5:TestRate=10:EpochMonitoring" );
+      factory->BookMethod( TMVA::Types::kMLP, "MLP", "H:!V:NeuronType=tanh:VarTransform=N:NCycles=500:HiddenLayers=N+5:TestRate=10:EpochMonitoring:RandomSeed=1" );
 
    if (Use["MLPBFGS"])
       factory->BookMethod( TMVA::Types::kMLP, "MLPBFGS", "H:!V:NeuronType=tanh:VarTransform=N:NCycles=500:HiddenLayers=N+5:TestRate=10:TrainingMethod=BFGS:!EpochMonitoring" );
@@ -416,37 +410,37 @@ void TMVAClassification( TString myMethodList = "" )
    // CF(Clermont-Ferrand)ANN
    if (Use["CFMlpANN"])
       factory->BookMethod( TMVA::Types::kCFMlpANN, "CFMlpANN", "!H:!V:NCycles=2000:HiddenLayers=N+1,N"  ); // n_cycles:#nodes:#nodes:...  
-  
+
    // Tmlp(Root)ANN
    if (Use["TMlpANN"])
       factory->BookMethod( TMVA::Types::kTMlpANN, "TMlpANN", "!H:!V:NCycles=200:HiddenLayers=N+1,N:LearningMethod=BFGS:ValidationFraction=0.3"  ); // n_cycles:#nodes:#nodes:...
-  
+
    // Support Vector Machine
    if (Use["SVM"])
       factory->BookMethod( TMVA::Types::kSVM, "SVM", "Gamma=0.25:Tol=0.001:VarTransform=Norm" );
-   
+
    // Boosted Decision Trees
    if (Use["BDTG"]) // Gradient Boost
-      factory->BookMethod( TMVA::Types::kBDT, "BDTG", 
+      factory->BookMethod( TMVA::Types::kBDT, "BDTG",
                            "!H:!V:NTrees=1000:BoostType=Grad:Shrinkage=0.30:UseBaggedGrad:GradBaggingFraction=0.6:SeparationType=GiniIndex:nCuts=20:NNodesMax=5" );
 
    if (Use["BDT"])  // Adaptive Boost
-      factory->BookMethod( TMVA::Types::kBDT, "BDT", 
+      factory->BookMethod( TMVA::Types::kBDT, "BDT",
                            "!H:!V:NTrees=400:nEventsMin=400:MaxDepth=3:BoostType=AdaBoost:SeparationType=GiniIndex:nCuts=20:PruneMethod=NoPruning" );
-   
+
    if (Use["BDTB"]) // Bagging
-      factory->BookMethod( TMVA::Types::kBDT, "BDTB", 
+      factory->BookMethod( TMVA::Types::kBDT, "BDTB",
                            "!H:!V:NTrees=400:BoostType=Bagging:SeparationType=GiniIndex:nCuts=20:PruneMethod=NoPruning" );
 
    if (Use["BDTD"]) // Decorrelation + Adaptive Boost
-      factory->BookMethod( TMVA::Types::kBDT, "BDTD", 
+      factory->BookMethod( TMVA::Types::kBDT, "BDTD",
                            "!H:!V:NTrees=400:nEventsMin=400:MaxDepth=3:BoostType=AdaBoost:SeparationType=GiniIndex:nCuts=20:PruneMethod=NoPruning:VarTransform=Decorrelate" );
-   
+
    // RuleFit -- TMVA implementation of Friedman's method
    if (Use["RuleFit"])
       factory->BookMethod( TMVA::Types::kRuleFit, "RuleFit",
                            "H:!V:RuleFitModule=RFTMVA:Model=ModRuleLinear:MinImp=0.001:RuleMinDist=0.001:NTrees=20:fEventsMin=0.01:fEventsMax=0.5:GDTau=-1.0:GDTauPrec=0.01:GDStep=0.01:GDNSteps=10000:GDErrScale=1.02" );
-   
+
    // For an example of the category classifier, see: TMVAClassificationCategory
 
    // --------------------------------------------------------------------------------------------------
@@ -459,7 +453,7 @@ void TMVAClassification( TString myMethodList = "" )
          //
          // # plugin handler          plugin name(regexp) class to be instanciated library        constructor format
          // Plugin.TMVA@@MethodBase:  ^BDT                TMVA::MethodBDT          TMVA.1         "MethodBDT(TString,TString,DataSet&,TString)"
-         // 
+         //
          // or by telling the global plugin manager directly
       gPluginMgr->AddHandler("TMVA@@MethodBase", "BDT", "TMVA::MethodBDT", "TMVA.1", "MethodBDT(TString,TString,DataSet&,TString)");
       factory->BookMethod( TMVA::Types::kPlugins, "BDT",
@@ -477,15 +471,15 @@ void TMVAClassification( TString myMethodList = "" )
    factory->TestAllMethods();
 
    // ----- Evaluate and compare performance of all configured MVAs
-   factory->EvaluateAllMethods();    
+   factory->EvaluateAllMethods();
 
    // --------------------------------------------------------------
-   
+
    // Save the output
    outputFile->Close();
 
    std::cout << "==> Wrote root file: " << outputFile->GetName() << std::endl;
-   std::cout << "==> TMVAClassification is done!" << std::endl;      
+   std::cout << "==> TMVAClassification is done!" << std::endl;
 
    delete factory;
 
diff --git a/tmva/test/TMVAClassification.cxx b/tmva/test/TMVAClassification.cxx
index 6def9c701f3245f1ff1d8ab210086a6bd085ada8..1a41c6e0869fe0a7f6df95ea9e9487ce20139d65 100644
--- a/tmva/test/TMVAClassification.cxx
+++ b/tmva/test/TMVAClassification.cxx
@@ -151,7 +151,7 @@ int main( int argc, char** argv )
    // The second argument is the output file for the training results
    // All TMVA output can be suppressed by removing the "!" (not) in 
    // front of the "Silent" argument in the option string
-   std::string factoryOptions( "!V:!Silent:Transformations=I;D;P;G,D" );
+   std::string factoryOptions( "!V:!Silent:Transformations=I;D;P;G,D:AnalysisType=Classification" );
    if (batchMode) factoryOptions += ":!Color:!DrawProgressBar";
 
    TMVA::Factory *factory = new TMVA::Factory( "TMVAClassification", outputFile, factoryOptions );
diff --git a/tmva/test/TMVAClassificationApplication.C b/tmva/test/TMVAClassificationApplication.C
index 0cc3bb4b74878d58f493cf8239364304136e6b46..69cfaf6af69b3ff72daf387b99ef19ead99fcd7c 100644
--- a/tmva/test/TMVAClassificationApplication.C
+++ b/tmva/test/TMVAClassificationApplication.C
@@ -195,8 +195,8 @@ void TMVAClassificationApplication( TString myMethodList = "" )
    if (Use["KNN"])           histKNN     = new TH1F( "MVA_KNN",           "MVA_KNN",           nbin,  0, 1 );
    if (Use["HMatrix"])       histHm      = new TH1F( "MVA_HMatrix",       "MVA_HMatrix",       nbin, -0.95, 1.55 );
    if (Use["Fisher"])        histFi      = new TH1F( "MVA_Fisher",        "MVA_Fisher",        nbin, -4, 4 );
-   if (Use["FisherG"])        histFiG    = new TH1F( "MVA_FisherG",        "MVA_FisherG",        nbin, -1, 1 );
-   if (Use["BoostedFisher"])  histFiB    = new TH1F( "MVA_BoostedFisher",        "MVA_BoostedFisher",        nbin, -2, 2 );
+   if (Use["FisherG"])       histFiG     = new TH1F( "MVA_FisherG",       "MVA_FisherG",       nbin, -1, 1 );
+   if (Use["BoostedFisher"]) histFiB     = new TH1F( "MVA_BoostedFisher", "MVA_BoostedFisher", nbin, -2, 2 );
    if (Use["LD"])            histLD      = new TH1F( "MVA_LD",            "MVA_LD",            nbin, -2, 2 );
    if (Use["MLP"])           histNn      = new TH1F( "MVA_MLP",           "MVA_MLP",           nbin, -1.25, 1.5 );
    if (Use["CFMlpANN"])      histNnC     = new TH1F( "MVA_CFMlpANN",      "MVA_CFMlpANN",      nbin,  0, 1 );
@@ -205,12 +205,12 @@ void TMVAClassificationApplication( TString myMethodList = "" )
    if (Use["BDTD"])          histBdtD    = new TH1F( "MVA_BDTD",          "MVA_BDTD",          nbin, -0.8, 0.8 );
    if (Use["BDTG"])          histBdtG    = new TH1F( "MVA_BDTG",          "MVA_BDTG",          nbin, -1.0, 1.0 );
    if (Use["RuleFit"])       histRf      = new TH1F( "MVA_RuleFit",       "MVA_RuleFit",       nbin, -2.0, 2.0 );
-   if (Use["SVM_Gauss"])     histSVMG    = new TH1F( "MVA_SVM_Gauss",     "MVA_SVM_Gauss",     nbin, 0.0, 1.0 );
-   if (Use["SVM_Poly"])      histSVMP    = new TH1F( "MVA_SVM_Poly",      "MVA_SVM_Poly",      nbin, 0.0, 1.0 );
-   if (Use["SVM_Lin"])       histSVML    = new TH1F( "MVA_SVM_Lin",       "MVA_SVM_Lin",       nbin, 0.0, 1.0 );
+   if (Use["SVM_Gauss"])     histSVMG    = new TH1F( "MVA_SVM_Gauss",     "MVA_SVM_Gauss",     nbin,  0.0, 1.0 );
+   if (Use["SVM_Poly"])      histSVMP    = new TH1F( "MVA_SVM_Poly",      "MVA_SVM_Poly",      nbin,  0.0, 1.0 );
+   if (Use["SVM_Lin"])       histSVML    = new TH1F( "MVA_SVM_Lin",       "MVA_SVM_Lin",       nbin,  0.0, 1.0 );
    if (Use["FDA_MT"])        histFDAMT   = new TH1F( "MVA_FDA_MT",        "MVA_FDA_MT",        nbin, -2.0, 3.0 );
    if (Use["FDA_GA"])        histFDAGA   = new TH1F( "MVA_FDA_GA",        "MVA_FDA_GA",        nbin, -2.0, 3.0 );
-   if (Use["Category"])      histCat     = new TH1F( "MVA_Category",      "MVA_Category",           nbin, -2., 2. );
+   if (Use["Category"])      histCat     = new TH1F( "MVA_Category",      "MVA_Category",      nbin, -2., 2. );
    if (Use["Plugin"])        histPBdt    = new TH1F( "MVA_PBDT",          "MVA_BDT",           nbin, -0.8, 0.8 );
 
    // PDEFoam also returns per-event error, fill in histogram, and also fill significance
@@ -264,6 +264,8 @@ void TMVAClassificationApplication( TString myMethodList = "" )
    Int_t    nSelCutsGA = 0;
    Double_t effS       = 0.7;
 
+   std::vector<Float_t> vecVar(4); // vector for EvaluateMVA tests
+
    std::cout << "--- Processing: " << theTree->GetEntries() << " events" << std::endl;
    TStopwatch sw;
    sw.Start();
@@ -278,6 +280,40 @@ void TMVAClassificationApplication( TString myMethodList = "" )
       var1 = userVar1 + userVar2;
       var2 = userVar1 - userVar2;
 
+      if (ievt < 20) {
+         // test the two different Reader::EvaluateMVA functions:
+         // access via registered variables compared to access via vector<float>
+         vecVar[0] = var1;
+         vecVar[1] = var2;
+         vecVar[2] = var3;
+         vecVar[3] = var4;
+         for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) {
+            if (it->second) {
+               TString mName = it->first + " method";
+               Double_t mva1 = reader->EvaluateMVA( mName );
+               Double_t mva2 = reader->EvaluateMVA( vecVar, mName );
+               if (mva1 != mva2) {
+                  std::cout << "++++++++++++++ ERROR in " << mName << ", comparing different EvaluateMVA results val1=" << mva1 << " val2=" << mva2 << std::endl;
+               }
+            }
+         }
+         // now test that the inputs do matter
+         TRandom3 rand(0);
+         vecVar[0] = rand.Rndm();
+         vecVar[1] = rand.Rndm();
+         vecVar[2] = rand.Rndm();
+         vecVar[3] = rand.Rndm();
+         for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) {
+            if (it->second) {
+               TString mName = it->first + " method";
+               Double_t mva1 = reader->EvaluateMVA( mName );
+               Double_t mva2 = reader->EvaluateMVA( vecVar, mName );
+               if (mva1 == mva2) {
+                  std::cout << "++++++++++++++ ERROR in " << mName << ", obtaining identical output for different inputs" << std::endl;
+               }
+            }
+         }
+      }
       // 
       // return the MVAs and fill to histograms
       // 
@@ -298,8 +334,8 @@ void TMVAClassificationApplication( TString myMethodList = "" )
       if (Use["KNN"          ])   histKNN    ->Fill( reader->EvaluateMVA( "KNN method"           ) );
       if (Use["HMatrix"      ])   histHm     ->Fill( reader->EvaluateMVA( "HMatrix method"       ) );
       if (Use["Fisher"       ])   histFi     ->Fill( reader->EvaluateMVA( "Fisher method"        ) );
-      if (Use["FisherG"      ])   histFiG    ->Fill( reader->EvaluateMVA( "FisherG method"        ) );
-      if (Use["BoostedFisher"])   histFiB    ->Fill( reader->EvaluateMVA( "BoostedFisher method"        ) );
+      if (Use["FisherG"      ])   histFiG    ->Fill( reader->EvaluateMVA( "FisherG method"       ) );
+      if (Use["BoostedFisher"])   histFiB    ->Fill( reader->EvaluateMVA( "BoostedFisher method" ) );
       if (Use["LD"           ])   histLD     ->Fill( reader->EvaluateMVA( "LD method"            ) );
       if (Use["MLP"          ])   histNn     ->Fill( reader->EvaluateMVA( "MLP method"           ) );
       if (Use["CFMlpANN"     ])   histNnC    ->Fill( reader->EvaluateMVA( "CFMlpANN method"      ) );
@@ -313,7 +349,7 @@ void TMVAClassificationApplication( TString myMethodList = "" )
       if (Use["SVM_Lin"      ])   histSVML   ->Fill( reader->EvaluateMVA( "SVM_Lin method"       ) );
       if (Use["FDA_MT"       ])   histFDAMT  ->Fill( reader->EvaluateMVA( "FDA_MT method"        ) );
       if (Use["FDA_GA"       ])   histFDAGA  ->Fill( reader->EvaluateMVA( "FDA_GA method"        ) );
-      if (Use["Category"     ])   histCat    ->Fill( reader->EvaluateMVA( "Category method"         ) );
+      if (Use["Category"     ])   histCat    ->Fill( reader->EvaluateMVA( "Category method"      ) );
       if (Use["Plugin"       ])   histPBdt   ->Fill( reader->EvaluateMVA( "P_BDT method"         ) );
 
       // retrieve also per-event error
diff --git a/tmva/test/TMVAClassificationApplication.cxx b/tmva/test/TMVAClassificationApplication.cxx
index a8bdebee2ecf88ea7e332f2c2766f74909f2b49a..65fd6a72983b37297ed3b8bdd312675a57f525bd 100644
--- a/tmva/test/TMVAClassificationApplication.cxx
+++ b/tmva/test/TMVAClassificationApplication.cxx
@@ -106,10 +106,10 @@ int main( int argc, char** argv )
    // create a set of variables and declare them to the reader
    // - the variable names must corresponds in name and type to 
    // those given in the weight file(s) that you use
-   Float_t myvar1, myvar2;
+   Float_t var1, var2;
    Float_t var3, var4;
-   reader->AddVariable( "myvar1 := var1+var2", &myvar1 );
-   reader->AddVariable( "myvar2 := var1-var2", &myvar2 );
+   reader->AddVariable( "myvar1 := var1+var2", &var1 );
+   reader->AddVariable( "myvar2 := var1-var2", &var2 );
    reader->AddVariable( "var3",                &var3 );
    reader->AddVariable( "var4",                &var4 );
    Float_t spec1,spec2;
@@ -246,7 +246,6 @@ int main( int argc, char** argv )
    TTree* theTree = (TTree*)input->Get("TreeS");
    std::cout << "--- Select signal sample" << std::endl;
    Float_t userVar1, userVar2;
-   Float_t var1,var2;
    theTree->SetBranchAddress( "var1", &userVar1 );
    theTree->SetBranchAddress( "var2", &userVar2 );
    theTree->SetBranchAddress( "var3", &var3 );
@@ -256,6 +255,8 @@ int main( int argc, char** argv )
    Int_t    nSelCutsGA = 0;
    Double_t effS       = 0.7;
 
+   std::vector<Float_t> vecVar(4); // vector for EvaluateMVA tests
+
    std::cout << "--- Processing: " << theTree->GetEntries() << " events" << std::endl;
    TStopwatch sw;
    sw.Start();
@@ -277,7 +278,38 @@ int main( int argc, char** argv )
       Category_cat2 = (var3>0)&&(var4<0);
       Category_cat3 = (var3>0)&&(var4>=0);
 
-
+      // test the two different Reader::EvaluateMVA functions:
+      // access via registered variables compared to access via vector<float>
+      vecVar[0] = var1;
+      vecVar[1] = var2;
+      vecVar[2] = var3;
+      vecVar[3] = var4;
+      for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) {
+         if (it->second) {
+            TString mName = it->first + " method";
+            Double_t mva1 = reader->EvaluateMVA( mName );
+            Double_t mva2 = reader->EvaluateMVA( vecVar, mName );
+            if (mva1 != mva2) {
+               std::cout << "++++++++++++++ ERROR in " << mName << ", comparing different EvaluateMVA results val1=" << mva1 << " val2=" << mva2 << std::endl;
+            }
+         }
+      }
+      // now test that the inputs do matter
+      TRandom3 rand(0);
+      vecVar[0] = rand.Rndm();
+      vecVar[1] = rand.Rndm();
+      vecVar[2] = rand.Rndm();
+      vecVar[3] = rand.Rndm();
+      for (std::map<std::string,int>::iterator it = Use.begin(); it != Use.end(); it++) {
+         if (it->second) {
+            TString mName = it->first + " method";
+            Double_t mva1 = reader->EvaluateMVA( mName );
+            Double_t mva2 = reader->EvaluateMVA( vecVar, mName );
+            if (mva1 == mva2) {
+               std::cout << "++++++++++++++ ERROR in " << mName << ", obtaining identical output for different inputs" << std::endl;
+            }
+         }
+      }
       // 
       // return the MVAs and fill to histograms
       // 
diff --git a/tmva/test/TMVAClassificationCategory.C b/tmva/test/TMVAClassificationCategory.C
index 7e1dc18b057b78f3a999b383d9ef21eafdff0ee3..d5d085fb1c9e9812d21c07e6278640f6017aac8f 100644
--- a/tmva/test/TMVAClassificationCategory.C
+++ b/tmva/test/TMVAClassificationCategory.C
@@ -10,7 +10,7 @@
  * As input data is used a toy-MC sample consisting of four Gaussian-distributed  *
  * and linearly correlated input variables with category (eta) dependent          *
  * properties.                                                                    *
- *                                                                                * 
+ *                                                                                *
  * For this example, only Fisher and Likelihood are used. Run via:                *
  *                                                                                *
  *    root -l TMVAClassificationCategory.C                                        *
@@ -21,7 +21,7 @@
  **********************************************************************************/
 
 #include <cstdlib>
-#include <iostream> 
+#include <iostream>
 #include <map>
 #include <string>
 
@@ -44,7 +44,7 @@
 // two types of category methods are implemented
 Bool_t UseOffsetMethod = kTRUE;
 
-void TMVAClassificationCategory() 
+void TMVAClassificationCategory()
 {
    //---------------------------------------------------------------
 
@@ -61,17 +61,17 @@ void TMVAClassificationCategory()
    // then run the performance analysis for you.
    //
    // The first argument is the base of the name of all the
-   // weightfiles in the directory weight/ 
+   // weightfiles in the directory weight/
    //
    // The second argument is the output file for the training results
-   // All TMVA output can be suppressed by removing the "!" (not) in 
+   // All TMVA output can be suppressed by removing the "!" (not) in
    // front of the "Silent" argument in the option string
    std::string factoryOptions( "!V:!Silent:Transformations=I;D;P;G,D" );
    if (batchMode) factoryOptions += ":!Color:!DrawProgressBar";
 
    TMVA::Factory *factory = new TMVA::Factory( "TMVAClassificationCategory", outputFile, factoryOptions );
 
-   // If you wish to modify default settings 
+   // If you wish to modify default settings
    // (please check "src/Config.h" to see all available global options)
    //    (TMVA::gConfig().GetVariablePlotting()).fTimesRMS = 8.0;
    //    (TMVA::gConfig().GetIONames()).fWeightFileDir = "myWeightDirectory";
@@ -84,21 +84,21 @@ void TMVAClassificationCategory()
    factory->AddVariable( "var3", 'F' );
    factory->AddVariable( "var4", 'F' );
 
-   // You can add so-called "Spectator variables", which are not used in the MVA training, 
-   // but will appear in the final "TestTree" produced by TMVA. This TestTree will contain the 
+   // You can add so-called "Spectator variables", which are not used in the MVA training,
+   // but will appear in the final "TestTree" produced by TMVA. This TestTree will contain the
    // input variables, the response values of all trained MVAs, and the spectator variables
    factory->AddSpectator( "eta" );
 
    // load the signal and background event samples from ROOT trees
    TFile *input(0);
    TString fname( "" );
-   if (UseOffsetMethod) fname = "../execs/data/toy_sigbkg_categ_offset.root";
-   else                 fname = "../execs/data/toy_sigbkg_categ_varoff.root";
+   if (UseOffsetMethod) fname = "data/toy_sigbkg_categ_offset.root";
+   else                 fname = "data/toy_sigbkg_categ_varoff.root";
    if (!gSystem->AccessPathName( fname )) {
       // first we try to find tmva_example.root in the local directory
       std::cout << "--- TMVAClassificationCategory: Accessing " << fname << std::endl;
       input = TFile::Open( fname );
-   } 
+   }
 
    if (!input) {
       std::cout << "ERROR: could not open data file: " << fname << std::endl;
@@ -111,11 +111,11 @@ void TMVAClassificationCategory()
    /// global event weights per tree (see below for setting event-wise weights)
    Double_t signalWeight     = 1.0;
    Double_t backgroundWeight = 1.0;
-   
+
    /// you can add an arbitrary number of signal or background trees
    factory->AddSignalTree    ( signal,     signalWeight     );
    factory->AddBackgroundTree( background, backgroundWeight );
-   
+
    // Apply additional cuts on the signal and background samples (can be different)
    TCut mycuts = ""; // for example: TCut mycuts = "abs(var1)<0.5 && abs(var2-0.5)<1";
    TCut mycutb = ""; // for example: TCut mycutb = "abs(var1)<0.5";
@@ -124,21 +124,21 @@ void TMVAClassificationCategory()
    factory->PrepareTrainingAndTestTree( mycuts, mycutb,
                                         "nTrain_Signal=0:nTrain_Background=0:SplitMode=Random:NormMode=NumEvents:!V" );
 
-   // Fisher discriminant   
+   // Fisher discriminant
    factory->BookMethod( TMVA::Types::kFisher, "Fisher", "!H:!V:Fisher" );
 
    // Likelihood
-   factory->BookMethod( TMVA::Types::kLikelihood, "Likelihood", 
+   factory->BookMethod( TMVA::Types::kLikelihood, "Likelihood",
                         "!H:!V:TransformOutput:PDFInterpol=Spline2:NSmoothSig[0]=20:NSmoothBkg[0]=20:NSmoothBkg[1]=10:NSmooth=1:NAvEvtPerBin=50" ); 
 
    // Categorised classifier
    TMVA::MethodCategory* mcat = 0;
-   
+
    // the variable sets
    TString theCat1Vars = "var1:var2:var3:var4";
    TString theCat2Vars = (UseOffsetMethod ? "var1:var2:var3:var4" : "var1:var2:var3");
 
-   // the Fisher 
+   // the Fisher
    TMVA::MethodBase* fiCat = factory->BookMethod( TMVA::Types::kCategory, "FisherCat","" );
    mcat = dynamic_cast<TMVA::MethodCategory*>(fiCat);
    mcat->AddMethod("abs(eta)<=1.3",theCat1Vars, TMVA::Types::kFisher,"Category_Fisher_1","!H:!V:Fisher");
@@ -159,15 +159,15 @@ void TMVAClassificationCategory()
    factory->TestAllMethods();
 
    // ----- Evaluate and compare performance of all configured MVAs
-   factory->EvaluateAllMethods();    
+   factory->EvaluateAllMethods();
 
    // --------------------------------------------------------------
-   
+
    // Save the output
    outputFile->Close();
 
    std::cout << "==> Wrote root file: " << outputFile->GetName() << std::endl;
-   std::cout << "==> TMVAClassificationCategory is done!" << std::endl;      
+   std::cout << "==> TMVAClassificationCategory is done!" << std::endl;
 
    // Clean up
    delete factory;
diff --git a/tmva/test/TMVAGui.C b/tmva/test/TMVAGui.C
index 9689442ec683efd5742944d58d7b476783dcd278..81baea8ecb1e7509164a7b8d9403b1ec95b31475 100644
--- a/tmva/test/TMVAGui.C
+++ b/tmva/test/TMVAGui.C
@@ -54,12 +54,12 @@ void TMVAGui( const char* fName = "TMVA.root" )
 
    TString curMacroPath(gROOT->GetMacroPath());
    // uncomment next line for macros submitted to next root version
-   // gROOT->SetMacroPath(curMacroPath+":$ROOTSYS/tmva/test/:");
+   gROOT->SetMacroPath(curMacroPath+":./:$ROOTSYS/tmva/test/:");
    
    // for the sourceforge version, including $ROOTSYS/tmva/test in the
    // macro path is a mistake, especially if "./" was not part of path
    // add ../macros to the path (comment out next line for the ROOT version of TMVA)
-   gROOT->SetMacroPath(curMacroPath+":../macros:");
+   // gROOT->SetMacroPath(curMacroPath+":../macros:");
    
    cout << "--- Launch TMVA GUI to view input file: " << fName << endl;
 
diff --git a/tmva/test/TMVARegGui.C b/tmva/test/TMVARegGui.C
index 8e2479a6d9e4c4657ae587181c092e7923171af4..e3626f399ec13f3011e8b0b18c1d147e9af023c9 100644
--- a/tmva/test/TMVARegGui.C
+++ b/tmva/test/TMVARegGui.C
@@ -47,12 +47,12 @@ void TMVARegGui( const char* fName = "TMVAReg.root" )
 
    TString curMacroPath(gROOT->GetMacroPath());
    // uncomment next line for macros submitted to next root version
-   // gROOT->SetMacroPath(curMacroPath+":$ROOTSYS/tmva/test/:");
+   gROOT->SetMacroPath(curMacroPath+":./:$ROOTSYS/tmva/test/:");
 
    // for the sourceforge version, including $ROOTSYS/tmva/test in the
    // macro path is a mistake, especially if "./" was not part of path
    // add ../macros to the path (comment out next line for the ROOT version of TMVA)
-   gROOT->SetMacroPath(curMacroPath+":../macros:");
+   // gROOT->SetMacroPath(curMacroPath+":../macros:");
 
    cout << "--- Launch TMVA GUI to view input file: " << fName << endl;
 
diff --git a/tmva/test/TMVARegression.C b/tmva/test/TMVARegression.C
index 6ab558e8af552d13e416615e31ed0a60541557fc..bdb4125a9bf5351877093cd655e10be228845477 100644
--- a/tmva/test/TMVARegression.C
+++ b/tmva/test/TMVARegression.C
@@ -248,7 +248,7 @@ void TMVARegression( TString myMethodList = "" )
 
    // Neural network (MLP)
    if (Use["MLP"])
-      factory->BookMethod( TMVA::Types::kMLP, "MLP", "!H:!V:VarTransform=Norm:NeuronType=tanh:NCycles=5000:HiddenLayers=N+5,N+2:TestRate=6:TrainingMethod=BP:Sampling=0.3:SamplingEpoch=0.8:ConvergenceImprove=1e-6:ConvergenceTests=15" );
+      factory->BookMethod( TMVA::Types::kMLP, "MLP", "!H:!V:VarTransform=Norm:NeuronType=tanh:NCycles=20000:HiddenLayers=N+20:TestRate=6:TrainingMethod=BFGS:Sampling=0.3:SamplingEpoch=0.8:ConvergenceImprove=1e-6:ConvergenceTests=15:!UseRegulator" );
 
    // Support Vector Machine
    if (Use["SVM"])
@@ -261,7 +261,7 @@ void TMVARegression( TString myMethodList = "" )
 
    if (Use["BDTG"])
      factory->BookMethod( TMVA::Types::kBDT, "BDTG",
-                           "!H:!V:NTrees=200::BoostType=Grad:Shrinkage=1.0:UseBaggedGrad:SeparationType=GiniIndex:nCuts=20:NNodesMax=5" );
+                           "!H:!V:NTrees=1000:BoostType=Grad:Shrinkage=0.3:!UseBaggedGrad:SeparationType=GiniIndex:nCuts=20:NNodesMax=10" );
    // --------------------------------------------------------------------------------------------------
 
    // ---- Now you can tell the factory to train, test, and evaluate the MVAs
diff --git a/tmva/test/TMVARegression.cxx b/tmva/test/TMVARegression.cxx
index 3239b3b55a3cf7f62da15dfaf4b1d0935e60a132..6606a6a8c6f7f2eaeb2d7bce560b07aabd591861 100644
--- a/tmva/test/TMVARegression.cxx
+++ b/tmva/test/TMVARegression.cxx
@@ -241,7 +241,7 @@ int main( int argc, char** argv )
 
    // Neural network (MLP)
    if (Use["MLP"])
-      factory->BookMethod( TMVA::Types::kMLP, "MLP", "!H:!V:VarTransform=Norm:NeuronType=tanh:NCycles=20000:HiddenLayers=N+20,N+15:TestRate=6:TrainingMethod=BP:Sampling=0.3:SamplingEpoch=0.8:ConvergenceImprove=1e-6:ConvergenceTests=15" );
+      factory->BookMethod( TMVA::Types::kMLP, "MLP", "!H:!V:VarTransform=Norm:NeuronType=tanh:NCycles=20000:HiddenLayers=N+20:TestRate=6:TrainingMethod=BFGS:Sampling=0.3:SamplingEpoch=0.8:ConvergenceImprove=1e-6:ConvergenceTests=15:!UseRegulator" );
 
    // Support Vector Machine
    if (Use["SVM"])
@@ -254,7 +254,7 @@ int main( int argc, char** argv )
 
    if (Use["BDTG"])
      factory->BookMethod( TMVA::Types::kBDT, "BDTG",
-                           "!H:!V:NTrees=200::BoostType=Grad:Shrinkage=1.0:UseBaggedGrad:SeparationType=GiniIndex:nCuts=20:NNodesMax=5" );
+                           "!H:!V:NTrees=1000:BoostType=Grad:Shrinkage=0.3:!UseBaggedGrad:SeparationType=GiniIndex:nCuts=20:NNodesMax=10" );
    // --------------------------------------------------------------------------------------------------
 
    // ---- Now you can tell the factory to train, test, and evaluate the MVAs
diff --git a/tmva/test/createData.C b/tmva/test/createData.C
new file mode 100644
index 0000000000000000000000000000000000000000..a449501588bc44da10e9aa0d21e24bdaada90601
--- /dev/null
+++ b/tmva/test/createData.C
@@ -0,0 +1,1813 @@
+// createData.C: create the toy MC datasets used by the TMVA tests, and plot the variables
+#include "TROOT.h"
+#include "TMath.h"
+#include "TTree.h"
+#include "TArrayD.h"
+#include "TStyle.h"
+#include "TFile.h"
+#include "TRandom.h"
+#include "Riostream.h"
+#include "TCanvas.h"
+#include "TMatrixD.h"
+#include "TH2F.h"
+#include "TLegend.h"
+#include "TBranch.h"
+#include <vector>
+
+void plot( TString fname = "data.root", TString var0="var0", TString var1="var1" ) 
+{
+   TFile* dataFile = TFile::Open( fname );
+
+   if (!dataFile) {
+      cout << "ERROR: cannot open file: " << fname << endl;
+      return;
+   }
+
+   TTree *treeS = (TTree*)dataFile->Get("TreeS");
+   TTree *treeB = (TTree*)dataFile->Get("TreeB");
+
+   TCanvas* c = new TCanvas( "c", "", 0, 0, 550, 550 );
+
+   TStyle *TMVAStyle = gROOT->GetStyle("Plain"); // our style is based on Plain
+   TMVAStyle->SetOptStat(0);
+   TMVAStyle->SetPadTopMargin(0.02);
+   TMVAStyle->SetPadBottomMargin(0.16);
+   TMVAStyle->SetPadRightMargin(0.03);
+   TMVAStyle->SetPadLeftMargin(0.15);
+   TMVAStyle->SetPadGridX(0);
+   TMVAStyle->SetPadGridY(0);
+   
+   TMVAStyle->SetOptTitle(0);
+   TMVAStyle->SetTitleW(.4);
+   TMVAStyle->SetTitleH(.10);
+   TMVAStyle->SetTitleX(.5);
+   TMVAStyle->SetTitleY(.9);
+   TMVAStyle->SetMarkerStyle(20);
+   TMVAStyle->SetMarkerSize(.4);
+   TMVAStyle->cd();
+
+
+   Float_t xmin = TMath::Min( treeS->GetMinimum( var0 ), treeB->GetMinimum( var0 ) );
+   Float_t xmax = TMath::Max( treeS->GetMaximum( var0 ), treeB->GetMaximum( var0 ) );
+   Float_t ymin = TMath::Min( treeS->GetMinimum( var1 ), treeB->GetMinimum( var1 ) );
+   Float_t ymax = TMath::Max( treeS->GetMaximum( var1 ), treeB->GetMaximum( var1 ) );
+
+   Int_t nbin = 500;
+   TH2F* frameS = new TH2F( "DataS", "DataS", nbin, xmin, xmax, nbin, ymin, ymax );
+   TH2F* frameB = new TH2F( "DataB", "DataB", nbin, xmin, xmax, nbin, ymin, ymax );
+
+   // project trees
+   treeS->Draw( "var1:var0>>DataS", "", "0" );
+   treeB->Draw( "var1:var0>>DataB", "", "0" );
+
+   // set style
+   frameS->SetMarkerSize( 1.6 );
+   frameS->SetMarkerColor( 4 );
+
+   frameB->SetMarkerSize( 1.6 );
+   frameB->SetMarkerColor( 2 );
+
+   // legend
+   frameS->SetTitle( var1+" versus "+var0+" for signal and background" );
+   frameS->GetXaxis()->SetTitle( var0 );
+   frameS->GetYaxis()->SetTitle( var1 );
+
+   frameS->SetLabelSize( 0.04, "X" );
+   frameS->SetLabelSize( 0.04, "Y" );
+   frameS->SetTitleSize( 0.05, "X" );
+   frameS->SetTitleSize( 0.05, "Y" );
+
+   // and plot
+   frameS->Draw();
+   frameB->Draw( "same" );  
+
+   // Draw legend               
+   TLegend *legend = new TLegend( 1 - c->GetRightMargin() - 0.32, 1 - c->GetTopMargin() - 0.12, 
+                                  1 - c->GetRightMargin(), 1 - c->GetTopMargin() );
+   legend->SetFillStyle( 1 );
+   legend->AddEntry(frameS,"Signal","p");
+   legend->AddEntry(frameB,"Background","p");
+   legend->Draw("same");
+   legend->SetBorderSize(1);
+   legend->SetMargin( 0.3 );
+
+}
+
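+// compute the lower-triangular Cholesky factor L of covMat (covMat = L * L^T)
+// via the Cholesky-Banachiewicz recursion; multiplying a vector of independent
+// unit Gaussians by L (see getGaussRnd) yields numbers with covariance covMat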
+TMatrixD* produceSqrtMat( const TMatrixD& covMat )
+{
+   Double_t sum = 0;
+   Int_t size = covMat.GetNrows();
+   TMatrixD* sqrtMat = new TMatrixD( size, size );
+
+   for (Int_t i=0; i< size; i++) {
+      
+      sum = 0;
+      for (Int_t j=0;j< i; j++) sum += (*sqrtMat)(i,j) * (*sqrtMat)(i,j);
+
+      (*sqrtMat)(i,i) = TMath::Sqrt(TMath::Abs(covMat(i,i) - sum));
+
+      for (Int_t k=i+1 ;k<size; k++) {
+
+         sum = 0;
+         for (Int_t l=0; l<i; l++) sum += (*sqrtMat)(k,l) * (*sqrtMat)(i,l);
+
+         (*sqrtMat)(k,i) = (covMat(k,i) - sum) / (*sqrtMat)(i,i);
+
+      }
+   }
+   return sqrtMat;
+}
+
+void getGaussRnd( TArrayD& v, const TMatrixD& sqrtMat, TRandom& R ) 
+{
+   // generate "size" correlated Gaussian random numbers
+
+   // sanity check
+   const Int_t size = sqrtMat.GetNrows();
+   if (size != v.GetSize()) 
+      cout << "<getGaussRnd> too short input vector: " << size << " " << v.GetSize() << endl;
+
+   Double_t* tmpVec = new Double_t[size];
+
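+   // draw "size" independent standard-normal numbers with the Box-Muller
+   // transform: x = sin(2*pi*z) * sqrt(-2*ln(y)) for uniform y, z in (0,1)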
+   for (Int_t i=0; i<size; i++) {
+      Double_t x, y, z;
+      y = R.Rndm();
+      z = R.Rndm();
+      x = 2*TMath::Pi()*z;
+      tmpVec[i] = TMath::Sin(x) * TMath::Sqrt(-2.0*TMath::Log(y));
+   }
+
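+   // correlate them: v = L * tmpVec, with L the Cholesky factor from produceSqrtMat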
+   for (Int_t i=0; i<size; i++) {
+      v[i] = 0;
+      for (Int_t j=0; j<=i; j++) v[i] += sqrtMat(i,j) * tmpVec[j];
+   }
+
+   delete [] tmpVec;
+}
+
+// create the standard dataset, with the last variable stored in a friend tree
+void create_lin_Nvar_withFriend(Int_t N = 2000)
+{
+   const Int_t nvar  = 4;
+   const Int_t nvar2 = 1;
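+   // the last nvar2 variables are written to separate "friend" trees (TreeSF/TreeBF)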
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar-nvar2; ivar++) {
+      cout << "Creating branch var" << ivar+1 << " in signal and background trees" << endl;
+      treeS->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+   }
+   TTree* treeSF = new TTree( "TreeSF", "TreeS", 1 );   
+   TTree* treeBF = new TTree( "TreeBF", "TreeB", 1 );   
+   for (Int_t ivar=nvar-nvar2; ivar<nvar; ivar++) {
+      treeSF->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+      treeBF->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+   }
+
+      
+   TRandom R( 100 );
+   Float_t xS[nvar] = {  0.2,  0.3,  0.5,  0.9 };
+   Float_t xB[nvar] = { -0.2, -0.3, -0.5, -0.6 };
+   Float_t dx[nvar] = {  1.0,  1.0, 1.0, 1.0 };
+   TArrayD* v = new TArrayD( nvar );
+   Float_t rho[20];
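+   // correlation coefficients, indexed by the product of the two 1-based
+   // variable indices (rho[i*j] = rho(var_i,var_j)); for nvar = 4 the products
+   // 2, 3, 4, 6, 8 and 12 are all distinct, so no two pairs collide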
+   rho[1*2] = 0.4;
+   rho[1*3] = 0.6;
+   rho[1*4] = 0.9;
+   rho[2*3] = 0.7;
+   rho[2*4] = 0.8;
+   rho[3*4] = 0.93;
+
+   // create covariance matrix
+   TMatrixD* covMatS = new TMatrixD( nvar, nvar );
+   TMatrixD* covMatB = new TMatrixD( nvar, nvar );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      (*covMatS)(ivar,ivar) = dx[ivar]*dx[ivar];
+      (*covMatB)(ivar,ivar) = dx[ivar]*dx[ivar];
+      for (Int_t jvar=ivar+1; jvar<nvar; jvar++) {
+         (*covMatS)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatS)(jvar,ivar) = (*covMatS)(ivar,jvar);
+
+         (*covMatB)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatB)(jvar,ivar) = (*covMatB)(ivar,jvar);
+      }
+   }
+   cout << "signal covariance matrix: " << endl;
+   covMatS->Print();
+   cout << "background covariance matrix: " << endl;
+   covMatB->Print();
+
+   // produce the square-root matrix
+   TMatrixD* sqrtMatS = produceSqrtMat( *covMatS );
+   TMatrixD* sqrtMatB = produceSqrtMat( *covMatB );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Float_t*  x;
+      TMatrixD* m;
+      if (itype == 0) { x = xS; m = sqrtMatS; cout << "- produce signal" << endl; }
+      else            { x = xB; m = sqrtMatB; cout << "- produce background" << endl; }
+
+      // event loop
+      TTree* tree  = (itype==0) ? treeS : treeB;
+      TTree* treeF = (itype==0) ? treeSF : treeBF;
+      for (Int_t i=0; i<N; i++) {
+
+         if (i%1000 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+         getGaussRnd( *v, *m, R );
+
+         for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar] = (*v)[ivar] + x[ivar];
+         
+         tree->Fill();
+         treeF->Fill();
+      }
+   }
+
+//    treeS->AddFriend(treeSF);
+//    treeB->AddFriend(treeBF);
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+   treeSF->Write();
+   treeBF->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+
+}
+
+
+// create the standard 4-variable, linearly correlated dataset
+void create_lin_Nvar(Int_t N = 50000)
+{
+   const Int_t nvar = 4;
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+   }
+      
+   TRandom R( 100 );
+   Float_t xS[nvar] = {  0.2,  0.3,  0.5,  0.9 };
+   Float_t xB[nvar] = { -0.2, -0.3, -0.5, -0.6 };
+   Float_t dx[nvar] = {  1.0,  1.0, 1.0, 1.0 };
+   TArrayD* v = new TArrayD( nvar );
+   Float_t rho[20];
+   rho[1*2] = 0.4;
+   rho[1*3] = 0.6;
+   rho[1*4] = 0.9;
+   rho[2*3] = 0.7;
+   rho[2*4] = 0.8;
+   rho[3*4] = 0.93;
+
+   // create covariance matrix
+   TMatrixD* covMatS = new TMatrixD( nvar, nvar );
+   TMatrixD* covMatB = new TMatrixD( nvar, nvar );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      (*covMatS)(ivar,ivar) = dx[ivar]*dx[ivar];
+      (*covMatB)(ivar,ivar) = dx[ivar]*dx[ivar];
+      for (Int_t jvar=ivar+1; jvar<nvar; jvar++) {
+         (*covMatS)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatS)(jvar,ivar) = (*covMatS)(ivar,jvar);
+
+         (*covMatB)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatB)(jvar,ivar) = (*covMatB)(ivar,jvar);
+      }
+   }
+   cout << "signal covariance matrix: " << endl;
+   covMatS->Print();
+   cout << "background covariance matrix: " << endl;
+   covMatB->Print();
+
+   // produce the square-root matrix
+   TMatrixD* sqrtMatS = produceSqrtMat( *covMatS );
+   TMatrixD* sqrtMatB = produceSqrtMat( *covMatB );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Float_t*  x;
+      TMatrixD* m;
+      if (itype == 0) { x = xS; m = sqrtMatS; cout << "- produce signal" << endl; }
+      else            { x = xB; m = sqrtMatB; cout << "- produce background" << endl; }
+
+      // event loop
+      TTree* tree = (itype==0) ? treeS : treeB;
+      for (Int_t i=0; i<N; i++) {
+
+         if (i%1000 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+         getGaussRnd( *v, *m, R );
+
+         for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar] = (*v)[ivar] + x[ivar];
+         
+         tree->Fill();
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+}
+
+// create the category data
+// type = 1 (category-dependent offset) or 2 (correlated variables, last variable set to -5 outside |eta| < 1.3)
+void create_lin_Nvar_categories(Int_t N = 10000, Int_t type = 2)  
+{
+   const Int_t nvar = 4;
+   Float_t xvar[nvar];
+   Float_t eta;
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+   }
+
+   // add category variable
+   treeS->Branch( "eta", &eta, "eta/F" );
+   treeB->Branch( "eta", &eta, "eta/F" );
+      
+   TRandom R( 100 );
+   Float_t xS[nvar] = {  0.2,  0.3,  0.5,  0.9 };
+   Float_t xB[nvar] = { -0.2, -0.3, -0.5, -0.6 };
+   Float_t dx[nvar] = {  1.0,  1.0, 1.0, 1.0 };
+   TArrayD* v = new TArrayD( nvar );
+   Float_t rho[20];
+   rho[1*2] = 0.0;
+   rho[1*3] = 0.0;
+   rho[1*4] = 0.0;
+   rho[2*3] = 0.0;
+   rho[2*4] = 0.0;
+   rho[3*4] = 0.0;
+   if (type != 1) {
+      rho[1*2] = 0.6;
+      rho[1*3] = 0.7;
+      rho[1*4] = 0.9;
+      rho[2*3] = 0.8;
+      rho[2*4] = 0.9;
+      rho[3*4] = 0.93;
+   }
+
+   // create covariance matrix
+   TMatrixD* covMatS = new TMatrixD( nvar, nvar );
+   TMatrixD* covMatB = new TMatrixD( nvar, nvar );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      (*covMatS)(ivar,ivar) = dx[ivar]*dx[ivar];
+      (*covMatB)(ivar,ivar) = dx[ivar]*dx[ivar];
+      for (Int_t jvar=ivar+1; jvar<nvar; jvar++) {
+         (*covMatS)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatS)(jvar,ivar) = (*covMatS)(ivar,jvar);
+
+         (*covMatB)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatB)(jvar,ivar) = (*covMatB)(ivar,jvar);
+      }
+   }
+   cout << "signal covariance matrix: " << endl;
+   covMatS->Print();
+   cout << "background covariance matrix: " << endl;
+   covMatB->Print();
+
+   // produce the square-root matrix
+   TMatrixD* sqrtMatS = produceSqrtMat( *covMatS );
+   TMatrixD* sqrtMatB = produceSqrtMat( *covMatB );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Float_t*  x;
+      TMatrixD* m;
+      if (itype == 0) { x = xS; m = sqrtMatS; cout << "- produce signal" << endl; }
+      else            { x = xB; m = sqrtMatB; cout << "- produce background" << endl; }
+
+      // event loop
+      TTree* tree = (itype==0) ? treeS : treeB;
+      for (Int_t i=0; i<N; i++) {
+
+         if (i%1000 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+         getGaussRnd( *v, *m, R );
+
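+         // eta is drawn uniformly in [-2.5, 2.5]; |eta| > 1.3 defines the second category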
+         eta = 2.5*2*(R.Rndm() - 0.5);
+         Float_t offset = 0;
+         if (type == 1) offset = TMath::Abs(eta) > 1.3 ? 0.8 : -0.8;
+         for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar] = (*v)[ivar] + x[ivar] + offset;
+         if (type != 1 && TMath::Abs(eta) > 1.3) xvar[nvar-1] = -5;
+
+         tree->Fill();
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+}
+
+
+// create data where the signal and/or background tree carries per-event weights
+void create_lin_Nvar_weighted(Int_t N = 10000, int WeightedSignal=0, int WeightedBkg=1)
+{
+   const Int_t nvar = 4;
+   Float_t xvar[nvar];
+   Float_t weight;
+
+   
+   cout << endl << endl << endl;
+   cout << "please use .L createData.C++ if you want to run this MC geneation" <<endl;
+   cout << "otherwise you will wait for ages!!! " << endl;
+   cout << endl << endl << endl;
+
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+   }
+   if (WeightedSignal) treeS->Branch( "weight", &weight,"weight/F" );
+   if (WeightedBkg)    treeB->Branch( "weight", &weight,"weight/F" );
+      
+   TRandom R( 100 );
+   Float_t xS[nvar] = {  0.2,  0.3,  0.4,  0.8 };
+   Float_t xB[nvar] = { -0.2, -0.3, -0.4, -0.5 };
+   Float_t dx[nvar] = {  1.0,  1.0, 1.0, 1.0 };
+   TArrayD* v = new TArrayD( nvar );
+   Float_t rho[20];
+   rho[1*2] = 0.4;
+   rho[1*3] = 0.6;
+   rho[1*4] = 0.9;
+   rho[2*3] = 0.7;
+   rho[2*4] = 0.8;
+   rho[3*4] = 0.93;
+
+   // create covariance matrix
+   TMatrixD* covMatS = new TMatrixD( nvar, nvar );
+   TMatrixD* covMatB = new TMatrixD( nvar, nvar );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      (*covMatS)(ivar,ivar) = dx[ivar]*dx[ivar];
+      (*covMatB)(ivar,ivar) = dx[ivar]*dx[ivar];
+      for (Int_t jvar=ivar+1; jvar<nvar; jvar++) {
+         (*covMatS)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatS)(jvar,ivar) = (*covMatS)(ivar,jvar);
+
+         (*covMatB)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatB)(jvar,ivar) = (*covMatB)(ivar,jvar);
+      }
+   }
+   cout << "signal covariance matrix: " << endl;
+   covMatS->Print();
+   cout << "background covariance matrix: " << endl;
+   covMatB->Print();
+
+   // produce the square-root matrix
+   TMatrixD* sqrtMatS = produceSqrtMat( *covMatS );
+   TMatrixD* sqrtMatB = produceSqrtMat( *covMatB );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Float_t*  x;
+      TMatrixD* m;
+      if (itype == 0) { x = xS; m = sqrtMatS; cout << "- produce signal" << endl; }
+      else            { x = xB; m = sqrtMatB; cout << "- produce background" << endl; }
+
+      // event loop
+      TTree* tree = (itype==0) ? treeS : treeB;
+      Int_t i=0;
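+      // hit-or-miss sampling: accept events with probability proportional to
+      // 1/Gaus(v[nvar-1]) to oversample the tails of the last variable, and
+      // store the inverse as per-event weight, so that the weighted sample
+      // approximately reproduces the original Gaussian shape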
+      do {
+         getGaussRnd( *v, *m, R );
+
+         for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar] = (*v)[ivar] + x[ivar];
+         //         for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar] = R.Uniform()*10.-5.;
+         
+         //         weight = 0.5 / (TMath::Gaus( (xvar[nvar-1]-x[nvar-1]), 0, 1.1) );
+         // weight = TMath::Gaus(0.675,0,1) / (TMath::Gaus( (xvar[nvar-1]-x[nvar-1]), 0, 1.) );
+         weight = 0.8 / (TMath::Gaus( ((*v)[nvar-1]), 0, 1.09) );
+         Double_t tmp=R.Uniform()/0.00034;
+         if (itype==0 && !WeightedSignal) {
+            weight = 1;
+            tree->Fill();
+            i++;
+         } else if (itype==1 && !WeightedBkg) {
+            weight = 1;
+            tree->Fill();
+            i++;
+         }
+         else {
+            if (tmp < weight){
+               weight = 1./weight;
+               tree->Fill();
+               if (i%10 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+               i++;
+            }
+         }
+      } while (i<N);
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   TH1F *h[4];   
+   TH1F *hw[4];
+   for (Int_t  i=0;i<4;i++){
+      char buffer[5];
+      sprintf(buffer,"h%d",i);
+      h[i]= new TH1F(buffer,"",100,-5,5);
+      sprintf(buffer,"hw%d",i);
+      hw[i] = new TH1F(buffer,"",100,-5,5);
+      hw[i]->SetLineColor(3);
+   }
+
+   for (int ie=0;ie<treeS->GetEntries();ie++){
+      treeS->GetEntry(ie);
+      for (Int_t  i=0;i<4;i++){
+         h[i]->Fill(xvar[i]);
+         hw[i]->Fill(xvar[i],weight);
+      }
+   }
+
+   TCanvas *c = new TCanvas("c","",800,800);
+   c->Divide(2,2);
+
+   for (Int_t  i=0;i<4;i++){
+      c->cd(i+1);
+      h[i]->Draw();
+      hw[i]->Draw("same");
+   }
+
+
+   //   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+}
+
+
+
+// create data with vector<float> branches whose size varies event by event
+void create_lin_Nvar_Arr(Int_t N = 1000)
+{
+   const Int_t nvar = 4;
+   std::vector<float>* xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      xvar[ivar] = new std::vector<float>();
+      treeS->Branch( TString(Form( "var%i", ivar+1 )).Data(), "vector<float>", &xvar[ivar], 64000, 1 );
+      treeB->Branch( TString(Form( "var%i", ivar+1 )).Data(), "vector<float>", &xvar[ivar], 64000, 1 );
+   }
+
+   TRandom R( 100 );
+   Float_t xS[nvar] = {  0.2,  0.3,  0.5,  0.9 };
+   Float_t xB[nvar] = { -0.2, -0.3, -0.5, -0.6 };
+   Float_t dx[nvar] = {  1.0,  1.0, 1.0, 1.0 };
+   TArrayD* v = new TArrayD( nvar );
+   Float_t rho[20];
+   rho[1*2] = 0.4;
+   rho[1*3] = 0.6;
+   rho[1*4] = 0.9;
+   rho[2*3] = 0.7;
+   rho[2*4] = 0.8;
+   rho[3*4] = 0.93;
+
+   // create covariance matrix
+   TMatrixD* covMatS = new TMatrixD( nvar, nvar );
+   TMatrixD* covMatB = new TMatrixD( nvar, nvar );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      (*covMatS)(ivar,ivar) = dx[ivar]*dx[ivar];
+      (*covMatB)(ivar,ivar) = dx[ivar]*dx[ivar];
+      for (Int_t jvar=ivar+1; jvar<nvar; jvar++) {
+         (*covMatS)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatS)(jvar,ivar) = (*covMatS)(ivar,jvar);
+
+         (*covMatB)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatB)(jvar,ivar) = (*covMatB)(ivar,jvar);
+      }
+   }
+   cout << "signal covariance matrix: " << endl;
+   covMatS->Print();
+   cout << "background covariance matrix: " << endl;
+   covMatB->Print();
+
+   // produce the square-root matrix
+   TMatrixD* sqrtMatS = produceSqrtMat( *covMatS );
+   TMatrixD* sqrtMatB = produceSqrtMat( *covMatB );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Float_t*  x;
+      TMatrixD* m;
+      if (itype == 0) { x = xS; m = sqrtMatS; cout << "- produce signal" << endl; }
+      else            { x = xB; m = sqrtMatB; cout << "- produce background" << endl; }
+
+      // event loop
+      TTree* tree = (itype==0) ? treeS : treeB;
+      for (Int_t i=0; i<N; i++) {
+
+         if (i%100 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+
+         Int_t aSize = (Int_t)(gRandom->Rndm()*10); // size of array varies between events
+         for (Int_t ivar=0; ivar<nvar; ivar++) {
+            xvar[ivar]->clear();
+            xvar[ivar]->reserve(aSize);
+         }
+         for (Int_t iA = 0; iA<aSize; iA++) {
+            // one correlated draw per array element
+            getGaussRnd( *v, *m, R );
+            for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar]->push_back((*v)[ivar] + x[ivar]);
+         }
+         tree->Fill();
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   //plot();
+}
+
+
+
+// create data with mixed branch precision: var1/var2 as Double_t, var3/var4 as Float_t
+void create_lin_Nvar_double()
+{
+   Int_t N = 10000;
+   const Int_t nvar = 4;
+   Double_t xvar[nvar];
+   Double_t xvarD[nvar];
+   Float_t  xvarF[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      if (ivar<2) {
+         treeS->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvarD[ivar], TString(Form( "var%i/D", ivar+1 )).Data() );
+         treeB->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvarD[ivar], TString(Form( "var%i/D", ivar+1 )).Data() );
+      }
+      else {
+         treeS->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvarF[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+         treeB->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvarF[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+      }
+   }
+      
+   TRandom R( 100 );
+   Double_t xS[nvar] = {  0.2,  0.3,  0.5,  0.6 };
+   Double_t xB[nvar] = { -0.2, -0.3, -0.5, -0.6 };
+   Double_t dx[nvar] = {  1.0,  1.0, 1.0, 1.0 };
+   TArrayD* v = new TArrayD( nvar );
+   Double_t rho[20];
+   rho[1*2] = 0.4;
+   rho[1*3] = 0.6;
+   rho[1*4] = 0.9;
+   rho[2*3] = 0.7;
+   rho[2*4] = 0.8;
+   rho[3*4] = 0.93;
+
+   // create covariance matrix
+   TMatrixD* covMatS = new TMatrixD( nvar, nvar );
+   TMatrixD* covMatB = new TMatrixD( nvar, nvar );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      (*covMatS)(ivar,ivar) = dx[ivar]*dx[ivar];
+      (*covMatB)(ivar,ivar) = dx[ivar]*dx[ivar];
+      for (Int_t jvar=ivar+1; jvar<nvar; jvar++) {
+         (*covMatS)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatS)(jvar,ivar) = (*covMatS)(ivar,jvar);
+
+         (*covMatB)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatB)(jvar,ivar) = (*covMatB)(ivar,jvar);
+      }
+   }
+   cout << "signal covariance matrix: " << endl;
+   covMatS->Print();
+   cout << "background covariance matrix: " << endl;
+   covMatB->Print();
+
+   // produce the square-root matrix
+   TMatrixD* sqrtMatS = produceSqrtMat( *covMatS );
+   TMatrixD* sqrtMatB = produceSqrtMat( *covMatB );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Double_t*  x;
+      TMatrixD* m;
+      if (itype == 0) { x = xS; m = sqrtMatS; cout << "- produce signal" << endl; }
+      else            { x = xB; m = sqrtMatB; cout << "- produce background" << endl; }
+
+      // event loop
+      TTree* tree = (itype==0) ? treeS : treeB;
+      for (Int_t i=0; i<N; i++) {
+
+         if (i%1000 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+         getGaussRnd( *v, *m, R );
+
+         for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar] = (*v)[ivar] + x[ivar];
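+         // copy into the branch buffers: var1/var2 as Double_t, var3/var4 as Float_t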
+         for (Int_t ivar=0; ivar<nvar; ivar++) {
+            if (ivar<2) xvarD[ivar] = xvar[ivar];
+            else        xvarF[ivar] = xvar[ivar];
+         }
+         
+         tree->Fill();
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+}
+
+// create data where the last two variables are rounded to integers (discrete inputs)
+void create_lin_Nvar_discrete()
+{
+   Int_t N = 10000;
+   const Int_t nvar = 4;
+   Float_t xvar[nvar];
+   Int_t   xvarI[2];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar-2; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+   }
+   for (Int_t ivar=0; ivar<2; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar+nvar-2+1 )).Data(), &xvarI[ivar], TString(Form( "var%i/I", ivar+nvar-2+1 )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar+nvar-2+1 )).Data(), &xvarI[ivar], TString(Form( "var%i/I", ivar+nvar-2+1 )).Data() );
+   }
+      
+   TRandom R( 100 );
+   Float_t xS[nvar] = {  0.2,  0.3,  1,  2 };
+   Float_t xB[nvar] = { -0.2, -0.3,  0,  0 };
+   Float_t dx[nvar] = {  1.0,  1.0, 1, 2 };
+   TArrayD* v = new TArrayD( nvar );
+   Float_t rho[20];
+   rho[1*2] = 0.4;
+   rho[1*3] = 0.6;
+   rho[1*4] = 0.9;
+   rho[2*3] = 0.7;
+   rho[2*4] = 0.8;
+   rho[3*4] = 0.93;
+   // no correlations
+   for (int i=0; i<20; i++) rho[i] = 0;
+
+   // create covariance matrix
+   TMatrixD* covMatS = new TMatrixD( nvar, nvar );
+   TMatrixD* covMatB = new TMatrixD( nvar, nvar );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      (*covMatS)(ivar,ivar) = dx[ivar]*dx[ivar];
+      (*covMatB)(ivar,ivar) = dx[ivar]*dx[ivar];
+      for (Int_t jvar=ivar+1; jvar<nvar; jvar++) {
+         (*covMatS)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatS)(jvar,ivar) = (*covMatS)(ivar,jvar);
+
+         (*covMatB)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatB)(jvar,ivar) = (*covMatB)(ivar,jvar);
+      }
+   }
+   cout << "signal covariance matrix: " << endl;
+   covMatS->Print();
+   cout << "background covariance matrix: " << endl;
+   covMatB->Print();
+
+   // produce the square-root matrix
+   TMatrixD* sqrtMatS = produceSqrtMat( *covMatS );
+   TMatrixD* sqrtMatB = produceSqrtMat( *covMatB );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Float_t*  x;
+      TMatrixD* m;
+      if (itype == 0) { x = xS; m = sqrtMatS; cout << "- produce signal" << endl; }
+      else            { x = xB; m = sqrtMatB; cout << "- produce background" << endl; }
+
+      // event loop
+      TTree* tree = (itype==0) ? treeS : treeB;
+      for (Int_t i=0; i<N; i++) {
+
+         if (i%1000 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+         getGaussRnd( *v, *m, R );
+
+         for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar] = (*v)[ivar] + x[ivar];
+
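+         // round the last two variables to the nearest integer to make them discrete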
+         xvarI[0] =  TMath::Nint(xvar[nvar-2]);
+         xvarI[1] =  TMath::Nint(xvar[nvar-1]);
+         
+         tree->Fill();
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+}
+
+// create the data
+void create_ManyVars()
+{
+   Int_t N = 20000;
+   const Int_t nvar = 20;
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+   }
+      
+   Float_t xS[nvar];
+   Float_t xB[nvar];
+   Float_t dx[nvar];
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      xS[ivar] = 0 + ivar*0.05;
+      xB[ivar] = 0 - ivar*0.05;
+      dx[ivar] = 1;
+   }
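+   // the default signal/background separation grows linearly with the
+   // variable index (means at +/-0.05*ivar with unit width); the first four
+   // variables are overridden with custom means right below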
+
+   xS[0] =   0.2;
+   xB[0] =  -0.2;
+   dx[0] =   1.0;
+   xS[1] =   0.3;
+   xB[1] =  -0.3;
+   dx[1] =   1.0;
+   xS[2] =   0.4;
+   xB[2] =  -0.4;
+   dx[2] =  1.0 ;
+   xS[3] =   0.8 ;
+   xB[3] =  -0.5 ;
+   dx[3] =   1.0 ;
+
+   TRandom R( 100 );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Float_t* x = (itype == 0) ? xS : xB; 
+
+      // event loop
+      TTree* tree = (itype == 0) ? treeS : treeB;
+      for (Int_t i=0; i<N; i++) {
+
+         if (i%1000 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+         for (Int_t ivar=0; ivar<nvar; ivar++) {
+            xvar[ivar] = x[ivar] + R.Gaus()*dx[ivar];
+         }
+         
+         tree->Fill();
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+}
+
+// create the data
+void create_lin_NvarObsolete()
+{
+   Int_t N = 20000;
+   const Int_t nvar = 20;
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+   }
+      
+   TRandom R( 100 );
+   Float_t xS[nvar] = {  0.5,  0.5,  0.0,  0.0,  0.0,  0.0 }; // remaining entries are zero-initialized
+   Float_t xB[nvar] = { -0.5, -0.5, -0.0, -0.0, -0.0, -0.0 };
+   Float_t dx[nvar] = {  1.0,  1.0,  1.0,  1.0,  1.0,  1.0 }; // note: dx[6..19] stay zero, so those variables carry no spread
+   TArrayD* v = new TArrayD( nvar );
+   const Int_t nrho = (nvar+1)*(nvar+1); // index (ivar+1)*(jvar+1) reaches nvar*(nvar+1), i.e. 380 for nvar=20
+   Float_t rho[nrho];
+   for (Int_t i=0; i<nrho; i++) rho[i] = 0;
+   rho[1*2] = 0.3;
+
+   // create covariance matrix
+   TMatrixD* covMatS = new TMatrixD( nvar, nvar );
+   TMatrixD* covMatB = new TMatrixD( nvar, nvar );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      (*covMatS)(ivar,ivar) = dx[ivar]*dx[ivar];
+      (*covMatB)(ivar,ivar) = dx[ivar]*dx[ivar];
+      for (Int_t jvar=ivar+1; jvar<nvar; jvar++) {
+         (*covMatS)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatS)(jvar,ivar) = (*covMatS)(ivar,jvar);
+
+         (*covMatB)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatB)(jvar,ivar) = (*covMatB)(ivar,jvar);
+      }
+   }
+   cout << "signal covariance matrix: " << endl;
+   covMatS->Print();
+   cout << "background covariance matrix: " << endl;
+   covMatB->Print();
+
+   // produce the square-root matrix
+   TMatrixD* sqrtMatS = produceSqrtMat( *covMatS );
+   TMatrixD* sqrtMatB = produceSqrtMat( *covMatB );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Float_t*  x;
+      TMatrixD* m;
+      if (itype == 0) { x = xS; m = sqrtMatS; cout << "- produce signal" << endl; }
+      else            { x = xB; m = sqrtMatB; cout << "- produce background" << endl; }
+
+      // event loop
+      TTree* tree = (itype==0) ? treeS : treeB;
+      for (Int_t i=0; i<N; i++) {
+
+         if (i%1000 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+         getGaussRnd( *v, *m, R );
+
+         for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar] = (*v)[ivar] + x[ivar];
+         
+         tree->Fill();
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+}
+
+// create the data
+void create_lin(Int_t N = 2000)
+{
+   const Int_t nvar = 2;
+   Double_t xvar[nvar];
+   Float_t weight;
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/D", ivar )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/D", ivar )).Data() );
+   }
+   treeS->Branch( "weight", &weight, "weight/F" );
+   treeB->Branch( "weight", &weight, "weight/F" );
+      
+   TRandom R( 100 );
+   Float_t xS[nvar] = {  0.0,  0.0 };
+   Float_t xB[nvar] = { -0.0, -0.0 };
+   Float_t dx[nvar] = {  1.0,  1.0 };
+   TArrayD* v = new TArrayD( 2 );
+   Float_t rhoS =  0.21;
+   Float_t rhoB =  0.0;
+
+   // create covariance matrix
+   TMatrixD* covMatS = new TMatrixD( nvar, nvar );
+   TMatrixD* covMatB = new TMatrixD( nvar, nvar );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      (*covMatS)(ivar,ivar) = dx[ivar]*dx[ivar];
+      (*covMatB)(ivar,ivar) = dx[ivar]*dx[ivar];
+      for (Int_t jvar=ivar+1; jvar<nvar; jvar++) {
+         (*covMatS)(ivar,jvar) = rhoS*dx[ivar]*dx[jvar];
+         (*covMatS)(jvar,ivar) = (*covMatS)(ivar,jvar);
+
+         (*covMatB)(ivar,jvar) = rhoB*dx[ivar]*dx[jvar];
+         (*covMatB)(jvar,ivar) = (*covMatB)(ivar,jvar);
+      }
+   }
+   cout << "signal covariance matrix: " << endl;
+   covMatS->Print();
+   cout << "background covariance matrix: " << endl;
+   covMatB->Print();
+
+   // produce the square-root matrix
+   TMatrixD* sqrtMatS = produceSqrtMat( *covMatS );
+   TMatrixD* sqrtMatB = produceSqrtMat( *covMatB );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Float_t*  x;
+      TMatrixD* m;
+      if (itype == 0) { x = xS; m = sqrtMatS; cout << "- produce signal" << endl; }
+      else            { x = xB; m = sqrtMatB; cout << "- produce background" << endl; }
+
+      // event loop
+      TTree* tree = (itype==0) ? treeS : treeB;
+      for (Int_t i=0; i<N; i++) {
+
+         if (i%1000 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+         getGaussRnd( *v, *m, R );
+         for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar] = (*v)[ivar] + x[ivar];
+
+         // add weights
+         if (itype == 0) weight = 1.0; // this is the signal weight
+         else            weight = 2.0; // this is the background weight
+         
+         tree->Fill();
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+}
+
+void create_fullcirc(Int_t nmax = 20000, Bool_t distort = false)
+{
+   TFile* dataFile = TFile::Open( "circledata.root", "RECREATE" );
+   int nvar = 2;
+   int nsig = 0, nbgd=0;
+   Float_t weight=1;
+   Float_t xvar[100];
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar)).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar)).Data() );
+   }
+   treeS->Branch("weight", &weight, "weight/F");
+   treeB->Branch("weight", &weight, "weight/F");
+
+   TRandom R( 100 );
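+   // accept/reject sampling: points are drawn uniformly in [-1,1]^2 and
+   // classified via r2 = x0^2 + x1^2; r2 < 0.3 is always signal, the band
+   // 0.3 < r2 < 0.5 is signal with probability 1-r2, and everything else is
+   // background, which yields a circular boundary with a fuzzy edge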
+   do {
+      for (Int_t ivar=0; ivar<nvar; ivar++) { xvar[ivar] = 2.*R.Rndm() - 1.; }
+      Float_t xout = xvar[0]*xvar[0] + xvar[1]*xvar[1];
+      if (nsig < 10) cout << "xout = " << xout << endl;
+      if (xout < 0.3 || (xout < 0.5 && R.Rndm() > xout)) {
+         if (distort && xvar[0] < 0 && R.Rndm() > 0.1) continue;
+         if (nsig < nmax) { treeS->Fill(); nsig++; }
+      }
+      else {
+         if (distort && xvar[0] > 0 && R.Rndm() > 0.1) continue;
+         if (nbgd < nmax) { treeB->Fill(); nbgd++; }
+      }
+   } while ( nsig < nmax || nbgd < nmax );
+
+   dataFile->Write();
+   dataFile->Close();
+
+}
+
+// create the data
+void create_circ(Int_t N = 6000, Bool_t distort = false)
+{
+   Int_t Nn = 0; // number of uniformly distributed "noise" events added to each tree below
+   const Int_t nvar = 2;
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+   }
+//    TTree *treeB  = treeS->CloneTree();
+//    for (Int_t ivar=0; ivar<nvar; ivar++) {
+//       treeS->SetBranchAddress( Form( "var%i", ivar ), &xvar[ivar] );
+//       treeB->SetBranchAddress( Form( "var%i", ivar ), &xvar[ivar] );
+//    }
+//    treeB->SetName ( "TreeB" );
+//    treeB->SetTitle( "TreeB" );
+      
+   TRandom R( 100 );
+   //Float_t phimin = -30, phimax = 130;
+   Float_t phimin = -70, phimax = 130;
+   Float_t phisig = 5;
+   Float_t rS = 1.1;
+   Float_t rB = 0.75;
+   Float_t rsig = 0.1;
+   Float_t fnmin = -(rS+4.0*rsig);
+   Float_t fnmax = +(rS+4.0*rsig);
+   Float_t dfn = fnmax-fnmin;
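+   // geometry: signal and background populate two concentric arcs of radii rS
+   // and rB over [phimin,phimax] degrees, smeared by Gaussians of width rsig
+   // (radius) and phisig (angle); with distort=true the signal phi values are
+   // drawn as max(r1,r2), biasing them toward phimax, while the background
+   // stays uniform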
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      // event loop
+      TTree* tree = (itype==0) ? treeS : treeB;
+      for (Int_t i=0; i<N; i++) {
+         Double_t r1=R.Rndm(), r2=R.Rndm(), r3;
+         if (itype==0) r3 = (r1>r2) ? r1 : r2;
+         else          r3 = r2;
+         Float_t phi;
+         if (distort) phi = r3*(phimax - phimin) + phimin;
+         else         phi = R.Rndm()*(phimax - phimin) + phimin;
+         phi += R.Gaus()*phisig;
+      
+         Float_t r = (itype==0) ? rS : rB;
+         r += R.Gaus()*rsig;
+
+         xvar[0] = r*cos(TMath::DegToRad()*phi);
+         xvar[1] = r*sin(TMath::DegToRad()*phi);
+         
+         tree->Fill();
+      }
+
+      for (Int_t i=0; i<Nn; i++) {
+
+         xvar[0] = dfn*R.Rndm()+fnmin;
+         xvar[1] = dfn*R.Rndm()+fnmin;
+         
+         tree->Fill();
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+}
+
+
+void create_schachbrett(Int_t nEvents = 20000) {
+
+   const Int_t nvar = 2;
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+   }
+
+   Int_t   nSeed   = 12345;
+   TRandom *m_rand = new TRandom(nSeed);
+   Double_t sigma=0.3;
+   Double_t meanX;
+   Double_t meanY;
+   Int_t xtype=1, ytype=1;
+   Int_t iev=0;
+   Int_t m_nDim = 2; // actually the boundary; there is a "bump" at every
+                     // integer value in the interval [-m_nDim,m_nDim]
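+   // parity trick: xtype and ytype flip sign at every grid step, so
+   // type = xtype*ytype alternates between +1 (signal) and -1 (background)
+   // on neighbouring cells, producing the checkerboard pattern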
+   while (iev < nEvents){
+      xtype=1;
+      for (Int_t i=-m_nDim; i <=  m_nDim; i++){
+         ytype  =  1;
+         for (Int_t j=-m_nDim; j <=  m_nDim; j++){
+            meanX=Double_t(i);
+            meanY=Double_t(j);
+            xvar[0]=m_rand->Gaus(meanY,sigma);
+            xvar[1]=m_rand->Gaus(meanX,sigma);
+            Int_t type   = xtype*ytype;
+            TTree* tree = (type==1) ? treeS : treeB;
+            tree->Fill();
+            iev++;
+            ytype *= -1;
+         }
+         xtype *= -1;
+      }
+   }
+
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+
+}
+
+
+void create_schachbrett_5D(Int_t nEvents = 200000) {
+   const Int_t nvar = 5;
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+   }
+
+   Int_t   nSeed   = 12345;
+   TRandom *m_rand = new TRandom(nSeed);
+   Double_t sigma=0.3;
+   Int_t itype[nvar];
+   Int_t iev=0;
+   Int_t m_nDim = 2; // actually the boundary; there is a "bump" at every
+                     // integer value in the interval [-m_nDim,m_nDim]
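+   // same parity construction as in create_schachbrett, generalised to five
+   // nested grid loops: type is the product of the five per-axis signs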
+
+   int idx[nvar];
+   while (iev < nEvents){
+      itype[0]=1;
+      for (idx[0]=-m_nDim; idx[0] <=  m_nDim; idx[0]++){
+         itype[1]=1;
+         for (idx[1]=-m_nDim; idx[1] <=  m_nDim; idx[1]++){
+            itype[2]=1;
+            for (idx[2]=-m_nDim; idx[2] <=  m_nDim; idx[2]++){
+               itype[3]=1;
+               for (idx[3]=-m_nDim; idx[3] <=  m_nDim; idx[3]++){
+                  itype[4]=1;
+                  for (idx[4]=-m_nDim; idx[4] <=  m_nDim; idx[4]++){
+                     Int_t type   = itype[0]; 
+                     for (Int_t i=0;i<nvar;i++){
+                        xvar[i]=m_rand->Gaus(Double_t(idx[i]),sigma);
+                        if (i>0) type *= itype[i];
+                     }
+                     TTree* tree = (type==1) ? treeS : treeB;
+                     tree->Fill();
+                     iev++;
+                     itype[4] *= -1;
+                  }
+                  itype[3] *= -1;
+               }
+               itype[2] *= -1;
+            }
+            itype[1] *= -1;
+         }
+         itype[0] *= -1;
+      }
+   }
+            
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+
+}
+
+
+void create_schachbrett_4D(Int_t nEvents = 200000) {
+
+   const Int_t nvar = 4;
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+   }
+
+   Int_t   nSeed   = 12345;
+   TRandom *m_rand = new TRandom(nSeed);
+   Double_t sigma=0.3;
+   Int_t itype[nvar];
+   Int_t iev=0;
+   Int_t m_nDim = 2; // actually the boundary; there is a "bump" at every
+                     // integer value in the interval [-m_nDim,m_nDim]
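+   // four-dimensional version of the checkerboard: type is the product of
+   // the four per-axis signs flipped in the nested loops below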
+
+   int idx[nvar];
+   while (iev < nEvents){
+      itype[0]=1;
+      for (idx[0]=-m_nDim; idx[0] <=  m_nDim; idx[0]++){
+         itype[1]=1;
+         for (idx[1]=-m_nDim; idx[1] <=  m_nDim; idx[1]++){
+            itype[2]=1;
+            for (idx[2]=-m_nDim; idx[2] <=  m_nDim; idx[2]++){
+               itype[3]=1;
+               for (idx[3]=-m_nDim; idx[3] <=  m_nDim; idx[3]++){
+                  Int_t type   = itype[0]; 
+                  for (Int_t i=0;i<nvar;i++){
+                     xvar[i]=m_rand->Gaus(Double_t(idx[i]),sigma);
+                     if (i>0) type *= itype[i];
+                  }
+                  TTree* tree = (type==1) ? treeS : treeB;
+                  tree->Fill();
+                  iev++;
+                  itype[3] *= -1;
+               }
+               itype[2] *= -1;
+            }
+            itype[1] *= -1;
+         }
+         itype[0] *= -1;
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+
+}
+
+
+void create_schachbrett_3D(Int_t nEvents = 20000) {
+
+   const Int_t nvar = 3;
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+   }
+
+   Int_t   nSeed   = 12345;
+   TRandom *m_rand = new TRandom(nSeed);
+   Double_t sigma=0.3;
+   Int_t itype[nvar];
+   Int_t iev=0;
+   Int_t m_nDim = 2; // actually the boundary; there is a "bump" at every
+                     // integer value in the interval [-m_nDim,m_nDim]
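+   // three-dimensional version of the checkerboard: type is the product of
+   // the three per-axis signs flipped in the nested loops below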
+
+   int idx[nvar];
+   while (iev < nEvents){
+      itype[0]=1;
+      for (idx[0]=-m_nDim; idx[0] <=  m_nDim; idx[0]++){
+         itype[1]=1;
+         for (idx[1]=-m_nDim; idx[1] <=  m_nDim; idx[1]++){
+            itype[2]=1;
+            for (idx[2]=-m_nDim; idx[2] <=  m_nDim; idx[2]++){
+               Int_t type   = itype[0]; 
+               for (Int_t i=0;i<nvar;i++){
+                  xvar[i]=m_rand->Gaus(Double_t(idx[i]),sigma);
+                  if (i>0) type *= itype[i];
+               }
+               TTree* tree = (type==1) ? treeS : treeB;
+               tree->Fill();
+               iev++;
+               itype[2] *= -1;
+            }
+            itype[1] *= -1;
+         }
+         itype[0] *= -1;
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+
+}
+
+
+void create_schachbrett_2D(Int_t nEvents = 100000, Int_t nbumps=2) {
+
+   const Int_t nvar = 2;
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+   }
+
+   Int_t   nSeed   = 345;
+   TRandom *m_rand = new TRandom(nSeed);
+   Double_t sigma=0.35;
+   Int_t itype[nvar];
+   Int_t iev=0;
+   Int_t m_nDim = nbumps; // actually the boundary; there is a "bump" at every
+                          // integer value in the interval [-m_nDim,m_nDim]
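+   // two-dimensional checkerboard with configurable extent: nbumps sets the
+   // half-width of the grid, i.e. (2*nbumps+1)^2 Gaussian bumps in total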
+
+   int idx[nvar];
+   while (iev < nEvents){
+      itype[0]=1;
+      for (idx[0]=-m_nDim; idx[0] <=  m_nDim; idx[0]++){
+         itype[1]=1;
+         for (idx[1]=-m_nDim; idx[1] <=  m_nDim; idx[1]++){
+            Int_t type   = itype[0]; 
+            for (Int_t i=0;i<nvar;i++){
+               xvar[i]=m_rand->Gaus(Double_t(idx[i]),sigma);
+               if (i>0) type *= itype[i];
+            }
+            TTree* tree = (type==1) ? treeS : treeB;
+            tree->Fill();
+            iev++;
+            itype[1] *= -1;
+         }
+         itype[0] *= -1;
+      }
+   }
+   
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot();
+
+}
+
+
+
+void create_3Bumps(Int_t nEvents = 5000) {
+   // signal is clustered around (-1,0) and (1,0), with twice the statistics
+   // at (1,0); background is clustered around (0,0)
+
+   const Int_t nvar = 2;
+   Float_t xvar[nvar];
+
+   // output file
+   TString filename = "data_3Bumps.root";
+   TFile* dataFile = TFile::Open( filename, "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar )).Data() );
+   }
+
+   Int_t   nSeed   = 12345;
+   TRandom *m_rand = new TRandom(nSeed);
+   Double_t sigma=0.2;
+   Int_t type;
+   Int_t iev=0;
+   Double_t Centers[nvar][6] = {{-1,0,0,0,1,1},{0,0,0,0,0,0}};
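+   // the six columns of Centers define six Gaussian bumps along var0: columns
+   // 1-3 sit at (0,0) and are background, column 0 at (-1,0) and columns 4-5
+   // at (1,0) are signal, giving the (1,0) bump twice the statistics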
+
+
+   while (iev < nEvents){
+      for (int idx=0; idx<6; idx++){
+         if (idx==1 || idx==2 || idx==3) type = 0;
+         else type=1;
+         for (Int_t ivar=0;ivar<nvar;ivar++){
+            xvar[ivar]=m_rand->Gaus(Centers[ivar][idx],sigma);
+         }
+         TTree* tree = (type==1) ? treeS : treeB;
+         tree->Fill();
+         iev++;
+      }
+   }
+   
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+
+   plot(filename);
+
+}
+
+void createOnionData(Int_t nmax = 50000){
+   // output file
+   TFile* dataFile = TFile::Open( "oniondata.root", "RECREATE" );
+   int nvar = 4;
+   int nsig = 0, nbgd=0;
+   Float_t xvar[100];
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeS->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+      treeB->Branch( TString(Form( "var%i", ivar+1 )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar+1 )).Data() );
+   }
+   
+   TRandom R( 100 );
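+   // "onion" structure: xout = sin(2*pi*(x0*x1*x2*x3 + x0*x1)) lies in [-1,1]
+   // (acos(-1) = pi) and is mapped to an integer slice i in [0,9]; even
+   // slices are filled as signal, odd ones as background, producing nested
+   // signal/background shells inside the unit hypercube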
+   do {
+      for (Int_t ivar=0; ivar<nvar; ivar++) { xvar[ivar]=R.Rndm();}
+      Float_t xout = sin(2.*acos(-1.)*(xvar[0]*xvar[1]*xvar[2]*xvar[3]+xvar[0]*xvar[1]));
+      if (nsig<100) cout << "xout = " << xout<<endl;
+      Int_t i = (Int_t) ((1.+xout)*4.99);
+      if (i%2 == 0 && nsig < nmax) {
+         treeS->Fill();
+         nsig++;
+      }
+      if (i%2 != 0 && nbgd < nmax) {
+         treeB->Fill();
+         nbgd++;
+      }
+   } while ( nsig < nmax || nbgd < nmax);
+
+   dataFile->Write();
+   dataFile->Close();
+}
+
+void create_multiclassdata(Int_t nmax = 20000)
+{
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+   int ncls = 3;
+   int nvar = 4;
+   int ndat = 0;
+   Int_t cls;
+   Float_t thecls;
+   Float_t weight=1;
+   Float_t xcls[100];
+   Float_t xmean[3][4] = {
+      { 0.   ,  0.3,  0.5, 0.9 }, 
+      { -0.2 , -0.3,  0.5, 0.4 }, 
+      { 0.2  ,  0.1, -0.1, 0.7 }} ;
+
+   Float_t xvar[100];
+   // create tree using class flag stored in int variable cls
+   TTree* treeR = new TTree( "TreeR", "TreeR", 1 );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      treeR->Branch( TString(Form( "var%i", ivar )).Data(), &xvar[ivar], TString(Form( "var%i/F", ivar)).Data() );
+   }
+   for (Int_t icls=0; icls<ncls; icls++) {
+      treeR->Branch(TString(Form( "cls%i", icls )).Data(), &xcls[icls], TString(Form( "cls%i/F", icls)).Data() );
+   }
+
+   treeR->Branch("cls", &thecls, "cls/F");
+   treeR->Branch("weight", &weight, "weight/F");
+   
+   TRandom R( 100 );
+   do {
+      for (Int_t icls=0; icls<ncls; icls++) xcls[icls]=0.;
+      cls = R.Integer(ncls);
+      thecls = cls;
+      xcls[cls]=1.;
+      for (Int_t ivar=0; ivar<nvar; ivar++) { 
+         xvar[ivar]=R.Gaus(xmean[cls][ivar],1.);
+      }
+      
+      if (ndat<30) cout << "cls=" << cls <<" xvar = " << xvar[0]<<" " <<xvar[1]<<" " << xvar[2]<<" " <<xvar[3]<<endl;
+      
+      treeR->Fill();
+      ndat++;
+   } while ( ndat < nmax );
+
+   dataFile->Write();
+   dataFile->Close();
+
+}
+
+
+// create the data
+void create_array_with_different_lengths(Int_t N = 100)
+{
+   const Int_t nvar = 4;
+   Int_t nvarCurrent = 4;
+   Float_t xvar[nvar];
+
+   // output file
+   TFile* dataFile = TFile::Open( "data.root", "RECREATE" );
+
+   // create signal and background trees
+   TTree* treeS = new TTree( "TreeS", "TreeS", 1 );   
+   TTree* treeB = new TTree( "TreeB", "TreeB", 1 );   
+   treeS->Branch( "arrSize", &nvarCurrent, "arrSize/I" );
+   treeS->Branch( "arr", xvar, "arr[arrSize]/F" );
+   treeB->Branch( "arrSize", &nvarCurrent, "arrSize/I" );
+   treeB->Branch( "arr", xvar, "arr[arrSize]/F" );
+      
+   TRandom R( 100 );
+   Float_t xS[nvar] = {  0.2,  0.3,  0.5,  0.9 };
+   Float_t xB[nvar] = { -0.2, -0.3, -0.5, -0.6 };
+   Float_t dx[nvar] = {  1.0,  1.0, 1.0, 1.0 };
+   TArrayD* v = new TArrayD( nvar );
+   Float_t rho[20];
+   for (Int_t i=0; i<20; i++) rho[i] = 0;
+   rho[1*2] = 0.4;
+   rho[1*3] = 0.6;
+   rho[1*4] = 0.9;
+   rho[2*3] = 0.7;
+   rho[2*4] = 0.8;
+   rho[3*4] = 0.93;
+
+   // create covariance matrix
+   TMatrixD* covMatS = new TMatrixD( nvar, nvar );
+   TMatrixD* covMatB = new TMatrixD( nvar, nvar );
+   for (Int_t ivar=0; ivar<nvar; ivar++) {
+      (*covMatS)(ivar,ivar) = dx[ivar]*dx[ivar];
+      (*covMatB)(ivar,ivar) = dx[ivar]*dx[ivar];
+      for (Int_t jvar=ivar+1; jvar<nvar; jvar++) {
+         (*covMatS)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatS)(jvar,ivar) = (*covMatS)(ivar,jvar);
+
+         (*covMatB)(ivar,jvar) = rho[(ivar+1)*(jvar+1)]*dx[ivar]*dx[jvar];
+         (*covMatB)(jvar,ivar) = (*covMatB)(ivar,jvar);
+      }
+   }
+   cout << "signal covariance matrix: " << endl;
+   covMatS->Print();
+   cout << "background covariance matrix: " << endl;
+   covMatB->Print();
+
+   // produce the square-root matrix
+   TMatrixD* sqrtMatS = produceSqrtMat( *covMatS );
+   TMatrixD* sqrtMatB = produceSqrtMat( *covMatB );
+
+   // loop over species
+   for (Int_t itype=0; itype<2; itype++) {
+
+      Float_t*  x;
+      TMatrixD* m;
+      if (itype == 0) { x = xS; m = sqrtMatS; cout << "- produce signal" << endl; }
+      else            { x = xB; m = sqrtMatB; cout << "- produce background" << endl; }
+
+      // event loop
+      TTree* tree = (itype==0) ? treeS : treeB;
+      for (Int_t i=0; i<N; i++) {
+
+         if (i%1000 == 0) cout << "... event: " << i << " (" << N << ")" << endl;
+         getGaussRnd( *v, *m, R );
+
+         for (Int_t ivar=0; ivar<nvar; ivar++) xvar[ivar] = (*v)[ivar] + x[ivar];
+
+         // store between 1 and 4 array elements for this event
+         nvarCurrent = (i%4)+1;
+
+         tree->Fill();
+      }
+   }
+
+   // write trees
+   treeS->Write();
+   treeB->Write();
+
+   treeS->Show(0);
+   treeB->Show(1);
+
+   dataFile->Close();
+   cout << "created data file: " << dataFile->GetName() << endl;
+}
+
diff --git a/tmva/test/efficiencies.C b/tmva/test/efficiencies.C
index 851ed62f02924e67647e03924c2b4b983138556a..95128c54402669d942372be74e0ac34f2706ffb0 100644
--- a/tmva/test/efficiencies.C
+++ b/tmva/test/efficiencies.C
@@ -19,7 +19,7 @@ void plot_efficiencies( TFile* file, Int_t type = 2, TDirectory* BinDir)
    if (type == 2) {
       Float_t z = y1;
       y1 = 1 - y2;
-      y2 = 1 - z;    
+      y2 = 1 - z;
       //      cout << "--- type==2: plot background rejection versus signal efficiency" << endl;
    }
    else {
@@ -40,12 +40,12 @@ void plot_efficiencies( TFile* file, Int_t type = 2, TDirectory* BinDir)
       y0H = 1 - y0H + dyH + 0.07;
    }
    TLegend *legend = new TLegend( x0L, y0H-dyH, x0L+dxL, y0H );
-   legend->SetTextSize( 0.05 );
+   //legend->SetTextSize( 0.05 );
    legend->SetHeader( "MVA Method:" );
    legend->SetMargin( 0.4 );
 
    TString xtit = "Signal efficiency";
-   TString ytit = "Background efficiency";  
+   TString ytit = "Background efficiency";
    if (type == 2) ytit = "Background rejection";
    TString ftit = ytit + " versus " + xtit;
 
@@ -61,7 +61,7 @@ void plot_efficiencies( TFile* file, Int_t type = 2, TDirectory* BinDir)
    frame->GetYaxis()->SetTitle( ytit );
    TMVAGlob::SetFrameStyle( frame, 1.0 );
 
-   frame->Draw();  
+   frame->Draw();
 
    Int_t color = 1;
    Int_t nmva  = 0;
@@ -90,12 +90,12 @@ void plot_efficiencies( TFile* file, Int_t type = 2, TDirectory* BinDir)
          TMVAGlob::GetMethodTitle(methodTitle,titDir);
          TIter nextKey( titDir->GetListOfKeys() );
          while ((hkey = TMVAGlob::NextKey(nextKey,"TH1"))) {
-            TH1 *h = (TH1*)hkey->ReadObj();    
+            TH1 *h = (TH1*)hkey->ReadObj();
             TString hname = h->GetName();
             if (hname.Contains( hNameRef ) && hname.BeginsWith( "MVA_" )) {
                h->SetLineWidth(3);
                h->SetLineColor(color);
-               color++; if (color == 5 || color == 10 || color == 11) color++; 
+               color++; if (color == 5 || color == 10 || color == 11) color++;
                h->Draw("csame");
                hists.Add(h);
                nmva++;
@@ -122,8 +122,8 @@ void plot_efficiencies( TFile* file, Int_t type = 2, TDirectory* BinDir)
       }
       legend->AddEntry(histWithLargestInt,TString(histWithLargestInt->GetTitle()).ReplaceAll("MVA_",""),"l");
       hists.Remove(histWithLargestInt);
-   }   
-   
+   }
+
    // rescale legend box size
    // current box size has been tuned for 3 MVAs + 1 title
    if (type == 1) {
@@ -131,12 +131,12 @@ void plot_efficiencies( TFile* file, Int_t type = 2, TDirectory* BinDir)
       legend->SetY1( y0H - dyH );
    }
    else {
-      dyH *= (Float_t(nmva - 3.0)/4.0);
+      dyH *= (Float_t(TMath::Min(10,nmva) - 3.0)/4.0);
       legend->SetY2( y0H + dyH);
    }
 
    // redraw axes
-   frame->Draw("sameaxis");  
+   frame->Draw("sameaxis");
    legend->Draw("same");
 
    // ============================================================
@@ -162,12 +162,12 @@ void efficiencies( TString fin = "TMVA.root", Int_t type = 2, Bool_t useTMVAStyl
 {
    // argument: type = 1 --> plot efficiency(B) versus eff(S)
    //           type = 2 --> plot rejection (B) versus efficiency (S)
-  
+
    // set style and remove existing canvas'
    TMVAGlob::Initialize( useTMVAStyle );
 
    // checks if file with name "fin" is already open, and if not opens one
-   TFile* file = TMVAGlob::OpenFile( fin );  
+   TFile* file = TMVAGlob::OpenFile( fin );
 
    // check if multi-cut MVA or only one set of MVAs
    Bool_t multiMVA=kFALSE;
@@ -177,10 +177,10 @@ void efficiencies( TString fin = "TMVA.root", Int_t type = 2, Bool_t useTMVAStyl
    // one contains the key word 'multicutMVA'
    while ((key = (TKey*)nextDir())) {
       TClass *cl = gROOT->GetClass(key->GetClassName());
-      if (!cl->InheritsFrom("TDirectory")) continue;    
-      TDirectory *d = (TDirectory*)key->ReadObj();    
+      if (!cl->InheritsFrom("TDirectory")) continue;
+      TDirectory *d = (TDirectory*)key->ReadObj();
       TString path(d->GetPath());
-      if (path.Contains("multicutMVA")){         
+      if (path.Contains("multicutMVA")){
          multiMVA=kTRUE;
          plot_efficiencies( file, type, d );
       }