From 7b3a903cf11a62523640dac2df89139e57c2056c Mon Sep 17 00:00:00 2001
From: Olivier Couet <olivier.couet@cern.ch>
Date: Fri, 16 Dec 2016 09:53:34 +0100
Subject: [PATCH] Doxygen format: put classes in group TMVA, remove trailing
 spaces and TABs, spell check, write math formulas in LaTeX, add image

---
 documentation/doxygen/images/tmva_mlp.png | Bin 0 -> 5606 bytes
 tmva/tmva/inc/TMVA/MethodCFMlpANN.h       |  28 +--
 tmva/tmva/src/MethodBayesClassifier.cxx   |  36 ++--
 tmva/tmva/src/MethodBoost.cxx             | 242 +++++++++++-----------
 tmva/tmva/src/MethodCFMlpANN.cxx          | 173 ++++++++--------
 tmva/tmva/src/MethodCFMlpANN_Utils.cxx    | 215 +++++++++++--------
 tmva/tmva/src/MethodCategory.cxx          |  46 ++--
 7 files changed, 401 insertions(+), 339 deletions(-)
 create mode 100644 documentation/doxygen/images/tmva_mlp.png
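
As a quick reference, every class touched by this patch gets the same documentation
treatment: the old underscore-banner comment block at the top of the implementation
file is replaced by a Doxygen block that names the class and puts it into the TMVA
group, and member functions get /// banner comments directly above their definitions.
A sketch of the pattern (the class name here is only illustrative):

    // before: plain banner comment, not picked up by Doxygen
    //_______________________________________________________________________
    //
    // ... description of the method ...
    //_______________________________________________________________________

    // after: Doxygen class documentation, grouped under TMVA
    /*! \class TMVA::MethodExample
    \ingroup TMVA

    Description of the method. HTML tags, \image commands and LaTeX formulas
    can be used here, since Doxygen parses this block.

    */

    ////////////////////////////////////////////////////////////////////////////////
    /// per-function documentation uses this banner style, directly above the definition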

diff --git a/documentation/doxygen/images/tmva_mlp.png b/documentation/doxygen/images/tmva_mlp.png
new file mode 100644
index 0000000000000000000000000000000000000000..e260a7f5e0242571497e043834ca6a1e024fa206
GIT binary patch
literal 5606
zcmeAS@N?(olHy`uVBq!ia0y~yU{YgXV3g!wW?*2Laz$<;0|Ns~x}&cn1H;CC?mvmF
z3=9m6#X;^)4C~IxykuZtP)PO&@?~JCQe$9fXklRZ#lXPO@PdJ%)PRBERRRNp)eHs(
z@q#(K0&N%=7!m?}LR=ZZKvPrm?%lh)A3XN;_5J_<zp?R59xiTH78Yh^W^PU{b~biy
zE-p?E4i0v9b~ZLH4o(J!dM=I_PWDtTj&N=+7jDi#Hda<njzk_VYj(C=4)#oLPH#?j
z4tBP}6`N|GgB)2B<QL4~@a#q!h?C^)?!uOswxgDTfq^sM&8?V$fdS-a4u;Jc@(VzM
z1s;(gK@SE71`h_eli7A4!LkC!bOr_{e+C9d6^5S7`5X)kBK)2%jv*Dd-p=iAn!Qb^
zO)@=F{qD0(x6LwdUH<m>{--s)jTdX(q#9X1rzbPJF;B8{6K|1d>t#9Tly#ho{cGJ-
z3!BqjDLvNWDwmno<WF2|#a*#hPVME5Yh8u+n@XZWJWM|5EZmwkLm(+y^VnVP_!|bX
zd8e;$=`iBI!2epx+A^K_*}?;VoYE)R*j%{N!l<z=)b`lj(+603)+hdyVyou<^=SFc
ztWw)4#T9#ISp4adRo}L5VwK^$xMk*?H4A00pFa6(ZmG{<_apsp!}8}IzU;5q;`vsh
zTwhGBVXvM~w&je5nXLP`#SR=*si>%1=zP>g<JjHqW=^gB?8^UtwEHAJx@}zbW0TYJ
zqMqN=`6qjL+Qx1>?S5_R)>#iktqvXzUpw35$d3BTy_{#($%NieTc7>J@WSWgmP>k1
zKDsx3{lQ}@H9y`=J12Ael%d^{OD8uJJ9WzM`zf(~#WP`F!%WAODmQoDUvTK5bpJ-L
zr9mr0w5EcnvI|d5*zf5sbH9+f{L1ZrJGwj@MTA8itrLrG+TH%POQCPUX+80qQ<85#
zyKQIqHhOwwMdjiY7xr5Z7Fd)?yKkwFJt6&4ZRIH*b&0^Q+9t6PvpvODFRzi)?DA|p
zVt7;Su5q7Iz2h8@OP$+8SAFhZJVT^oYQ;;()3a4~C0$zg%<))k(xO{lubFwYLIb)^
z{+#k~TVj=(pP^;%rd@IgY)=C&?Yl9-hVyg94lBLio;wYUREkSHXMMgA6i`zn^wjpQ
z-JL0?xc+dc^Bi0rb?>b0U6rYuyLT2<FPx#JzV405%%D84eHjjSr1+{QYX%>6V9Z&6
zMXYOOrnlwB)TJB?ws>1lF}?9A@&AXG&@-J&ER~l%Q(ZVCqU(oHlXGOVuKcxc%hf&o
zRL&kvS~VkL)7+Enfj)0?rXE{zAkJi2SEs76<D4yviyj?6a`LK^AIsaL$*X3};Ib5c
zuuFLTw4A9c%h!b|9KQ3Sf8})Hqensx%w*WPGx_A2&*ye(Ua^T>d3vLHpM=QK7G-<2
z4~p`_ET^10H<+K?Y~IHqBCp!z8L)6wr?~Kftb6x^JuGf^{q@h7nUKHd@7`pAe{$|D
zT}g)mH_Q!gJ9)?awvyUn(cq&hx^ik$zNbA1YH#6mF04*gGAUE*;`H|Mxim+jJnGir
zBf`N)Rn{iR=d9qGedI{Y?QM=WZk~qqSEunguUuEPeBt5U)ywZ@AD>{|q0s1QsD5Zh
z#Is`|C!Kp%CM9X~cHU+@Z{_ebx?ZmOq!yRzvaU|W^E<Y(s9vky+{R+A;Kg|~PG)ie
z|7<=Lj?<H6jx`9s=Megm*Y&06|Bm>*bA%=f*E33JSDd-#X93chU$ns5`ksTDTFbgA
z8S9E%4aFIbzOd{(cE$XI@g$LdyvHSd<tpA>ep6gtKQFlP#~i;qGSQjEbv|zFlM{UA
ziaijJm@R(0pu{I=)_spHcZ;pQu=$uYG;^xQPP_OpdnwPm^>;t+sTJ8Pe)XS8_^*I8
z$sU87N}G=CF>PYe6-`RodoTOrieu7y82;+&HyJy9`zp0O#wX#Q$-K)eMGCX-MJ@bu
zdz0b8_3w-fB=^jCAn-uy1^YyYq6ts-WUY<c;$GzWutg<j;c{;|wa>2~_AE-jd!=)e
zbY0nU&p#{o*Yy6J@<H2VW6Q>>?v5D;9^6ftl@tGS#*Nw^S0Byo?~FZEaOTG>tJ6%Y
z^O&Bmoz26i{?*{*npsm$NnL9{78R*4{dHfh$kr0J)fX%Sz0)P;C^9=AUA>q=SY%3O
z>ns!5r)Ar`3<{4e3@K8pmQ?UdTsY&%(~7fhh9|cyz8t<r%d2bAL=EXIt=t(iG*moN
zXXG4couHpMJJ4&xsi4J?{~vbTjo|t=o2Bd8gWO9O)<0R2z3}+94~tgHG47o9;lw8K
zWi2y>gl4=s?<pdD>rHlw=Wn^by@e@vE=V%X(D~jbKUb0Y75m0Kd42P}X+CHACMRY}
z=`EWeyFvF$AE)-}A2W^=PtDH%GR^kiLhXeg)v~8W-Ch6eZSoyf>-k#m+O}3@vKzeF
zA@=Rrk{!#om9u129eDWVIh(q250Cezs=F7rrG}@oCv*B3SNPxFR{K0he`<1&A=@L-
z$qyusKM|?jG5zjI=LoLpJCE0WNM>45!(<_>7TPYrF?+LNqR++lcl^IMUpkWbA?eY6
z>k2-fvVNy1<ubL0ntlZ?9;K5LPv*3A-k%-uaL%ri1vNfeM;BhoeQ5Z;P%qz`{Vq!!
zm+W$jSx$#HE#JR%^DOoPoj1On9_h)i=jPPin{i$sd{4b=hJW7VfW*!(p5dLYZw=M%
zNOA_Kw5Ff&e;;D5$}#C+%Kr)%kpsT!>k>TFEVT-h?n;#T{cKKhnjTQJrN{F@kEn3l
z5`)ZbHzXxf`;vr$UfRl@TgOwp<X5)f>@H6Cpcw&Ovb!v!I-NI)Kk+qu=%Hij(mhvq
z-u4UA_?(MQUQGF_a%i^S#p4qaicTa3?Kb>l5r4Er$J6JC#mdNMJu|<ToYOt|MBtIj
zh3P%ts^0JwmM9!NsX9wW_nO~?J0I1oT;&5d&G0A)IzO`_&1<3m#qzw9YV9oQQ?=Em
zRlX9bRi9o^v?b(Ch5A(`KK}qOAJ62VQ=JbqCSTmHSl_fS*n7?9c>*ctkMEQ?|6B0+
zYKy5$3?)w)Bw9}X!E(s&?^Jp5t2!UKm!Gt&Ze0@a>EG6t$z8VDk3QN=)e<@En~-!f
z)az6Tv+#}mFC?3_SIJymapn2Gw~MZsuAd>YTGi)?3EQ;LnX7_cJV{iVF+bCX>Eksy
z;r&@#O1^zpTkZ5lQ%%P-_=U%}OUl7rogJ^1cx&HN>ABasackhgPY-k2FTA@qwP40A
z&cnxCHYGW2EM@Uw5}yAgX5mVMwJJWn-#o57OP#j$PVb-hQI4NiOKf(t`^R{1gWoEq
zV^^j;IOlRZ^wb2CsQa3+R=&*deHI+6@%h%`bL`FJiFa*U9i=^0Zf-nwbjkH_37yRc
zBBd`b$!jiuWuE+R2P3;dx$&ip_cy|V4p#553S<&C2~H82oO*5DH^Y?|B*J_?1W5>Q
zz94yH2FLcy?YT;uH@p^}T)E1=q9piizCd}pP%Dd?T)xwq**=r?yHnrpUbuYu@&%Ub
z^cgw6eYrRHq~^OvXF?u)_vJfgGvVByAB|4OXU>{*<C3GlrDs7=ddj5xGRDypEd{G<
zv$Jk_?e}T@aAnfRGbuZrBa2PvY?LWpX8Za0<sU**<X&v$bP{U}dUj;qnzNQKCTkv-
z+x(BqusHq5lfZvrYkyTPv+uQCK3}kVxro}PZ+>hi&6EOyPIOLQoBC7A^n1ltA=mRP
zS3N(;i5_P;Wf-|h)aB2~oTnFyiq1@&8SEpWWI0*M@6PJ`hg4>6eD*s(?ODRY6w~7?
zpM2@(+_0^A?~xOiUH*!mGIY$nUgHz+?YDvBoD2SESeWkpf4DW%)cV?Pt*MbYK0&<?
zZG6sL)aUf!kjy^g*<{eD&Mh3KdVCQ}GLu1WmW*)8{rfMRjN=b&>Qi#aezyLj9oPTb
zKt`VriIZpaeE)9Zw*RAyd#FL<RFMid`K!gdJXTD<`)=;`s@S#nr%z<5nBU-Je%{p}
z($Q1g%9C{ur;V-Sw7s6|j14(nvP`}p{%()F0cVBz;(q%TOUyrhyTKgVe>c@2lHdMz
z)5<EP_LFN@?418&q5<dL7u<IZeeO^8z2d84AebRg9c8uQk7M5Ns*h5^Uqv5u++$g^
z+p>55gh}QlMN(GJ*53B|tG43c<bKXoQ@+H$*ikC{+uSClA=rH8lzodQw{~vS^6V^@
z-zYJ8D);WrJA!JjA3QHzal)qKfJRBrGr_e>i^W@o)661vU*EI5Sikbqn}yrf_fKB;
z%lB3PcO?<)i<2F4zdX?UYM^P5o^&Z$U&m|GuXc}qbI-YoOT4!09-lp-RK>?^!v0Vn
zxxH;#o)hN=Exta(t1nKO<GQaxL(h?&axM$wwtdJ5F1F`VD=BvI4ZW^(zD3%qVM3tK
zA1&!HzUJ>YH~-Q~<tv@7Q=y@j=$Wv|R`qDnEYo?<&fR-{V-lBq(80?e-#!X6S~}xH
zNY}{=g3=y(YI%#(-xue2HqH1F@+)G-fzKt^{S7%izD%7^oAjtuwX0L{p!gZ7qyCTi
zoos}+e7D@D;^`CSwIbjAmTwoQzo~(w`_U%}LbV6q{4JIdoqTxZq1UUb54ao+?zK!1
znS6L9m)hI##88{72VL$Lml%At{5T^kxpTjz$ukeBnJ43pPP0(kc*)K1W$2^UzA&#o
zjkU8^a0nf9iMBXCrAk=Tanp<y#+ECtfCP`a)Se7nI%5Tg&=D6!%XOf}?WgZ0W?Pou
z@=BT!aO+T)=lV-*8@m)VeYPaOy%3bYNXdgk_^ayiX-SKeJXnNVpS%e231P_;4Rf8q
zpms~lZXd{0t>Z3pPjW8V{x5_@H2Kg+)h^HVT6!g0PFOZ`83uZsQ_Ow2f<x=D%iNQm
zOJ=O#xa8#S8o;S`Yh#(zO7lR$kcQ4*lRkUw?d)2R<Fh5HNDU-7rMvUjq+n0OK*1%g
zeO(J|e75|nGGa9ltc~2e!}992Pf1K45`z?yXYONpkU6R8(rTL-Iuq_#Xr8~pI>}n2
z`gr(MRdHdK&ie~LXsJmEJzWxRIU`~U3%A@V9iJ(a^^P8I2{+uR8R)lbO7BT0&NcGS
z<Sbp!?XCN?Vu^;DtX2=_nHe9tWH&$ir11Ff{mZxR-LyWZwN1xof^p+aA?C>`f3H^T
zeDlUfWb(PAM^s`TN$9V3n{g*F(8nrNVsfX5_~hHsZOZo_cQ^$4@F}UOWhQzFsBI5&
zH+&h!=##MS$g{*vGpYlM4ztXYTcV-%SF`D4dyLP9y^@ocZxvfTqhv+E%E`C+v%`ws
zuB$q_BzUQz=E*RAH80P8u5as4-nq;uExdGz|5A&K(e<kiHgkFeCGnXDdhH4=InsHg
z!uUuhU;OKEIX?%!NxLlrJbq0tU$FR%K)S5q-Thx8Wkp?2b#^T}?dlmAxMa=>ud8;>
zZ|6UpSdy4~Zrk>oovT-^2=|#X$yc@aWZ2K;GjdjOCu?7P#AdnT?UuErGoP*znYpI=
z(ClRsxLzw&^~QB1u?M)6UDLDNuw&!i<n>Bf>(}hcXx*azw#>EsmDjrK53ByW&F<NH
zVcA2$4`~O}uV*ijd^;_2ZqTO_Z?#VyS)FfQe?f7^&+tdvHop{@amVIxX_3L@Yrk*p
z$&A|hb~jsOUDP)Ii2|$iN@K;R?TPzu6RlpvcKQ2^+v^_+obsH%Ytehv&%ADI{>%6o
zePWcuIF2sy*PRiwKHOgIG=JC0%Bf$}?m49g_?%m+{cqPShl<rQCO+%dCVn~-I`?u$
z;*Z*EmN93~uXJphy{kX1|7PAsgGldX$8SC0-#FhmlX<e3#!2gL-{N;qW*B^a{_ll<
z@AG=8=PAc$bv82W`G4@sURAXiuRO+AHmcX!!m|U+RE3on%viC($1BZ8Y0ioHOy>Cy
zKCW+@BE0|Cktfgm?CPr4)c;A{Uu@a^>u`!zVM4{vub)rX9u_~D{`l$Qa*ntCocCw?
z)W_HTwek^`^LVzl=+RZC6N`J9KisiU+q`dY-LHr=$4@!iF5Zq~-*(&LJg3*KgHNVE
z{OUcE_2lN8F8RBYlSE8sf1AB!s$uTj-nDZLJDVo9o{UNLP`94K%73G$ZQCp39i@9&
z)%F*b{gYiU!z(QDOvHZ~NODF`E8A!B#bSFH)#_`Wzxn(3_}kBKm)$jF-27fNG-!@p
z<fN-VqqZ3xxpaG%zV?s#2`8j&iVAXM-^*J`XKtL+x%2nF84aCG9SM_k%<nCWV7C7;
z|3lZb+JngpH<s>6<X~O!;_Q#2X%p-IiT=IGQGfgS`uU!dzi!>u{`X$Wy1UO3m{MoB
zG}X7R|NO|CLD(aqNI&aZ-E=1(xx&iLX1~6Ce|)@Tntj>N_<Ne2T*3_dSmo6EwGKY^
zs~4HPbH!1~mTqqA9f|CPOl2>Y&cDX@#L`kZ=S`0AqU2A1%ViGa=d9{DQ~f99YU-;K
zOP<tKPE9b1U*){(!p8)rw6#i2CqHo2-#X4JY;ykK-O0|gYuydsE`Qt<!5L~%|1a_X
zu6N%KB;T}K`zIkREvuvJ<n!<U<uhl@2tDp`%1>TQE$kf|Z{(Z@7Y>$r?}+oxe7VHX
zQB<VAc;=Vy_7;qHSd-uG`*i;P;dh^=TE4zJH}m2B=k@XN`|S4b3wouN@a)Q`TyyV5
zGk)mhS$D_P{rqzChp1`JLHQ$gmi@W&e^>UzGR#<QQ})ABPg^bjb(#17zpczWx$|c1
z|L485vNmv+S6p0tT)e$)y<%u$=i}CEB{7{14_II4{;6vd-{P)f^8Cr684)U06CIzP
zkS@~hUv+Hu&a>MET(Y-+JFCT|c7Fc7yC=;PeWoojHsowCtNZ!=>JL+)D&3$Rb88!Z
zaEnFyr5<rSbh9$9!kI_Xq_X*qx{%r)tu@y-M!#cNx_DxH@yZ7q6VuPX4u8#H5wYR7
z>Ai>3J5SEO>gjcyMV}?pLQZXE%m<a;<;GWLpT9Qk{QVp5%jU$esg?B}SvPwF*IbRr
z-=1Q^oB5TMgw!5v-238u|GS0LXRY4f)SP=f`N1sT<V_|C*UkLf%q>~f(mtiV_tcQt
zx3{I<yLb7yqem`3EfPMJHd!G5+p#FC{D13ivPD^K4UrTMsV#N$SuSPs(_YQ>MQiLY
z-xFC2?=P7ou~PPkN)mH;{QjC^r{~s_f6e&wtvP#3YupVlts^Xt&as5X+t)SL%Qo*$
zblTzk=db$NORL1*y$lJGRMV86-}6yzeb0nR>hm<q1TP(uePs2>W*29U^!+o5p88)V
z{7hF~Ub%<U*5PaLE61<K%QkM-l-OIAG?#C({GN>)>fcNi$<@?O)-eCK-CBBC#L^Bi
z%gJ3vZZ-e9w$;qe3iUg8<&&D1=Vnk9nwrAPQxWoK-}6PcJpSxi+1eR&X~QOilYH9M
zYOCH}@p`wVNKR}2trPRj&N+#Oaa&$FWVz%^ebT2HnoCyn#xpQ5D3!QIlqBcp=BDPA
zFa+nPm6RtIrCKTYW+oTq7w4yyDEOzPWhSS-*ia$Ez`&piQk{}ml4_M)l$uzQ%3x$*
zXr^muq-$svVqj@yWMpM*tZiUuWndt<yEy<wLvDUbW?Cg~4GGVz-++c$Jzf1=);T3K
F0RS<MP?`V$

literal 0
HcmV?d00001

diff --git a/tmva/tmva/inc/TMVA/MethodCFMlpANN.h b/tmva/tmva/inc/TMVA/MethodCFMlpANN.h
index ef18aca4a55..4deeb926141 100644
--- a/tmva/tmva/inc/TMVA/MethodCFMlpANN.h
+++ b/tmva/tmva/inc/TMVA/MethodCFMlpANN.h
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$    
-// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss 
+// @(#)root/tmva $Id$
+// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -46,11 +46,11 @@
  *                                                                                *
  *        This artificial neural network usually needs a relatively large         *
  *        number of cycles to converge (8000 and more). Overtraining can          *
- *        be efficienctly tested by comparing the signal and background           *
+ *        be efficiently tested by comparing the signal and background            *
  *        output of the NN for the events that were used for training and         *
  *        an independent data sample (with equal properties). If the separation   *
  *        performance is significantly better for the training sample, the        *
- *        NN interprets statistical effects, and is hence overtrained. In         * 
+ *        NN interprets statistical effects, and is hence overtrained. In         *
  *        this case, the number of cycles should be reduced, or the size          *
  *        of the training sample increased.                                       *
  *                                                                                *
@@ -61,9 +61,9 @@
  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
  *                                                                                *
  * Copyright (c) 2005:                                                            *
- *      CERN, Switzerland                                                         * 
- *      U. of Victoria, Canada                                                    * 
- *      MPI-K Heidelberg, Germany                                                 * 
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
  *      LAPP, Annecy, France                                                      *
  *                                                                                *
  * Redistribution and use in source and binary forms, with or without             *
@@ -102,15 +102,15 @@ namespace TMVA {
    public:
 
       MethodCFMlpANN( const TString& jobName,
-                      const TString& methodTitle, 
+                      const TString& methodTitle,
                       DataSetInfo& theData,
                       const TString& theOption = "3000:N-1:N-2");
 
-      MethodCFMlpANN( DataSetInfo& theData, 
+      MethodCFMlpANN( DataSetInfo& theData,
                       const TString& theWeightFile);
 
       virtual ~MethodCFMlpANN( void );
-    
+
       virtual Bool_t HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ );
 
       // training method
@@ -148,7 +148,7 @@ namespace TMVA {
 
       Int_t DataInterface( Double_t*, Double_t*, Int_t*, Int_t*, Int_t*, Int_t*,
                            Double_t*, Int_t*, Int_t* );
-  
+
    private:
 
       void PrintWeights( std::ostream & o ) const;
@@ -156,7 +156,7 @@ namespace TMVA {
       // the option handling methods
       void DeclareOptions();
       void ProcessOptions();
-      
+
       // LUTs
       TMatrixF       *fData;     // the (data,var) string
       std::vector<Int_t> *fClass;    // the event class (1=signal, 2=background)
@@ -169,13 +169,13 @@ namespace TMVA {
       Double_t**    fYNN;       // weights
       TString       fLayerSpec; // the hidden layer specification string
       Int_t MethodCFMlpANN_nsel;
-      
+
       // auxiliary member functions
       Double_t EvalANN( std::vector<Double_t>&, Bool_t& isOK );
       void     NN_ava ( Double_t* );
       Double_t NN_fonc( Int_t, Double_t ) const;
 
-      // default initialisation 
+      // default initialisation
       void Init( void );
 
       ClassDef(MethodCFMlpANN,0); // Interface for Clermond-Ferrand artificial neural network
diff --git a/tmva/tmva/src/MethodBayesClassifier.cxx b/tmva/tmva/src/MethodBayesClassifier.cxx
index 262389f4b47..36c0c5ef3cc 100644
--- a/tmva/tmva/src/MethodBayesClassifier.cxx
+++ b/tmva/tmva/src/MethodBayesClassifier.cxx
@@ -1,5 +1,5 @@
-// @(#)root/tmva $Id$    
-// Author: Marcin .... 
+// @(#)root/tmva $Id$
+// Author: Marcin ....
 
 /**********************************************************************************
  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
@@ -25,10 +25,12 @@
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-//_______________________________________________________________________
-//                                                                      
-// ... description of bayesian classifiers ...
-//_______________________________________________________________________
+/*! \class TMVA::MethodBayesClassifier
+\ingroup TMVA
+
+Description of Bayesian classifiers.
+
+*/
 
 #include "TMVA/MethodBayesClassifier.h"
 
@@ -51,7 +53,7 @@ ClassImp(TMVA::MethodBayesClassifier)
 
    TMVA::MethodBayesClassifier::MethodBayesClassifier( const TString& jobName,
                                                        const TString& methodTitle,
-                                                       DataSetInfo& theData, 
+                                                       DataSetInfo& theData,
                                                        const TString& theOption ) :
    TMVA::MethodBase( jobName, Types::kBayesClassifier, methodTitle, theData, theOption)
 {
@@ -60,14 +62,14 @@ ClassImp(TMVA::MethodBayesClassifier)
 ////////////////////////////////////////////////////////////////////////////////
 /// constructor from weight file
 
-TMVA::MethodBayesClassifier::MethodBayesClassifier( DataSetInfo& theData, 
+TMVA::MethodBayesClassifier::MethodBayesClassifier( DataSetInfo& theData,
                                                     const TString& theWeightFile) :
-   TMVA::MethodBase( Types::kBayesClassifier, theData, theWeightFile) 
+   TMVA::MethodBase( Types::kBayesClassifier, theData, theWeightFile)
 {
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// Variable can handle classification with 2 classes 
+/// Variable can handle classification with 2 classes
 
 Bool_t TMVA::MethodBayesClassifier::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ )
 {
@@ -84,16 +86,16 @@ void TMVA::MethodBayesClassifier::Init( void )
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// define the options (their key words) that can be set in the option string 
+/// define the options (their key words) that can be set in the option string
 
-void TMVA::MethodBayesClassifier::DeclareOptions() 
+void TMVA::MethodBayesClassifier::DeclareOptions()
 {
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// the option string is decoded, for availabel options see "DeclareOptions"
+/// the option string is decoded, for available options see "DeclareOptions"
 
-void TMVA::MethodBayesClassifier::ProcessOptions() 
+void TMVA::MethodBayesClassifier::ProcessOptions()
 {
 }
 
@@ -105,7 +107,7 @@ TMVA::MethodBayesClassifier::~MethodBayesClassifier( void )
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// some training 
+/// some training
 
 void TMVA::MethodBayesClassifier::Train( void )
 {
@@ -116,7 +118,7 @@ void TMVA::MethodBayesClassifier::Train( void )
 void TMVA::MethodBayesClassifier::AddWeightsXMLTo( void* /*parent*/ ) const {
    Log() << kFATAL << "Please implement writing of weights as XML" << Endl;
 }
-  
+
 ////////////////////////////////////////////////////////////////////////////////
 /// read back the training results from a file (stream)
 
@@ -149,7 +151,7 @@ void TMVA::MethodBayesClassifier::MakeClassSpecific( std::ostream& fout, const T
 ////////////////////////////////////////////////////////////////////////////////
 /// get help message text
 ///
-/// typical length of text line: 
+/// typical length of text line:
 ///         "|--------------------------------------------------------------|"
 
 void TMVA::MethodBayesClassifier::GetHelpMessage() const
diff --git a/tmva/tmva/src/MethodBoost.cxx b/tmva/tmva/src/MethodBoost.cxx
index 1c85a08f903..1349e5d53d4 100644
--- a/tmva/tmva/src/MethodBoost.cxx
+++ b/tmva/tmva/src/MethodBoost.cxx
@@ -29,13 +29,17 @@
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-//_______________________________________________________________________
-//
-// This class is meant to boost a single classifier. Boosting means    //
-// training the classifier a few times. Everytime the wieghts of the   //
-// events are modified according to how well the classifier performed  //
-// on the test sample.                                                 //
-////////////////////////////////////////////////////////////////////////////////
+/*! \class TMVA::MethodBoost
+\ingroup TMVA
+
+Class for boosting a TMVA method.
+
+This class is meant to boost a single classifier. Boosting means
+training the classifier a few times. Every time the weights of the
+events are modified according to how well the classifier performed
+on the test sample.
+
+*/
 
 #include "TMVA/MethodBoost.h"
 
@@ -95,7 +99,7 @@ ClassImp(TMVA::MethodBoost)
    , fBoostNum(0)
    , fDetailedMonitoring(kFALSE)
    , fAdaBoostBeta(0)
-   , fRandomSeed(0) 
+   , fRandomSeed(0)
    , fBaggedSampleFraction(0)
    , fBoostedMethodTitle(methodTitle)
    , fBoostedMethodOptions(theOption)
@@ -176,7 +180,7 @@ void TMVA::MethodBoost::DeclareOptions()
 
    DeclareOptionRef( fMonitorBoostedMethod = kTRUE, "Boost_MonitorMethod",
                      "Write monitoring histograms for each boosted classifier" );
-   
+
    DeclareOptionRef( fDetailedMonitoring = kFALSE, "Boost_DetailedMonitoring",
                      "Produce histograms for detailed boost  monitoring" );
 
@@ -189,7 +193,7 @@ void TMVA::MethodBoost::DeclareOptions()
 
    DeclareOptionRef( fAdaBoostBeta = 1.0, "Boost_AdaBoostBeta",
                      "The ADA boost parameter that sets the effect of every boost step on the events' weights" );
-   
+
    DeclareOptionRef( fTransformString = "step", "Boost_Transform",
                      "Type of transform applied to every boosted method linear, log, step" );
    AddPreDefVal(TString("step"));
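
These options are steered through the booking string of the method that is to be
boosted. A minimal booking sketch, assuming the usual Factory/DataLoader setup with
signal and background trees already declared; the helper function name is arbitrary,
and the Boost_Num option name and the Fisher sub-options follow the standard TMVA
classification tutorial, so they should be checked against the full DeclareOptions list:

    // book a Fisher discriminant boosted 20 times with AdaBoost,
    // applying a log transform to the MVA output of each member
    #include "TMVA/Factory.h"
    #include "TMVA/DataLoader.h"
    #include "TMVA/Types.h"

    void bookBoostedFisher(TMVA::Factory* factory, TMVA::DataLoader* loader)
    {
       factory->BookMethod(loader, TMVA::Types::kFisher, "BoostedFisher",
                           "H:!V:Boost_Num=20:Boost_Type=AdaBoost:Boost_AdaBoostBeta=0.2:"
                           "Boost_Transform=log:!Boost_DetailedMonitoring");
    }

The Boost_* options end up in MethodBoost itself; everything else in the string is
passed on to the boosted classifier.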
@@ -205,8 +209,8 @@ void TMVA::MethodBoost::DeclareOptions()
 
 ////////////////////////////////////////////////////////////////////////////////
 /// options that are used ONLY for the READER to ensure backward compatibility
-///   they are hence without any effect (the reader is only reading the training 
-///   options that HAD been used at the training of the .xml weightfile at hand
+///   they are hence without any effect (the reader is only reading the training
+///   options that HAD been used at the training of the .xml weight file at hand)
 
 void TMVA::MethodBoost::DeclareCompatibilityOptions()
 {
@@ -228,9 +232,9 @@ void TMVA::MethodBoost::DeclareCompatibilityOptions()
    AddPreDefVal(TString("log"));
    AddPreDefVal(TString("gauss"));
 
-   // this option here 
+   // this option here
    //DeclareOptionRef( fBoostType  = "AdaBoost", "Boost_Type", "Boosting type for the classifiers" );
-   // still exists, but these two possible values 
+   // still exists, but these two possible values
    AddPreDefVal(TString("HighEdgeGauss"));
    AddPreDefVal(TString("HighEdgeCoPara"));
    // have been deleted .. hope that works :)
@@ -239,6 +243,7 @@ void TMVA::MethodBoost::DeclareCompatibilityOptions()
                      "Recalculate the classifier MVA Signallike cut at every boost iteration" );
 
 }
+
 ////////////////////////////////////////////////////////////////////////////////
 /// just registering the string from which the boosted classifier will be created
 
@@ -257,7 +262,7 @@ Bool_t TMVA::MethodBoost::BookMethod( Types::EMVA theMethod, TString methodTitle
 ////////////////////////////////////////////////////////////////////////////////
 
 void TMVA::MethodBoost::Init()
-{ 
+{
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -265,7 +270,7 @@ void TMVA::MethodBoost::Init()
 
 void TMVA::MethodBoost::InitHistos()
 {
-   
+
    Results* results = Data()->GetResults(GetMethodName(), Types::kTraining, GetAnalysisType());
 
    results->Store(new TH1F("MethodWeight","Normalized Classifier Weight",fBoostNum,0,fBoostNum),"ClassifierWeight");
@@ -391,12 +396,12 @@ void TMVA::MethodBoost::Train()
                                                              fBoostedMethodOptions);
       TMVA::MsgLogger::EnableOutput();
 
-      // supressing the rest of the classifier output the right way
+      // suppressing the rest of the classifier output the right way
       fCurrentMethod  = (dynamic_cast<MethodBase*>(method));
 
       if (fCurrentMethod==0) {
          Log() << kFATAL << "uups.. guess the booking of the " << fCurrentMethodIdx << "-th classifier somehow failed" << Endl;
-         return; // hope that makes coverity happy (as if fears I migh use the pointer later on, not knowing that FATAL exits
+         return; // hope that makes coverity happy (as it fears I might use the pointer later on, not knowing that FATAL exits)
       }
 
       // set fDataSetManager if MethodCategory (to enable Category to create datasetinfo objects) // DSMTEST
@@ -415,7 +420,7 @@ void TMVA::MethodBoost::Train()
       fCurrentMethod->ProcessSetup();
       fCurrentMethod->CheckSetup();
 
-      
+
       // reroute transformationhandler
       fCurrentMethod->RerouteTransformationHandler (&(this->GetTransformationHandler()));
 
@@ -438,22 +443,22 @@ void TMVA::MethodBoost::Train()
       timer.DrawProgressBar( fCurrentMethodIdx );
       if (fCurrentMethodIdx==0) MonitorBoost(Types::kBoostProcBegin,fCurrentMethodIdx);
       MonitorBoost(Types::kBeforeTraining,fCurrentMethodIdx);
-      TMVA::MsgLogger::InhibitOutput(); //supressing Logger outside the method
+      TMVA::MsgLogger::InhibitOutput(); //suppressing Logger outside the method
       if (fBoostType=="Bagging") Bagging();  // you want also to train the first classifier on a bagged sample
       SingleTrain();
       TMVA::MsgLogger::EnableOutput();
       if(!IsSilentFile())fCurrentMethod->WriteMonitoringHistosToFile();
-      
+
       // calculate MVA values of current method for all events in training sample
       // (used later on to get 'misclassified events' etc for the boosting
       CalcMVAValues();
 
       if(!IsSilentFile()) if (fCurrentMethodIdx==0 && fMonitorBoostedMethod) CreateMVAHistorgrams();
-      
+
       // get ROC integral and overlap integral for single method on
       // training sample if fMethodWeightType == "ByROC" or the user
       // wants detailed monitoring
-         
+
       // boosting (reweight training sample)
       MonitorBoost(Types::kBeforeBoosting,fCurrentMethodIdx);
       SingleBoost(fCurrentMethod);
@@ -462,7 +467,7 @@ void TMVA::MethodBoost::Train()
       results->GetHist("BoostWeight")->SetBinContent(fCurrentMethodIdx+1,fBoostWeight);
       results->GetHist("ErrorFraction")->SetBinContent(fCurrentMethodIdx+1,fMethodError);
 
-      if (fDetailedMonitoring) {      
+      if (fDetailedMonitoring) {
          fROC_training = GetBoostROCIntegral(kTRUE, Types::kTraining, kTRUE);
          results->GetHist("ROCIntegral_test")->SetBinContent(fCurrentMethodIdx+1, GetBoostROCIntegral(kTRUE,  Types::kTesting));
          results->GetHist("ROCIntegralBoosted_test")->SetBinContent(fCurrentMethodIdx+1, GetBoostROCIntegral(kFALSE, Types::kTesting));
@@ -478,10 +483,10 @@ void TMVA::MethodBoost::Train()
       // stop boosting if needed when error has reached 0.5
       // thought of counting a few steps, but it doesn't seem to be necessary
       Log() << kDEBUG << "AdaBoost (methodErr) err = " << fMethodError << Endl;
-      if (fMethodError > 0.49999) StopCounter++; 
+      if (fMethodError > 0.49999) StopCounter++;
       if (StopCounter > 0 && fBoostType != "Bagging") {
          timer.DrawProgressBar( fBoostNum );
-         fBoostNum = fCurrentMethodIdx+1; 
+         fBoostNum = fCurrentMethodIdx+1;
          Log() << kINFO << "Error rate has reached 0.5 ("<< fMethodError<<"), boosting process stopped at #" << fBoostNum << " classifier" << Endl;
          if (fBoostNum < 5)
             Log() << kINFO << "The classifier might be too strong to boost with Beta = " << fAdaBoostBeta << ", try reducing it." <<Endl;
@@ -490,25 +495,25 @@ void TMVA::MethodBoost::Train()
    }
 
    //as MethodBoost acts not on a private event sample (like MethodBDT does), we need to remember not
-   // to leave "boosted" events to the next classifier in the factory 
+   // to leave "boosted" events to the next classifier in the factory
 
    ResetBoostWeights();
 
    Timer* timer1= new Timer( fBoostNum, GetName() );
    // normalizing the weights of the classifiers
    for (fCurrentMethodIdx=0;fCurrentMethodIdx<fBoostNum;fCurrentMethodIdx++) {
-      // pefroming post-boosting actions
+      // performing post-boosting actions
 
       timer1->DrawProgressBar( fCurrentMethodIdx );
-      
+
       if (fCurrentMethodIdx==fBoostNum) {
-         Log() << kINFO << "Elapsed time: " << timer1->GetElapsedTime() 
+         Log() << kINFO << "Elapsed time: " << timer1->GetElapsedTime()
                << "                              " << Endl;
       }
-      
+
       TH1F* tmp = dynamic_cast<TH1F*>( results->GetHist("ClassifierWeight") );
       if (tmp) tmp->SetBinContent(fCurrentMethodIdx+1,fMethodWeight[fCurrentMethodIdx]);
-      
+
    }
 
    // Ensure that in case of only 1 boost the method weight equals
@@ -527,15 +532,15 @@ void TMVA::MethodBoost::Train()
 
 void TMVA::MethodBoost::CleanBoostOptions()
 {
-   fBoostedMethodOptions=GetOptions(); 
+   fBoostedMethodOptions=GetOptions();
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 
 void TMVA::MethodBoost::CreateMVAHistorgrams()
 {
-   if (fBoostNum <=0) Log() << kFATAL << "CreateHistorgrams called before fBoostNum is initialized" << Endl;
-   // calculating histograms boundries and creating histograms..
+   if (fBoostNum <=0) Log() << kFATAL << "CreateHistograms called before fBoostNum is initialized" << Endl;
+   // calculating histograms boundaries and creating histograms..
    // nrms = number of rms around the average to use for outline (of the 0 classifier)
    Double_t meanS, meanB, rmsS, rmsB, xmin, xmax, nrms = 10;
    Int_t signalClass = 0;
@@ -549,7 +554,7 @@ void TMVA::MethodBoost::CreateMVAHistorgrams()
    xmin = TMath::Max( TMath::Min(meanS - nrms*rmsS, meanB - nrms*rmsB ), xmin );
    xmax = TMath::Min( TMath::Max(meanS + nrms*rmsS, meanB + nrms*rmsB ), xmax ) + 0.00001;
 
-   // creating all the historgrams
+   // creating all the histograms
    for (UInt_t imtd=0; imtd<fBoostNum; imtd++) {
       fTrainSigMVAHist .push_back( new TH1F( Form("MVA_Train_S_%04i",imtd), "MVA_Train_S",        fNbins, xmin, xmax ) );
       fTrainBgdMVAHist .push_back( new TH1F( Form("MVA_Train_B%04i", imtd), "MVA_Train_B",        fNbins, xmin, xmax ) );
@@ -681,7 +686,7 @@ void TMVA::MethodBoost::SingleTrain()
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// find the CUT on the individual MVA that defines an event as 
+/// find the CUT on the individual MVA that defines an event as
 /// correct or misclassified (to be used in the boosting process)
 
 void TMVA::MethodBoost::FindMVACut(MethodBase *method)
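
FindMVACut scans the cumulative signal and background distributions bin by bin and
keeps the cut with the largest separation gain of the chosen criterion (GiniIndex in
the code below). A stand-alone sketch of that criterion, using the gain formula that
is also printed in the if(0) debug block further down; the p*(1-p) form of the Gini
index is an assumption here (TMVA's GiniIndex may normalise differently, which does
not change which cut maximises the gain):

    #include <cstdio>
    #include <vector>

    // Gini impurity of a node with s signal and b background (weighted) events
    static double gini(double s, double b)
    {
       if (s + b <= 0) return 0;
       double p = s / (s + b);
       return p * (1 - p);
    }

    // separation gain of a cut: parent impurity minus the weighted child impurities,
    // matching the expression printed in FindMVACut's debug output
    static double separationGain(double sSel, double bSel, double sTot, double bTot)
    {
       double parent = gini(sTot, bTot);
       double left   = gini(sSel, bSel);
       double right  = gini(sTot - sSel, bTot - bSel);
       return parent - ((sSel + bSel) * left + (sTot - sSel + bTot - bSel) * right) / (sTot + bTot);
    }

    int main()
    {
       // cumulative signal/background sums up to each candidate cut bin (toy numbers)
       std::vector<double> sCum = {1, 3, 8, 15, 19, 20};
       std::vector<double> bCum = {9, 15, 17, 19, 20, 20};
       double sTot = 20, bTot = 20, best = -1;
       size_t bestBin = 0;
       for (size_t i = 0; i < sCum.size(); ++i) {
          double g = separationGain(sCum[i], bCum[i], sTot, bTot);
          if (g > best) { best = g; bestBin = i; }
       }
       std::printf("best cut bin = %zu, gain = %.4f\n", bestBin, best);
       return 0;
    }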
@@ -700,9 +705,9 @@ void TMVA::MethodBoost::FindMVACut(MethodBase *method)
       if (val<minMVA) minMVA=val;
    }
    maxMVA = maxMVA+(maxMVA-minMVA)/nBins;
-   
+
    Double_t sum = 0.;
-   
+
    TH1D *mvaS  = new TH1D(Form("MVAS_%d",fCurrentMethodIdx) ,"",nBins,minMVA,maxMVA);
    TH1D *mvaB  = new TH1D(Form("MVAB_%d",fCurrentMethodIdx) ,"",nBins,minMVA,maxMVA);
    TH1D *mvaSC = new TH1D(Form("MVASC_%d",fCurrentMethodIdx),"",nBins,minMVA,maxMVA);
@@ -718,7 +723,7 @@ void TMVA::MethodBoost::FindMVACut(MethodBase *method)
    }
 
    for (Long64_t ievt=0; ievt<Data()->GetNEvents(); ievt++) {
-      
+
       Double_t weight = GetEvent(ievt)->GetWeight();
       Double_t mvaVal=method->GetMvaValue();
       sum +=weight;
@@ -729,21 +734,23 @@ void TMVA::MethodBoost::FindMVACut(MethodBase *method)
       }
    }
    SeparationBase *sepGain;
-   
 
-   // Boosting should use Miscalssification not Gini Index (changed, Helge 31.5.2013)
-   // ACHTUNG !! mit "Misclassification" geht es NUR wenn man die Signal zu Background bei jedem Boost schritt
-   // wieder hinbiegt. Es gibt aber komischerweise bessere Ergebnisse (genau wie bei BDT auch schon beobachtet) wenn
-   // man GiniIndex benutzt und akzeptiert dass jedes andere mal KEIN vernuenftiger Cut gefunden wird - d.h. der
-   // Cut liegt dann ausserhalb der MVA value range, alle events sind als Bkg classifiziert und dann wird entpsrehcend
-   // des Boost algorithmus 'automitisch' etwas renormiert .. sodass im naechsten Schritt dann wieder was vernuenftiges
-   // rauskommt. Komisch .. dass DAS richtig sein soll ?? 
+
+   // Boosting should use Misclassification not Gini Index (changed, Helge 31.5.2013)
+   // WARNING! It works with Misclassification only if you fix up the signal to
+   // background ratio again at every boost step. Strangely enough, there are better
+   // results (exactly as already observed with BDT) if you use the Gini Index, and
+   // accept that every other time NO sensible cut is found - i.e. the cut is then
+   // outside the MVA value range, all events are classified as background, and then
+   // according to the Boost algorithm something is renormalised 'automatically' ...
+   // so that in the next step something sensible comes out again.
+   // Strange ... that THIS is supposed to be right?
 
    //   SeparationBase *sepGain2 = new MisClassificationError();
    //sepGain = new MisClassificationError();
-   sepGain = new GiniIndex(); 
+   sepGain = new GiniIndex();
    //sepGain = new CrossEntropy();
-   
+
    Double_t sTot = mvaS->GetSum();
    Double_t bTot = mvaB->GetSum();
 
@@ -756,21 +763,21 @@ void TMVA::MethodBoost::FindMVACut(MethodBase *method)
    Double_t sSelCut=sSel;
    Double_t bSelCut=bSel;
    //      std::cout << "minMVA =" << minMVA << " maxMVA = " << maxMVA << " width = " << mvaSC->GetBinWidth(1) <<  std::endl;
-   
+
    //      for (Int_t ibin=1;ibin<=nBins;ibin++) std::cout << " cutvalues[" << ibin<<"]="<<mvaSC->GetBinLowEdge(ibin) << "  " << mvaSC->GetBinCenter(ibin) << std::endl;
    Double_t mvaCutOrientation=1; // 1 if mva > mvaCut --> Signal and -1 if mva < mvaCut (i.e. mva*-1 > mvaCut*-1) --> Signal
-   for (Int_t ibin=1;ibin<=nBins;ibin++){ 
+   for (Int_t ibin=1;ibin<=nBins;ibin++){
       mvaSC->SetBinContent(ibin,mvaS->GetBinContent(ibin)+mvaSC->GetBinContent(ibin-1));
       mvaBC->SetBinContent(ibin,mvaB->GetBinContent(ibin)+mvaBC->GetBinContent(ibin-1));
-      
+
       sSel=mvaSC->GetBinContent(ibin);
       bSel=mvaBC->GetBinContent(ibin);
 
       // if (ibin==nBins){
       //    std::cout << "Last bin s="<< sSel <<" b="<<bSel << " s="<< sTot-sSel <<" b="<<bTot-bSel << endl;
       // }
-     
-      if (separationGain < sepGain->GetSeparationGain(sSel,bSel,sTot,bTot) 
+
+      if (separationGain < sepGain->GetSeparationGain(sSel,bSel,sTot,bTot)
           //  &&           (mvaSC->GetBinCenter(ibin) >0 || (fCurrentMethodIdx+1)%2 )
           ){
          separationGain = sepGain->GetSeparationGain(sSel,bSel,sTot,bTot);
@@ -794,19 +801,19 @@ void TMVA::MethodBoost::FindMVACut(MethodBase *method)
         << " bSel=" << bSel
         << " s/b(1)=" << sSel/bSel
         << " s/b(2)=" << (sTot-sSel)/(bTot-bSel)
-        << " sepGain="<<sepGain->GetSeparationGain(sSel,bSel,sTot,bTot) 
+        << " sepGain="<<sepGain->GetSeparationGain(sSel,bSel,sTot,bTot)
         << " sepGain2="<<sepGain2->GetSeparationGain(sSel,bSel,sTot,bTot)
         << "      " <<ori
         << std::endl;
       */
-         
+
    }
-   
+
    if (0){
       double parentIndex=sepGain->GetSeparationIndex(sTot,bTot);
       double leftIndex  =sepGain->GetSeparationIndex(sSelCut,bSelCut);
       double rightIndex  =sepGain->GetSeparationIndex(sTot-sSelCut,bTot-bSelCut);
-      std::cout 
+      std::cout
          << " sTot=" << sTot
          << " bTot=" << bTot
          << " s="<<sSelCut
@@ -821,7 +828,7 @@ void TMVA::MethodBoost::FindMVACut(MethodBase *method)
          << " sepGain=" << parentIndex-( (sSelCut+bSelCut) * leftIndex + (sTot-sSelCut+bTot-bSelCut) * rightIndex )/(sTot+bTot)
          << " sepGain="<<separationGain
          << " sepGain="<<sepGain->GetSeparationGain(sSelCut,bSelCut,sTot,bTot)
-         << " cut=" << mvaCut 
+         << " cut=" << mvaCut
          << " idx="<<fCurrentMethodIdx
          << " cutOrientation="<<mvaCutOrientation
          << std::endl;
@@ -831,12 +838,12 @@ void TMVA::MethodBoost::FindMVACut(MethodBase *method)
 
    results->GetHist("SeparationGain")->SetBinContent(fCurrentMethodIdx+1,separationGain);
 
-   
+
    Log() << kDEBUG << "(old step) Setting method cut to " <<method->GetSignalReferenceCut()<< Endl;
 
    if(IsSilentFile())
    {
-        mvaS ->Delete();  
+        mvaS ->Delete();
         mvaB ->Delete();
         mvaSC->Delete();
         mvaBC->Delete();
@@ -848,8 +855,8 @@ void TMVA::MethodBoost::FindMVACut(MethodBase *method)
 Double_t TMVA::MethodBoost::SingleBoost(MethodBase* method)
 {
    Double_t returnVal=-1;
-   
-   
+
+
    if      (fBoostType=="AdaBoost")      returnVal = this->AdaBoost  (method,1);
    else if (fBoostType=="RealAdaBoost")  returnVal = this->AdaBoost  (method,0);
    else if (fBoostType=="Bagging")       returnVal = this->Bagging   ();
@@ -860,12 +867,12 @@ Double_t TMVA::MethodBoost::SingleBoost(MethodBase* method)
    return returnVal;
 }
 ////////////////////////////////////////////////////////////////////////////////
-/// the standard (discrete or real) AdaBoost algorithm 
+/// the standard (discrete or real) AdaBoost algorithm
 
 Double_t TMVA::MethodBoost::AdaBoost(MethodBase* method, Bool_t discreteAdaBoost)
 {
    if (!method) {
-      Log() << kWARNING << " AdaBoost called without classifier reference - needed for calulating AdaBoost " << Endl;
+      Log() << kWARNING << " AdaBoost called without classifier reference - needed for calculating AdaBoost " << Endl;
       return 0;
    }
 
@@ -873,7 +880,7 @@ Double_t TMVA::MethodBoost::AdaBoost(MethodBase* method, Bool_t discreteAdaBoost
    Double_t sumAll=0, sumWrong=0;
    Bool_t* WrongDetection=new Bool_t[GetNEvents()];
    QuickMVAProbEstimator *MVAProb=NULL;
-   
+
    if (discreteAdaBoost) {
       FindMVACut(method);
       Log() << kDEBUG  << " individual mva cut value = " << method->GetSignalReferenceCut() << Endl;
@@ -882,9 +889,9 @@ Double_t TMVA::MethodBoost::AdaBoost(MethodBase* method, Bool_t discreteAdaBoost
       // the RealAdaBoost does use a simple "yes (signal)" or "no (background)"
       // answer from your single MVA, but a "signal probability" instead (in the BDT case,
       // that would be the 'purity' in the leaf node. For some MLP parameter, the MVA output
-      // can also interpreted as a probability, but here I try a genera aproach to get this
-      // probability from the MVA distributions... 
-      
+      // can also be interpreted as a probability, but here I try a general approach to get this
+      // probability from the MVA distributions...
+
       for (Long64_t evt=0; evt<GetNEvents(); evt++) {
          const Event* ev =  Data()->GetEvent(evt);
          MVAProb->AddEvent(fMVAvalues->at(evt),ev->GetWeight(),ev->GetClass());
@@ -914,13 +921,13 @@ Double_t TMVA::MethodBoost::AdaBoost(MethodBase* method, Bool_t discreteAdaBoost
             }
         }
       }
-      
+
       if (discreteAdaBoost){
-         if (sig  == method->IsSignalLike(fMVAvalues->at(ievt))){   
+         if (sig  == method->IsSignalLike(fMVAvalues->at(ievt))){
             WrongDetection[ievt]=kFALSE;
          }else{
-            WrongDetection[ievt]=kTRUE; 
-            sumWrong+=w; 
+            WrongDetection[ievt]=kTRUE;
+            sumWrong+=w;
          }
       }else{
          Double_t mvaProb = MVAProb->GetMVAProbAt((Float_t)fMVAvalues->at(ievt));
@@ -934,7 +941,7 @@ Double_t TMVA::MethodBoost::AdaBoost(MethodBase* method, Bool_t discreteAdaBoost
 
    fMethodError=sumWrong/sumAll;
 
-   // calculating the fMethodError and the boostWeight out of it uses the formula 
+   // calculating the fMethodError and the boostWeight out of it uses the formula
    // w = ((1-err)/err)^beta
 
    Double_t boostWeight=0;
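
The formula in the comment above maps directly onto the code that follows:
boostWeight = log((1-err)/err) * beta, and the per-event factor exp(boostWeight)
equals ((1-err)/err)^beta. A tiny numeric check, independent of TMVA:

    #include <cmath>
    #include <cstdio>

    int main()
    {
       double err  = 0.20;   // weighted misclassification fraction, fMethodError
       double beta = 1.0;    // Boost_AdaBoostBeta
       double boostWeight = std::log((1. - err) / err) * beta;   // as computed in AdaBoost()
       double boostfactor = std::exp(boostWeight);               // == pow((1-err)/err, beta)
       std::printf("boostWeight = %.4f, boostfactor = %.4f\n", boostWeight, boostfactor);
       // in the discrete AdaBoost branch, misclassified events (with positive weight)
       // are scaled up by boostfactor (here x4); the sample is then renormalised so
       // that the total weight stays the same
       return 0;
    }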
@@ -947,21 +954,21 @@ Double_t TMVA::MethodBoost::AdaBoost(MethodBase* method, Bool_t discreteAdaBoost
          boostWeight = TMath::Log((1.-fMethodError)/fMethodError)*fAdaBoostBeta;
       else
          boostWeight = TMath::Log((1.+fMethodError)/(1-fMethodError))*fAdaBoostBeta;
-      
-      
+
+
       //   std::cout << "boostweight = " << boostWeight << std::endl;
-      
+
       // ADA boosting, rescaling the weight of the wrong events according to the error level
       // over the entire test sample rescaling all the weights to have the same sum, but without
       // touching the original weights (changing only the boosted weight of all the events)
       // first reweight
-      
+
       Double_t newSum=0., oldSum=0.;
-      
-      
+
+
       Double_t boostfactor = TMath::Exp(boostWeight);
-      
-      
+
+
       for (Long64_t ievt=0; ievt<GetNEvents(); ievt++) {
          const Event* ev =  Data()->GetEvent(ievt);
          oldSum += ev->GetWeight();
@@ -972,27 +979,27 @@ Double_t TMVA::MethodBoost::AdaBoost(MethodBase* method, Bool_t discreteAdaBoost
                else                     ev->ScaleBoostWeight(1./boostfactor);
             }
             //         if (ievt<30) std::cout<<ievt<<" var0="<<ev->GetValue(0)<<" var1="<<ev->GetValue(1)<<" weight="<<ev->GetWeight() << "  boostby:"<<boostfactor<<std::endl;
-            
+
          }else{
             // events are classified by their probability of being signal or background
             // (eventually you should write this one - i.e. re-use the MVA value that were already
-            // calcualted and stroed..   however ,for the moement ..
+            // calculated and stored..   however, for the moment ..
             Double_t mvaProb = MVAProb->GetMVAProbAt((Float_t)fMVAvalues->at(ievt));
             mvaProb = 2*(mvaProb-0.5);
             // mvaProb = (1-mvaProb);
-            
+
             Int_t    trueType=1;
             if (DataInfo().IsSignal(ev)) trueType = 1;
             else trueType = -1;
-            
+
             boostfactor = TMath::Exp(-1*boostWeight*trueType*mvaProb);
             if (ev->GetWeight() > 0) ev->ScaleBoostWeight(boostfactor);
             else                     ev->ScaleBoostWeight(1./boostfactor);
-            
+
          }
          newSum += ev->GetWeight();
       }
-      
+
       Double_t normWeight = oldSum/newSum;
       // next normalize the weights
       Double_t normSig=0, normBkg=0;
@@ -1002,15 +1009,15 @@ Double_t TMVA::MethodBoost::AdaBoost(MethodBase* method, Bool_t discreteAdaBoost
          if (ev->GetClass()) normSig+=ev->GetWeight();
          else                normBkg+=ev->GetWeight();
       }
-      
+
       Results* results = Data()->GetResults(GetMethodName(), Types::kTraining, GetAnalysisType());
       results->GetHist("SoverBtotal")->SetBinContent(fCurrentMethodIdx+1, normSig/normBkg);
-      
+
       for (Long64_t ievt=0; ievt<GetNEvents(); ievt++) {
          const Event* ev = Data()->GetEvent(ievt);
-         
-         if (ev->GetClass()) ev->ScaleBoostWeight(oldSum/normSig/2); 
-         else                ev->ScaleBoostWeight(oldSum/normBkg/2); 
+
+         if (ev->GetClass()) ev->ScaleBoostWeight(oldSum/normSig/2);
+         else                ev->ScaleBoostWeight(oldSum/normBkg/2);
       }
    }
 
@@ -1064,11 +1071,11 @@ void TMVA::MethodBoost::GetHelpMessage() const
    Log() << "(Boost_Type), which can be set to either AdaBoost or Bagging." << Endl;
    Log() << "AdaBoosting: The most important parameters in this configuration" <<Endl;
    Log() << "is the beta parameter (Boost_AdaBoostBeta)  " << Endl;
-   Log() << "When boosting a linear classifier, it is sometimes advantageous"<<Endl; 
+   Log() << "When boosting a linear classifier, it is sometimes advantageous"<<Endl;
    Log() << "to transform the MVA output non-linearly. The following options" <<Endl;
    Log() << "are available: step, log, and minmax, the default is no transform."<<Endl;
    Log() <<Endl;
-   Log() << "Some classifiers are hard to boost and do not improve much in"<<Endl; 
+   Log() << "Some classifiers are hard to boost and do not improve much in"<<Endl;
    Log() << "their performance by boosting them, some even slightly deteriorate"<< Endl;
    Log() << "due to the boosting." <<Endl;
    Log() << "The booking of the boost method is special since it requires"<<Endl;
@@ -1083,7 +1090,7 @@ void TMVA::MethodBoost::GetHelpMessage() const
 ////////////////////////////////////////////////////////////////////////////////
 
 const TMVA::Ranking* TMVA::MethodBoost::CreateRanking()
-{ 
+{
    return 0;
 }
 
@@ -1101,7 +1108,7 @@ Double_t TMVA::MethodBoost::GetMvaValue( Double_t* err, Double_t* errUpper )
       if (m==0) continue;
       Double_t val = fTmpEvent ? m->GetMvaValue(fTmpEvent) : m->GetMvaValue();
       Double_t sigcut = m->GetSignalReferenceCut();
-      
+
       // default is no transform
       if (fTransformString == "linear"){
 
@@ -1156,14 +1163,14 @@ Double_t TMVA::MethodBoost::GetBoostROCIntegral(Bool_t singleMethod, Types::ETre
    // set data sample training / testing
    Data()->SetCurrentType(eTT);
 
-   MethodBase* method = singleMethod ? dynamic_cast<MethodBase*>(fMethods.back()) : 0; // ToDo CoVerity flags this line as there is no prtection against a zero-pointer delivered by dynamic_cast
-   // to make CoVerity happy (although, OF COURSE, the last method in the commitee
+   MethodBase* method = singleMethod ? dynamic_cast<MethodBase*>(fMethods.back()) : 0; // ToDo CoVerity flags this line as there is no protection against a zero-pointer delivered by dynamic_cast
+   // to make CoVerity happy (although, OF COURSE, the last method in the committee
    // has to be also of type MethodBase as ANY method is... hence the dynamic_cast
    // will never by "zero" ...
    if (singleMethod && !method) {
       Log() << kFATAL << " What do you do? Your method:"
-            << fMethods.back()->GetName() 
-            << " seems not to be a propper TMVA method" 
+            << fMethods.back()->GetName()
+            << " seems not to be a proper TMVA method"
             << Endl;
       std::exit(1);
    }
@@ -1190,7 +1197,7 @@ Double_t TMVA::MethodBoost::GetBoostROCIntegral(Bool_t singleMethod, Types::ETre
    std::vector <Float_t>* mvaRes;
    if (singleMethod && eTT==Types::kTraining)
       mvaRes = fMVAvalues; // values already calculated
-   else {  
+   else {
       mvaRes = new std::vector <Float_t>(GetNEvents());
       for (Long64_t ievt=0; ievt<GetNEvents(); ievt++) {
          GetEvent(ievt);
@@ -1230,7 +1237,7 @@ Double_t TMVA::MethodBoost::GetBoostROCIntegral(Bool_t singleMethod, Types::ETre
 
       if (CalcOverlapIntergral) {
          Float_t w_ov = ev->GetWeight();
-         if (DataInfo().IsSignal(ev))  
+         if (DataInfo().IsSignal(ev))
             mva_s_overlap->Fill( (*mvaRes)[ievt], w_ov );
          else
             mva_b_overlap->Fill( (*mvaRes)[ievt], w_ov );
@@ -1243,7 +1250,7 @@ Double_t TMVA::MethodBoost::GetBoostROCIntegral(Bool_t singleMethod, Types::ETre
 
    // calculate ROC integral from fS, fB
    Double_t ROC = MethodBase::GetROCIntegral(fS, fB);
-   
+
    // calculate overlap integral
    if (CalcOverlapIntergral) {
       gTools().NormHist( mva_s_overlap );
@@ -1290,10 +1297,9 @@ void TMVA::MethodBoost::CalcMVAValues()
    }
 
    // fill cumulative mva distribution
-   
 
-}
 
+}
 
 ////////////////////////////////////////////////////////////////////////////////
 /// fill various monitoring histograms from information of the individual classifiers that
@@ -1312,7 +1318,7 @@ void TMVA::MethodBoost::MonitorBoost( Types::EBoostStage stage , UInt_t methodIn
             results->Store(new TH1I("NodesBeforePruning","nodes before pruning",this->GetBoostNum(),0,this->GetBoostNum()),"NodesBeforePruning");
             results->Store(new TH1I("NodesAfterPruning","nodes after pruning",this->GetBoostNum(),0,this->GetBoostNum()),"NodesAfterPruning");
          }
-         
+
          if (stage == Types::kBeforeTraining){
          }
          else if (stage == Types::kBeforeBoosting){
@@ -1320,30 +1326,30 @@ void TMVA::MethodBoost::MonitorBoost( Types::EBoostStage stage , UInt_t methodIn
             results->GetHist("NodesAfterPruning")->SetBinContent(methodIndex+1,currentDT->GetNNodes());
          }
          else if (stage == Types::kAfterBoosting){
-            
+
          }
          else if (stage != Types::kBoostProcEnd){
-            Log() << kINFO << "<Train> average number of nodes before/after pruning : " 
-                  <<   results->GetHist("NodesBeforePruning")->GetMean() << " / " 
+            Log() << kINFO << "<Train> average number of nodes before/after pruning : "
+                  <<   results->GetHist("NodesBeforePruning")->GetMean() << " / "
                   <<   results->GetHist("NodesAfterPruning")->GetMean()
                   << Endl;
          }
       }
-      
+
    }else if (GetCurrentMethod(methodIndex)->GetMethodType() == TMVA::Types::kFisher) {
       if (stage == Types::kAfterBoosting){
          TMVA::MsgLogger::EnableOutput();
       }
    }else{
       if (methodIndex < 3){
-         Log() << kDEBUG << "No detailed boost monitoring for " 
-               << GetCurrentMethod(methodIndex)->GetMethodName() 
+         Log() << kDEBUG << "No detailed boost monitoring for "
+               << GetCurrentMethod(methodIndex)->GetMethodName()
                << " yet available " << Endl;
       }
    }
 
    //boosting plots universal for all classifiers 'typically for debug purposes only as they are not general enough'
-   
+
    if (stage == Types::kBeforeBoosting){
       // if you want to display the weighted events for 2D case at each boost step:
       if (fDetailedMonitoring){
@@ -1353,7 +1359,7 @@ void TMVA::MethodBoost::MonitorBoost( Types::EBoostStage stage , UInt_t methodIn
             results->GetHist(Form("EventDistSig_%d",methodIndex))->SetMarkerColor(4);
             results->Store(new TH2F(Form("EventDistBkg_%d",methodIndex),Form("EventDistBkg_%d",methodIndex),100,0,7,100,0,7));
             results->GetHist(Form("EventDistBkg_%d",methodIndex))->SetMarkerColor(2);
-            
+
             Data()->SetCurrentType(Types::kTraining);
             for (Long64_t ievt=0; ievt<GetNEvents(); ievt++) {
                const Event* ev = GetEvent(ievt);
@@ -1362,14 +1368,14 @@ void TMVA::MethodBoost::MonitorBoost( Types::EBoostStage stage , UInt_t methodIn
                Float_t v1= ev->GetValue(1);
                //         if (ievt<3) std::cout<<ievt<<" var0="<<v0<<" var1="<<v1<<" weight="<<w<<std::endl;
                TH2* h;
-               if (DataInfo().IsSignal(ev)) h=results->GetHist2D(Form("EventDistSig_%d",methodIndex));      
+               if (DataInfo().IsSignal(ev)) h=results->GetHist2D(Form("EventDistSig_%d",methodIndex));
                else                         h=results->GetHist2D(Form("EventDistBkg_%d",methodIndex));
                if (h) h->Fill(v0,v1,w);
             }
          }
       }
    }
-   
+
    return;
 }
 
diff --git a/tmva/tmva/src/MethodCFMlpANN.cxx b/tmva/tmva/src/MethodCFMlpANN.cxx
index 2022458f7cc..3d30f07ff91 100644
--- a/tmva/tmva/src/MethodCFMlpANN.cxx
+++ b/tmva/tmva/src/MethodCFMlpANN.cxx
@@ -27,41 +27,39 @@
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-//_______________________________________________________________________
-//
-// Begin_Html
-/*
-  Interface to Clermond-Ferrand artificial neural network
-
-  <p>
-  The CFMlpANN belong to the class of Multilayer Perceptrons (MLP), which are
-  feed-forward networks according to the following propagation schema:<br>
-  <center>
-  <img vspace=10 src="gif/tmva_mlp.gif" align="bottom" alt="Schema for artificial neural network">
-  </center>
-  The input layer contains as many neurons as input variables used in the MVA.
-  The output layer contains two neurons for the signal and background
-  event classes. In between the input and output layers are a variable number
-  of <i>k</i> hidden layers with arbitrary numbers of neurons. (While the
-  structure of the input and output layers is determined by the problem, the
-  hidden layers can be configured by the user through the option string
-  of the method booking.) <br>
-
-  As indicated in the sketch, all neuron inputs to a layer are linear
-  combinations of the neuron output of the previous layer. The transfer
-  from input to output within a neuron is performed by means of an "activation
-  function". In general, the activation function of a neuron can be
-  zero (deactivated), one (linear), or non-linear. The above example uses
-  a sigmoid activation function. The transfer function of the output layer
-  is usually linear. As a consequence: an ANN without hidden layer should
-  give identical discrimination power as a linear discriminant analysis (Fisher).
-  In case of one hidden layer, the ANN computes a linear combination of
-  sigmoid.  <br>
-
-  The learning method used by the CFMlpANN is only stochastic.
+/*! \class TMVA::MethodCFMlpANN
+\ingroup TMVA
+
+Interface to the Clermont-Ferrand artificial neural network
+
+
+The CFMlpANN belongs to the class of Multilayer Perceptrons (MLP), which are
+feed-forward networks according to the following propagation schema:
+
+\image html tmva_mlp.png "Schema for artificial neural network."
+
+The input layer contains as many neurons as input variables used in the MVA.
+The output layer contains two neurons for the signal and background
+event classes. In between the input and output layers are a variable number
+of <i>k</i> hidden layers with arbitrary numbers of neurons. (While the
+structure of the input and output layers is determined by the problem, the
+hidden layers can be configured by the user through the option string
+of the method booking.)
+
+As indicated in the sketch, all neuron inputs to a layer are linear
+combinations of the neuron output of the previous layer. The transfer
+from input to output within a neuron is performed by means of an "activation
+function". In general, the activation function of a neuron can be
+zero (deactivated), one (linear), or non-linear. The above example uses
+a sigmoid activation function. The transfer function of the output layer
+is usually linear. As a consequence: an ANN without a hidden layer should
+give the same discrimination power as a linear discriminant analysis (Fisher).
+In case of one hidden layer, the ANN computes a linear combination of
+sigmoid functions.
+
+The learning method used by the CFMlpANN is only stochastic.
 */
-// End_Html
-//_______________________________________________________________________
+
 
 #include "TMVA/MethodCFMlpANN.h"
 
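
The propagation scheme described in the class comment above - each neuron input is a
linear combination of the previous layer's outputs, pushed through a sigmoid-type
activation - is what NN_ava implements further down using the fYNN buffers. A
stand-alone sketch with hypothetical weights (the output layer is kept non-linear
here for brevity, although the comment notes it is usually linear):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // one fully connected layer: out[j] = activation( bias[j] + sum_i w[j][i] * in[i] )
    static std::vector<double> forwardLayer(const std::vector<double>& in,
                                            const std::vector<std::vector<double>>& w,
                                            const std::vector<double>& bias)
    {
       std::vector<double> out(w.size());
       for (size_t j = 0; j < w.size(); ++j) {
          double x = bias[j];
          for (size_t i = 0; i < in.size(); ++i) x += w[j][i] * in[i];
          out[j] = std::tanh(x);   // sigmoid-type activation
       }
       return out;
    }

    int main()
    {
       std::vector<double> input = {0.3, -1.2};   // two input variables
       // one hidden layer with two neurons, then a single output neuron
       std::vector<double> hidden = forwardLayer(input,  {{0.5, -0.4}, {1.0, 0.2}}, {0.1, -0.3});
       std::vector<double> output = forwardLayer(hidden, {{0.7, -0.9}}, {0.0});
       std::printf("NN output = %.4f\n", output[0]);
       return 0;
    }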
@@ -99,7 +97,9 @@ ClassImp(TMVA::MethodCFMlpANN)
 
 ////////////////////////////////////////////////////////////////////////////////
 /// standard constructor
+///
 /// option string: "n_training_cycles:n_hidden_layers"
+///
 /// default is:  n_training_cycles = 5000, n_layers = 4
 ///
 /// * note that the number of hidden layers in the NN is:
@@ -107,18 +107,21 @@ ClassImp(TMVA::MethodCFMlpANN)
 ///
 /// * since there is one input and one output layer. The number of
 ///   nodes (neurons) is predefined to be:
-///   n_nodes[i] = nvars + 1 - i (where i=1..n_layers)
+///
+///     n_nodes[i] = nvars + 1 - i (where i=1..n_layers)
 ///
 ///   with nvars being the number of variables used in the NN.
 ///
-/// Hence, the default case is: n_neurons(layer 1 (input)) : nvars
-///                             n_neurons(layer 2 (hidden)): nvars-1
-///                             n_neurons(layer 3 (hidden)): nvars-1
-///                             n_neurons(layer 4 (out))   : 2
+/// Hence, the default case is:
+///
+///     n_neurons(layer 1 (input)) : nvars
+///     n_neurons(layer 2 (hidden)): nvars-1
+///     n_neurons(layer 3 (hidden)): nvars-1
+///     n_neurons(layer 4 (out))   : 2
 ///
 /// This artificial neural network usually needs a relatively large
 /// number of cycles to converge (8000 and more). Overtraining can
-/// be efficienctly tested by comparing the signal and background
+/// be efficiently tested by comparing the signal and background
 /// output of the NN for the events that were used for training and
 /// an independent data sample (with equal properties). If the separation
 /// performance is significantly better for the training sample, the
@@ -192,7 +195,7 @@ void TMVA::MethodCFMlpANN::ProcessOptions()
       if (layerSpec.First(',')<0) {
          sToAdd = layerSpec;
          layerSpec = "";
-      } 
+      }
       else {
          sToAdd = layerSpec(0,layerSpec.First(','));
          layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
@@ -208,14 +211,14 @@ void TMVA::MethodCFMlpANN::ProcessOptions()
 
    if (IgnoreEventsWithNegWeightsInTraining()) {
       Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
-            << GetMethodTypeName() 
+            << GetMethodTypeName()
             << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
             << Endl;
    }
 
    Log() << kINFO << "Use configuration (nodes per layer): in=";
    for (Int_t i=0; i<fNlayers-1; i++) Log() << kINFO << fNodes[i] << ":";
-   Log() << kINFO << fNodes[fNlayers-1] << "=out" << Endl;   
+   Log() << kINFO << fNodes[fNlayers-1] << "=out" << Endl;
 
    // some info
    Log() << "Use " << fNcycles << " training cycles" << Endl;
@@ -224,7 +227,7 @@ void TMVA::MethodCFMlpANN::ProcessOptions()
 
    // note that one variable is type
    if (nEvtTrain>0) {
-    
+
       // Data LUT
       fData  = new TMatrix( nEvtTrain, GetNvar() );
       fClass = new std::vector<Int_t>( nEvtTrain );
@@ -235,16 +238,16 @@ void TMVA::MethodCFMlpANN::ProcessOptions()
       for (Int_t ievt=0; ievt<nEvtTrain; ievt++) {
          const Event * ev = GetEvent(ievt);
 
-         // identify signal and background events  
+         // identify signal and background events
          (*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;
-      
+
          // use normalized input Data
          for (ivar=0; ivar<GetNvar(); ivar++) {
             (*fData)( ievt, ivar ) = ev->GetValue(ivar);
          }
       }
 
-      //Log() << kVERBOSE << Data()->GetNEvtSigTrain() << " Signal and " 
+      //Log() << kVERBOSE << Data()->GetNEvtSigTrain() << " Signal and "
       //        << Data()->GetNEvtBkgdTrain() << " background" << " events in trainingTree" << Endl;
    }
 
@@ -259,7 +262,7 @@ void TMVA::MethodCFMlpANN::Init( void )
    SetNormalised( kTRUE );
 
    // initialize dimensions
-   MethodCFMlpANN_nsel = 0;  
+   MethodCFMlpANN_nsel = 0;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -301,13 +304,13 @@ void TMVA::MethodCFMlpANN::Train( void )
    fYNN = new Double_t*[nlayers];
    for (Int_t layer=0; layer<nlayers; layer++)
       fYNN[layer] = new Double_t[fNodes[layer]];
-   
+
    // please check
 #ifndef R__WIN32
    Train_nn( &dumDat, &dumDat, &ntrain, &ntest, &nvar, &nlayers, nodes, &ncycles );
 #else
    Log() << kWARNING << "<Train> sorry CFMlpANN does not run on Windows" << Endl;
-#endif  
+#endif
 
    delete [] nodes;
 
@@ -344,7 +347,7 @@ Double_t TMVA::MethodCFMlpANN::EvalANN( std::vector<Double_t>& inVar, Bool_t& is
    // hardcopy of input variables (necessary because they are update later)
    Double_t* xeev = new Double_t[GetNvar()];
    for (UInt_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = inVar[ivar];
-  
+
    // ---- now apply the weights: get NN output
    isOK = kTRUE;
    for (UInt_t jvar=0; jvar<GetNvar(); jvar++) {
@@ -356,11 +359,11 @@ Double_t TMVA::MethodCFMlpANN::EvalANN( std::vector<Double_t>& inVar, Bool_t& is
          xeev[jvar] = 0;
       }
       else {
-         xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);    
-         xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);    
+         xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);
+         xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);
       }
    }
-    
+
    NN_ava( xeev );
 
    Double_t retval = 0.5*(1.0 + fYNN[fParam_1.layerm-1][0]);
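The two replaced lines above implement the per-variable rescaling

    xeev[j] = ( xeev[j] - (xmax[j] + xmin[j])/2 ) / ( (xmax[j] - xmin[j])/2 )

which maps each input onto roughly [-1, +1] (degenerate variables with xmax == xmin == 0 are simply zeroed, as in the branch above). The 0.5*(1.0 + fYNN[layerm-1][0]) in the return statement then maps the [-1, +1] output node back onto a [0, 1] classifier response.
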
@@ -374,7 +377,7 @@ Double_t TMVA::MethodCFMlpANN::EvalANN( std::vector<Double_t>& inVar, Bool_t& is
 /// auxiliary functions
 
 void  TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
-{  
+{
    for (Int_t ivar=0; ivar<fNeur_1.neuron[0]; ivar++) fYNN[0][ivar] = xeev[ivar];
 
    for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
@@ -387,7 +390,7 @@ void  TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
          }
          fYNN[layer][j-1] = NN_fonc( layer, x );
       }
-   }  
+   }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -396,7 +399,7 @@ void  TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
 Double_t TMVA::MethodCFMlpANN::NN_fonc( Int_t i, Double_t u ) const
 {
    Double_t f(0);
-  
+
    if      (u/fDel_1.temp[i] >  170) f = +1;
    else if (u/fDel_1.temp[i] < -170) f = -1;
    else {
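The else branch is cut off by this hunk, but judging from the analogous Foncf implementation later in this patch the function evaluates the symmetric sigmoid

    f(u) = (1 - exp(-u/T_i)) / (1 + exp(-u/T_i)) = tanh( u / (2*T_i) ),   with T_i = fDel_1.temp[i],

which the two guards above saturate to +-1 once |u/T_i| exceeds 170, i.e. before the exponential would over- or underflow.
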
@@ -500,15 +503,15 @@ void TMVA::MethodCFMlpANN::ReadWeightsFromStream( std::istream & istr )
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// data interface function 
+/// data interface function
 
-Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* /*tout2*/, Double_t*  /*tin2*/, 
-                                           Int_t* /* icode*/, Int_t*  /*flag*/, 
-                                           Int_t*  /*nalire*/, Int_t* nvar, 
+Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* /*tout2*/, Double_t*  /*tin2*/,
+                                           Int_t* /* icode*/, Int_t*  /*flag*/,
+                                           Int_t*  /*nalire*/, Int_t* nvar,
                                            Double_t* xpg, Int_t* iclass, Int_t* ikend )
 {
    // icode and ikend are dummies needed to match f2c mlpl3 functions
-   *ikend = 0; 
+   *ikend = 0;
 
 
    // sanity checks
@@ -522,7 +525,7 @@ Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* /*tout2*/, Double_t*  /*tin
 
    // fill variables
    *iclass = (int)this->GetClass( MethodCFMlpANN_nsel );
-   for (UInt_t ivar=0; ivar<this->GetNvar(); ivar++) 
+   for (UInt_t ivar=0; ivar<this->GetNvar(); ivar++)
       xpg[ivar] = (double)this->GetData( MethodCFMlpANN_nsel, ivar );
 
    ++MethodCFMlpANN_nsel;
@@ -533,7 +536,7 @@ Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* /*tout2*/, Double_t*  /*tin
 ////////////////////////////////////////////////////////////////////////////////
 /// write weights to xml file
 
-void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const 
+void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const
 {
    void *wght = gTools().AddChild(parent, "Weights");
    gTools().AddAttr(wght,"NVars",fParam_1.nvar);
@@ -542,7 +545,7 @@ void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const
    void* minmaxnode = gTools().AddChild(wght, "VarMinMax");
    stringstream s;
    s.precision( 16 );
-   for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++) 
+   for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
       s << std::scientific << fVarn_1.xmin[ivar] <<  " " << fVarn_1.xmax[ivar] <<  " ";
    gTools().AddRawLine( minmaxnode, s.str().c_str() );
    void* neurons = gTools().AddChild(wght, "NNeurons");
@@ -558,7 +561,7 @@ void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const
       for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
          neuronnode = gTools().AddChild(layernode,"Neuron"+gTools().StringFromInt(neuron));
          stringstream weights;
-         weights.precision( 16 );         
+         weights.precision( 16 );
          weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);
          for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
             weights << " " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
@@ -569,9 +572,9 @@ void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const
    void* tempnode = gTools().AddChild(wght, "LayerTemp");
    stringstream temp;
    temp.precision( 16 );
-   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {         
+   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
       temp << std::scientific << fDel_1.temp[layer] << " ";
-   }   
+   }
    gTools().AddRawLine(tempnode, temp.str().c_str() );
 }
 ////////////////////////////////////////////////////////////////////////////////
@@ -583,7 +586,7 @@ void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
    void* minmaxnode = gTools().GetChild(wghtnode);
    const char* minmaxcontent = gTools().GetContent(minmaxnode);
    stringstream content(minmaxcontent);
-   for (UInt_t ivar=0; ivar<GetNvar(); ivar++) 
+   for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
       content >> fVarn_1.xmin[ivar] >> fVarn_1.xmax[ivar];
    if (fYNN != 0) {
       for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
@@ -594,7 +597,7 @@ void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
    void *layernode=gTools().GetNextChild(minmaxnode);
    const char* neuronscontent = gTools().GetContent(layernode);
    stringstream ncontent(neuronscontent);
-   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {              
+   for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
       // read number of neurons for each layer;
       // coverity[tainted_data_argument]
       ncontent >> fNeur_1.neuron[layer];
@@ -613,7 +616,7 @@ void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
          }
          neuronnode=gTools().GetNextChild(neuronnode);
       }
-   } 
+   }
    void* tempnode=gTools().GetNextChild(layernode);
    const char* temp = gTools().GetContent(tempnode);
    stringstream t(temp);
@@ -631,30 +634,30 @@ void TMVA::MethodCFMlpANN::PrintWeights( std::ostream & o ) const
    // write number of variables and classes
    o << "Number of vars " << fParam_1.nvar << std::endl;
    o << "Output nodes   " << fParam_1.lclass << std::endl;
-   
+
    // write extrema of input variables
-   for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++) 
+   for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++)
       o << "Var " << ivar << " [" << fVarn_1.xmin[ivar] << " - " << fVarn_1.xmax[ivar] << "]" << std::endl;
-        
+
    // write number of layers (sum of: input + output + hidden)
    o << "Number of layers " << fParam_1.layerm << std::endl;
-   
+
    o << "Nodes per layer ";
    for (Int_t layer=0; layer<fParam_1.layerm; layer++)
       // write number of neurons for each layer
-      o << fNeur_1.neuron[layer] << "     ";   
+      o << fNeur_1.neuron[layer] << "     ";
    o << std::endl;
-        
+
    // write weights
-   for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) { 
-          
+   for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
+
       Int_t nq = fNeur_1.neuron[layer]/10;
       Int_t nr = fNeur_1.neuron[layer] - nq*10;
-          
+
       Int_t kk(0);
       if (nr==0) kk = nq;
       else       kk = nq+1;
-          
+
       for (Int_t k=1; k<=kk; k++) {
          Int_t jmin = 10*k - 9;
          Int_t jmax = 10*k;
@@ -675,16 +678,18 @@ void TMVA::MethodCFMlpANN::PrintWeights( std::ostream & o ) const
             }
             o << std::endl;
          }
-            
+
          // skip two empty lines
          o << std::endl;
       }
    }
    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
       o << "Del.temp in layer " << layer << " :  " << fDel_1.temp[layer] << std::endl;
-   }      
+   }
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
 void TMVA::MethodCFMlpANN::MakeClassSpecific( std::ostream& fout, const TString& className ) const
 {
    // write specific classifier response
@@ -702,7 +707,7 @@ void TMVA::MethodCFMlpANN::MakeClassSpecificHeader( std::ostream& , const TStrin
 ////////////////////////////////////////////////////////////////////////////////
 /// get help message text
 ///
-/// typical length of text line: 
+/// typical length of text line:
 ///         "|--------------------------------------------------------------|"
 
 void TMVA::MethodCFMlpANN::GetHelpMessage() const
diff --git a/tmva/tmva/src/MethodCFMlpANN_Utils.cxx b/tmva/tmva/src/MethodCFMlpANN_Utils.cxx
index bbfec0c81a2..84cf9939ad3 100644
--- a/tmva/tmva/src/MethodCFMlpANN_Utils.cxx
+++ b/tmva/tmva/src/MethodCFMlpANN_Utils.cxx
@@ -46,16 +46,18 @@
  *                                                                                *
  **********************************************************************************/
 
-//_______________________________________________________________________
-//
-// Implementation of Clermond-Ferrand artificial neural network
-//
-// Reference for the original FORTRAN version "mlpl3.F":
-//      Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand
-//                 Team members
-//      Copyright: Laboratoire Physique Corpusculaire
-//                 Universite de Blaise Pascal, IN2P3/CNRS
-//_______________________________________________________________________
+/*! \class TMVA::MethodCFMlpANN_Utils
+\ingroup TMVA
+
+Implementation of the Clermont-Ferrand artificial neural network
+
+Reference for the original FORTRAN version "mlpl3.F":
+  - Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand
+               Team members
+  - Copyright: Laboratoire Physique Corpusculaire
+               Universite de Blaise Pascal, IN2P3/CNRS
+*/
+
 
 #include <string>
 #include <iostream>
@@ -73,16 +75,18 @@ using std::cout;
 using std::endl;
 
 ClassImp(TMVA::MethodCFMlpANN_Utils)
-   
+
 const Int_t       TMVA::MethodCFMlpANN_Utils::fg_max_nVar_   = max_nVar_;
 const Int_t       TMVA::MethodCFMlpANN_Utils::fg_max_nNodes_ = max_nNodes_;
 const char* const TMVA::MethodCFMlpANN_Utils::fg_MethodName  = "--- CFMlpANN                 ";
 
+////////////////////////////////////////////////////////////////////////////////
+/// default constructor
+
 TMVA::MethodCFMlpANN_Utils::MethodCFMlpANN_Utils():fg_100(100),
 fg_0(0),
 fg_999(999)
 {
-   // default constructor
    Int_t i(0);
    for(i=0; i<max_nVar_;++i) fVarn_1.xmin[i] = 0;
    fCost_1.ancout = 0;
@@ -107,7 +111,7 @@ fg_999(999)
    for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.ww[i] = 0;
    for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.x[i] = 0;
    for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.y[i] = 0;
-      
+
    fParam_1.eeps = 0;
    fParam_1.epsmin = 0;
    fParam_1.epsmax = 0;
@@ -135,13 +139,17 @@ fg_999(999)
    fLogger = 0;
 }
 
-TMVA::MethodCFMlpANN_Utils::~MethodCFMlpANN_Utils() 
+////////////////////////////////////////////////////////////////////////////////
+/// Destructor.
+
+TMVA::MethodCFMlpANN_Utils::~MethodCFMlpANN_Utils()
 {
-   // destructor
 }
 
-void TMVA::MethodCFMlpANN_Utils::Train_nn( Double_t *tin2, Double_t *tout2, Int_t *ntrain, 
-                                           Int_t *ntest, Int_t *nvar2, Int_t *nlayer, 
+////////////////////////////////////////////////////////////////////////////////
+
+void TMVA::MethodCFMlpANN_Utils::Train_nn( Double_t *tin2, Double_t *tout2, Int_t *ntrain,
+                                           Int_t *ntest, Int_t *nvar2, Int_t *nlayer,
                                            Int_t *nodes, Int_t *ncycle )
 {
    // training interface - called from MethodCFMlpANN class object
@@ -180,7 +188,7 @@ void TMVA::MethodCFMlpANN_Utils::Train_nn( Double_t *tin2, Double_t *tout2, Int_
    if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {
       // imax = 2;
       fParam_1.lclass = 2;
-   } 
+   }
    else {
       // imax = fNeur_1.neuron[fParam_1.layerm - 1] << 1;
       fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];
@@ -194,8 +202,10 @@ void TMVA::MethodCFMlpANN_Utils::Train_nn( Double_t *tin2, Double_t *tout2, Int_
    fVarn3_1.Delete();
 }
 
-void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char *, Int_t *ntrain, 
-                                             Int_t *ntest, Int_t *numlayer, Int_t *nodes, 
+////////////////////////////////////////////////////////////////////////////////
+
+void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char *, Int_t *ntrain,
+                                             Int_t *ntest, Int_t *numlayer, Int_t *nodes,
                                              Int_t *numcycle, Int_t /*det_len*/)
 {
    // first initialisation of ANN
@@ -208,7 +218,7 @@ void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char *, Int_t *ntrain,
    /* NTEST: Nb of events used for the test */
    /* TIN: Input variables */
    /* TOUT: type of the event */
- 
+
    fCost_1.ancout = 1e30;
 
    /* .............. HardCoded Values .................... */
@@ -239,7 +249,7 @@ void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char *, Int_t *ntrain,
    fParam_1.nunisor = 30;
    fParam_1.nunishort = 48;
    fParam_1.nunap = 40;
-   
+
    ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
    ULog() << kINFO << "Total number of training cycles    : " << fParam_1.nblearn << Endl;
    if (fParam_1.nevl > max_Events_) {
@@ -268,7 +278,7 @@ void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char *, Int_t *ntrain,
       ULog() << kINFO << "Number of layers for neuron(" << j << "): " << fNeur_1.neuron[j - 1] << Endl;
    }
    if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {
-      printf("Error: wrong number of classes at ouput layer: %i != 2 ==> abort\n",
+      printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
              fNeur_1.neuron[fParam_1.layerm - 1]);
       Arret("stop");
    }
@@ -316,9 +326,11 @@ void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char *, Int_t *ntrain,
 #define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
 #define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::Wini()
 {
-   // [smart comments to be added]
    Int_t i__1, i__2, i__3;
    Int_t i__, j;
    Int_t layer;
@@ -345,15 +357,17 @@ void TMVA::MethodCFMlpANN_Utils::Wini()
 #define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
 #define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::En_avant(Int_t *ievent)
 {
-   // [smart comments to be added]
    Int_t i__1, i__2, i__3;
 
    Double_t f;
    Int_t i__, j;
    Int_t layer;
-   
+
    i__1 = fNeur_1.neuron[0];
    for (i__ = 1; i__ <= i__1; ++i__) {
       y_ref(1, i__) = xeev_ref(*ievent, i__);
@@ -365,7 +379,7 @@ void TMVA::MethodCFMlpANN_Utils::En_avant(Int_t *ievent)
          x_ref(layer + 1, j) = 0.;
          i__3 = fNeur_1.neuron[layer - 1];
          for (i__ = 1; i__ <= i__3; ++i__) {
-            x_ref(layer + 1, j) = ( x_ref(layer + 1, j) + y_ref(layer, i__) 
+            x_ref(layer + 1, j) = ( x_ref(layer + 1, j) + y_ref(layer, i__)
                                     * w_ref(layer + 1, j, i__) );
          }
          x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
@@ -374,7 +388,7 @@ void TMVA::MethodCFMlpANN_Utils::En_avant(Int_t *ievent)
          y_ref(layer + 1, j) = f;
       }
    }
-} 
+}
 
 #undef ww_ref
 #undef y_ref
@@ -384,9 +398,11 @@ void TMVA::MethodCFMlpANN_Utils::En_avant(Int_t *ievent)
 
 #define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
 {
-   // [smart comments to be added]
    Int_t i__1, i__2;
 
    Int_t i__, j, k, l;
@@ -405,7 +421,7 @@ void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double
    }
    i__1 = fParam_1.nevl;
    for (i__ = 1; i__ <= i__1; ++i__) {
-      DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar, 
+      DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,
                     xpg, &fVarn_1.nclass[i__ - 1], &ikend);
       if (ikend == -1) {
          break;
@@ -414,7 +430,7 @@ void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double
       CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);
 
       i__2 = fParam_1.nvar;
-      for (j = 1; j <= i__2; ++j) {        
+      for (j = 1; j <= i__2; ++j) {
          xeev_ref(i__, j) = xpg[j - 1];
       }
       if (fVarn_1.iclass == 1) {
@@ -454,11 +470,11 @@ void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double
          if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (
                                                                            Float_t)0.) {
             xeev_ref(i__, l) = (Float_t)0.;
-         } 
+         }
          else {
-            xeev_ref(i__, l) = xeev_ref(i__, l) - (fVarn_1.xmax[l - 1] + 
+            xeev_ref(i__, l) = xeev_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                                    fVarn_1.xmin[l - 1]) / 2.;
-            xeev_ref(i__, l) = xeev_ref(i__, l) / ((fVarn_1.xmax[l - 1] - 
+            xeev_ref(i__, l) = xeev_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                                     fVarn_1.xmin[l - 1]) / 2.);
          }
       }
@@ -477,9 +493,11 @@ void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double
 #define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
 #define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
 {
-   // [smart comments to be added]
    Int_t i__1, i__2, i__3;
 
    Double_t f;
@@ -490,7 +508,7 @@ void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
    for (i__ = 1; i__ <= i__1; ++i__) {
       if (fVarn_1.nclass[*ievent - 1] == i__) {
          fNeur_1.o[i__ - 1] = 1.;
-      } 
+      }
       else {
          fNeur_1.o[i__ - 1] = -1.;
       }
@@ -500,12 +518,12 @@ void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
    for (i__ = 1; i__ <= i__1; ++i__) {
       f = y_ref(l, i__);
       df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
-      del_ref(l, i__) = df * (fNeur_1.o[i__ - 1] - y_ref(l, i__)) * 
+      del_ref(l, i__) = df * (fNeur_1.o[i__ - 1] - y_ref(l, i__)) *
          fDel_1.coef[i__ - 1];
       delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
       i__2 = fNeur_1.neuron[l - 2];
       for (j = 1; j <= i__2; ++j) {
-         delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 
+         delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l -
                                                                        1, j);
          /* L20: */
       }
@@ -533,12 +551,12 @@ void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
    for (l = 2; l <= i__1; ++l) {
       i__2 = fNeur_1.neuron[l - 1];
       for (i__ = 1; i__ <= i__2; ++i__) {
-         deltaww_ref(l, i__) = delww_ref(l, i__) + fParam_1.eta * 
+         deltaww_ref(l, i__) = delww_ref(l, i__) + fParam_1.eta *
             deltaww_ref(l, i__);
          ww_ref(l, i__) = ww_ref(l, i__) + deltaww_ref(l, i__);
          i__3 = fNeur_1.neuron[l - 2];
          for (j = 1; j <= i__3; ++j) {
-            delta_ref(l, i__, j) = delw_ref(l, i__, j) + fParam_1.eta * 
+            delta_ref(l, i__, j) = delw_ref(l, i__, j) + fParam_1.eta *
                delta_ref(l, i__, j);
             w_ref(l, i__, j) = w_ref(l, i__, j) + delta_ref(l, i__, j);
          }
@@ -559,6 +577,8 @@ void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
 #define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
 #define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
 
+////////////////////////////////////////////////////////////////////////////////
+
 void TMVA::MethodCFMlpANN_Utils::Out( Int_t *iii, Int_t *maxcycle )
 {
    // write weights to file
@@ -574,6 +594,8 @@ void TMVA::MethodCFMlpANN_Utils::Out( Int_t *iii, Int_t *maxcycle )
 #define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
 #define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]
 
+////////////////////////////////////////////////////////////////////////////////
+
 void TMVA::MethodCFMlpANN_Utils::Innit( char *det, Double_t *tout2, Double_t *tin2, Int_t )
 {
    // Initialization
@@ -604,13 +626,13 @@ void TMVA::MethodCFMlpANN_Utils::Innit( char *det, Double_t *tout2, Double_t *ti
    }
    if (fParam_1.ichoi == 1) {
       Inl();
-   } 
+   }
    else {
       Wini();
    }
    kkk = 0;
    i__3 = fParam_1.nblearn;
-   Timer timer( i__3, "CFMlpANN" ); 
+   Timer timer( i__3, "CFMlpANN" );
    Int_t num = i__3/100;
 
    for (i1 = 1; i1 <= i__3; ++i1) {
@@ -639,9 +661,9 @@ void TMVA::MethodCFMlpANN_Utils::Innit( char *det, Double_t *tout2, Double_t *ti
                nrest = i__ % fParam_1.lclass;
                fParam_1.ndiv = i__ / fParam_1.lclass;
                if (nrest != 0) {
-                  ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) * 
+                  ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) *
                      nevod;
-               } 
+               }
                else {
                   ievent = fParam_1.ndiv;
                }
@@ -668,9 +690,11 @@ void TMVA::MethodCFMlpANN_Utils::Innit( char *det, Double_t *tout2, Double_t *ti
 #undef deltaww_ref
 #undef delta_ref
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::TestNN()
 {
-   // [smart comments to be added]
    Int_t i__1;
 
    Int_t i__;
@@ -679,18 +703,18 @@ void TMVA::MethodCFMlpANN_Utils::TestNN()
    ktest = 0;
    if (fParam_1.layerm > max_nLayers_) {
       ktest = 1;
-      printf("Error: number of layers exceeds maximum: %i, %i ==> abort", 
+      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
              fParam_1.layerm, max_nLayers_ );
       Arret("modification of mlpl3_param_lim.inc is needed ");
    }
    if (fParam_1.nevl > max_Events_) {
       ktest = 1;
-      printf("Error: number of training events exceeds maximum: %i, %i ==> abort", 
+      printf("Error: number of training events exceeds maximum: %i, %i ==> abort",
              fParam_1.nevl, max_Events_ );
       Arret("modification of mlpl3_param_lim.inc is needed ");
    }
    if (fParam_1.nevt > max_Events_) {
-      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort", 
+      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
              fParam_1.nevt, max_Events_ );
       Arret("modification of mlpl3_param_lim.inc is needed ");
    }
@@ -702,7 +726,7 @@ void TMVA::MethodCFMlpANN_Utils::TestNN()
    }
    if (fParam_1.nvar > max_nVar_) {
       ktest = 1;
-      printf("Error: number of variables exceeds maximum: %i, %i ==> abort", 
+      printf("Error: number of variables exceeds maximum: %i, %i ==> abort",
              fParam_1.nvar, fg_max_nVar_ );
       Arret("modification of mlpl3_param_lim.inc is needed");
    }
@@ -710,7 +734,7 @@ void TMVA::MethodCFMlpANN_Utils::TestNN()
    for (i__ = 1; i__ <= i__1; ++i__) {
       if (fNeur_1.neuron[i__ - 1] > max_nNodes_) {
          ktest = 1;
-         printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort", 
+         printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort",
                 i__, fg_max_nNodes_ );
       }
    }
@@ -722,15 +746,17 @@ void TMVA::MethodCFMlpANN_Utils::TestNN()
 
 #define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::Cout( Int_t * /*i1*/, Double_t *xxx )
 {
-   // [smart comments to be added]
    Int_t i__1, i__2;
    Double_t d__1;
-   
+
    Double_t c__;
    Int_t i__, j;
-   
+
    c__ = 0.;
    i__1 = fParam_1.nevl;
    for (i__ = 1; i__ <= i__1; ++i__) {
@@ -739,11 +765,11 @@ void TMVA::MethodCFMlpANN_Utils::Cout( Int_t * /*i1*/, Double_t *xxx )
       for (j = 1; j <= i__2; ++j) {
          if (fVarn_1.nclass[i__ - 1] == j) {
             fNeur_1.o[j - 1] = 1.;
-         } 
+         }
          else {
             fNeur_1.o[j - 1] = -1.;
          }
-         // Computing 2nd power 
+         // Computing 2nd power
          d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
          c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
       }
@@ -758,9 +784,11 @@ void TMVA::MethodCFMlpANN_Utils::Cout( Int_t * /*i1*/, Double_t *xxx )
 #define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
 #define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::Inl()
 {
-   // [smart comments to be added]
    Int_t i__1, i__2;
 
    Int_t jmax, k, layer, kk, nq, nr;
@@ -773,7 +801,7 @@ void TMVA::MethodCFMlpANN_Utils::Inl()
       nr = fNeur_1.neuron[layer] - nq * 10;
       if (nr == 0) {
          kk = nq;
-      } 
+      }
       else {
          kk = nq + 1;
       }
@@ -792,14 +820,16 @@ void TMVA::MethodCFMlpANN_Utils::Inl()
 #undef ww_ref
 #undef w_ref
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 Double_t TMVA::MethodCFMlpANN_Utils::Fdecroi( Int_t *i__ )
 {
-   // [smart comments to be added]
    Double_t ret_val;
-   
+
    Double_t aaa, bbb;
-   
-   aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn * 
+
+   aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn *
                                                            fParam_1.nevl - 1);
    bbb = fParam_1.epsmax - aaa;
    ret_val = aaa * (Double_t) (*i__) + bbb;
@@ -808,12 +838,14 @@ Double_t TMVA::MethodCFMlpANN_Utils::Fdecroi( Int_t *i__ )
 
 #define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
 
-void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/, 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
+void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/,
                                           Double_t * /*yyy*/, char * /*det*/, Int_t  /*det_len*/ )
 {
-   // [smart comments to be added]
    Int_t i__1, i__2;
-   
+
    Double_t xmok[max_nNodes_];
    // Float_t xpaw;
    Double_t xmko[max_nNodes_];
@@ -827,7 +859,7 @@ void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/,
    //    vbn[i__ - 1] = (Float_t)0.;
    // }
    if (*ilearn == 1) {
-      // AH: removed output 
+      // AH: removed output
    }
    i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
    for (i__ = 1; i__ <= i__1; ++i__) {
@@ -845,7 +877,7 @@ void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/,
          if (fVarn_1.nclass[i__ - 1] == j) {
             ++nok[j - 1];
             xmok[j - 1] += y_ref(fParam_1.layerm, j);
-         } 
+         }
          else {
             ++nko[j - 1];
             xmko[j - 1] += y_ref(fParam_1.layerm, j);
@@ -869,10 +901,11 @@ void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/,
 
 #undef y_ref
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 Double_t TMVA::MethodCFMlpANN_Utils::Sen3a( void )
 {
-   // [smart comments to be added]
-
    // Initialized data
    Int_t    m12 = 4096;
    Double_t f1  = 2.44140625e-4;
@@ -888,7 +921,7 @@ Double_t TMVA::MethodCFMlpANN_Utils::Sen3a( void )
    Double_t ret_val;
    Int_t    k3, l3, k2, l2, k1, l1;
 
-   // reference: /k.d.senne/j. stochastics/ vol 1,no 3 (1974),pp.215-38 
+   // reference: K.D. Senne, J. Stochastics, Vol. 1, No. 3 (1974), pp. 215-38
    k3 = fg_i3 * j3;
    l3 = k3 / m12;
    k2 = fg_i2 * j3 + fg_i3 * j2 + l3;
@@ -901,7 +934,9 @@ Double_t TMVA::MethodCFMlpANN_Utils::Sen3a( void )
    ret_val = f1 * (Double_t) fg_i1 + f2 * (Float_t) fg_i2 + f3 * (Double_t) fg_i3;
 
    return ret_val;
-} 
+}
+
+////////////////////////////////////////////////////////////////////////////////
 
 void TMVA::MethodCFMlpANN_Utils::Foncf( Int_t *i__, Double_t *u, Double_t *f )
 {
@@ -910,10 +945,10 @@ void TMVA::MethodCFMlpANN_Utils::Foncf( Int_t *i__, Double_t *u, Double_t *f )
 
    if (*u / fDel_1.temp[*i__ - 1] > 170.) {
       *f = .99999999989999999;
-   } 
+   }
    else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
       *f = -.99999999989999999;
-   } 
+   }
    else {
       yy = TMath::Exp(-(*u) / fDel_1.temp[*i__ - 1]);
       *f = (1. - yy) / (yy + 1.);
@@ -924,9 +959,11 @@ void TMVA::MethodCFMlpANN_Utils::Foncf( Int_t *i__, Double_t *u, Double_t *f )
 
 #define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::Cout2( Int_t * /*i1*/, Double_t *yyy )
 {
-   // [smart comments to be added]
    Int_t i__1, i__2;
    Double_t d__1;
 
@@ -941,7 +978,7 @@ void TMVA::MethodCFMlpANN_Utils::Cout2( Int_t * /*i1*/, Double_t *yyy )
       for (j = 1; j <= i__2; ++j) {
          if (fVarn_1.mclass[i__ - 1] == j) {
             fNeur_1.o[j - 1] = 1.;
-         } 
+         }
          else {
             fNeur_1.o[j - 1] = -1.;
          }
@@ -958,9 +995,11 @@ void TMVA::MethodCFMlpANN_Utils::Cout2( Int_t * /*i1*/, Double_t *yyy )
 
 #define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
 {
-   // [smart comments to be added]
    Int_t i__1, i__2;
 
    Int_t i__, j, l;
@@ -980,7 +1019,7 @@ void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t
    // }
    i__1 = fParam_1.nevt;
    for (i__ = 1; i__ <= i__1; ++i__) {
-      DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar, 
+      DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,
                     xpg, &fVarn_1.mclass[i__ - 1], &ikend);
 
       if (ikend == -1) {
@@ -992,7 +1031,7 @@ void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t
          xx_ref(i__, j) = xpg[j - 1];
       }
    }
- 
+
    i__1 = fParam_1.nevt;
    for (i__ = 1; i__ <= i__1; ++i__) {
       i__2 = fParam_1.nvar;
@@ -1000,16 +1039,16 @@ void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t
          if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (
                                                                            Float_t)0.) {
             xx_ref(i__, l) = (Float_t)0.;
-         } 
+         }
          else {
-            xx_ref(i__, l) = xx_ref(i__, l) - (fVarn_1.xmax[l - 1] + 
+            xx_ref(i__, l) = xx_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                                fVarn_1.xmin[l - 1]) / 2.;
-            xx_ref(i__, l) = xx_ref(i__, l) / ((fVarn_1.xmax[l - 1] - 
+            xx_ref(i__, l) = xx_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                                 fVarn_1.xmin[l - 1]) / 2.);
          }
       }
    }
-} 
+}
 
 #undef xx_ref
 
@@ -1019,9 +1058,11 @@ void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t
 #define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
 #define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::En_avant2( Int_t *ievent )
 {
-   // [smart comments to be added]
    Int_t i__1, i__2, i__3;
 
    Double_t f;
@@ -1039,7 +1080,7 @@ void TMVA::MethodCFMlpANN_Utils::En_avant2( Int_t *ievent )
          x_ref(layer + 1, j) = 0.;
          i__3 = fNeur_1.neuron[layer - 1];
          for (i__ = 1; i__ <= i__3; ++i__) {
-            x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__) 
+            x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__)
                * w_ref(layer + 1, j, i__);
          }
          x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
@@ -1057,6 +1098,8 @@ void TMVA::MethodCFMlpANN_Utils::En_avant2( Int_t *ievent )
 #undef x_ref
 #undef w_ref
 
+////////////////////////////////////////////////////////////////////////////////
+
 void TMVA::MethodCFMlpANN_Utils::Arret( const char* mot )
 {
    // fatal error occurred: stop execution
@@ -1064,11 +1107,13 @@ void TMVA::MethodCFMlpANN_Utils::Arret( const char* mot )
    std::exit(1);
 }
 
+////////////////////////////////////////////////////////////////////////////////
+/// [smart comments to be added]
+
 void TMVA::MethodCFMlpANN_Utils::CollectVar( Int_t * /*nvar*/, Int_t * /*class__*/, Double_t * /*xpg*/ )
 {
-   // // [smart comments to be added]
    // Int_t i__1;
-   
+
    // Int_t i__;
    // Float_t x[201];
 
diff --git a/tmva/tmva/src/MethodCategory.cxx b/tmva/tmva/src/MethodCategory.cxx
index 8a6f70d952e..c2cbe8085cf 100644
--- a/tmva/tmva/src/MethodCategory.cxx
+++ b/tmva/tmva/src/MethodCategory.cxx
@@ -30,13 +30,17 @@
  * (http://tmva.sourceforge.net/LICENSE)                                          *
  **********************************************************************************/
 
-//__________________________________________________________________________
-//
-// This class is meant to allow categorisation of the data. For different //
-// categories, different classifiers may be booked and different variab-  //
-// les may be considered. The aim is to account for the difference that   //
-// is due to different locations/angles.                                  //
-////////////////////////////////////////////////////////////////////////////////
+/*! \class TMVA::MethodCategory
+\ingroup TMVA
+
+Class for categorizing the phase space
+
+This class is meant to allow categorisation of the data. For different
+categories, different classifiers may be booked and different variables
+may be considered. The aim is to account for the difference that
+is due to different locations/angles.
+*/
+
 
 #include "TMVA/MethodCategory.h"
 
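A rough usage sketch follows; the cut expressions, variable lists, the Fisher sub-classifier, and the AddMethod argument order are illustrative assumptions taken from typical TMVA category examples, not from this file:

    // book the Category method, then register one sub-classifier per disjoint region
    TMVA::MethodCategory* cat = dynamic_cast<TMVA::MethodCategory*>(
       factory->BookMethod( dataloader, TMVA::Types::kCategory, "Category", "" ) );
    cat->AddMethod( TCut("abs(eta)<=1.3"), "var1:var2:var3", TMVA::Types::kFisher,
                    "Fisher_barrel", "!H:!V:Fisher" );
    cat->AddMethod( TCut("abs(eta)>1.3"),  "var1:var2:var3", TMVA::Types::kFisher,
                    "Fisher_endcap", "!H:!V:Fisher" );

The cuts handed to the sub-classifiers must be disjoint, matching the requirement spelled out in GetHelpMessage below.
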
@@ -162,7 +166,7 @@ TMVA::IMethod* TMVA::MethodCategory::AddMethod( const TCut& theCut,
    method->ProcessSetup();
    method->SetFile(fFile);
    method->SetSilentFile(IsSilentFile());
-   
+
 
    // set or create correct method base dir for added method
    const TString dirName(Form("Method_%s",method->GetMethodTypeName().Data()));
@@ -238,7 +242,7 @@ TMVA::DataSetInfo& TMVA::MethodCategory::CreateCategoryDSI(const TCut& theCut,
 
       // check the variables of the old dsi for the variable that we want to add
       for (itrVarInfo = oldDSI.GetVariableInfos().begin(); itrVarInfo != oldDSI.GetVariableInfos().end(); itrVarInfo++) {
-         if((*itrVariables==itrVarInfo->GetLabel()) ) { // || (*itrVariables==itrVarInfo->GetExpression())) { 
+         if((*itrVariables==itrVarInfo->GetLabel()) ) { // || (*itrVariables==itrVarInfo->GetExpression())) {
             // don't compare the expression, since the user might take two times the same expression, but with different labels
             // and apply different transformations to the variables.
             dsi->AddVariable(*itrVarInfo);
@@ -247,7 +251,7 @@ TMVA::DataSetInfo& TMVA::MethodCategory::CreateCategoryDSI(const TCut& theCut,
          }
          counter++;
       }
-      
+
       // check the spectators of the old dsi for the variable that we want to add
       for (itrVarInfo = oldDSI.GetSpectatorInfos().begin(); itrVarInfo != oldDSI.GetSpectatorInfos().end(); itrVarInfo++) {
          if((*itrVariables==itrVarInfo->GetLabel()) ) { // || (*itrVariables==itrVarInfo->GetExpression())) {
@@ -281,7 +285,7 @@ TMVA::DataSetInfo& TMVA::MethodCategory::CreateCategoryDSI(const TCut& theCut,
    // set classes and cuts
    UInt_t nClasses=oldDSI.GetNClasses();
    TString className;
-  
+
    for (UInt_t i=0; i<nClasses; i++) {
       className = oldDSI.GetClassInfo(i)->GetName();
       dsi->AddClass(className);
@@ -298,7 +302,7 @@ TMVA::DataSetInfo& TMVA::MethodCategory::CreateCategoryDSI(const TCut& theCut,
 
    DataSetInfo& dsiReference= (*dsi);
 
-   return dsiReference;  
+   return dsiReference;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -339,7 +343,7 @@ void TMVA::MethodCategory::InitCircularTree(const DataSetInfo& dsi)
       // The add-then-remove can lead to  a problem if gDirectory points to the same directory (for example
       // gROOT) in the current thread and another one (and both try to add to the directory at the same time).
       TDirectory::TContext ctxt(nullptr);
-      fCatTree = new TTree(Form("Circ%s",GetMethodName().Data()),"Circlar Tree for categorization");
+      fCatTree = new TTree(Form("Circ%s",GetMethodName().Data()),"Circular Tree for categorization");
       fCatTree->SetCircular(1);
    }
 
@@ -533,7 +537,7 @@ void TMVA::MethodCategory::ReadWeightsFromXML( void* wghtnode )
 ////////////////////////////////////////////////////////////////////////////////
 /// process user options
 
-void TMVA::MethodCategory::ProcessOptions() 
+void TMVA::MethodCategory::ProcessOptions()
 {
 }
 
@@ -548,18 +552,18 @@ void TMVA::MethodCategory::GetHelpMessage() const
    Log() << Endl;
    Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
    Log() << Endl;
-   Log() << "This method allows to define different categories of events. The" <<Endl;  
-   Log() << "categories are defined via cuts on the variables. For each" << Endl; 
+   Log() << "This method allows to define different categories of events. The" <<Endl;
+   Log() << "categories are defined via cuts on the variables. For each" << Endl;
    Log() << "category, a different classifier and set of variables can be" <<Endl;
    Log() << "specified. The categories which are defined for this method must" << Endl;
    Log() << "be disjoint." << Endl;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// no ranking 
+/// no ranking
 
 const TMVA::Ranking* TMVA::MethodCategory::CreateRanking()
-{ 
+{
    return 0;
 }
 
@@ -569,7 +573,7 @@ Bool_t TMVA::MethodCategory::PassesCut( const Event* ev, UInt_t methodIdx )
 {
    // if it's not a simple 'spectator' variable (0 or 1) that the categories are defined by
    // (but rather some 'formula' (i.e. eta>0), then this formulas are stored in fCatTree and that
-   // one will be evaluated.. (the formulae return 'true' or 'false' 
+   // one will be evaluated (the formulae return 'true' or 'false').
    if (fCatTree) {
       if (methodIdx>=fCatFormulas.size()) {
          Log() << kFATAL << "Large method index " << methodIdx << ", number of category formulas = "
@@ -577,7 +581,7 @@ Bool_t TMVA::MethodCategory::PassesCut( const Event* ev, UInt_t methodIdx )
       }
       TTreeFormula* f = fCatFormulas[methodIdx];
       return f->EvalInstance(0) > 0.5;
-   } 
+   }
    // otherwise, it simply looks if "variable == true"  ("greater 0.5 to be "sure" )
    else {
 
@@ -636,7 +640,7 @@ Double_t TMVA::MethodCategory::GetMvaValue( Double_t* err, Double_t* errUpper )
 ////////////////////////////////////////////////////////////////////////////////
 /// returns the mva value of the right sub-classifier
 
-const std::vector<Float_t> &TMVA::MethodCategory::GetRegressionValues() 
+const std::vector<Float_t> &TMVA::MethodCategory::GetRegressionValues()
 {
    if (fMethods.empty()) return MethodBase::GetRegressionValues();
 
-- 
GitLab