diff --git a/tutorials/fit/ConfidenceIntervals.C b/tutorials/fit/ConfidenceIntervals.C
index b3fca2012a8a689ab4b2f7b7c8cfbbfbe2005b3c..9193dfa9cac99ee9ffbc8738288456c5f594e3a5 100644
--- a/tutorials/fit/ConfidenceIntervals.C
+++ b/tutorials/fit/ConfidenceIntervals.C
@@ -1,6 +1,6 @@
 /// \file
 /// \ingroup tutorial_fit
-/// \notebook
+/// \notebook -js
 /// Illustrates TVirtualFitter::GetConfidenceIntervals
 /// This method computes confidence intervals for the fitted function
 ///
@@ -41,12 +41,12 @@ void ConfidenceIntervals()
    fpol->SetLineWidth(2);
    gr->Fit(fpol, "Q");
 
-   //Create a TGraphErrors to hold the confidence intervals
+   /*Create a TGraphErrors to hold the confidence intervals*/
    TGraphErrors *grint = new TGraphErrors(ngr);
    grint->SetTitle("Fitted line with .95 conf. band");
    for (i=0; i<ngr; i++)
       grint->SetPoint(i, gr->GetX()[i], 0);
-   //Compute the confidence intervals at the x points of the created graph
+   /*Compute the confidence intervals at the x points of the created graph*/
    (TVirtualFitter::GetFitter())->GetConfidenceIntervals(grint);
    //Now the "grint" graph contains function values as its y-coordinates
    //and confidence intervals as the errors on these coordinates
@@ -70,7 +70,7 @@ void ConfidenceIntervals()
    h->Fit(f, "Q");
    h->Draw();
 
-   //Create a histogram to hold the confidence intervals
+   /*Create a histogram to hold the confidence intervals*/
    TH1D *hint = new TH1D("hint",
       "Fitted gaussian with .95 conf.band", 100, -3, 3);
    (TVirtualFitter::GetFitter())->GetConfidenceIntervals(hint);
@@ -104,7 +104,7 @@ void ConfidenceIntervals()
    //Fit the graph
    f2->SetParameters(0.5,1.5);
    gr2->Fit(f2, "Q");
-   //Compute the confidence intervals
+   /*Compute the confidence intervals*/
    (TVirtualFitter::GetFitter())->GetConfidenceIntervals(grint2);
    //Now the "grint2" graph contains function values as z-coordinates
    //and confidence intervals as their errors
diff --git a/tutorials/fit/ErrorIntegral.C b/tutorials/fit/ErrorIntegral.C
index c79013d1eb50cefd53d656814acc388040a5bb61..f57e3932ac1508157d3e26d26074ea6f6932286d 100644
--- a/tutorials/fit/ErrorIntegral.C
+++ b/tutorials/fit/ErrorIntegral.C
@@ -1,6 +1,6 @@
 /// \file
 /// \ingroup tutorial_fit
-/// \notebook
+/// \notebook -js
 /// Estimate the error in the integral of a fitted function
 /// taking into account the errors in the parameters resulting from the fit.
 /// The error is estimated also using the correlations values obtained from
@@ -42,42 +42,42 @@ double f(double * x, double * p) {
 
 #ifdef HAVE_OLD_ROOT_VERSION
 //____________________________________________________________________
-double df_dPar(double * x, double * p) {
-   // derivative of the function w.r..t parameters
-   // use calculated derivatives from TF1::GradientPar
+   double df_dPar(double * x, double * p) {
+      // derivative of the function w.r.t. parameters
+      // use calculated derivatives from TF1::GradientPar
 
-   double grad[NPAR];
-   // p is used to specify for which parameter the derivative is computed
-   int ipar = int(p[0] );
-   assert (ipar >=0 && ipar < NPAR );
+      double grad[NPAR];
+      // p is used to specify for which parameter the derivative is computed
+      int ipar = int(p[0] );
+      assert (ipar >=0 && ipar < NPAR );
 
-   assert(fitFunc);
-   fitFunc->GradientPar(x, grad);
+      assert(fitFunc);
+      fitFunc->GradientPar(x, grad);
 
-   return grad[ipar];
-}
+      return grad[ipar];
+   }
 
 //____________________________________________________________________
-double IntegralError(int npar, double * c, double * errPar,
-   double * covMatrix = 0) {
-// calculate the error on the integral given the parameter error and
-// the integrals of the gradient functions c[]
-
-   double err2 = 0;
-   for (int i = 0; i < npar; ++i) {
-      if (covMatrix == 0) { // assume error are uncorrelated
-         err2 += c[i] * c[i] * errPar[i] * errPar[i];
-      } else {
-         double s = 0;
-         for (int j = 0; j < npar; ++j) {
-            s += covMatrix[i*npar + j] * c[j];
+   double IntegralError(int npar, double * c, double * errPar,
+      double * covMatrix = 0) {
+      // calculate the error on the integral given the parameter error and
+      // the integrals of the gradient functions c[]
+
+      double err2 = 0;
+      for (int i = 0; i < npar; ++i) {
+            if (covMatrix == 0) { // assume errors are uncorrelated
+            err2 += c[i] * c[i] * errPar[i] * errPar[i];
+         } else {
+            double s = 0;
+            for (int j = 0; j < npar; ++j) {
+               s += covMatrix[i*npar + j] * c[j];
+            }
+            err2 += c[i] * s;
          }
-         err2 += c[i] * s;
       }
-   }
 
-   return TMath::Sqrt(err2);
-}
+      return TMath::Sqrt(err2);
+   }
 #endif
 
 //____________________________________________________________________
@@ -94,7 +94,7 @@ void ErrorIntegral() {
 
    h1->Draw();
 
-   // calculate the integral
+   /* calculate the integral*/
    double integral = fitFunc->Integral(0,1);
 
    TVirtualFitter * fitter = TVirtualFitter::GetFitter();
@@ -103,8 +103,8 @@ void ErrorIntegral() {
 
 #ifdef HAVE_OLD_ROOT_VERSION
 
-   // calculate now the error (needs the derivatives of the function
-   // w..r.t the parameters)
+   /* calculate now the error (needs the derivatives of the function
+   w.r.t. the parameters)*/
    TF1 * deriv_par0 = new TF1("dfdp0",df_dPar,0,1,1);
    deriv_par0->SetParameter(0,0);
 
@@ -118,17 +118,15 @@ void ErrorIntegral() {
 
    double * epar = fitFunc->GetParErrors();
 
-   // without correlations
+   /* without correlations*/
    double sigma_integral_0 = IntegralError(2,c,epar);
 
-
-
-   // with correlations
+   /*with correlations*/
    double sigma_integral = IntegralError(2,c,epar,covMatrix);
 
 #else
 
-   // using new function in TF1 (from 12/6/2007)
+   /* using new function in TF1 (from 12/6/2007)*/
    double sigma_integral = fitFunc->IntegralError(0,1);
 
 #endif
diff --git a/tutorials/fit/fitLinear2.C b/tutorials/fit/fitLinear2.C
index a4b89f151537f81900aed3d7f5530de0a861e455..462e456815c91c2fe432c7339225bcf49553f4d6 100644
--- a/tutorials/fit/fitLinear2.C
+++ b/tutorials/fit/fitLinear2.C
@@ -20,7 +20,7 @@ void fitLinear2()
 {
    Int_t n=100;
    Int_t i;
-   TRandom rand;
+   TRandom rand1;
    TLinearFitter *lf=new TLinearFitter(5);
 
    //The predefined "hypN" functions are the fastest to fit
@@ -32,13 +32,13 @@ void fitLinear2()
 
    //Create the points and put them into the fitter
    for (i=0; i<n; i++){
-      x[0 + i*5] = rand.Uniform(-10, 10);
-      x[1 + i*5] = rand.Uniform(-10, 10);
-      x[2 + i*5] = rand.Uniform(-10, 10);
-      x[3 + i*5] = rand.Uniform(-10, 10);
-      x[4 + i*5] = rand.Uniform(-10, 10);
+      x[0 + i*5] = rand1.Uniform(-10, 10);
+      x[1 + i*5] = rand1.Uniform(-10, 10);
+      x[2 + i*5] = rand1.Uniform(-10, 10);
+      x[3 + i*5] = rand1.Uniform(-10, 10);
+      x[4 + i*5] = rand1.Uniform(-10, 10);
       e[i] = 0.01;
-      y[i] = 4*x[0+i*5] + x[1+i*5] + 2*x[2+i*5] + 3*x[3+i*5] + 0.2*x[4+i*5]  + rand.Gaus()*e[i];
+      y[i] = 4*x[0+i*5] + x[1+i*5] + 2*x[2+i*5] + 3*x[3+i*5] + 0.2*x[4+i*5]  + rand1.Gaus()*e[i];
    }
 
    //To avoid copying the data into the fitter, the following function can be used:
@@ -61,13 +61,13 @@ void fitLinear2()
 
    //Now suppose you want to add some more points and see if the parameters will change
    for (i=n; i<n*2; i++) {
-      x[0+i*5] = rand.Uniform(-10, 10);
-      x[1+i*5] = rand.Uniform(-10, 10);
-      x[2+i*5] = rand.Uniform(-10, 10);
-      x[3+i*5] = rand.Uniform(-10, 10);
-      x[4+i*5] = rand.Uniform(-10, 10);
+      x[0+i*5] = rand1.Uniform(-10, 10);
+      x[1+i*5] = rand1.Uniform(-10, 10);
+      x[2+i*5] = rand1.Uniform(-10, 10);
+      x[3+i*5] = rand1.Uniform(-10, 10);
+      x[4+i*5] = rand1.Uniform(-10, 10);
       e[i] = 0.01;
-      y[i] = 4*x[0+i*5] + x[1+i*5] + 2*x[2+i*5] + 3*x[3+i*5] + 0.2*x[4+i*5]  + rand.Gaus()*e[i];
+      y[i] = 4*x[0+i*5] + x[1+i*5] + 2*x[2+i*5] + 3*x[3+i*5] + 0.2*x[4+i*5]  + rand1.Gaus()*e[i];
    }
 
    //Assign the data the same way as before
diff --git a/tutorials/fit/fitcont.C b/tutorials/fit/fitcont.C
index 0a1eeb2842e424e8ca1f8eb1032eecb28e6001bb..f4cb7dbaad0b1dc859a43cc4dcbff01d75afd886 100644
--- a/tutorials/fit/fitcont.C
+++ b/tutorials/fit/fitcont.C
@@ -32,16 +32,16 @@ void fitcont()
    TCanvas *c2 = new TCanvas("c2","contours",10,10,600,800);
    c2->Divide(1,2);
    c2->cd(1);
-   //get first contour for parameter 1 versus parameter 2
+   /*get first contour for parameter 1 versus parameter 2*/
    TGraph *gr12 = (TGraph*)gMinuit->Contour(40,1,2);
    gr12->Draw("alp");
    c2->cd(2);
-   //Get contour for parameter 0 versus parameter 2  for ERRDEF=2
+   /*Get contour for parameter 0 versus parameter 2  for ERRDEF=2*/
    gMinuit->SetErrorDef(4); //note 4 and not 2!
    TGraph *gr2 = (TGraph*)gMinuit->Contour(80,0,2);
    gr2->SetFillColor(42);
    gr2->Draw("alf");
-   //Get contour for parameter 0 versus parameter 2 for ERRDEF=1
+   /*Get contour for parameter 0 versus parameter 2 for ERRDEF=1*/
    gMinuit->SetErrorDef(1);
    TGraph *gr1 = (TGraph*)gMinuit->Contour(80,0,2);
    gr1->SetFillColor(38);
diff --git a/tutorials/fit/fitslicesy.C b/tutorials/fit/fitslicesy.C
index 2c71d91b9a06b280d3102bf3bbf2a02b40ba4859..fbf439855cff08e1beb5596871fe671a00a4d1cf 100644
--- a/tutorials/fit/fitslicesy.C
+++ b/tutorials/fit/fitslicesy.C
@@ -27,9 +27,9 @@ void fitslicesy() {
    dir.Append("/hsimple.C");
    dir.ReplaceAll("/./","/");
    if (!gInterpreter->IsLoaded(dir.Data())) gInterpreter->LoadMacro(dir.Data());
-   TFile *hsimple = (TFile*)gROOT->ProcessLineFast("hsimple(1)");
-   if (!hsimple) return;
-   TH2F *hpxpy = (TH2F*)hsimple->Get("hpxpy");
+   TFile *hsimpleFile = (TFile*)gROOT->ProcessLineFast("hsimple(1)");
+   if (!hsimpleFile) return;
+   TH2F *hpxpy = (TH2F*)hsimpleFile->Get("hpxpy");
 
 // Create a canvas and divide it
    TCanvas *c1 = new TCanvas("c1","c1",700,500);
@@ -53,7 +53,7 @@ void fitslicesy() {
 // Show fitted "mean" for each slice
    left->cd(2);
    gPad->SetFillColor(33);
-   TH2F *hpxpy_0 = (TH2F*)hsimple->Get("hpxpy_0");
+   TH2F *hpxpy_0 = (TH2F*)hsimpleFile->Get("hpxpy_0");
    hpxpy_0->Draw();
    TPad *right = (TPad*)c1->cd(2);
    right->Divide(1,2);
@@ -61,7 +61,7 @@ void fitslicesy() {
    gPad->SetTopMargin(0.12);
    gPad->SetLeftMargin(0.15);
    gPad->SetFillColor(33);
-   TH2F *hpxpy_1 = (TH2F*)hsimple->Get("hpxpy_1");
+   TH2F *hpxpy_1 = (TH2F*)hsimpleFile->Get("hpxpy_1");
    hpxpy_1->Draw();
 
 // Show fitted "sigma" for each slice
@@ -69,7 +69,7 @@ void fitslicesy() {
    gPad->SetTopMargin(0.12);
    gPad->SetLeftMargin(0.15);
    gPad->SetFillColor(33);
-   TH2F *hpxpy_2 = (TH2F*)hsimple->Get("hpxpy_2");
+   TH2F *hpxpy_2 = (TH2F*)hsimpleFile->Get("hpxpy_2");
    hpxpy_2->SetMinimum(0.8);
    hpxpy_2->Draw();
 
diff --git a/tutorials/foam/foam_demo.C b/tutorials/foam/foam_demo.C
index 04a498573be8982d32f92be16d6fbe95b250385e..0ad860b494f7bc30bdc702e1f19419f4ebc95d39 100644
--- a/tutorials/foam/foam_demo.C
+++ b/tutorials/foam/foam_demo.C
@@ -1,5 +1,6 @@
 /// \file
 /// \ingroup tutorial_FOAM
+/// \notebook -nodraw
 /// Demonstrate the TFoam class.
 ///
 ///  To run this macro type from CINT command line
@@ -32,31 +33,31 @@
 
 class TFDISTR: public TFoamIntegrand {
 public:
-  TFDISTR(){};
-  Double_t Density(int nDim, Double_t *Xarg){
-  // Integrand for mFOAM
-  Double_t Fun1,Fun2,R1,R2;
-  Double_t pos1=1e0/3e0;
-  Double_t pos2=2e0/3e0;
-  Double_t Gam1= 0.100e0;  // as in JPC
-  Double_t Gam2= 0.100e0;  // as in JPC
-  Double_t sPi = sqrt(TMath::Pi());
-  Double_t xn1=1e0;
-  Double_t xn2=1e0;
-  int i;
-  R1=0;
-  R2=0;
-  for(i = 0 ; i<nDim ; i++){
-    R1=R1+(Xarg[i] -pos1)*(Xarg[i] -pos1);
-    R2=R2+(Xarg[i] -pos2)*(Xarg[i] -pos2);
-    xn1=xn1*Gam1*sPi;
-    xn2=xn2*Gam2*sPi;
-  }
-  R1   = sqrt(R1);
-  R2   = sqrt(R2);
-  Fun1 = exp(-(R1*R1)/(Gam1*Gam1))/xn1;  // Gaussian delta-like profile
-  Fun2 = exp(-(R2*R2)/(Gam2*Gam2))/xn2;  // Gaussian delta-like profile
-  return 0.5e0*(Fun1+ Fun2);
+   TFDISTR(){};
+   Double_t Density(int nDim, Double_t *Xarg){
+   // Integrand for mFOAM
+   Double_t Fun1,Fun2,R1,R2;
+   Double_t pos1=1e0/3e0;
+   Double_t pos2=2e0/3e0;
+   Double_t Gam1= 0.100e0;  // as in JPC
+   Double_t Gam2= 0.100e0;  // as in JPC
+   Double_t sPi = sqrt(TMath::Pi());
+   Double_t xn1=1e0;
+   Double_t xn2=1e0;
+   int i;
+   R1=0;
+   R2=0;
+   for(i = 0 ; i<nDim ; i++){
+      R1=R1+(Xarg[i] -pos1)*(Xarg[i] -pos1);
+      R2=R2+(Xarg[i] -pos2)*(Xarg[i] -pos2);
+      xn1=xn1*Gam1*sPi;
+      xn2=xn2*Gam2*sPi;
+   }
+   R1   = sqrt(R1);
+   R2   = sqrt(R2);
+   Fun1 = exp(-(R1*R1)/(Gam1*Gam1))/xn1;  // Gaussian delta-like profile
+   Fun2 = exp(-(R2*R2)/(Gam2*Gam2))/xn2;  // Gaussian delta-like profile
+   return 0.5e0*(Fun1+ Fun2);
 }
   ClassDef(TFDISTR,1) //Class of testing functions for FOAM
 };
@@ -64,92 +65,92 @@ ClassImp(TFDISTR)
 
 Int_t foam_demo()
 {
-  TFile RootFile("foam_demo.root","RECREATE","histograms");
-  long   loop;
-  Double_t MCresult,MCerror,MCwt;
-//=========================================================
-  long NevTot   =     50000;   // Total MC statistics
-  Int_t  kDim   =         2;   // total dimension
-  Int_t  nCells   =     500;   // Number of Cells
-  Int_t  nSampl   =     200;   // Number of MC events per cell in build-up
-  Int_t  nBin     =       8;   // Number of bins in build-up
-  Int_t  OptRej   =       1;   // Wted events for OptRej=0; wt=1 for OptRej=1 (default)
-  Int_t  OptDrive =       2;   // (D=2) Option, type of Drive =0,1,2 for TrueVol,Sigma,WtMax
-  Int_t  EvPerBin =      25;   // Maximum events (equiv.) per bin in buid-up
-  Int_t  Chat     =       1;   // Chat level
-//=========================================================
-  TRandom *PseRan   = new TRandom3();  // Create random number generator
-  TFoam   *FoamX    = new TFoam("FoamX");   // Create Simulator
-  TFoamIntegrand    *rho= new TFDISTR();
-  PseRan->SetSeed(4357);
-//=========================================================
-  cout<<"*****   Demonstration Program for Foam version "<<FoamX->GetVersion()<<"    *****"<<endl;
-  FoamX->SetkDim(        kDim);      // Mandatory!!!
-  FoamX->SetnCells(      nCells);    // optional
-  FoamX->SetnSampl(      nSampl);    // optional
-  FoamX->SetnBin(        nBin);      // optional
-  FoamX->SetOptRej(      OptRej);    // optional
-  FoamX->SetOptDrive(    OptDrive);  // optional
-  FoamX->SetEvPerBin(    EvPerBin);  // optional
-  FoamX->SetChat(        Chat);      // optional
-//===============================
-  FoamX->SetRho(rho);
-  FoamX->SetPseRan(PseRan);
-  FoamX->Initialize(); // Initialize simulator
-  FoamX->Write("FoamX");     // Writing Foam on the disk, TESTING PERSISTENCY!!!
-//===============================
-  long nCalls=FoamX->GetnCalls();
-  cout << "====== Initialization done, entering MC loop" << endl;
-//======================================================================
-  //cout<<" About to start MC loop: ";  cin.getline(question,20);
-  Double_t *MCvect =new Double_t[kDim]; // vector generated in the MC run
-//======================================================================
-  TH1D  *hst_Wt = new TH1D("hst_Wt" ,  "Main weight of Foam",25,0,1.25);
-  hst_Wt->Sumw2();
-//======================================================================
-  for(loop=0; loop<NevTot; loop++){
-//===============================
-    FoamX->MakeEvent();           // generate MC event
-//===============================
-    FoamX->GetMCvect( MCvect);
-    MCwt=FoamX->GetMCwt();
-    hst_Wt->Fill(MCwt,1.0);
-    if(loop<15){
-      cout<<"MCwt= "<<MCwt<<",  ";
-      cout<<"MCvect= ";
-      for ( Int_t k=0 ; k<kDim ; k++) cout<<MCvect[k]<<" "; cout<< endl;
-    }
-    if( ((loop)%100000)==0 ){
-      cout<<"   loop= "<<loop<<endl;
-    }
-  }
-//======================================================================
-//======================================================================
-  cout << "====== Events generated, entering Finalize" << endl;
+   TFile RootFile("foam_demo.root","RECREATE","histograms");
+   long   loop;
+   Double_t MCresult,MCerror,MCwt;
+   //=========================================================
+   long NevTot   =     50000;   // Total MC statistics
+   Int_t  kDim   =         2;   // total dimension
+   Int_t  nCells   =     500;   // Number of Cells
+   Int_t  nSampl   =     200;   // Number of MC events per cell in build-up
+   Int_t  nBin     =       8;   // Number of bins in build-up
+   Int_t  OptRej   =       1;   // Weighted events for OptRej=0; wt=1 for OptRej=1 (default)
+   Int_t  OptDrive =       2;   // (D=2) Option, type of Drive =0,1,2 for TrueVol,Sigma,WtMax
+   Int_t  EvPerBin =      25;   // Maximum events (equiv.) per bin in build-up
+   Int_t  Chat     =       1;   // Chat level
+   //=========================================================
+   TRandom *PseRan   = new TRandom3();  // Create random number generator
+   TFoam   *FoamX    = new TFoam("FoamX");   // Create Simulator
+   TFoamIntegrand    *rho= new TFDISTR();
+   PseRan->SetSeed(4357);
+   //=========================================================
+   cout<<"*****   Demonstration Program for Foam version "<<FoamX->GetVersion()<<"    *****"<<endl;
+   FoamX->SetkDim(        kDim);      // Mandatory!!!
+   FoamX->SetnCells(      nCells);    // optional
+   FoamX->SetnSampl(      nSampl);    // optional
+   FoamX->SetnBin(        nBin);      // optional
+   FoamX->SetOptRej(      OptRej);    // optional
+   FoamX->SetOptDrive(    OptDrive);  // optional
+   FoamX->SetEvPerBin(    EvPerBin);  // optional
+   FoamX->SetChat(        Chat);      // optional
+   //===============================
+   FoamX->SetRho(rho);
+   FoamX->SetPseRan(PseRan);
+   FoamX->Initialize(); // Initialize simulator
+   FoamX->Write("FoamX");     // Writing Foam on the disk, TESTING PERSISTENCY!!!
+   //===============================
+   long nCalls=FoamX->GetnCalls();
+   cout << "====== Initialization done, entering MC loop" << endl;
+   //======================================================================
+   //cout<<" About to start MC loop: ";  cin.getline(question,20);
+   Double_t *MCvect =new Double_t[kDim]; // vector generated in the MC run
+   //======================================================================
+   TH1D  *hst_Wt = new TH1D("hst_Wt" ,  "Main weight of Foam",25,0,1.25);
+   hst_Wt->Sumw2();
+   //======================================================================
+   for(loop=0; loop<NevTot; loop++){
+   /*===============================*/
+      FoamX->MakeEvent();           // generate MC event
+   /*===============================*/
+      FoamX->GetMCvect( MCvect);
+      MCwt=FoamX->GetMCwt();
+      hst_Wt->Fill(MCwt,1.0);
+      if(loop<15){
+         cout<<"MCwt= "<<MCwt<<",  ";
+         cout<<"MCvect= ";
+         for ( Int_t k=0 ; k<kDim ; k++) cout<<MCvect[k]<<" "; cout<< endl;
+      }
+      if( ((loop)%100000)==0 ){
+         cout<<"   loop= "<<loop<<endl;
+      }
+   }
+   //======================================================================
+   //======================================================================
+   cout << "====== Events generated, entering Finalize" << endl;
 
-  hst_Wt->Print("all");
-  Double_t eps = 0.0005;
-  Double_t Effic, WtMax, AveWt, Sigma;
-  Double_t IntNorm, Errel;
-  FoamX->Finalize(   IntNorm, Errel);     // final printout
-  FoamX->GetIntegMC( MCresult, MCerror);  // get MC intnegral
-  FoamX->GetWtParams(eps, AveWt, WtMax, Sigma); // get MC wt parameters
-  Effic=0; if(WtMax>0) Effic=AveWt/WtMax;
-  cout << "================================================================" << endl;
-  cout << " MCresult= " << MCresult << " +- " << MCerror << " RelErr= "<< MCerror/MCresult << endl;
-  cout << " Dispersion/<wt>= " << Sigma/AveWt << endl;
-  cout << "      <wt>/WtMax= " << Effic <<",    for epsilon = "<<eps << endl;
-  cout << " nCalls (initialization only) =   " << nCalls << endl;
-  cout << "================================================================" << endl;
+   hst_Wt->Print("all");
+   Double_t eps = 0.0005;
+   Double_t Effic, WtMax, AveWt, Sigma;
+   Double_t IntNorm, Errel;
+   FoamX->Finalize(   IntNorm, Errel);     // final printout
+   FoamX->GetIntegMC( MCresult, MCerror);  // get MC integral
+   FoamX->GetWtParams(eps, AveWt, WtMax, Sigma); // get MC wt parameters
+   Effic=0; if(WtMax>0) Effic=AveWt/WtMax;
+   cout << "================================================================" << endl;
+   cout << " MCresult= " << MCresult << " +- " << MCerror << " RelErr= "<< MCerror/MCresult << endl;
+   cout << " Dispersion/<wt>= " << Sigma/AveWt << endl;
+   cout << "      <wt>/WtMax= " << Effic <<",    for epsilon = "<<eps << endl;
+   cout << " nCalls (initialization only) =   " << nCalls << endl;
+   cout << "================================================================" << endl;
 
-  delete [] MCvect;
-  //
-  RootFile.ls();
-  RootFile.Write();
-  RootFile.Close();
-  cout << "***** End of Demonstration Program  *****" << endl;
+   delete [] MCvect;
+   //
+   RootFile.ls();
+   RootFile.Write();
+   RootFile.Close();
+   cout << "***** End of Demonstration Program  *****" << endl;
 
-  return 0;
+   return 0;
 } // end of Demo
 
 #endif
diff --git a/tutorials/foam/foam_demopers.C b/tutorials/foam/foam_demopers.C
index e3580305f8bd852db167adac7c443c0591470705..e16e5be33a9ba94f963883725b5e623c96da5f94 100644
--- a/tutorials/foam/foam_demopers.C
+++ b/tutorials/foam/foam_demopers.C
@@ -1,5 +1,6 @@
 /// \file
 /// \ingroup tutorial_FOAM
+/// \notebook -nodraw
 /// This simple macro demonstrates persistency of FOAM object.
 /// First run macro foam_demo.C to create file foam_demo.root with FOAM object.
 ///
@@ -20,60 +21,60 @@
 Int_t foam_demopers()
 {
 
-  gSystem->Load("libFoam");
+   gSystem->Load("libFoam");
 
-  // need to load the foam_demo tutorial for the definition of the function
-  TString macroName = gSystem->UnixPathName(__FILE__);
-  macroName.ReplaceAll("foam_demopers.C","foam_demo.C");
-  gROOT->ProcessLine(TString::Format(".L %s+",macroName.Data()));
+   // need to load the foam_demo tutorial for the definition of the function
+   TString macroName = gROOT->GetTutorialsDir();
+   macroName.Append("/foam/foam_demo.C");
+   gROOT->ProcessLine(TString::Format(".L %s+",macroName.Data()));
 
-  //******************************************
-  cout<<"====================== TestVector ================================"<<endl;
-  TFile fileA("foam_demo.root");
-  fileA.cd();
-  cout<<"------------------------------------------------------------------"<<endl;
-  fileA.ls();
-  cout<<"------------------------------------------------------------------"<<endl;
-  fileA.Map();
-  cout<<"------------------------------------------------------------------"<<endl;
-  fileA.ShowStreamerInfo();
-  cout<<"------------------------------------------------------------------"<<endl;
-  fileA.GetListOfKeys()->Print();
-  cout<<"------------------------------------------------------------------"<<endl;
-  //*******************************************
-  TFoam  *FoamX = (TFoam*)fileA.Get("FoamX");
-  //*******************************************
-//  FoamX->PrintCells();
-  FoamX->CheckAll(1);
+   //******************************************
+   cout<<"====================== TestVector ================================"<<endl;
+   TFile fileA("foam_demo.root");
+   fileA.cd();
+   cout<<"------------------------------------------------------------------"<<endl;
+   fileA.ls();
+   cout<<"------------------------------------------------------------------"<<endl;
+   fileA.Map();
+   cout<<"------------------------------------------------------------------"<<endl;
+   fileA.ShowStreamerInfo();
+   cout<<"------------------------------------------------------------------"<<endl;
+   fileA.GetListOfKeys()->Print();
+   cout<<"------------------------------------------------------------------"<<endl;
+   //*******************************************
+   TFoam  *FoamX = (TFoam*)fileA.Get("FoamX");
+   //*******************************************
+   //  FoamX->PrintCells();
+   FoamX->CheckAll(1);
 
- //N.B. the integrand functions need to be reset
- // because cannot be made persistent
-#ifdef __CINT__
- // this can be done only in CINT
- TFoamIntegrand    *rho= new TFDISTR();
-#else
-   // this should be done with AClic or Cling
-   TFoamIntegrand * rho = (TFoamIntegrand*) gROOT->ProcessLine("return new TFDISTR();");
-#endif
- FoamX->SetRho(rho);
+   // N.B. the integrand functions need to be reset
+   // because they cannot be made persistent
+   #ifdef __CINT__
+      // this can be done only in CINT
+      TFoamIntegrand    *rho= new TFDISTR();
+   #else
+      // this should be done with ACLiC or Cling
+      TFoamIntegrand * rho = (TFoamIntegrand*) gROOT->ProcessLine("return new TFDISTR();");
+   #endif
+   FoamX->SetRho(rho);
 
- Double_t *MCvect =new Double_t[2]; // 2-dim vector generated in the MC run
+   Double_t *MCvect =new Double_t[2]; // 2-dim vector generated in the MC run
 
- for(long loop=0; loop<50000; loop++){
-    FoamX->MakeEvent();            // generate MC event
-    FoamX->GetMCvect( MCvect);     // get generated vector (x,y)
-    Double_t x=MCvect[0];
-    Double_t y=MCvect[1];
-    if(loop<10) cout<<"(x,y) =  ( "<< x <<", "<< y <<" )"<<endl;
-    }// loop
-  //
-  Double_t IntNorm, Errel;
-  FoamX->Finalize(   IntNorm, Errel);     // final printout
-  Double_t MCresult, MCerror;
-  FoamX->GetIntegMC( MCresult, MCerror);  // get MC integral, should be one
-  cout << " MCresult= " << MCresult << " +- " << MCerror <<endl;
-  cout<<"===================== TestPers FINISHED ======================="<<endl;
-  return 0;
+   for(long loop=0; loop<50000; loop++){
+      FoamX->MakeEvent();            // generate MC event
+      FoamX->GetMCvect( MCvect);     // get generated vector (x,y)
+      Double_t x=MCvect[0];
+      Double_t y=MCvect[1];
+      if(loop<10) cout<<"(x,y) =  ( "<< x <<", "<< y <<" )"<<endl;
+      }// loop
+   //
+   Double_t IntNorm, Errel;
+   FoamX->Finalize(   IntNorm, Errel);     // final printout
+   Double_t MCresult, MCerror;
+   FoamX->GetIntegMC( MCresult, MCerror);  // get MC integral, should be one
+   cout << " MCresult= " << MCresult << " +- " << MCerror <<endl;
+   cout<<"===================== TestPers FINISHED ======================="<<endl;
+   return 0;
 }
 //_____________________________________________________________________________
 //
diff --git a/tutorials/graphics/polytest2.C b/tutorials/graphics/polytest2.C
index 3f03e84b062ca4b5987643287370b2d362245983..ce80e00c01f8a7d29941fa3f1c6bba843021a196 100644
--- a/tutorials/graphics/polytest2.C
+++ b/tutorials/graphics/polytest2.C
@@ -1,4 +1,5 @@
 /// \file
+/// \notebook
 /// \ingroup tutorial_graphics
 /// This macro is testing the "compacting" algorithm in TPadPainter.
 /// It reduces the number of polygon's vertices using actual pixel coordinates.
diff --git a/tutorials/graphics/psview.C b/tutorials/graphics/psview.C
index 3cd25dd1250538d7d44c16d46e8e1533276297ef..63a42ec91b62707ab76568412f1c46fa5ea75137 100644
--- a/tutorials/graphics/psview.C
+++ b/tutorials/graphics/psview.C
@@ -1,5 +1,6 @@
 /// \file
 /// \ingroup tutorial_graphics
+/// \notebook
 /// An example how to display PS, EPS, PDF files in canvas.
 /// To load a PS file in a TCanvas, the ghostscript program needs to be install.
 /// - On most unix systems it is installed by default.
@@ -31,7 +32,9 @@ void psview()
    gROOT->SetBatch(1);
 
    // create a PostScript file
-   gROOT->Macro("feynman.C");
+   TString dir = gROOT->GetTutorialsDir();
+   dir.Append("/graphics/feynman.C");
+   gROOT->Macro(dir);
    gPad->Print("feynman.eps");
 
    // back to graphics mode
diff --git a/tutorials/image/img2pad.C b/tutorials/image/img2pad.C
index bf836c25f897dba24d0ad2604877089df26071cb..612283c7d806b1e9e3f637086986f4703c9d7da9 100644
--- a/tutorials/image/img2pad.C
+++ b/tutorials/image/img2pad.C
@@ -22,7 +22,7 @@ void img2pad()
    c->SetFixedAspectRatio();
 
    TCanvas *c1 = new TCanvas("roses", "roses", 800, 800);
-   img->Draw("T100,100,yellow");
+   img->Draw("T100,100,#ffff00");
    //img->Draw("T100,100,#556655");
    //img->Draw("T100,100");
 
diff --git a/tutorials/io/double32.C b/tutorials/io/double32.C
index 9ecfab55caa6b017d9a19bfe21b886b2aa37de46..3e422e6d1a9072fde397c37e1be17c3c33b49d0e 100644
--- a/tutorials/io/double32.C
+++ b/tutorials/io/double32.C
@@ -1,5 +1,6 @@
 /// \file
 /// \ingroup tutorial_io
+/// \notebook -js
 /// Tutorial illustrating use and precision of the Double32_t data type
 /// You must run this tutorial with ACLIC: a dictionary will be automatically
 /// created.
@@ -8,6 +9,7 @@
 /// ~~~
 /// The following cases are supported for streaming a Double32_t type
 /// depending on the range declaration in the comment field of the data member:
+///
 /// Case | Declaration
 /// -----|------------
 ///  A   | Double32_t     fNormal;
diff --git a/tutorials/math/TSVDUnfoldExample.C b/tutorials/math/TSVDUnfoldExample.C
index af96873890e8dfca34133be98aeccb5524d91dfd..52fcf5f89a23b1f96125fec974e3bdc8b7a1294f 100644
--- a/tutorials/math/TSVDUnfoldExample.C
+++ b/tutorials/math/TSVDUnfoldExample.C
@@ -48,6 +48,7 @@ Double_t Reconstruct( Double_t xt, TRandom3& R )
 
 void TSVDUnfoldExample()
 {
+   gROOT->SetStyle("Plain");
    gStyle->SetOptStat(0);
 
    TRandom3 R;
diff --git a/tutorials/math/kdTreeBinning.C b/tutorials/math/kdTreeBinning.C
index 5fe5d28fbf93f4d6219a0b3b4a0fe033e3674d73..0f5d6584479746c633f443379a9be0c2bd709ea0 100644
--- a/tutorials/math/kdTreeBinning.C
+++ b/tutorials/math/kdTreeBinning.C
@@ -1,7 +1,6 @@
 /// \file
 /// \ingroup tutorial_math
 /// \notebook
-///
 /// kdTreeBinning tutorial: bin the data in cells of equal content using a kd-tree
 ///
 /// Using TKDTree wrapper class as a data binning structure
diff --git a/tutorials/math/mathcoreGenVector.C b/tutorials/math/mathcoreGenVector.C
index a2850689fd697f176f0b5f4b41fb8871c6347f5e..610d5c67dfb96563ccaaaa9c07b84c31a46d9273 100644
--- a/tutorials/math/mathcoreGenVector.C
+++ b/tutorials/math/mathcoreGenVector.C
@@ -881,7 +881,7 @@ void mathcoreGenVector() {
 
    #ifdef __CINT__
    gSystem->Load("libMathCore");
-   using namespace ROOT::Math;
+   using namespace ROOT::Math;
    #endif
 
    testVector3D();
diff --git a/tutorials/math/mathcoreVectorCollection.C b/tutorials/math/mathcoreVectorCollection.C
index 4d0b4d2377a47c697d6fca8ab26776705dc2cfe7..b7fea30584695a369de8ebf95594826723144b55 100644
--- a/tutorials/math/mathcoreVectorCollection.C
+++ b/tutorials/math/mathcoreVectorCollection.C
@@ -157,7 +157,7 @@ int mathcoreVectorCollection() {
    gSystem->Load("libMathCore");
    gSystem->Load("libPhysics");
     // in CINT need to do that after having loading the library
-   using namespace ROOT::Math;
+   using namespace ROOT::Math;
 
    cout << "This tutorial can run only using ACliC, compiling it by doing: " << endl;
    cout << "\t  .x tutorials/math/mathcoreVectorCollection.C+" << endl;
diff --git a/tutorials/matrix/invertMatrix.C b/tutorials/matrix/invertMatrix.C
index aad960f64d521f822d8e329cc46aaa40c9bf8588..a6a9291205ed254edf7f5f25d69164c8e1ea6cbd 100644
--- a/tutorials/matrix/invertMatrix.C
+++ b/tutorials/matrix/invertMatrix.C
@@ -168,4 +168,4 @@ void invertMatrix(Int_t msize=6)
    const Double_t U4_max_offdiag = (U4.Abs()).Max();
    std::cout << "  Maximum off-diagonal = " << U4_max_offdiag << std::endl;
    std::cout << "  Determinant          = " << det4 << std::endl;
-   }
+}
diff --git a/tutorials/matrix/solveLinear.C b/tutorials/matrix/solveLinear.C
index 55d19bad602b3983812dce1364802b0cf8417368..6344affd43a533f1fdec93c787a81855596a941c 100644
--- a/tutorials/matrix/solveLinear.C
+++ b/tutorials/matrix/solveLinear.C
@@ -1,6 +1,6 @@
 /// \file
 /// \ingroup tutorial_matrix
-/// \notebook
+/// \notebook -nodraw
 /// This macro shows several ways to perform a linear least-squares
 /// analysis . To keep things simple we fit a straight line to 4
 /// data points
diff --git a/tutorials/roofit/rf303_conditional.C b/tutorials/roofit/rf303_conditional.C
index 5d4206def8cc5e2f0c18c3401ba74aeb97fc7c6e..d2166812f8428e06eb643f45fd36b5cb31023de7 100644
--- a/tutorials/roofit/rf303_conditional.C
+++ b/tutorials/roofit/rf303_conditional.C
@@ -24,7 +24,8 @@
 #include "TCanvas.h"
 #include "TAxis.h"
 #include "TH1.h"
-using namespace RooFit ;
+
+using namespace RooFit;
 
 
 RooDataSet* makeFakeDataXY() ;
diff --git a/tutorials/roofit/rf510_wsnamedsets.C b/tutorials/roofit/rf510_wsnamedsets.C
index 5cb75d7d254e0a90993f49966aa70ecb9652f8e0..11c5a12fe775a94084e10bd5b97e016ec0fc1e1c 100644
--- a/tutorials/roofit/rf510_wsnamedsets.C
+++ b/tutorials/roofit/rf510_wsnamedsets.C
@@ -24,7 +24,8 @@
 #include "TAxis.h"
 #include "TFile.h"
 #include "TH1.h"
-using namespace RooFit ;
+
+using namespace RooFit;
 
 
 void fillWorkspace(RooWorkspace& w) ;
diff --git a/tutorials/roofit/rf903_numintcache.C b/tutorials/roofit/rf903_numintcache.C
index 883258126fb0ebb52ad9cac42a4b7b83a9bbb436..1f7d895b80d5fdbb8f6c64b10c42ada877f80fa0 100644
--- a/tutorials/roofit/rf903_numintcache.C
+++ b/tutorials/roofit/rf903_numintcache.C
@@ -24,7 +24,7 @@
 #include "TFile.h"
 #include "TH1.h"
 
-using namespace RooFit ;
+using namespace RooFit;
 
 RooWorkspace* getWorkspace(Int_t mode) ;
 
diff --git a/tutorials/roostats/FourBinInstructional.C b/tutorials/roostats/FourBinInstructional.C
index 1e0b9d961d07d8b85daef19bc5520812be91caad..b759fbbcd234525f7bc597e5e6b104ed9351ddc3 100644
--- a/tutorials/roostats/FourBinInstructional.C
+++ b/tutorials/roostats/FourBinInstructional.C
@@ -167,188 +167,188 @@ using namespace RooStats;
 
 void FourBinInstructional(bool doBayesian=false, bool doFeldmanCousins=false, bool doMCMC=false){
 
-  // let's time this challenging example
-  TStopwatch t;
-  t.Start();
-
-  // set RooFit random seed for reproducible results
-  RooRandom::randomGenerator()->SetSeed(4357);
-
-  // make model
-  RooWorkspace* wspace = new RooWorkspace("wspace");
-  wspace->factory("Poisson::on(non[0,1000], sum::splusb(s[40,0,100],b[100,0,300]))");
-  wspace->factory("Poisson::off(noff[0,5000], prod::taub(b,tau[5,3,7],rho[1,0,2]))");
-  wspace->factory("Poisson::onbar(nonbar[0,10000], bbar[1000,500,2000])");
-  wspace->factory("Poisson::offbar(noffbar[0,1000000], prod::lambdaoffbar(bbar, tau))");
-  wspace->factory("Gaussian::mcCons(rhonom[1.,0,2], rho, sigma[.2])");
-  wspace->factory("PROD::model(on,off,onbar,offbar,mcCons)");
-  wspace->defineSet("obs","non,noff,nonbar,noffbar,rhonom");
-
-  wspace->factory("Uniform::prior_poi({s})");
-  wspace->factory("Uniform::prior_nuis({b,bbar,tau, rho})");
-  wspace->factory("PROD::prior(prior_poi,prior_nuis)");
-
-  ///////////////////////////////////////////
-  // Control some interesting variations
-  // define parameers of interest
-  // for 1-d plots
-  wspace->defineSet("poi","s");
-  wspace->defineSet("nuis","b,tau,rho,bbar");
-  // for 2-d plots to inspect correlations:
-  //  wspace->defineSet("poi","s,rho");
-
-  // test simpler cases where parameters are known.
-  //  wspace->var("tau")->setConstant();
-  //  wspace->var("rho")->setConstant();
-  //  wspace->var("b")->setConstant();
-  //  wspace->var("bbar")->setConstant();
-
-  // inspect workspace
-  //  wspace->Print();
-
-  ////////////////////////////////////////////////////////////
-  // Generate toy data
-  // generate toy data assuming current value of the parameters
-  // import into workspace.
-  // add Verbose() to see how it's being generated
-  RooDataSet* data =   wspace->pdf("model")->generate(*wspace->set("obs"),1);
-  //  data->Print("v");
-  wspace->import(*data);
-
-  /////////////////////////////////////////////////////
-  // Now the statistical tests
-  // model config
-  ModelConfig* modelConfig = new ModelConfig("FourBins");
-  modelConfig->SetWorkspace(*wspace);
-  modelConfig->SetPdf(*wspace->pdf("model"));
-  modelConfig->SetPriorPdf(*wspace->pdf("prior"));
-  modelConfig->SetParametersOfInterest(*wspace->set("poi"));
-  modelConfig->SetNuisanceParameters(*wspace->set("nuis"));
-  wspace->import(*modelConfig);
-  wspace->writeToFile("FourBin.root");
-
-  //////////////////////////////////////////////////
-  // If you want to see the covariance matrix uncomment
-  //  wspace->pdf("model")->fitTo(*data);
-
-  // use ProfileLikelihood
-  ProfileLikelihoodCalculator plc(*data, *modelConfig);
-  plc.SetConfidenceLevel(0.95);
-  LikelihoodInterval* plInt = plc.GetInterval();
-  RooFit::MsgLevel msglevel = RooMsgService::instance().globalKillBelow();
-  RooMsgService::instance().setGlobalKillBelow(RooFit::FATAL);
-  plInt->LowerLimit( *wspace->var("s") ); // get ugly print out of the way. Fix.
-  RooMsgService::instance().setGlobalKillBelow(msglevel);
-
-  // use FeldmaCousins (takes ~20 min)
-  FeldmanCousins fc(*data, *modelConfig);
-  fc.SetConfidenceLevel(0.95);
-  //number counting: dataset always has 1 entry with N events observed
-  fc.FluctuateNumDataEntries(false);
-  fc.UseAdaptiveSampling(true);
-  fc.SetNBins(40);
-  PointSetInterval* fcInt = NULL;
-  if(doFeldmanCousins){ // takes 7 minutes
-    fcInt = (PointSetInterval*) fc.GetInterval(); // fix cast
-  }
-
-
-  // use BayesianCalculator (only 1-d parameter of interest, slow for this problem)
-  BayesianCalculator bc(*data, *modelConfig);
-  bc.SetConfidenceLevel(0.95);
-  SimpleInterval* bInt = NULL;
-  if(doBayesian && wspace->set("poi")->getSize() == 1)   {
-    bInt = bc.GetInterval();
-  } else{
-    cout << "Bayesian Calc. only supports on parameter of interest" << endl;
-  }
-
-
-  // use MCMCCalculator  (takes about 1 min)
-  // Want an efficient proposal function, so derive it from covariance
-  // matrix of fit
-  RooFitResult* fit = wspace->pdf("model")->fitTo(*data,Save());
-  ProposalHelper ph;
-  ph.SetVariables((RooArgSet&)fit->floatParsFinal());
-  ph.SetCovMatrix(fit->covarianceMatrix());
-  ph.SetUpdateProposalParameters(kTRUE); // auto-create mean vars and add mappings
-  ph.SetCacheSize(100);
-  ProposalFunction* pf = ph.GetProposalFunction();
-
-  MCMCCalculator mc(*data, *modelConfig);
-  mc.SetConfidenceLevel(0.95);
-  mc.SetProposalFunction(*pf);
-  mc.SetNumBurnInSteps(500); // first N steps to be ignored as burn-in
-  mc.SetNumIters(50000);
-  mc.SetLeftSideTailFraction(0.5); // make a central interval
-  MCMCInterval* mcInt = NULL;
-  if(doMCMC)
-    mcInt = mc.GetInterval();
-
-  //////////////////////////////////////
-  // Make some  plots
-  TCanvas* c1 = (TCanvas*) gROOT->Get("c1");
-  if(!c1)
-    c1 = new TCanvas("c1");
-
-  if(doBayesian && doMCMC){
-    c1->Divide(3);
-    c1->cd(1);
-  }
-  else if (doBayesian || doMCMC){
-    c1->Divide(2);
-    c1->cd(1);
-  }
-
-  LikelihoodIntervalPlot* lrplot = new LikelihoodIntervalPlot(plInt);
-  lrplot->Draw();
-
-  if(doBayesian && wspace->set("poi")->getSize() == 1)   {
-    c1->cd(2);
-    // the plot takes a long time and print lots of error
-    // using a scan it is better
-    bc.SetScanOfPosterior(20);
-    RooPlot* bplot = bc.GetPosteriorPlot();
-    bplot->Draw();
-  }
-
-  if(doMCMC){
-    if(doBayesian && wspace->set("poi")->getSize() == 1)
-      c1->cd(3);
-    else
+   // let's time this challenging example
+   TStopwatch t;
+   t.Start();
+
+   // set RooFit random seed for reproducible results
+   RooRandom::randomGenerator()->SetSeed(4357);
+
+   // make model
+   RooWorkspace* wspace = new RooWorkspace("wspace");
+   wspace->factory("Poisson::on(non[0,1000], sum::splusb(s[40,0,100],b[100,0,300]))");
+   wspace->factory("Poisson::off(noff[0,5000], prod::taub(b,tau[5,3,7],rho[1,0,2]))");
+   wspace->factory("Poisson::onbar(nonbar[0,10000], bbar[1000,500,2000])");
+   wspace->factory("Poisson::offbar(noffbar[0,1000000], prod::lambdaoffbar(bbar, tau))");
+   wspace->factory("Gaussian::mcCons(rhonom[1.,0,2], rho, sigma[.2])");
+   wspace->factory("PROD::model(on,off,onbar,offbar,mcCons)");
+   wspace->defineSet("obs","non,noff,nonbar,noffbar,rhonom");
+
+   wspace->factory("Uniform::prior_poi({s})");
+   wspace->factory("Uniform::prior_nuis({b,bbar,tau, rho})");
+   wspace->factory("PROD::prior(prior_poi,prior_nuis)");
+
+   ///////////////////////////////////////////
+   // Control some interesting variations
+   // define parameters of interest
+   // for 1-d plots
+   wspace->defineSet("poi","s");
+   wspace->defineSet("nuis","b,tau,rho,bbar");
+   // for 2-d plots to inspect correlations:
+   //  wspace->defineSet("poi","s,rho");
+
+   // test simpler cases where parameters are known.
+   //  wspace->var("tau")->setConstant();
+   //  wspace->var("rho")->setConstant();
+   //  wspace->var("b")->setConstant();
+   //  wspace->var("bbar")->setConstant();
+
+   // inspect workspace
+   //  wspace->Print();
+
+   ////////////////////////////////////////////////////////////
+   // Generate toy data
+   // generate toy data assuming current value of the parameters
+   // import into workspace.
+   // add Verbose() to see how it's being generated
+   RooDataSet* data =   wspace->pdf("model")->generate(*wspace->set("obs"),1);
+   //  data->Print("v");
+   wspace->import(*data);
+
+   /////////////////////////////////////////////////////
+   // Now the statistical tests
+   // model config
+   ModelConfig* modelConfig = new ModelConfig("FourBins");
+   modelConfig->SetWorkspace(*wspace);
+   modelConfig->SetPdf(*wspace->pdf("model"));
+   modelConfig->SetPriorPdf(*wspace->pdf("prior"));
+   modelConfig->SetParametersOfInterest(*wspace->set("poi"));
+   modelConfig->SetNuisanceParameters(*wspace->set("nuis"));
+   wspace->import(*modelConfig);
+   wspace->writeToFile("FourBin.root");
+
+   //////////////////////////////////////////////////
+   // If you want to see the covariance matrix uncomment
+   //  wspace->pdf("model")->fitTo(*data);
+
+   // use ProfileLikelihood
+   ProfileLikelihoodCalculator plc(*data, *modelConfig);
+   plc.SetConfidenceLevel(0.95);
+   LikelihoodInterval* plInt = plc.GetInterval();
+   RooFit::MsgLevel msglevel = RooMsgService::instance().globalKillBelow();
+   RooMsgService::instance().setGlobalKillBelow(RooFit::FATAL);
+   plInt->LowerLimit( *wspace->var("s") ); // get ugly print out of the way. Fix.
+   RooMsgService::instance().setGlobalKillBelow(msglevel);
+
+   // use FeldmanCousins (takes ~20 min)
+   FeldmanCousins fc(*data, *modelConfig);
+   fc.SetConfidenceLevel(0.95);
+   //number counting: dataset always has 1 entry with N events observed
+   fc.FluctuateNumDataEntries(false);
+   fc.UseAdaptiveSampling(true);
+   fc.SetNBins(40);
+   PointSetInterval* fcInt = NULL;
+   if(doFeldmanCousins){ // takes 7 minutes
+      fcInt = (PointSetInterval*) fc.GetInterval(); // fix cast
+   }
+
+
+   // use BayesianCalculator (only 1-d parameter of interest, slow for this problem)
+   BayesianCalculator bc(*data, *modelConfig);
+   bc.SetConfidenceLevel(0.95);
+   SimpleInterval* bInt = NULL;
+   if(doBayesian && wspace->set("poi")->getSize() == 1)   {
+      bInt = bc.GetInterval();
+   } else{
+      cout << "Bayesian Calc. only supports on parameter of interest" << endl;
+   }
+
+
+   // use MCMCCalculator  (takes about 1 min)
+   // Want an efficient proposal function, so derive it from covariance
+   // matrix of fit
+   RooFitResult* fit = wspace->pdf("model")->fitTo(*data,Save());
+   ProposalHelper ph;
+   ph.SetVariables((RooArgSet&)fit->floatParsFinal());
+   ph.SetCovMatrix(fit->covarianceMatrix());
+   ph.SetUpdateProposalParameters(kTRUE); // auto-create mean vars and add mappings
+   ph.SetCacheSize(100);
+   ProposalFunction* pf = ph.GetProposalFunction();
+
+   MCMCCalculator mc(*data, *modelConfig);
+   mc.SetConfidenceLevel(0.95);
+   mc.SetProposalFunction(*pf);
+   mc.SetNumBurnInSteps(500); // first N steps to be ignored as burn-in
+   mc.SetNumIters(50000);
+   mc.SetLeftSideTailFraction(0.5); // make a central interval
+   MCMCInterval* mcInt = NULL;
+   if(doMCMC)
+      mcInt = mc.GetInterval();
+
+   //////////////////////////////////////
+   // Make some  plots
+   TCanvas* c1 = (TCanvas*) gROOT->Get("c1");
+   if(!c1)
+      c1 = new TCanvas("c1");
+
+   if(doBayesian && doMCMC){
+      c1->Divide(3);
+      c1->cd(1);
+   }
+   else if (doBayesian || doMCMC){
+      c1->Divide(2);
+      c1->cd(1);
+   }
+
+   LikelihoodIntervalPlot* lrplot = new LikelihoodIntervalPlot(plInt);
+   lrplot->Draw();
+
+   if(doBayesian && wspace->set("poi")->getSize() == 1)   {
       c1->cd(2);
-    MCMCIntervalPlot mcPlot(*mcInt);
-    mcPlot.Draw();
-  }
-
-  ////////////////////////////////////
-  // querry intervals
-  cout << "Profile Likelihood interval on s = [" <<
-    plInt->LowerLimit( *wspace->var("s") ) << ", " <<
-    plInt->UpperLimit( *wspace->var("s") ) << "]" << endl;
-  //Profile Likelihood interval on s = [12.1902, 88.6871]
-
-
-  if(doBayesian && wspace->set("poi")->getSize() == 1)   {
-    cout << "Bayesian interval on s = [" <<
-      bInt->LowerLimit( ) << ", " <<
-      bInt->UpperLimit( ) << "]" << endl;
-  }
-
-  if(doFeldmanCousins){
-    cout << "Feldman Cousins interval on s = [" <<
-      fcInt->LowerLimit( *wspace->var("s") ) << ", " <<
-      fcInt->UpperLimit( *wspace->var("s") ) << "]" << endl;
-    //Feldman Cousins interval on s = [18.75 +/- 2.45, 83.75 +/- 2.45]
-  }
-
-  if(doMCMC){
-    cout << "MCMC interval on s = [" <<
-      mcInt->LowerLimit(*wspace->var("s") ) << ", " <<
-      mcInt->UpperLimit(*wspace->var("s") ) << "]" << endl;
-    //MCMC interval on s = [15.7628, 84.7266]
-
-  }
-
-  t.Print();
+      // the plot takes a long time and print lots of error
+      // using a scan it is better
+      bc.SetScanOfPosterior(20);
+      RooPlot* bplot = bc.GetPosteriorPlot();
+      bplot->Draw();
+   }
+
+   if(doMCMC){
+      if(doBayesian && wspace->set("poi")->getSize() == 1)
+         c1->cd(3);
+      else
+         c1->cd(2);
+      MCMCIntervalPlot mcPlot(*mcInt);
+      mcPlot.Draw();
+   }
+
+   ////////////////////////////////////
+   // query intervals
+   cout << "Profile Likelihood interval on s = [" <<
+      plInt->LowerLimit( *wspace->var("s") ) << ", " <<
+      plInt->UpperLimit( *wspace->var("s") ) << "]" << endl;
+   //Profile Likelihood interval on s = [12.1902, 88.6871]
+
+
+   if(doBayesian && wspace->set("poi")->getSize() == 1)   {
+      cout << "Bayesian interval on s = [" <<
+         bInt->LowerLimit( ) << ", " <<
+         bInt->UpperLimit( ) << "]" << endl;
+   }
+
+   if(doFeldmanCousins){
+      cout << "Feldman Cousins interval on s = [" <<
+         fcInt->LowerLimit( *wspace->var("s") ) << ", " <<
+         fcInt->UpperLimit( *wspace->var("s") ) << "]" << endl;
+      //Feldman Cousins interval on s = [18.75 +/- 2.45, 83.75 +/- 2.45]
+   }
+
+   if(doMCMC){
+      cout << "MCMC interval on s = [" <<
+         mcInt->LowerLimit(*wspace->var("s") ) << ", " <<
+         mcInt->UpperLimit(*wspace->var("s") ) << "]" << endl;
+      //MCMC interval on s = [15.7628, 84.7266]
+
+   }
+
+   t.Print();
 }
diff --git a/tutorials/roostats/OneSidedFrequentistUpperLimitWithBands.C b/tutorials/roostats/OneSidedFrequentistUpperLimitWithBands.C
index 4b9e9ea3fa4f6a46dadaa216456c4c2d5a013800..60f35e0f1462ef9446d9ee00ec27974a4e860ff0 100644
--- a/tutorials/roostats/OneSidedFrequentistUpperLimitWithBands.C
+++ b/tutorials/roostats/OneSidedFrequentistUpperLimitWithBands.C
@@ -185,347 +185,347 @@ void OneSidedFrequentistUpperLimitWithBands(const char* infile = "",
    }
 
 
-  /////////////////////////////////////////////////////////////
-  // Now get the data and workspace
-  ////////////////////////////////////////////////////////////
-
-  // get the workspace out of the file
-  RooWorkspace* w = (RooWorkspace*) file->Get(workspaceName);
-  if(!w){
-    cout <<"workspace not found" << endl;
-    return;
-  }
-
-  // get the modelConfig out of the file
-  ModelConfig* mc = (ModelConfig*) w->obj(modelConfigName);
-
-  // get the modelConfig out of the file
-  RooAbsData* data = w->data(dataName);
+   /////////////////////////////////////////////////////////////
+   // Now get the data and workspace
+   ////////////////////////////////////////////////////////////
+
+   // get the workspace out of the file
+   RooWorkspace* w = (RooWorkspace*) file->Get(workspaceName);
+   if(!w){
+      cout <<"workspace not found" << endl;
+      return;
+   }
 
-  // make sure ingredients are found
-  if(!data || !mc){
-    w->Print();
-    cout << "data or ModelConfig was not found" <<endl;
-    return;
-  }
+   // get the modelConfig out of the file
+   ModelConfig* mc = (ModelConfig*) w->obj(modelConfigName);
 
-  /////////////////////////////////////////////////////////////
-  // Now get the POI for convenience
-  // you may want to adjust the range of your POI
-  ////////////////////////////////////////////////////////////
-  RooRealVar* firstPOI = (RooRealVar*) mc->GetParametersOfInterest()->first();
-  //  firstPOI->setMin(0);
-  //  firstPOI->setMax(10);
-
-  /////////////////////////////////////////////
-  // create and use the FeldmanCousins tool
-  // to find and plot the 95% confidence interval
-  // on the parameter of interest as specified
-  // in the model config
-  // REMEMBER, we will change the test statistic
-  // so this is NOT a Feldman-Cousins interval
-  FeldmanCousins fc(*data,*mc);
-  fc.SetConfidenceLevel(confidenceLevel);
-  //  fc.AdditionalNToysFactor(0.25); // degrade/improve sampling that defines confidence belt
-  //  fc.UseAdaptiveSampling(true); // speed it up a bit, don't use for expectd limits
-  fc.SetNBins(nPointsToScan); // set how many points per parameter of interest to scan
-  fc.CreateConfBelt(true); // save the information in the belt for plotting
-
-  /////////////////////////////////////////////
-  // Feldman-Cousins is a unified limit by definition
-  // but the tool takes care of a few things for us like which values
-  // of the nuisance parameters should be used to generate toys.
-  // so let's just change the test statistic and realize this is
-  // no longer "Feldman-Cousins" but is a fully frequentist Neyman-Construction.
-  //  ProfileLikelihoodTestStatModified onesided(*mc->GetPdf());
-  //  fc.GetTestStatSampler()->SetTestStatistic(&onesided);
-  // ((ToyMCSampler*) fc.GetTestStatSampler())->SetGenerateBinned(true);
-  ToyMCSampler*  toymcsampler = (ToyMCSampler*) fc.GetTestStatSampler();
-  ProfileLikelihoodTestStat* testStat = dynamic_cast<ProfileLikelihoodTestStat*>(toymcsampler->GetTestStatistic());
-  testStat->SetOneSided(true);
-
-  // Since this tool needs to throw toy MC the PDF needs to be
-  // extended or the tool needs to know how many entries in a dataset
-  // per pseudo experiment.
-  // In the 'number counting form' where the entries in the dataset
-  // are counts, and not values of discriminating variables, the
-  // datasets typically only have one entry and the PDF is not
-  // extended.
-  if(!mc->GetPdf()->canBeExtended()){
-    if(data->numEntries()==1)
-      fc.FluctuateNumDataEntries(false);
-    else
-      cout <<"Not sure what to do about this model" <<endl;
-  }
-
-  // We can use PROOF to speed things along in parallel
-  // However, the test statistic has to be installed on the workers
-  // so either turn off PROOF or include the modified test statistic
-  // in your $ROOTSYS/roofit/roostats/inc directory,
-  // add the additional line to the LinkDef.h file,
-  // and recompile root.
-  if (useProof) {
-     ProofConfig pc(*w, nworkers, "", false);
-     toymcsampler->SetProofConfig(&pc); // enable proof
-  }
-
-  if(mc->GetGlobalObservables()){
-     cout << "will use global observables for unconditional ensemble"<<endl;
-     mc->GetGlobalObservables()->Print();
-     toymcsampler->SetGlobalObservables(*mc->GetGlobalObservables());
-  }
-
-
-  // Now get the interval
-  PointSetInterval* interval = fc.GetInterval();
-  ConfidenceBelt* belt = fc.GetConfidenceBelt();
-
-  // print out the iterval on the first Parameter of Interest
-  cout << "\n95% interval on " <<firstPOI->GetName()<<" is : ["<<
-    interval->LowerLimit(*firstPOI) << ", "<<
-    interval->UpperLimit(*firstPOI) <<"] "<<endl;
-
-  // get observed UL and value of test statistic evaluated there
-  RooArgSet tmpPOI(*firstPOI);
-  double observedUL = interval->UpperLimit(*firstPOI);
-  firstPOI->setVal(observedUL);
-  double obsTSatObsUL = fc.GetTestStatSampler()->EvaluateTestStatistic(*data,tmpPOI);
-
-
-  // Ask the calculator which points were scanned
-  RooDataSet* parameterScan = (RooDataSet*) fc.GetPointsToScan();
-  RooArgSet* tmpPoint;
-
-  // make a histogram of parameter vs. threshold
-  TH1F* histOfThresholds = new TH1F("histOfThresholds","",
-                                    parameterScan->numEntries(),
-                                    firstPOI->getMin(),
-                                    firstPOI->getMax());
-  histOfThresholds->GetXaxis()->SetTitle(firstPOI->GetName());
-  histOfThresholds->GetYaxis()->SetTitle("Threshold");
-
-  // loop through the points that were tested and ask confidence belt
-  // what the upper/lower thresholds were.
-  // For FeldmanCousins, the lower cut off is always 0
-  for(Int_t i=0; i<parameterScan->numEntries(); ++i){
-    tmpPoint = (RooArgSet*) parameterScan->get(i)->clone("temp");
-    //cout <<"get threshold"<<endl;
-    double arMax = belt->GetAcceptanceRegionMax(*tmpPoint);
-    double poiVal = tmpPoint->getRealValue(firstPOI->GetName()) ;
-    histOfThresholds->Fill(poiVal,arMax);
-  }
-  TCanvas* c1 = new TCanvas();
-  c1->Divide(2);
-  c1->cd(1);
-  histOfThresholds->SetMinimum(0);
-  histOfThresholds->Draw();
-  c1->cd(2);
+   // get the data out of the file
+   RooAbsData* data = w->data(dataName);
 
-  /////////////////////////////////////////////////////////////
-  // Now we generate the expected bands and power-constriant
-  ////////////////////////////////////////////////////////////
+   // make sure ingredients are found
+   if(!data || !mc){
+      w->Print();
+      cout << "data or ModelConfig was not found" <<endl;
+      return;
+   }
 
-  // First: find parameter point for mu=0, with conditional MLEs for nuisance parameters
-  RooAbsReal* nll = mc->GetPdf()->createNLL(*data);
-  RooAbsReal* profile = nll->createProfile(*mc->GetParametersOfInterest());
-  firstPOI->setVal(0.);
-  profile->getVal(); // this will do fit and set nuisance parameters to profiled values
-  RooArgSet* poiAndNuisance = new RooArgSet();
-  if(mc->GetNuisanceParameters())
-    poiAndNuisance->add(*mc->GetNuisanceParameters());
-  poiAndNuisance->add(*mc->GetParametersOfInterest());
-  w->saveSnapshot("paramsToGenerateData",*poiAndNuisance);
-  RooArgSet* paramsToGenerateData = (RooArgSet*) poiAndNuisance->snapshot();
-  cout << "\nWill use these parameter points to generate pseudo data for bkg only" << endl;
-  paramsToGenerateData->Print("v");
-
-
-  RooArgSet unconditionalObs;
-  unconditionalObs.add(*mc->GetObservables());
-  unconditionalObs.add(*mc->GetGlobalObservables()); // comment this out for the original conditional ensemble
-
-  double CLb=0;
-  double CLbinclusive=0;
-
-  // Now we generate background only and find distribution of upper limits
-  TH1F* histOfUL = new TH1F("histOfUL","",100,0,firstPOI->getMax());
-  histOfUL->GetXaxis()->SetTitle("Upper Limit (background only)");
-  histOfUL->GetYaxis()->SetTitle("Entries");
-  for(int imc=0; imc<nToyMC; ++imc){
-
-    // set parameters back to values for generating pseudo data
-    //    cout << "\n get current nuis, set vals, print again" << endl;
-    w->loadSnapshot("paramsToGenerateData");
-    //    poiAndNuisance->Print("v");
-
-    RooDataSet* toyData = 0;
-    // now generate a toy dataset
-    if(!mc->GetPdf()->canBeExtended()){
+   /////////////////////////////////////////////////////////////
+   // Now get the POI for convenience
+   // you may want to adjust the range of your POI
+   ////////////////////////////////////////////////////////////
+   RooRealVar* firstPOI = (RooRealVar*) mc->GetParametersOfInterest()->first();
+   //  firstPOI->setMin(0);
+   //  firstPOI->setMax(10);
+
+   /////////////////////////////////////////////
+   // create and use the FeldmanCousins tool
+   // to find and plot the 95% confidence interval
+   // on the parameter of interest as specified
+   // in the model config
+   // REMEMBER, we will change the test statistic
+   // so this is NOT a Feldman-Cousins interval
+   FeldmanCousins fc(*data,*mc);
+   fc.SetConfidenceLevel(confidenceLevel);
+   //  fc.AdditionalNToysFactor(0.25); // degrade/improve sampling that defines confidence belt
+   //  fc.UseAdaptiveSampling(true); // speed it up a bit, don't use for expected limits
+   fc.SetNBins(nPointsToScan); // set how many points per parameter of interest to scan
+   fc.CreateConfBelt(true); // save the information in the belt for plotting
+
+   /////////////////////////////////////////////
+   // Feldman-Cousins is by definition a unified (two-sided) construction,
+   // but the tool takes care of a few things for us, like which values
+   // of the nuisance parameters should be used to generate toys.
+   // So let's just change the test statistic and realize this is
+   // no longer "Feldman-Cousins" but a fully frequentist Neyman construction.
+   //  ProfileLikelihoodTestStatModified onesided(*mc->GetPdf());
+   //  fc.GetTestStatSampler()->SetTestStatistic(&onesided);
+   // ((ToyMCSampler*) fc.GetTestStatSampler())->SetGenerateBinned(true);
+   ToyMCSampler*  toymcsampler = (ToyMCSampler*) fc.GetTestStatSampler();
+   ProfileLikelihoodTestStat* testStat = dynamic_cast<ProfileLikelihoodTestStat*>(toymcsampler->GetTestStatistic());
+   testStat->SetOneSided(true);
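+   // With the one-sided profile likelihood test statistic the acceptance region
+   // at each scan point is bounded only from above in the test statistic, so the
+   // construction yields an upper limit rather than a two-sided Feldman-Cousins interval.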
+
+   // Since this tool needs to throw toy MC, the PDF needs to be
+   // extended or the tool needs to know how many entries per dataset
+   // to generate for each pseudo experiment.
+   // In the 'number counting form', where the entries in the dataset
+   // are counts and not values of discriminating variables, the
+   // datasets typically only have one entry and the PDF is not
+   // extended.
+   if(!mc->GetPdf()->canBeExtended()){
       if(data->numEntries()==1)
-         toyData = mc->GetPdf()->generate(*mc->GetObservables(),1);
+         fc.FluctuateNumDataEntries(false);
       else
          cout <<"Not sure what to do about this model" <<endl;
-    } else{
-       //      cout << "generating extended dataset"<<endl;
-       toyData = mc->GetPdf()->generate(*mc->GetObservables(),Extended());
-    }
-
-    // generate global observables
-    // need to be careful for simpdf
-    //    RooDataSet* globalData = mc->GetPdf()->generate(*mc->GetGlobalObservables(),1);
-
-    RooSimultaneous* simPdf = dynamic_cast<RooSimultaneous*>(mc->GetPdf());
-    if(!simPdf){
-      RooDataSet *one = mc->GetPdf()->generate(*mc->GetGlobalObservables(), 1);
-      const RooArgSet *values = one->get();
-      RooArgSet *allVars = mc->GetPdf()->getVariables();
-      *allVars = *values;
-      delete allVars;
-      delete values;
-      delete one;
-    } else {
-
-      //try fix for sim pdf
-      TIterator* iter = simPdf->indexCat().typeIterator() ;
-      RooCatType* tt = NULL;
-      while((tt=(RooCatType*) iter->Next())) {
-
-         // Get pdf associated with state from simpdf
-         RooAbsPdf* pdftmp = simPdf->getPdf(tt->GetName()) ;
-
-         // Generate only global variables defined by the pdf associated with this state
-         RooArgSet* globtmp = pdftmp->getObservables(*mc->GetGlobalObservables()) ;
-         RooDataSet* tmp = pdftmp->generate(*globtmp,1) ;
-
-         // Transfer values to output placeholder
-         *globtmp = *tmp->get(0) ;
-
-         // Cleanup
-         delete globtmp ;
-         delete tmp ;
-      }
-    }
-
-    //    globalData->Print("v");
-    //    unconditionalObs = *globalData->get();
-    //    mc->GetGlobalObservables()->Print("v");
-    //    delete globalData;
-    //    cout << "toy data = " << endl;
-    //    toyData->get()->Print("v");
-
-    // get test stat at observed UL in observed data
-    firstPOI->setVal(observedUL);
-    double toyTSatObsUL = fc.GetTestStatSampler()->EvaluateTestStatistic(*toyData,tmpPOI);
-    //    toyData->get()->Print("v");
-    //    cout <<"obsTSatObsUL " <<obsTSatObsUL << "toyTS " << toyTSatObsUL << endl;
-    if(obsTSatObsUL < toyTSatObsUL) // not sure about <= part yet
-      CLb+= (1.)/nToyMC;
-    if(obsTSatObsUL <= toyTSatObsUL) // not sure about <= part yet
-      CLbinclusive+= (1.)/nToyMC;
-
-
-    // loop over points in belt to find upper limit for this toy data
-    double thisUL = 0;
-    for(Int_t i=0; i<parameterScan->numEntries(); ++i){
+   }
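+   // If the pdf is extended, the number of entries per toy is fluctuated around
+   // the expected yield; in the non-extended single-entry case above we simply
+   // keep one entry per toy. For a non-extended pdf with several entries per
+   // dataset one would have to tell the sampler how many events to generate,
+   // e.g. (untested sketch, assuming ToyMCSampler::SetNEventsPerToy is available):
+   //   ((ToyMCSampler*) fc.GetTestStatSampler())->SetNEventsPerToy(data->numEntries());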
+
+   // We can use PROOF to speed things along in parallel.
+   // However, the test statistic has to be installed on the workers,
+   // so either turn off PROOF or include the modified test statistic
+   // in your $ROOTSYS/roofit/roostats/inc directory,
+   // add the additional line to the LinkDef.h file,
+   // and recompile ROOT.
+   if (useProof) {
+      ProofConfig pc(*w, nworkers, "", false);
+      toymcsampler->SetProofConfig(&pc); // enable proof
+   }
+
+   if(mc->GetGlobalObservables()){
+      cout << "will use global observables for unconditional ensemble"<<endl;
+      mc->GetGlobalObservables()->Print();
+      toymcsampler->SetGlobalObservables(*mc->GetGlobalObservables());
+   }
+
+
+   // Now get the interval
+   PointSetInterval* interval = fc.GetInterval();
+   ConfidenceBelt* belt = fc.GetConfidenceBelt();
+
+   // print out the interval on the first Parameter of Interest
+   cout << "\n95% interval on " <<firstPOI->GetName()<<" is : ["<<
+      interval->LowerLimit(*firstPOI) << ", "<<
+      interval->UpperLimit(*firstPOI) <<"] "<<endl;
+
+   // get observed UL and value of test statistic evaluated there
+   RooArgSet tmpPOI(*firstPOI);
+   double observedUL = interval->UpperLimit(*firstPOI);
+   firstPOI->setVal(observedUL);
+   double obsTSatObsUL = fc.GetTestStatSampler()->EvaluateTestStatistic(*data,tmpPOI);
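+   // obsTSatObsUL is kept for later: it will be compared to the test statistic
+   // of background-only toys evaluated at the same point to estimate CLb.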
+
+
+   // Ask the calculator which points were scanned
+   RooDataSet* parameterScan = (RooDataSet*) fc.GetPointsToScan();
+   RooArgSet* tmpPoint;
+
+   // make a histogram of parameter vs. threshold
+   TH1F* histOfThresholds = new TH1F("histOfThresholds","",
+                                       parameterScan->numEntries(),
+                                       firstPOI->getMin(),
+                                       firstPOI->getMax());
+   histOfThresholds->GetXaxis()->SetTitle(firstPOI->GetName());
+   histOfThresholds->GetYaxis()->SetTitle("Threshold");
+
+   // loop through the points that were tested and ask the confidence belt
+   // what the upper/lower thresholds were.
+   // For FeldmanCousins, the lower cutoff is always 0
+   for(Int_t i=0; i<parameterScan->numEntries(); ++i){
       tmpPoint = (RooArgSet*) parameterScan->get(i)->clone("temp");
+      //cout <<"get threshold"<<endl;
       double arMax = belt->GetAcceptanceRegionMax(*tmpPoint);
-      firstPOI->setVal( tmpPoint->getRealValue(firstPOI->GetName()) );
-      //   double thisTS = profile->getVal();
-      double thisTS = fc.GetTestStatSampler()->EvaluateTestStatistic(*toyData,tmpPOI);
-
-      //   cout << "poi = " << firstPOI->getVal()
-      // << " max is " << arMax << " this profile = " << thisTS << endl;
-      //      cout << "thisTS = " << thisTS<<endl;
-      if(thisTS<=arMax){
-         thisUL = firstPOI->getVal();
+      double poiVal = tmpPoint->getRealValue(firstPOI->GetName()) ;
+      histOfThresholds->Fill(poiVal,arMax);
+   }
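+   // The histogram drawn below visualizes the confidence belt: for each scanned
+   // POI value, toy datasets whose test statistic falls below the plotted
+   // threshold are inside the acceptance region.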
+   TCanvas* c1 = new TCanvas();
+   c1->Divide(2);
+   c1->cd(1);
+   histOfThresholds->SetMinimum(0);
+   histOfThresholds->Draw();
+   c1->cd(2);
+
+   /////////////////////////////////////////////////////////////
+   // Now we generate the expected bands and the power constraint
+   ////////////////////////////////////////////////////////////
+
+   // First: find parameter point for mu=0, with conditional MLEs for nuisance parameters
+   RooAbsReal* nll = mc->GetPdf()->createNLL(*data);
+   RooAbsReal* profile = nll->createProfile(*mc->GetParametersOfInterest());
+   firstPOI->setVal(0.);
+   profile->getVal(); // this will do the fit and set the nuisance parameters to their profiled values
+   RooArgSet* poiAndNuisance = new RooArgSet();
+   if(mc->GetNuisanceParameters())
+      poiAndNuisance->add(*mc->GetNuisanceParameters());
+   poiAndNuisance->add(*mc->GetParametersOfInterest());
+   w->saveSnapshot("paramsToGenerateData",*poiAndNuisance);
+   RooArgSet* paramsToGenerateData = (RooArgSet*) poiAndNuisance->snapshot();
+   cout << "\nWill use these parameter points to generate pseudo data for bkg only" << endl;
+   paramsToGenerateData->Print("v");
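+   // This snapshot is the background-only point: POI fixed to 0 and nuisance
+   // parameters at their conditional maximum-likelihood values. It is reloaded
+   // before each pseudo experiment below.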
+
+
+   RooArgSet unconditionalObs;
+   unconditionalObs.add(*mc->GetObservables());
+   unconditionalObs.add(*mc->GetGlobalObservables()); // comment this out for the original conditional ensemble
+
+   double CLb=0;
+   double CLbinclusive=0;
+
+   // Now we generate background only and find distribution of upper limits
+   TH1F* histOfUL = new TH1F("histOfUL","",100,0,firstPOI->getMax());
+   histOfUL->GetXaxis()->SetTitle("Upper Limit (background only)");
+   histOfUL->GetYaxis()->SetTitle("Entries");
+   for(int imc=0; imc<nToyMC; ++imc){
+
+      // set parameters back to values for generating pseudo data
+      //    cout << "\n get current nuis, set vals, print again" << endl;
+      w->loadSnapshot("paramsToGenerateData");
+      //    poiAndNuisance->Print("v");
+
+      RooDataSet* toyData = 0;
+      // now generate a toy dataset
+      if(!mc->GetPdf()->canBeExtended()){
+         if(data->numEntries()==1)
+            toyData = mc->GetPdf()->generate(*mc->GetObservables(),1);
+         else
+            cout <<"Not sure what to do about this model" <<endl;
       } else{
-         break;
+         //      cout << "generating extended dataset"<<endl;
+         toyData = mc->GetPdf()->generate(*mc->GetObservables(),Extended());
       }
-    }
 
+      // generate global observables
+      // need to be careful for simpdf
+      //    RooDataSet* globalData = mc->GetPdf()->generate(*mc->GetGlobalObservables(),1);
+
+      RooSimultaneous* simPdf = dynamic_cast<RooSimultaneous*>(mc->GetPdf());
+      if(!simPdf){
+         RooDataSet *one = mc->GetPdf()->generate(*mc->GetGlobalObservables(), 1);
+         const RooArgSet *values = one->get();
+         RooArgSet *allVars = mc->GetPdf()->getVariables();
+         *allVars = *values;
+         delete allVars;
+         delete values;
+         delete one;
+      } else {
+
+         //try fix for sim pdf
+         TIterator* iter = simPdf->indexCat().typeIterator() ;
+         RooCatType* tt = NULL;
+         while((tt=(RooCatType*) iter->Next())) {
+
+            // Get pdf associated with state from simpdf
+            RooAbsPdf* pdftmp = simPdf->getPdf(tt->GetName()) ;
+
+            // Generate only global variables defined by the pdf associated with this state
+            RooArgSet* globtmp = pdftmp->getObservables(*mc->GetGlobalObservables()) ;
+            RooDataSet* tmp = pdftmp->generate(*globtmp,1) ;
+
+            // Transfer values to output placeholder
+            *globtmp = *tmp->get(0) ;
+
+            // Cleanup
+            delete globtmp ;
+            delete tmp ;
+         }
+      }
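+      // For a simultaneous pdf the global observables are generated category by
+      // category from the pdf of each state, so every constraint term receives
+      // one generated value per toy.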
 
+      //    globalData->Print("v");
+      //    unconditionalObs = *globalData->get();
+      //    mc->GetGlobalObservables()->Print("v");
+      //    delete globalData;
+      //    cout << "toy data = " << endl;
+      //    toyData->get()->Print("v");
+
+      // get test stat at observed UL in observed data
+      firstPOI->setVal(observedUL);
+      double toyTSatObsUL = fc.GetTestStatSampler()->EvaluateTestStatistic(*toyData,tmpPOI);
+      //    toyData->get()->Print("v");
+      //    cout <<"obsTSatObsUL " <<obsTSatObsUL << "toyTS " << toyTSatObsUL << endl;
+      if(obsTSatObsUL < toyTSatObsUL) // not sure about <= part yet
+         CLb+= (1.)/nToyMC;
+      if(obsTSatObsUL <= toyTSatObsUL) // not sure about <= part yet
+         CLbinclusive+= (1.)/nToyMC;
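+      // CLb is the background-only p-value of the observed result: the fraction
+      // of background-only toys with a test statistic (at the observed upper
+      // limit) larger than the one observed in data; the "inclusive" variant
+      // also counts ties.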
+
+
+      // loop over points in belt to find upper limit for this toy data
+      double thisUL = 0;
+      for(Int_t i=0; i<parameterScan->numEntries(); ++i){
+         tmpPoint = (RooArgSet*) parameterScan->get(i)->clone("temp");
+         double arMax = belt->GetAcceptanceRegionMax(*tmpPoint);
+         firstPOI->setVal( tmpPoint->getRealValue(firstPOI->GetName()) );
+         //   double thisTS = profile->getVal();
+         double thisTS = fc.GetTestStatSampler()->EvaluateTestStatistic(*toyData,tmpPOI);
+
+         //   cout << "poi = " << firstPOI->getVal()
+         // << " max is " << arMax << " this profile = " << thisTS << endl;
+         //      cout << "thisTS = " << thisTS<<endl;
+         if(thisTS<=arMax){
+            thisUL = firstPOI->getVal();
+         } else{
+            break;
+         }
+      }
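+      // The upper limit for this toy is the largest scanned POI value whose test
+      // statistic is still inside the acceptance region; the scan stops at the
+      // first excluded point (assuming the excluded region is contiguous).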
 
-    /*
-    // loop over points in belt to find upper limit for this toy data
-    double thisUL = 0;
-    for(Int_t i=0; i<histOfThresholds->GetNbinsX(); ++i){
-      tmpPoint = (RooArgSet*) parameterScan->get(i)->clone("temp");
-      cout <<"----------------  "<<i<<endl;
-      tmpPoint->Print("v");
-      cout << "from hist " << histOfThresholds->GetBinCenter(i+1) <<endl;
-      double arMax = histOfThresholds->GetBinContent(i+1);
-      // cout << " threhold from Hist = aMax " << arMax<<endl;
-      // double arMax2 = belt->GetAcceptanceRegionMax(*tmpPoint);
-      // cout << "from scan arMax2 = "<< arMax2 << endl; // not the same due to TH1F not TH1D
-      // cout << "scan - hist" << arMax2-arMax << endl;
-      firstPOI->setVal( histOfThresholds->GetBinCenter(i+1));
-      //   double thisTS = profile->getVal();
-      double thisTS = fc.GetTestStatSampler()->EvaluateTestStatistic(*toyData,tmpPOI);
-
-      //   cout << "poi = " << firstPOI->getVal()
-      // << " max is " << arMax << " this profile = " << thisTS << endl;
-      //      cout << "thisTS = " << thisTS<<endl;
-
-      // NOTE: need to add a small epsilon term for single precision vs. double precision
-      if(thisTS<=arMax + 1e-7){
-         thisUL = firstPOI->getVal();
-      } else{
-         break;
+
+
+      /*
+      // loop over points in belt to find upper limit for this toy data
+      double thisUL = 0;
+      for(Int_t i=0; i<histOfThresholds->GetNbinsX(); ++i){
+         tmpPoint = (RooArgSet*) parameterScan->get(i)->clone("temp");
+         cout <<"----------------  "<<i<<endl;
+         tmpPoint->Print("v");
+         cout << "from hist " << histOfThresholds->GetBinCenter(i+1) <<endl;
+         double arMax = histOfThresholds->GetBinContent(i+1);
+         // cout << " threshold from Hist = aMax " << arMax<<endl;
+         // double arMax2 = belt->GetAcceptanceRegionMax(*tmpPoint);
+         // cout << "from scan arMax2 = "<< arMax2 << endl; // not the same due to TH1F not TH1D
+         // cout << "scan - hist" << arMax2-arMax << endl;
+         firstPOI->setVal( histOfThresholds->GetBinCenter(i+1));
+         //   double thisTS = profile->getVal();
+         double thisTS = fc.GetTestStatSampler()->EvaluateTestStatistic(*toyData,tmpPOI);
+
+         //   cout << "poi = " << firstPOI->getVal()
+         // << " max is " << arMax << " this profile = " << thisTS << endl;
+         //      cout << "thisTS = " << thisTS<<endl;
+
+         // NOTE: need to add a small epsilon term for single precision vs. double precision
+         if(thisTS<=arMax + 1e-7){
+            thisUL = firstPOI->getVal();
+         } else{
+            break;
+         }
       }
-    }
-    */
-
-    histOfUL->Fill(thisUL);
-
-    // for few events, data is often the same, and UL is often the same
-    //    cout << "thisUL = " << thisUL<<endl;
-
-    delete toyData;
-  }
-  histOfUL->Draw();
-  c1->SaveAs("one-sided_upper_limit_output.pdf");
-
-  // if you want to see a plot of the sampling distribution for a particular scan point:
-  /*
-  SamplingDistPlot sampPlot;
-  int indexInScan = 0;
-  tmpPoint = (RooArgSet*) parameterScan->get(indexInScan)->clone("temp");
-  firstPOI->setVal( tmpPoint->getRealValue(firstPOI->GetName()) );
-  toymcsampler->SetParametersForTestStat(tmpPOI);
-  SamplingDistribution* samp = toymcsampler->GetSamplingDistribution(*tmpPoint);
-  sampPlot.AddSamplingDistribution(samp);
-  sampPlot.Draw();
-   */
-
-  // Now find bands and power constraint
-  Double_t* bins = histOfUL->GetIntegral();
-  TH1F* cumulative = (TH1F*) histOfUL->Clone("cumulative");
-  cumulative->SetContent(bins);
-  double band2sigDown, band1sigDown, bandMedian, band1sigUp,band2sigUp;
-  for(int i=1; i<=cumulative->GetNbinsX(); ++i){
-    if(bins[i]<RooStats::SignificanceToPValue(2))
-      band2sigDown=cumulative->GetBinCenter(i);
-    if(bins[i]<RooStats::SignificanceToPValue(1))
-      band1sigDown=cumulative->GetBinCenter(i);
-    if(bins[i]<0.5)
-      bandMedian=cumulative->GetBinCenter(i);
-    if(bins[i]<RooStats::SignificanceToPValue(-1))
-      band1sigUp=cumulative->GetBinCenter(i);
-    if(bins[i]<RooStats::SignificanceToPValue(-2))
-      band2sigUp=cumulative->GetBinCenter(i);
-  }
-  cout << "-2 sigma  band " << band2sigDown << endl;
-  cout << "-1 sigma  band " << band1sigDown << " [Power Constriant)]" << endl;
-  cout << "median of band " << bandMedian << endl;
-  cout << "+1 sigma  band " << band1sigUp << endl;
-  cout << "+2 sigma  band " << band2sigUp << endl;
-
-  // print out the iterval on the first Parameter of Interest
-  cout << "\nobserved 95% upper-limit "<< interval->UpperLimit(*firstPOI) <<endl;
-  cout << "CLb strict [P(toy>obs|0)] for observed 95% upper-limit "<< CLb <<endl;
-  cout << "CLb inclusive [P(toy>=obs|0)] for observed 95% upper-limit "<< CLbinclusive <<endl;
-
-  delete profile;
-  delete nll;
+      */
+
+      histOfUL->Fill(thisUL);
+
+      // for few events, data is often the same, and UL is often the same
+      //    cout << "thisUL = " << thisUL<<endl;
+
+      delete toyData;
+   }
+   histOfUL->Draw();
+   c1->SaveAs("one-sided_upper_limit_output.pdf");
+
+   // if you want to see a plot of the sampling distribution for a particular scan point:
+   /*
+   SamplingDistPlot sampPlot;
+   int indexInScan = 0;
+   tmpPoint = (RooArgSet*) parameterScan->get(indexInScan)->clone("temp");
+   firstPOI->setVal( tmpPoint->getRealValue(firstPOI->GetName()) );
+   toymcsampler->SetParametersForTestStat(tmpPOI);
+   SamplingDistribution* samp = toymcsampler->GetSamplingDistribution(*tmpPoint);
+   sampPlot.AddSamplingDistribution(samp);
+   sampPlot.Draw();
+   */
+
+   // Now find bands and power constraint
+   Double_t* bins = histOfUL->GetIntegral();
+   TH1F* cumulative = (TH1F*) histOfUL->Clone("cumulative");
+   cumulative->SetContent(bins);
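+   // GetIntegral() returns the normalized cumulative distribution of histOfUL,
+   // so the loop below picks out the bin centers where this CDF crosses the
+   // Gaussian quantiles: SignificanceToPValue(2) ~ 0.023, (1) ~ 0.16, 0.5 for
+   // the median, (-1) ~ 0.84 and (-2) ~ 0.977.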
+   double band2sigDown=0, band1sigDown=0, bandMedian=0, band1sigUp=0, band2sigUp=0;
+   for(int i=1; i<=cumulative->GetNbinsX(); ++i){
+      if(bins[i]<RooStats::SignificanceToPValue(2))
+         band2sigDown=cumulative->GetBinCenter(i);
+      if(bins[i]<RooStats::SignificanceToPValue(1))
+         band1sigDown=cumulative->GetBinCenter(i);
+      if(bins[i]<0.5)
+         bandMedian=cumulative->GetBinCenter(i);
+      if(bins[i]<RooStats::SignificanceToPValue(-1))
+         band1sigUp=cumulative->GetBinCenter(i);
+      if(bins[i]<RooStats::SignificanceToPValue(-2))
+         band2sigUp=cumulative->GetBinCenter(i);
+   }
+   cout << "-2 sigma  band " << band2sigDown << endl;
+   cout << "-1 sigma  band " << band1sigDown << " [Power Constriant)]" << endl;
+   cout << "median of band " << bandMedian << endl;
+   cout << "+1 sigma  band " << band1sigUp << endl;
+   cout << "+2 sigma  band " << band2sigUp << endl;
+
+   // print out the interval on the first Parameter of Interest
+   cout << "\nobserved 95% upper-limit "<< interval->UpperLimit(*firstPOI) <<endl;
+   cout << "CLb strict [P(toy>obs|0)] for observed 95% upper-limit "<< CLb <<endl;
+   cout << "CLb inclusive [P(toy>=obs|0)] for observed 95% upper-limit "<< CLbinclusive <<endl;
+
+   delete profile;
+   delete nll;
 
 }
diff --git a/tutorials/roostats/rs401d_FeldmanCousins.C b/tutorials/roostats/rs401d_FeldmanCousins.C
index 0f4a244393b53da7d4552c9181deda52b4047148..133a01297eac876bc58e58e96b11e12d49598a98 100644
--- a/tutorials/roostats/rs401d_FeldmanCousins.C
+++ b/tutorials/roostats/rs401d_FeldmanCousins.C
@@ -72,25 +72,25 @@ void rs401d_FeldmanCousins(bool doFeldmanCousins=false, bool doMCMC = true)
    t.Start();
 
 
-   /*
-      Taken from Feldman & Cousins paper, Phys.Rev.D57:3873-3889,1998.
-      e-Print: physics/9711021 (see page 13.)
-
-      Quantum mechanics dictates that the probability of such a transformation is given by the formula
-      P (νµ → ν e ) = sin^2 (2θ) sin^2 (1.27 ∆m^2 L /E )
-      where P is the probability for a νµ to transform into a νe , L is the distance in km between
-      the creation of the neutrino from meson decay and its interaction in the detector, E is the
-      neutrino energy in GeV, and ∆m^2 = |m^2− m^2 | in (eV/c^2 )^2 .
-
-      To demonstrate how this works in practice, and how it compares to alternative approaches
-      that have been used, we consider a toy model of a typical neutrino oscillation experiment.
-      The toy model is defined by the following parameters: Mesons are assumed to decay to
-      neutrinos uniformly in a region 600 m to 1000 m from the detector. The expected background
-      from conventional νe interactions and misidentified νµ interactions is assumed to be 100
-      events in each of 5 energy bins which span the region from 10 to 60 GeV. We assume that
-      the νµ flux is such that if P (νµ → ν e ) = 0.01 averaged over any bin, then that bin would
-      have an expected additional contribution of 100 events due to νµ → ν e oscillations.
-      */
+
+   // Taken from Feldman & Cousins paper, Phys.Rev.D57:3873-3889,1998.
+   // e-Print: physics/9711021 (see page 13.)
+   //
+   // Quantum mechanics dictates that the probability of such a transformation is given by the formula
+   // $P(\nu_\mu \rightarrow \nu_e) = \sin^2(2\theta)\, \sin^2(1.27\, \Delta m^2 L/E)$
+   // where P is the probability for a $\nu_\mu$ to transform into a $\nu_e$, L is the distance in km between
+   // the creation of the neutrino from meson decay and its interaction in the detector, E is the
+   // neutrino energy in GeV, and $\Delta m^2 = |m_2^2 - m_1^2|$ in $(eV/c^2)^2$.
+   //
+   // To demonstrate how this works in practice, and how it compares to alternative approaches
+   // that have been used, we consider a toy model of a typical neutrino oscillation experiment.
+   // The toy model is defined by the following parameters: Mesons are assumed to decay to
+   // neutrinos uniformly in a region 600 m to 1000 m from the detector. The expected background
+   // from conventional $\nu_e$ interactions and misidentified $\nu_\mu$ interactions is assumed to be 100
+   // events in each of 5 energy bins which span the region from 10 to 60 GeV. We assume that
+   // the $\nu_\mu$ flux is such that if $P(\nu_\mu \rightarrow \nu_e) = 0.01$ averaged over any bin, then that bin would
+   // have an expected additional contribution of 100 events due to $\nu_\mu \rightarrow \nu_e$ oscillations.
+
 
    // Make signal model model
    RooRealVar E("E","", 15,10,60,"GeV");
@@ -108,7 +108,7 @@ void rs401d_FeldmanCousins(bool doFeldmanCousins=false, bool doMCMC = true)
    // only E is observable, so create the signal model by integrating out L
    RooAbsPdf* sigModel = PnmuTone.createProjection(L);
 
-   // create   \int dE' dL' P(E',L' | \Delta m^2).
+   // create $\int dE'\, dL'\, P(E',L' | \Delta m^2)$.
    // Given RooFit will renormalize the PDF in the range of the observables,
    // the average probability to oscillate in the experiment's acceptance
    // needs to be incorporated into the extended term in the likelihood.
@@ -126,7 +126,7 @@ void rs401d_FeldmanCousins(bool doFeldmanCousins=false, bool doMCMC = true)
 
    // Getting the flux is a bit tricky.  It is more celear to include a cross section term that is not
    // explicitly refered to in the text, eg.
-   // # events in bin = flux * cross-section for nu_e interaction in E bin * average prob nu_mu osc. to nu_e in bin
+   // number of events in bin = flux * cross-section for nu_e interaction in E bin * average prob nu_mu osc. to nu_e in bin
    // let maxEventsInBin = flux * cross-section for nu_e interaction in E bin
    // maxEventsInBin * 1% chance per bin =  100 events / bin
    // therefore maxEventsInBin = 10,000.
@@ -135,7 +135,7 @@ void rs401d_FeldmanCousins(bool doFeldmanCousins=false, bool doMCMC = true)
    RooConstVar inverseArea("inverseArea","1/(#Delta E #Delta L)",
                            1./(EPrime.getMax()-EPrime.getMin())/(LPrime.getMax()-LPrime.getMin()));
 
-   // sigNorm = maxEventsTot * (\int dE dL prob to oscillate in experiment / Area) * sin^2(2\theta)
+   // $sigNorm = maxEventsTot \cdot \int dE dL \frac{P_{oscillate\ in\ experiment}}{Area} \cdot \sin^2(2\theta)$
    RooProduct sigNorm("sigNorm", "", RooArgSet(maxEventsTot, *intProbToOscInExp, inverseArea, sinSq2theta));
    // bkg = 5 bins * 100 events / bin
    RooConstVar bkgNorm("bkgNorm","normalization for background",500);