Skip to content
Snippets Groups Projects
Commit 1d36d61f authored by Lorenzo Moneta's avatar Lorenzo Moneta
Browse files

fix ~ 15 high impact coverity issues (Resource leaks in...

fix ~ 15 high impact coverity issues (Resource leaks in NumberCountingPdfFactory and Uninitialized variables in other classes)


git-svn-id: http://root.cern.ch/svn/root/trunk@43992 27541ba8-7e3a-0410-8455-c3a389f83636
parent 65b89dc1
No related branches found
No related tags found
No related merge requests found
...@@ -113,7 +113,6 @@ namespace RooStats { ...@@ -113,7 +113,6 @@ namespace RooStats {
private: private:
bool fOneSided; // for one sided PL test statistic bool fOneSided; // for one sided PL test statistic
bool fNominalAsimov;
mutable int fUseQTilde; // flag to indicate if using qtilde or not (-1 (default based on RooRealVar)), 0 false, 1 (true) mutable int fUseQTilde; // flag to indicate if using qtilde or not (-1 (default based on RooRealVar)), 0 false, 1 (true)
static int fgPrintLevel; // control print level (0 minimal, 1 normal, 2 debug) static int fgPrintLevel; // control print level (0 minimal, 1 normal, 2 debug)
mutable double fNLLObs; mutable double fNLLObs;
......
...@@ -625,6 +625,7 @@ BayesianCalculator::BayesianCalculator() : ...@@ -625,6 +625,7 @@ BayesianCalculator::BayesianCalculator() :
fSize(0.05), fLeftSideFraction(0.5), fSize(0.05), fLeftSideFraction(0.5),
fBrfPrecision(0.00005), fBrfPrecision(0.00005),
fNScanBins(-1), fNScanBins(-1),
fNumIterations(0),
fValidInterval(false) fValidInterval(false)
{ {
// default constructor // default constructor
...@@ -1280,7 +1281,7 @@ void BayesianCalculator::ComputeIntervalFromApproxPosterior(double lowerCutOff, ...@@ -1280,7 +1281,7 @@ void BayesianCalculator::ComputeIntervalFromApproxPosterior(double lowerCutOff,
if (!fApproxPosterior) return; if (!fApproxPosterior) return;
double prob[2]; double prob[2];
double limits[2]; double limits[2] = {0,0};
prob[0] = lowerCutOff; prob[0] = lowerCutOff;
prob[1] = upperCutOff; prob[1] = upperCutOff;
fApproxPosterior->GetQuantiles(2,limits,prob); fApproxPosterior->GetQuantiles(2,limits,prob);
......
...@@ -663,8 +663,8 @@ SamplingDistribution * HypoTestInverterResult::GetLimitDistribution(bool lower ...@@ -663,8 +663,8 @@ SamplingDistribution * HypoTestInverterResult::GetLimitDistribution(bool lower
delete distVec[i]; distVec[i] = 0; delete distVec[i]; distVec[i] = 0;
std::sort(pvalues.begin(), pvalues.end()); std::sort(pvalues.begin(), pvalues.end());
// find the quantiles of the distribution // find the quantiles of the distribution
double p[1]; double p[1] = {0};
double q[1]; double q[1] = {0};
quantVec[i] = std::vector<double>(size); quantVec[i] = std::vector<double>(size);
for (int ibin = 0; ibin < size; ++ibin) { for (int ibin = 0; ibin < size; ++ibin) {
...@@ -689,7 +689,7 @@ SamplingDistribution * HypoTestInverterResult::GetLimitDistribution(bool lower ...@@ -689,7 +689,7 @@ SamplingDistribution * HypoTestInverterResult::GetLimitDistribution(bool lower
// loop on the p values and find the limit for each expected point in the quantiles vector // loop on the p values and find the limit for each expected point in the quantiles vector
for (int j = 0; j < size; ++j ) { for (int j = 0; j < size; ++j ) {
TGraph g(ArraySize() ); TGraph g( npoints );
for (int k = 0; k < npoints ; ++k) { for (int k = 0; k < npoints ; ++k) {
g.SetPoint(k, GetXValue(index[k]), (quantVec[index[k]])[j] ); g.SetPoint(k, GetXValue(index[k]), (quantVec[index[k]])[j] );
} }
...@@ -749,8 +749,11 @@ double HypoTestInverterResult::GetExpectedLimit(double nsig, bool lower, const ...@@ -749,8 +749,11 @@ double HypoTestInverterResult::GetExpectedLimit(double nsig, bool lower, const
// else (default) find expected limit by obtaining first a full limit distributions // else (default) find expected limit by obtaining first a full limit distributions
// The last one is in general more correct // The last one is in general more correct
const int nEntries = ArraySize();
if (nEntries <= 0) return (lower) ? 1 : 0; // return 1 for lower, 0 for upper
HypoTestResult * r = dynamic_cast<HypoTestResult *> (fYObjects.First() ); HypoTestResult * r = dynamic_cast<HypoTestResult *> (fYObjects.First() );
assert(r != 0);
if (!r->GetNullDistribution() && !r->GetAltDistribution() ) { if (!r->GetNullDistribution() && !r->GetAltDistribution() ) {
// we are in the asymptotic case // we are in the asymptotic case
// get the limits obtained at the different sigma values // get the limits obtained at the different sigma values
...@@ -763,8 +766,8 @@ double HypoTestInverterResult::GetExpectedLimit(double nsig, bool lower, const ...@@ -763,8 +766,8 @@ double HypoTestInverterResult::GetExpectedLimit(double nsig, bool lower, const
return values[i]; return values[i];
} }
double p[1]; double p[1] = {0};
double q[1]; double q[1] = {0};
p[0] = ROOT::Math::normal_cdf(nsig,1); p[0] = ROOT::Math::normal_cdf(nsig,1);
// for CLs+b can get the quantiles of p-value distribution and // for CLs+b can get the quantiles of p-value distribution and
...@@ -776,7 +779,6 @@ double HypoTestInverterResult::GetExpectedLimit(double nsig, bool lower, const ...@@ -776,7 +779,6 @@ double HypoTestInverterResult::GetExpectedLimit(double nsig, bool lower, const
option.ToUpper(); option.ToUpper();
if (option.Contains("P")) { if (option.Contains("P")) {
const int nEntries = ArraySize();
TGraph g(nEntries); TGraph g(nEntries);
// sort the arrays based on the x values // sort the arrays based on the x values
......
...@@ -165,14 +165,13 @@ void NumberCountingPdfFactory::AddExpData(Double_t* sig, ...@@ -165,14 +165,13 @@ void NumberCountingPdfFactory::AddExpData(Double_t* sig,
// Arguments are an array of expected signal, expected background, and relative // Arguments are an array of expected signal, expected background, and relative
// background uncertainty (eg. 0.1 for 10% uncertainty), and the number of channels. // background uncertainty (eg. 0.1 for 10% uncertainty), and the number of channels.
using std::vector; std::vector<Double_t> mainMeas(nbins);
Double_t* mainMeas = new Double_t[nbins];
// loop over channels // loop over channels
for(Int_t i=0; i<nbins; ++i){ for(Int_t i=0; i<nbins; ++i){
mainMeas[i] = sig[i] + back[i]; mainMeas[i] = sig[i] + back[i];
} }
return AddData(mainMeas, back, back_syst, nbins, ws, dsName); return AddData(&mainMeas[0], back, back_syst, nbins, ws, dsName);
} }
//_______________________________________________________ //_______________________________________________________
...@@ -185,13 +184,13 @@ void NumberCountingPdfFactory::AddExpDataWithSideband(Double_t* sigExp, ...@@ -185,13 +184,13 @@ void NumberCountingPdfFactory::AddExpDataWithSideband(Double_t* sigExp,
// Arguments are an array of expected signal, expected background, and relative // Arguments are an array of expected signal, expected background, and relative
// ratio of background expected in the sideband to that expected in signal region, and the number of channels. // ratio of background expected in the sideband to that expected in signal region, and the number of channels.
Double_t* mainMeas = new Double_t[nbins]; std::vector<Double_t> mainMeas(nbins);
Double_t* sideband = new Double_t[nbins]; std::vector<Double_t> sideband(nbins);
for(Int_t i=0; i<nbins; ++i){ for(Int_t i=0; i<nbins; ++i){
mainMeas[i] = sigExp[i] + backExp[i]; mainMeas[i] = sigExp[i] + backExp[i];
sideband[i] = backExp[i]*tau[i]; sideband[i] = backExp[i]*tau[i];
} }
return AddDataWithSideband(mainMeas, sideband, tau, nbins, ws, dsName); return AddDataWithSideband(&mainMeas[0], &sideband[0], tau, nbins, ws, dsName);
} }
...@@ -239,8 +238,8 @@ void NumberCountingPdfFactory::AddData(Double_t* mainMeas, ...@@ -239,8 +238,8 @@ void NumberCountingPdfFactory::AddData(Double_t* mainMeas,
TList observablesCollection; TList observablesCollection;
TTree* tree = new TTree(); TTree* tree = new TTree();
Double_t* xForTree = new Double_t[nbins]; std::vector<Double_t> xForTree(nbins);
Double_t* yForTree = new Double_t[nbins]; std::vector<Double_t> yForTree(nbins);
// loop over channels // loop over channels
for(Int_t i=0; i<nbins; ++i){ for(Int_t i=0; i<nbins; ++i){
...@@ -274,8 +273,8 @@ void NumberCountingPdfFactory::AddData(Double_t* mainMeas, ...@@ -274,8 +273,8 @@ void NumberCountingPdfFactory::AddData(Double_t* mainMeas,
xForTree[i] = mainMeas[i]; xForTree[i] = mainMeas[i];
yForTree[i] = back[i]*_tau; yForTree[i] = back[i]*_tau;
tree->Branch(("x"+str.str()).c_str(), xForTree+i ,("x"+str.str()+"/D").c_str()); tree->Branch(("x"+str.str()).c_str(), &xForTree[i] ,("x"+str.str()+"/D").c_str());
tree->Branch(("y"+str.str()).c_str(), yForTree+i ,("y"+str.str()+"/D").c_str()); tree->Branch(("y"+str.str()).c_str(), &yForTree[i] ,("y"+str.str()+"/D").c_str());
ws->var(("b"+str.str()).c_str())->setMax( 1.2*back[i]+MaxSigma*(sqrt(back[i])+back[i]*back_syst[i]) ); ws->var(("b"+str.str()).c_str())->setMax( 1.2*back[i]+MaxSigma*(sqrt(back[i])+back[i]*back_syst[i]) );
ws->var(("b"+str.str()).c_str())->setVal( back[i] ); ws->var(("b"+str.str()).c_str())->setVal( back[i] );
...@@ -319,8 +318,10 @@ void NumberCountingPdfFactory::AddDataWithSideband(Double_t* mainMeas, ...@@ -319,8 +318,10 @@ void NumberCountingPdfFactory::AddDataWithSideband(Double_t* mainMeas,
TList observablesCollection; TList observablesCollection;
TTree* tree = new TTree(); TTree* tree = new TTree();
Double_t* xForTree = new Double_t[nbins];
Double_t* yForTree = new Double_t[nbins]; std::vector<Double_t> xForTree(nbins);
std::vector<Double_t> yForTree(nbins);
// loop over channels // loop over channels
for(Int_t i=0; i<nbins; ++i){ for(Int_t i=0; i<nbins; ++i){
...@@ -355,8 +356,8 @@ void NumberCountingPdfFactory::AddDataWithSideband(Double_t* mainMeas, ...@@ -355,8 +356,8 @@ void NumberCountingPdfFactory::AddDataWithSideband(Double_t* mainMeas,
xForTree[i] = mainMeas[i]; xForTree[i] = mainMeas[i];
yForTree[i] = sideband[i]; yForTree[i] = sideband[i];
tree->Branch(("x"+str.str()).c_str(), xForTree+i ,("x"+str.str()+"/D").c_str()); tree->Branch(("x"+str.str()).c_str(), &xForTree[i] ,("x"+str.str()+"/D").c_str());
tree->Branch(("y"+str.str()).c_str(), yForTree+i ,("y"+str.str()+"/D").c_str()); tree->Branch(("y"+str.str()).c_str(), &yForTree[i] ,("y"+str.str()+"/D").c_str());
ws->var(("b"+str.str()).c_str())->setMax( 1.2*back+MaxSigma*(sqrt(back)+back*back_syst) ); ws->var(("b"+str.str()).c_str())->setMax( 1.2*back+MaxSigma*(sqrt(back)+back*back_syst) );
ws->var(("b"+str.str()).c_str())->setVal( back ); ws->var(("b"+str.str()).c_str())->setVal( back );
......
...@@ -143,7 +143,7 @@ Bool_t PointSetInterval::CheckParameters(const RooArgSet &parameterPoint) const ...@@ -143,7 +143,7 @@ Bool_t PointSetInterval::CheckParameters(const RooArgSet &parameterPoint) const
Double_t PointSetInterval::UpperLimit(RooRealVar& param ) Double_t PointSetInterval::UpperLimit(RooRealVar& param )
{ {
RooDataSet* tree = dynamic_cast<RooDataSet*>( fParameterPointsInInterval ); RooDataSet* tree = dynamic_cast<RooDataSet*>( fParameterPointsInInterval );
Double_t low, high; Double_t low = 0, high = 0;
if( tree ){ if( tree ){
tree->getRange(param, low, high); tree->getRange(param, low, high);
return high; return high;
...@@ -155,7 +155,7 @@ Double_t PointSetInterval::UpperLimit(RooRealVar& param ) ...@@ -155,7 +155,7 @@ Double_t PointSetInterval::UpperLimit(RooRealVar& param )
Double_t PointSetInterval::LowerLimit(RooRealVar& param ) Double_t PointSetInterval::LowerLimit(RooRealVar& param )
{ {
RooDataSet* tree = dynamic_cast<RooDataSet*>( fParameterPointsInInterval ); RooDataSet* tree = dynamic_cast<RooDataSet*>( fParameterPointsInInterval );
Double_t low, high; Double_t low = 0, high = 0;
if( tree ){ if( tree ){
tree->getRange(param, low, high); tree->getRange(param, low, high);
return low; return low;
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment