// @(#)root/splot:$Id$
// Author: Muriel Pivk, Anna Kreshuk 10/2005

/**********************************************************************
 *                                                                    *
 * Copyright (c) 2005 ROOT Foundation, CERN/PH-SFT                    *
 *                                                                    *
 **********************************************************************/

#include "TSPlot.h"
#include "TVirtualFitter.h"
#include "TH1.h"
#include "TTreePlayer.h"
#include "TTreeFormula.h"
#include "TTreeFormulaManager.h"
#include "TSelectorDraw.h"
#include "TBrowser.h"
#include "TClass.h"
#include "TMath.h"

extern void Yields(Int_t &, Double_t *, Double_t &f, Double_t *x, Int_t iflag);

ClassImp(TSPlot);

/** \class TSPlot

A common method used in High Energy Physics to perform measurements is the
maximum Likelihood method, exploiting discriminating variables to disentangle
signal from background. The crucial point for such an analysis to be reliable
is to use an exhaustive list of sources of events combined with an accurate
description of all the Probability Density Functions (PDF).

To assess the validity of the fit, a convincing quality check is to explore
the data sample further by examining the distributions of control variables.
A control variable can be obtained, for instance, by removing one of the
discriminating variables before performing the maximum Likelihood fit again:
this removed variable is a control variable. The expected distribution of this
control variable, for signal, is to be compared to the one extracted, for
signal, from the data sample. In order to do so, one must be able to unfold
it from the distribution of the whole data sample.

The TSPlot method allows one to reconstruct the distributions of the control
variable, independently for each of the various sources of events, without
making use of any a priori knowledge of this variable. The aim is thus to use
the knowledge available for the discriminating variables to infer the
behaviour of the individual sources of events with respect to the control
variable. TSPlot is optimal if the control variable is uncorrelated with the
discriminating variables.

A detailed description of the formalism itself, called
\f$\hbox{$_s$}{\cal P}lot\f$, is given in [1].

### The method

The \f$\hbox{$_s$}{\cal P}lot\f$ technique is developed in the above context
of a maximum Likelihood method making use of discriminating variables. One
considers a data sample in which several species of events are merged. These
species represent various signal components and background components which
all together account for the data sample.

The different terms of the log-Likelihood are:

- \f$N\f$ : the total number of events in the data sample,
- \f${\rm N}_{\rm s}\f$ : the number of species of events populating the data sample,
- \f$N_i\f$ : the number of events expected on the average for the \f$i^{\rm th}\f$ species,
- \f${\rm f}_i(y_e)\f$ : the value of the PDFs of the discriminating variables
  \f$y\f$ for the \f$i^{\rm th}\f$ species and for event \f$e\f$,
- \f$x\f$ : the set of control variables which, by definition, do not appear in
  the expression of the Likelihood function \f${\cal L}\f$.

The extended log-Likelihood reads:

\f[
{\cal L}=\sum_{e=1}^{N}\ln \Big\{ \sum_{i=1}^{{\rm N}_{\rm s}}N_i{\rm f}_i(y_e) \Big\} -\sum_{i=1}^{{\rm N}_{\rm s}}N_i \tag{1}
\f]

From this expression, after maximization of \f${\cal L}\f$ with respect to the
\f$N_i\f$ parameters, a weight can be computed for every event and each
species, in order to obtain later the true distribution \f$\hbox{M}_i(x)\f$ of
variable \f$x\f$.
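For concreteness, Eq. (1) can be evaluated numerically as in the minimal
sketch below. This is an illustration only, not part of the TSPlot API: the
event count `N`, the number of species `Ns`, the yields `yields[i]` and the
PDF values `pdf[e][i]` \f$={\rm f}_i(y_e)\f$ are assumed to be provided by the
caller.

~~~ {.cpp}
// Minimal sketch of Eq. (1): extended log-Likelihood for given yields.
// N, Ns, yields[Ns] and pdf[e][i] = f_i(y_e) are assumed inputs.
Double_t LogLikelihood(Int_t N, Int_t Ns, const Double_t *yields,
                       Double_t **pdf)
{
   Double_t logL = 0;
   for (Int_t e = 0; e < N; e++) {
      Double_t sum = 0;
      for (Int_t i = 0; i < Ns; i++)
         sum += yields[i] * pdf[e][i];   // sum_i N_i f_i(y_e)
      logL += TMath::Log(sum);
   }
   for (Int_t i = 0; i < Ns; i++)
      logL -= yields[i];                 // extended term: - sum_i N_i
   return logL;
}
~~~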
If \f${\rm n}\f$ is one of the \f${\rm N}_{\rm s}\f$ species present in the
data sample, the weight for this species is defined by:

\f[
\fbox{$ {_s{\cal P}}_{\rm n}(y_e)={\sum_{j=1}^{{\rm N}_{\rm s}} \hbox{V}_{{\rm n}j}{\rm f}_j(y_e)\over\sum_{k=1}^{{\rm N}_{\rm s}}N_k{\rm f}_k(y_e) } $} , \tag{2}
\f]

where \f$\hbox{V}_{{\rm n}j}\f$ is the covariance matrix resulting from the
Likelihood maximization. This matrix can be used directly from the fit, but
this is numerically less accurate than the direct computation:

\f[
\hbox{ V}^{-1}_{{\rm n}j}~=~ {\partial^2(-{\cal L})\over\partial N_{\rm n}\partial N_j}~=~ \sum_{e=1}^N {{\rm f}_{\rm n}(y_e){\rm f}_j(y_e)\over(\sum_{k=1}^{{\rm N}_{\rm s}}N_k{\rm f}_k(y_e))^2} . \tag{3}
\f]

The distribution of the control variable \f$x\f$ obtained by histogramming the
weighted events reproduces, on average, the true distribution
\f${\hbox{ {M}}}_{\rm n}(x)\f$.

The class TSPlot allows one to reconstruct the true distribution
\f${\hbox{ {M}}}_{\rm n}(x)\f$ of a control variable \f$x\f$ for each of the
\f${\rm N}_{\rm s}\f$ species from the sole knowledge of the PDFs of the
discriminating variables \f${\rm f}_i(y)\f$. The plots obtained thanks to the
TSPlot class are called \f$\hbox {$_s$}{\cal P}lots\f$.

### Some properties and checks

Besides reproducing the true distribution, \f$\hbox {$_s$}{\cal P}lots\f$ bear
remarkable properties:

- Each \f$x\f$-distribution is properly normalized:
\f[
\sum_{e=1}^{N} {_s{\cal P}}_{\rm n}(y_e)~=~N_{\rm n} ~. \tag{4}
\f]
- For any event:
\f[
\sum_{l=1}^{{\rm N}_{\rm s}} {_s{\cal P}}_l(y_e) ~=~1 ~. \tag{5}
\f]
That is to say that, summing up the \f${\rm N}_{\rm s}\f$
\f$\hbox {$_s$}{\cal P}lots\f$, one recovers the data sample distribution in
\f$x\f$, and summing up the number of events entering in a
\f$\hbox{$_s$}{\cal P}lot\f$ for a given species, one recovers the yield of
the species, as provided by the fit. Property 4 is implemented in the TSPlot
class as a check.
- The sum of the statistical uncertainties per bin
\f[
\sigma[N_{\rm n}\ _s\tilde{\rm M}_{\rm n}(x) {\delta x}]~=~\sqrt{\sum_{e \subset {\delta x}} ({_s{\cal P}}_{\rm n})^2} ~. \tag{6}
\f]
reproduces the statistical uncertainty on the yield \f$N_{\rm n}\f$, as
provided by the fit: \f$\sigma[N_{\rm n}]\equiv\sqrt{\hbox{ V}_{{\rm n}{\rm n}}}\f$.
Because of that, and since the determination of the yields is optimal when
obtained using a Likelihood fit, one can conclude that the
\f$\hbox{$_s$}{\cal P}lot\f$ technique is itself an optimal method to
reconstruct distributions of control variables.

### Different steps followed by TSPlot

1. A maximum Likelihood fit is performed to obtain the yields \f$N_i\f$ of the
   various species. The fit relies on discriminating variables \f$y\f$
   uncorrelated with a control variable \f$x\f$: the latter is therefore
   totally absent from the fit.
2. The weights \f${_s{\cal P}}\f$ are calculated using Eq. (2), where the
   covariance matrix is taken from Minuit. A sketch of this computation is
   given below.
3. Histograms of \f$x\f$ are filled by weighting the events with
   \f${_s{\cal P}}\f$.
4. Error bars per bin are given by Eq. (6).

The \f$\hbox {$_s$}{\cal P}lots\f$ reproduce the true distributions of the
species in the control variable \f$x\f$, within the above defined statistical
uncertainties.
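As an illustration of step 2, the sketch below computes the inverse covariance
matrix via the direct computation of Eq. (3) and then the weights of Eq. (2).
It is an illustration only, not the actual TSPlot implementation: the fitted
yields `N`, the PDF values `pdf[e][k]` \f$={\rm f}_k(y_e)\f$ and the output
array `sweights` are assumed to be provided by the caller.

~~~ {.cpp}
// Sketch of Eqs. (2) and (3); not the actual TSPlot implementation.
// Inputs: nev events, ns species, fitted yields N[ns], pdf[e][k] = f_k(y_e).
#include "TMatrixD.h"

void ComputeSWeights(Int_t nev, Int_t ns, const Double_t *N,
                     Double_t **pdf, Double_t **sweights)
{
   // Eq. (3): direct computation of the inverse covariance matrix.
   TMatrixD invV(ns, ns);
   for (Int_t e = 0; e < nev; e++) {
      Double_t denom = 0;
      for (Int_t k = 0; k < ns; k++)
         denom += N[k] * pdf[e][k];
      for (Int_t n = 0; n < ns; n++)
         for (Int_t j = 0; j < ns; j++)
            invV(n, j) += pdf[e][n] * pdf[e][j] / (denom * denom);
   }
   TMatrixD V(invV);
   V.Invert();                            // V = (V^{-1})^{-1}

   // Eq. (2): weight of event e for species n.
   for (Int_t e = 0; e < nev; e++) {
      Double_t denom = 0;
      for (Int_t k = 0; k < ns; k++)
         denom += N[k] * pdf[e][k];
      for (Int_t n = 0; n < ns; n++) {
         Double_t num = 0;
         for (Int_t j = 0; j < ns; j++)
            num += V(n, j) * pdf[e][j];
         sweights[e][n] = num / denom;
      }
   }
}
~~~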
### Illustrations

To illustrate the technique, one considers an example derived from the
analysis where \f$\hbox {$_s$}{\cal P}lots\f$ were first used (charmless B
decays).

One is dealing with a data sample in which two species are present: the first
is termed signal and the second background. A maximum Likelihood fit is
performed to obtain the two yields \f$N_1\f$ and \f$N_2\f$. The fit relies on
two discriminating variables collectively denoted \f$y\f$ which are chosen
within three possible variables denoted \f${m_{\rm ES}}\f$, \f$\Delta E\f$ and
\f${\cal F}\f$. The variable which is not incorporated in \f$y\f$ is used as
the control variable \f$x\f$. The six distributions of the three variables are
assumed to be the ones depicted in Fig. 1.

\image html splot_pdfmesNIM.png width=800

#### Figure 1:

Distributions of the three discriminating variables available to perform the
Likelihood fit: \f${m_{\rm ES}}\f$, \f$\Delta E\f$, \f${\cal F}\f$. Among the
three variables, two are used to perform the fit while one is kept out of the
fit to serve the purpose of a control variable. The three distributions on the
top (resp. bottom) of the figure correspond to the signal (resp. background).
The unit of the vertical axis is chosen such that it indicates the number of
entries per bin, if one slices the histograms into 25 bins.

A data sample being built through a Monte Carlo simulation based on the
distributions shown in Fig. 1, one obtains the three distributions of Fig. 2.
Whereas the distribution of \f$\Delta E\f$ clearly indicates the presence of
the signal, the distributions of \f${m_{\rm ES}}\f$ and \f${\cal F}\f$ are
less obviously populated by signal.

\image html splot_genfiTOTNIM.png width=800

#### Figure 2:

Distributions of the three discriminating variables for signal plus
background. The three distributions are obtained from a data sample generated
through a Monte Carlo simulation based on the distributions shown in Fig. 1.
The data sample consists of 500 signal events and 5000 background events.

Choosing \f$\Delta E\f$ and \f${\cal F}\f$ as discriminating variables to
determine \f$N_1\f$ and \f$N_2\f$ through a maximum Likelihood fit, one
builds, for the control variable \f${m_{\rm ES}}\f$ which is unknown to the
fit, the two \f$\hbox {$_s$}{\cal P}lots\f$ for signal and background shown in
Fig. 3. One observes that the \f$\hbox{$_s$}{\cal P}lot\f$ for signal
reproduces correctly the PDF even where the latter vanishes, although the
error bars remain sizeable. This results from the almost complete cancellation
between positive and negative weights: the sum of weights is close to zero
while the sum of weights squared is not. Negative weights occur through the
appearance of the covariance matrix, and its negative components, in the
definition of Eq. (2).

A word of caution is in order with respect to the error bars. Whereas their
sum in quadrature is identical to the statistical uncertainties of the yields
determined by the fit, and although they are asymptotically correct, the error
bars should be handled with care for low statistics and/or for too fine
binning. This is because the error bars do not incorporate two known
properties of the PDFs: PDFs are positive definite and can be non-zero in a
given x-bin, even if in the particular data sample at hand, no event is
observed in this bin. The latter limitation is not specific to
\f$\hbox {$_s$}{\cal P}lots\f$; rather, it is always present when one wants to
infer the PDF underlying a histogram, when, for some bins, the number of
entries does not guarantee the applicability of the Gaussian regime.
In such situations, a satisfactory practice is to attach allowed ranges to the
histogram to indicate the upper and lower limits of the PDF value which are
consistent with the actual observation, at a given confidence level.

\image html splot_mass-bkg-sPlot.png width=600

#### Figure 3:

The \f$\hbox {$_s$}{\cal P}lots\f$ (signal on top, background on bottom)
obtained for \f${m_{\rm ES}}\f$ are represented as dots with error bars. They
are obtained from a fit using only information from \f$\Delta E\f$ and
\f${\cal F}\f$.
Choosing \f${m_{\rm ES}}\f$ and \f$\Delta E\f$ as discriminating variables to
determine \f$N_1\f$ and \f$N_2\f$ through a maximum Likelihood fit, one builds,
for the control variable \f${\cal F}\f$ which is unknown to the fit, the two
\f$\hbox {$_s$}{\cal P}lots\f$ for signal and background shown in
Fig. 4.
In the \f$\hbox{$_s$}{\cal P}lot\f$ for signal one observes that error bars are
the largest in the \f$x\f$ regions where the background is the largest.
\image html splot_fisher-bkg-sPlot.png width=600
#### Figure 4:
The \f$\hbox {$_s$}{\cal P}lots\f$ (signal on top, background on bottom) obtained
for \f${\cal F}\f$ are represented as dots with error bars. They are obtained
from a fit using only information from \f${m_{\rm ES}}\f$ and \f$\Delta E\f$.

The results above can be obtained by running the tutorial TestSPlot.C.
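In outline, the calling sequence looks as follows. This sketch follows the
TestSPlot.C tutorial: the tree `datatree` with branches `Mes`, `dE` and `F`,
the event count `nevents` and the initial yield estimates refer to the
tutorial's toy Monte Carlo sample and should be adapted to your own data.

~~~ {.cpp}
// Sketch of a typical TSPlot session, following the TestSPlot.C tutorial.
// 0 control variables, 3 discriminating variables, 2 species.
TSPlot *splot = new TSPlot(0, 3, nevents, 2, datatree);
splot->SetTreeSelection("Mes:dE:F");  // variables to read from the tree
Int_t nev[2] = {500, 5000};           // initial estimates of the two yields
splot->SetInitialNumbersOfSpecies(nev);
splot->MakeSPlot();                   // maximum Likelihood fit + sWeights
splot->FillSWeightsHists(25);         // histograms of the weighted events
~~~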
*/
////////////////////////////////////////////////////////////////////////////////
/// default constructor (used by I/O only)
TSPlot::TSPlot() :
   fTree(0),
   fTreename(0),
   fVarexp(0),
   fSelection(0)
{
   fNx = 0;
   fNy = 0;
   fNevents = 0;
   fNSpecies = 0;
   fNumbersOfEvents = 0;
}
////////////////////////////////////////////////////////////////////////////////
/// Normal TSPlot constructor
/// - nx : number of control variables
/// - ny : number of discriminating variables
/// - ne : total number of events
/// - ns : number of species
/// - tree: input data
TSPlot::TSPlot(Int_t nx, Int_t ny, Int_t ne, Int_t ns, TTree *tree) :
   fTreename(0),
   fVarexp(0),
   fSelection(0)
{
   fNx = nx;
   fNy = ny;
   fNevents = ne;
   fNSpecies = ns;

   fXvar.ResizeTo(fNevents, fNx);
   fYvar.ResizeTo(fNevents, fNy);
   fYpdf.ResizeTo(fNevents, fNSpecies*fNy);
   fSWeights.ResizeTo(fNevents, fNSpecies*(fNy+1));
   fTree = tree;
   fNumbersOfEvents = 0;
}
////////////////////////////////////////////////////////////////////////////////
/// Destructor
TSPlot::~TSPlot()
{
   if (fNumbersOfEvents)
      delete [] fNumbersOfEvents;
   if (!fXvarHists.IsEmpty())
      fXvarHists.Delete();
   if (!fYvarHists.IsEmpty())
      fYvarHists.Delete();
   if (!fYpdfHists.IsEmpty())
      fYpdfHists.Delete();
}
////////////////////////////////////////////////////////////////////////////////
/// To browse the histograms
void TSPlot::Browse(TBrowser *b)
{
   if (!fSWeightsHists.IsEmpty()) {
      TIter next(&fSWeightsHists);
      TH1D* h = 0;
      while ((h = (TH1D*)next()))
         b->Add(h, h->GetName());
   }

   if (!fYpdfHists.IsEmpty()) {
      TIter next(&fYpdfHists);
      TH1D* h = 0;
      while ((h = (TH1D*)next()))
         b->Add(h, h->GetName());
   }

   if (!fYvarHists.IsEmpty()) {
      TIter next(&fYvarHists);
      TH1D* h = 0;
      while ((h = (TH1D*)next()))
         b->Add(h, h->GetName());
   }

   if (!fXvarHists.IsEmpty()) {
      TIter next(&fXvarHists);
      TH1D* h = 0;
      while ((h = (TH1D*)next()))
         b->Add(h, h->GetName());
   }

   b->Add(&fSWeights, "sWeights");
}
////////////////////////////////////////////////////////////////////////////////
/// Set the initial number of events of each species - used
/// as initial estimates in Minuit
void TSPlot::SetInitialNumbersOfSpecies(Int_t *numbers)
{
   if (!fNumbersOfEvents)
      fNumbersOfEvents = new Double_t[fNSpecies];
   for (Int_t i = 0; i < fNSpecies; i++)
      fNumbersOfEvents[i] = numbers[i];
}