// PCAProc.cc // Contact person: Freija Descamps // See PCAProc.hh for more details // ———————————————————————// // G4 stuff #include #include #include // Root stuff #include #include #include #include #include #include #include #include #include // RAT stuff #include #include #include #include #include #include #include #include #include #include #include #include // C++ stuff #include #include #include #include using CLHEP::nm; using std::ostringstream; using std::vector; using std::setw; using std::setfill; namespace RAT { const double tiny = 1.0e-20; PCAProc::PCAProc() : Processor("PCAproc") { // Initialize some variables fDistTolerance = 0.0; fMedian = 0.0; fWidth = 0.0; fWidth2 = 0.0; fSig = 0.0; fVar = 0.0; fGrad = 0.0; fRunCheckOK = 0; fRunCalcTransitTimes = 0; fMeanhhp = 0.0; fMinhhp = 0.0; fMaxhhp = 0.0; fIsValid = 1; // Determines if we try to extract the PCA constants. // Is true by default. fIsFirstRun = 1; // We need to keep track of minimum and maximum occupancy. // This requires the maximum and minimum number of hits per event. fNHitsMax = -9000; fNHitsMin = 9000; // What is the total number of channels? 
All database arrays concerning // PMT channels need to have the same length // Namely: (number of crates)*(number of cards)*(number of channels) fTotPMTs = 19 * 16 * 32; // 9728 // An instance of BitManip needed for managing the status flags fBits = new BitManip(); fOverallMask = 0; // Status of this PCA analysis // Set bit 7 to 1 to flag that end of this PCA analysis was not reached fOverallMask = fBits->SetBit(fOverallMask, 7); fPMTTWMask.resize(fTotPMTs); // Status of the TW calibration for each PMT fPMTGFMask.resize(fTotPMTs); // Status of the GF calibration for each PMT // Initialise PMT status to all bits cleared for (int n = 0; n < fTotPMTs; n++) { fPMTTWMask[n] = 0; fPMTGFMask[n] = 0; } } void PCAProc::BeginOfRun(DS::Run& run) { fPCABits.BeginOfRun(run); // Get the run ID for this run, fRunID = run.GetRunID(); // Keep track of the lowest and highest runnumber if (fRunID < fStartRunID) fStartRunID = fRunID; if (fRunID > fStopRunID) fStopRunID = fRunID; // All DB access needs to happen here, but there is a subtilty: // PCAProc runs over a lot of runs at a time to accumulate all the // statistics for the TELLIE array. So, truth is that there are two types of // DB access: // (1) DB access to get user settings. These only need to be done for first run // (2) DB access to get run-dependent settings, for example fiber number. // Let's get to (1) first, using 'fFirstRun' Boolean to tell us if this is the // first run or not. if (fIsFirstRun) { // First thing is to figure out what type of calibration is requested // Check how many sources of injected light we have. 
// = 1 for laserball // > 1 for TELLIE fiber array // Read in some of the user input from the PCA_GENERATION.ratdb file PCAProc::ReadInUserInput(); fNSource = DS::INVALID; // the number of sources if (fPCASource == 0) fNSource = 1; // There is only one source of light if (fPCASource == 1) { fNSource = fMaxNumFiber; // Resize fiber position and direction fLEDPositionX.resize(fMaxNumFiber); fLEDPositionY.resize(fMaxNumFiber); fLEDPositionZ.resize(fMaxNumFiber); fLEDDirX.resize(fMaxNumFiber); fLEDDirY.resize(fMaxNumFiber); fLEDDirZ.resize(fMaxNumFiber); fLEDWavelength.resize(fMaxNumFiber); } // Resize the charge and time vectors fQHSArray.resize(fTotPMTs); fQHLArray.resize(fTotPMTs); fTArray.resize(fTotPMTs); fQHSMin.resize(fTotPMTs); fQHSMax.resize(fTotPMTs); fQHLMin.resize(fTotPMTs); fQHLMax.resize(fTotPMTs); fTransitTimes.resize(fTotPMTs); fViewingAngles.resize(fTotPMTs); for (int h = 0; h < fTotPMTs; h++) { fQHSMin[h].resize(fNSource); fQHSMax[h].resize(fNSource); fQHLMin[h].resize(fNSource); fQHLMax[h].resize(fNSource); fQHSArray[h].resize(fNSource); fQHLArray[h].resize(fNSource); fTArray[h].resize(fNSource); fTransitTimes[h].resize(fNSource); fViewingAngles[h].resize(fNSource); for (int k = 0; k < fNSource; k++) { fQHSArray[h][k].clear(); fQHLArray[h][k].clear(); fTArray[h][k].clear(); fQHSMin[h][k] = 9999.0; fQHLMin[h][k] = 9999.0; fQHSMax[h][k] = 0.0; fQHLMax[h][k] = 0.0; fTransitTimes[h][k] = 0.0; fViewingAngles[h][k] = 0.0; } } // TW = time walk // The timewalk results come in a 3D array: // [NumPMTs][NumSources][ # interpolation points] if (fDoTWPCA) { fTWinterXQHS.resize(fTotPMTs); fTWinterYQHS.resize(fTotPMTs); fTWinterXErrQHS.resize(fTotPMTs); fTWinterYErrQHS.resize(fTotPMTs); fTWinterI.resize(fTotPMTs); fTWinterR.resize(fTotPMTs); fTWinterIErr.resize(fTotPMTs); fTWinterRErr.resize(fTotPMTs); fTWRms.resize(fTotPMTs); fTQHSLess.resize(fTotPMTs); fTMore.resize(fTotPMTs); for (int h = 0; h < fTotPMTs; h++) { fTWinterXQHS[h].resize(fNSource); 
fTWinterYQHS[h].resize(fNSource); fTWinterXErrQHS[h].resize(fNSource); fTWinterYErrQHS[h].resize(fNSource); fTWinterI[h].resize(fNSource); fTWinterR[h].resize(fNSource); fTWinterIErr[h].resize(fNSource); fTWinterRErr[h].resize(fNSource); fTWRms[h].resize(fNSource); fTQHSLess[h].resize(fNSource); fTMore[h].resize(fNSource); for (int k = 0; k < fNSource; k++) { fTWinterXQHS[h][k].resize(fTotIntPoints); fTWinterYQHS[h][k].resize(fTotIntPoints); fTWinterXErrQHS[h][k].resize(fTotIntPoints); fTWinterYErrQHS[h][k].resize(fTotIntPoints); } } for (int i = 0; i < fTotPMTs; i++) { for (int j = 0; j < fNSource; j++) { for (int k = 0; k < fTotIntPoints; k++) { fTWinterXQHS[i][j][k] = DS::INVALID; fTWinterYQHS[i][j][k] = DS::INVALID; fTWinterXErrQHS[i][j][k] = DS::INVALID; fTWinterYErrQHS[i][j][k] = DS::INVALID; } fTWinterI[i][j] = DS::INVALID; fTWinterR[i][j] = DS::INVALID; fTWinterIErr[i][j] = DS::INVALID; fTWinterRErr[i][j] = DS::INVALID; fTWRms[i][j] = 0; fTMore[i][j] = 0; fTQHSLess[i][j] = 0; } } // The bootstrap method will only be called in the TW and TELLIE case. // In this case, resize the relevant vectors and initialize! 
if (fNSource != 1) { // TELLIE case fLEDCheckRATid.resize(fTotPMTs); for (int i = 0; i < fTotPMTs; i++) { fLEDCheckRATid[i].resize(fNSource); } for (int i = 0; i < fTotPMTs; i++) { for (int j = 0; j < fNSource; j++) { fLEDCheckRATid[i][j] = 0; } } } } // End of if( fDoTWPCA ) // GF = Gain Fit if (fDoGFPCA) { fGFpeaQHS.resize(fTotPMTs); fGFthrQHS.resize(fTotPMTs); fGFhhpQHS.resize(fTotPMTs); fGFpeaQHL.resize(fTotPMTs); fGFthrQHL.resize(fTotPMTs); fGFhhpQHL.resize(fTotPMTs); fGFhhpQHSfit.resize(fTotPMTs); for (int n = 0; n < fTotPMTs; n++) { fGFpeaQHS[n] = -9999; fGFthrQHS[n] = -9999; fGFhhpQHS[n] = -9999; fGFpeaQHL[n] = -9999; fGFthrQHL[n] = -9999; fGFhhpQHL[n] = -9999; } } // End of if( fDoGFPCA ) // Set up the output files int countDB = 0; std::ostringstream runIDString; runIDString << fRunID; // Name output files according to calibration type and run # // Check whether those output files already exist, // if so, increment file name // Inspired by ECAProc.cc fPCALogName = "NullPCA.ratdb"; // Write out to two DB files: // one for the timewalk and one for the gainfit results if (fDoTWPCA) { fPCATWDBName = "NullPCATW.ratdb"; // TW TimwWalk output file} fPCATWDBName = dformat("PCATW_%i_%i.ratdb", fRunID, countDB); while (PCAProc::FileExists(fPCATWDBName)) { ++countDB; fPCATWDBName = dformat("PCATW_%i_%i.ratdb", fRunID, countDB); } } if (fDoGFPCA) { fPCAGFDBName = "NullPCAGF.ratdb"; // GF GainFit DB output file countDB = 0; fPCAGFDBName = dformat("PCAGF_%i_%i.ratdb", fRunID, countDB); while (PCAProc::FileExists(fPCAGFDBName)) { ++countDB; fPCAGFDBName = dformat("PCAGF_%i_%i.ratdb", fRunID, countDB); } } if (fDebugPCA) { int countLog = 0; int countRoot = 0; fPCARootName = "NullPCA.root"; fPCARootName = dformat("PCA_%i_%i.root", fRunID, countRoot); while (PCAProc::FileExists(fPCARootName)) { ++countRoot; fPCARootName = dformat("PCA_%i_%i.root", fRunID, countRoot); } fPCALogName = dformat("PCA_log_%i_%i.ratdb", fRunID, countLog); while (PCAProc::FileExists(fPCALogName)) { 
++countLog; fPCALogName = dformat("PCA_log_%i_%i.ratdb", fRunID, countLog); } } // Let the user know what output to expect if (fVerbosePCA) { detail << "PCAProc::PCAProc(): Writing out the calibration constants to " << fPCATWDBName << ", " << fPCAGFDBName << endl; } if (fDebugPCA) { // Initialise the root output file, this contains the comparison // histograms and the crosscheck histograms. // Only in verbose mode PCAProc::InitPCARoot(fPCARootName); detail << "PCAProc::PCAProc: Writing out some debugging histograms to " << fPCARootName << endl; detail << "PCAProc::PCAProc: Writing out the log to " << fPCALogName << endl; } DBLinkPtr fPmtCalib = DB::Get()->GetLink("PMTCALIB"); if (fDoGFPCA) { // Retrieve the current values from the database for comparison //PMT/LED information fGFpeaQHSdb = fPmtCalib->GetFArrayFromD("QHS_peak"); fGFthrQHSdb = fPmtCalib->GetFArrayFromD("QHS_threshold"); fGFhhpQHSdb = fPmtCalib->GetFArrayFromD("QHS_hhp"); fGFpeaQHLdb = fPmtCalib->GetFArrayFromD("QHL_peak"); fGFthrQHLdb = fPmtCalib->GetFArrayFromD("QHL_threshold"); fGFhhpQHLdb = fPmtCalib->GetFArrayFromD("QHL_hhp"); } // End of if( fDoGFPCA ) // Grab positions and calibration constants from the database DBLinkPtr pmtInfo = DB::Get()->GetLink("PMTINFO"); DBLinkPtr pmtCalLink = DB::Get()->GetLink("PMTCAL"); // Grab the tube type from the data-base fTubeStatus = pmtInfo->GetIArray("pmt_type"); // Grab the tube positions form the data-base fTubePositionX = pmtInfo->GetDArray("x"); fTubePositionY = pmtInfo->GetDArray("y"); fTubePositionZ = pmtInfo->GetDArray("z"); // Grab the tube directions form the data-base fTubeDirX = pmtInfo->GetDArray("u"); fTubeDirY = pmtInfo->GetDArray("v"); fTubeDirZ = pmtInfo->GetDArray("w"); if (fPCASource == 1) { // Grab the LED positions form the data-base // For this, loop over all fiber positions, for (int k = 0; k < fMaxNumFiber; k++) { fLEDPositionX[k] = DS::INVALID; fLEDPositionY[k] = DS::INVALID; fLEDPositionZ[k] = DS::INVALID; fLEDDirX[k] = DS::INVALID; 
fLEDDirY[k] = DS::INVALID; fLEDDirZ[k] = DS::INVALID; // Try to retrieve the position and direction from the database // For this, we need to construct ostringstream oss; oss.str(""); // Take care of zero-padding oss << "FT" << setw(3) << setfill('0') << k; // hard-coded to position 'A' at this time. // FIXME: in future, we need to find out which fibers are used // from a 'installed/used' entry in the database oss << "A"; try { DBLinkPtr ledInfo = DB::Get()->GetLink("FIBRE", oss.str()); fLEDPositionX[k] = ledInfo->GetD("x"); fLEDPositionY[k] = ledInfo->GetD("y"); fLEDPositionZ[k] = ledInfo->GetD("z"); fLEDDirX[k] = ledInfo->GetD("u"); fLEDDirY[k] = ledInfo->GetD("v"); fLEDDirZ[k] = ledInfo->GetD("w"); // FIXME FREIJA: hardcoded peak wavelength for now. fLEDWavelength[k] = 503.0; } catch (DBNotFoundError& e) { detail << "PCAProc(): Fiber " << oss.str() << " does not exist. Skipping." << endl; } } } // Look at the Gain Fit (GF) calibration values that were in place // before this calibration run fMeanhhpdb = fPmtCalib->GetD("mean_hhp"); fMinhhpdb = fPmtCalib->GetD("min_hhp"); fMaxhhpdb = fPmtCalib->GetD("max_hhp"); // We are done with the DB access that only needs to be done // at the start of the analysis fIsFirstRun=0; } // Now follows the DB access that needs to be done for every run. // First, check out what the LED was that was firing for this run // For now, we expect one run per fiber. 
// FIXME FREIJA: treating MC and data differently at this time, waiting for // the long-term solution to be implemented fIsMC = run.GetMCFlag(); if (fNSource > 1) { // LED if (!fIsMC) { /*// This is real TELLIE data so contact the DB json::Value subRunInfo; json::Value channelMapping; json::Value fiberMapping; int channel = 0; // Get the TELLIE mapping from ratdb try { channelMapping = DB::Get()->GetLink("mapping")->GetJSON("channels"); fiberMapping = DB::Get()->GetLink("mapping")->GetJSON("fibres"); } catch (DBNotFoundError& e) { ostringstream dieMessage; dieMessage.str(""); dieMessage << "PCAProc(): No TELLIE mapping DB information found for run " << fRunID << "\n"; Log::Die(dieMessage.str()); } // Get the TELLIE channel number from ratdb try { subRunInfo = DB::Get()->GetLink("tellie_run")->GetJSON("sub_run_info"); } catch (DBNotFoundError& e) { ostringstream dieMessage; dieMessage.str(""); dieMessage << "PCAProc(): No TELLIE run DB information found for run " << fRunID << "\n"; Log::Die(dieMessage.str()); } // Let's do some basic checks // 1) Check if there is any subrun information if (subRunInfo.getArraySize() == 0) { ostringstream dieMessage; dieMessage.str(""); dieMessage << "PCAProc(): No TELLIE subrun information found for run " << fRunID << "\n"; Log::Die(dieMessage.str()); } // 2) Does this run have more than 1 channel? If so, do not use the run // since we have no way of knowing which event is which fiber. 
if (subRunInfo.getArraySize() > 0) { channel = subRunInfo[0]["channel"].getInteger(); for (size_t i = 0; i < subRunInfo.getArraySize(); i++) { if (subRunInfo[i]["channel"].getInteger() != channel) { ostringstream dieMessage; dieMessage.str(""); dieMessage << "PCAProc(): Mutiple TELLIE channels fired for run " << fRunID << "\n"; dieMessage << "PCAProc(): First TELLIE channel is " << channel << "\n"; Log::Die(dieMessage.str()); } } } // 3) Now figure out which fiber this channel was connected to std::vector channelMapAsInt = channelMapping.toVector(); std::vector fiberMapAsstring = fiberMapping.toVector(); // Find the index of the channel in channelMapAsInt int index = find(channelMapAsInt.begin(), channelMapAsInt.end(), channel) - channelMapAsInt.begin(); std::string fibername = fiberMapAsstring[index].substr(2, 3); fLEDnr = atoi(fibername.erase(0, fibername.find_first_not_of('0')).c_str()); */ fLEDnr = 50; // Now set the position an direction for this LEd here so that we do not // do it in the eventloop. fLEDPos.SetX(fLEDPositionX[fLEDnr]); fLEDPos.SetY(fLEDPositionY[fLEDnr]); fLEDPos.SetZ(fLEDPositionZ[fLEDnr]); fLEDDir.SetX(fLEDDirX[fLEDnr]); fLEDDir.SetY(fLEDDirY[fLEDnr]); fLEDDir.SetZ(fLEDDirZ[fLEDnr]); fUsedLEDs.push_back(fLEDnr); info << "PCAProc::BeginOfRun: LED number is " << fLEDnr << newline; } } // Reset the flag that indicates that general run checks were done fRunCheckOK = 0; // Reset the flag that indicates that the transittimes where calculated fRunCalcTransitTimes = 0; } PCAProc::~PCAProc() { // The actual extraction of the PCA calibration constant happens here. // Since we want to do this at the end of the loop over different runs. 
// This should be fixed once an 'EndOfEverything' call exists: FIXME if (fIsValid == 0) { if (fVerbosePCA) { detail << "PCAProc::~PCAProc: No PCA calibration performed" << newline; } // Delete the files that were created if (PCAProc::FileExists(fPCALogName)) remove(fPCALogName.c_str()); if (PCAProc::FileExists(fPCATWDBName)) remove(fPCATWDBName.c_str()); if (PCAProc::FileExists(fPCAGFDBName)) remove(fPCAGFDBName.c_str()); if (PCAProc::FileExists(fPCARootName)) remove(fPCARootName.c_str()); } else { if (fVerbosePCA) { detail << "PCAProc::~PCAProc: Starting the PCA calibration" << newline; } // First make sure that the occupancy is not too high // to do calibration correctly. // Warn if it is but continue anyway... PCAProc::CheckOccupancy(); // If we are in the TELLE case // Extract the individual LED offsets if (fNSource > 1) { int testBoot = PCAProc::Bootstrap(); if (testBoot != 0) { fOverallMask = fBits->SetBit(fOverallMask, 18); // Mark this tube as TW FAIL fOverallMask = fBits->SetBit(fOverallMask, 1); // LED corrections failed PCAProc::Die("", 7, 0, 0); } } // Find out which time-corrected LED has the most hits // in this PMT for calibration. fSourceSelected.resize(fTotPMTs); for (int i = 0; i < fTotPMTs; i++) { if (fNSource > 1) { int maxSize = 0; for (int k = 0; k < fNSource; k++) { int QHSArraySize = fQHSArray[i][k].size(); if ((QHSArraySize > maxSize) && (fTOffsetErrArray[k] < fMaxErrLEDTime)) { maxSize = fQHSArray[i][k].size(); // TELLIE : select the LED that had the most hits // AND has a extracted offset with a small enough error fSourceSelected[i] = k; } } } else { fSourceSelected[i] = 0; // LASER : source is '0' } } int result = 0; if (fDoTWPCA) { // Use the neatly organised data now to extract the timewalk for // every PMT that has over fMinHitsPerChan total hits. 
if (fVerbosePCA) { detail << "PCAProc::~PCAProc: Starting the Time Walk interpolation calculation" << endl; } // Vectors to store local results std::vector interX; std::vector interY; std::vector interXErr; std::vector interYErr; std::vector interFit; // Intercept, gradient and errors define the linear // fit to the high charge tail. interFit.resize(4); interX.resize(fTotIntPoints); interY.resize(fTotIntPoints); interXErr.resize(fTotIntPoints); interYErr.resize(fTotIntPoints); for (int k = 0; k < fTotIntPoints; k++) { interX[k] = -9999.; interY[k] = -9999.; interXErr[k] = -9999.; interYErr[k] = -9999.; } interFit[0] = -9999.; interFit[1] = -9999.; interFit[2] = -9999.; interFit[3] = -9999.; // Loop over all PMTs and extract the timewalk interpolation points for (int i = 0; i < fTotPMTs; i++) { if (fTubeStatus[i] != 1) { // Abnormal or offline tube fPMTTWMask[i] = fBits->SetBit(fPMTTWMask[i], 1); // Mark this tube as TW FAIL fPMTTWMask[i] = fBits->SetBit(fPMTTWMask[i], 0); continue; } if (fQHSArray[i][fSourceSelected[i]].size() == 0) { // Flag this channels as zero occupancy fPMTTWMask[i] = fBits->SetBit(fPMTTWMask[i], 2); // Mark this tube as TW FAIL fPMTTWMask[i] = fBits->SetBit(fPMTTWMask[i], 0); continue; } int QHSArraySize = fQHSArray[i][fSourceSelected[i]].size(); if (QHSArraySize < fMinHitsPerChan) { // Flag this channels as low occupancy fPMTTWMask[i] = fBits->SetBit(fPMTTWMask[i], 3); // Mark this tube as TW FAIL fPMTTWMask[i] = fBits->SetBit(fPMTTWMask[i], 0); continue; } // Catch the strange possibility that the charge and time arrays // are not the same size, no special flag for that exists if ((fQHSArray[i][fSourceSelected[i]].size() != fTArray[i][fSourceSelected[i]].size())) { // Mark this tube as TW FAIL fPMTTWMask[i] = fBits->SetBit(fPMTTWMask[i], 0); continue; } // Channel has not failed yet, give it a shot! 
if (!(fBits->TestBit(fPMTGFMask[i], 0))) { result = PCAProc::WalkFitPoints(i, fSourceSelected[i], interX, interY, interXErr, interYErr, interFit); if (result == 0) { for (int j = 0; j < fTotIntPoints; j++) { fTWinterXQHS[i][fSourceSelected[i]][j] = interX[j]; fTWinterXErrQHS[i][fSourceSelected[i]][j] = interXErr[j]; fTWinterYQHS[i][fSourceSelected[i]][j] = interY[j]; fTWinterYErrQHS[i][fSourceSelected[i]][j] = interYErr[j]; } fTWinterI[i][fSourceSelected[i]] = interFit[0]; fTWinterR[i][fSourceSelected[i]] = interFit[2]; fTWinterIErr[i][fSourceSelected[i]] = interFit[1]; fTWinterRErr[i][fSourceSelected[i]] = interFit[3]; } else { // TW failed.. Let's make sure that the interpolation // variables are set to -9999 for (int j = 0; j < fTotIntPoints; j++) { fTWinterXQHS[i][fSourceSelected[i]][j] = -9999.; fTWinterXErrQHS[i][fSourceSelected[i]][j] = -9999.; fTWinterYQHS[i][fSourceSelected[i]][j] = -9999.; fTWinterYErrQHS[i][fSourceSelected[i]][j] = -9999.; } fTWinterI[i][fSourceSelected[i]] = -9999.; fTWinterR[i][fSourceSelected[i]] = -9999.; fTWinterIErr[i][fSourceSelected[i]] = -9999.; fTWinterRErr[i][fSourceSelected[i]] = -9999.; } } } // End of loop over PMTs // In the TELLIE case, we need to correct the timewalk by // the LED offsets. If this cannot be done, then the TW has // failed for all PMTs that used this LED. if (fNSource > 1) { // fTOffsetArray contains the offset compared to LED 0. // For all LEDs that have an offset that was extracted // with a reasonable error: correct the interpolation points // for the offset of the LED that was used. 
// Loop over all PMTs for (int i = 0; i < fTotPMTs; i++) { for (int j = 0; j < fTotIntPoints; j++) { if (fTWinterXQHS[i][fSourceSelected[i]][j] > -9990.0) { fTWinterYQHS[i][fSourceSelected[i]][j] = fTWinterYQHS[i][fSourceSelected[i]][j] - fTOffsetArray[fSourceSelected[i]]; fTWinterYErrQHS[i][fSourceSelected[i]][j] = TMath::Sqrt (TMath::Power (fTWinterYErrQHS[i][fSourceSelected[i]][j], 2) + TMath::Power (fTOffsetErrArray[fSourceSelected[i]], 2)); } } if (fTWinterI[i][fSourceSelected[i]] > -9990.0) { fTWinterI[i][fSourceSelected[i]] = fTWinterI[i][fSourceSelected[i]] - fTOffsetArray[fSourceSelected[i]]; fTWinterIErr[i][fSourceSelected[i]] = TMath::Sqrt (TMath::Power (fTWinterIErr[i][fSourceSelected[i]], 2) + TMath::Power (fTOffsetErrArray[fSourceSelected[i]], 2)); } } } } if (fDoGFPCA) { if (fVerbosePCA) { detail << "PCAProc::~PCAProc: Starting the Gain Fit for both QHS and QHL" << endl; } // Extract GainFitPoints for all pmts and write to result vectors double peakBinVal = 0.0; double threshVal = 0.0; double hhpVal = 0.0; // Loop over all PMTs for (int i = 0; i < fTotPMTs; i++) { fGFpeaQHL[i] = -9999; fGFthrQHL[i] = -9999; fGFhhpQHL[i] = -9999; // Perform some preliminary checks if (fTubeStatus[i] != 1) { // Abnormal or offline tube fPMTGFMask[i] = fBits->SetBit(fPMTGFMask[i], 1); // Mark this tube as GF FAIL fPMTGFMask[i] = fBits->SetBit(fPMTGFMask[i], 0); continue; } if (fQHSArray[i][fSourceSelected[i]].size() == 0) { // Flag this channels as zero occupancy fPMTGFMask[i] = fBits->SetBit(fPMTGFMask[i], 2); // Mark this tube as GF FAIL fPMTGFMask[i] = fBits->SetBit(fPMTGFMask[i], 0); continue; } int QHSArraySize = fQHSArray[i][fSourceSelected[i]].size(); if (QHSArraySize < fMinHitsPerChan) { // Flag this channels as low TW occupancy fPMTGFMask[i] = fBits->SetBit(fPMTGFMask[i], 3); // Mark this tube as GF FAIL fPMTGFMask[i] = fBits->SetBit(fPMTGFMask[i], 0); continue; } // Catch the strange possibility that the charge and time arrays // are not the same size, no 
special flag for that exists if ((fQHSArray[i][fSourceSelected[i]].size() != fTArray[i][fSourceSelected[i]].size()) || (fQHLArray[i][fSourceSelected[i]].size() != fTArray[i][fSourceSelected[i]].size())) { // Mark this tube as GF FAIL fPMTGFMask[i] = fBits->SetBit(fPMTGFMask[i], 0); continue; } if (!(fBits->TestBit(fPMTGFMask[i], 0))) { // QHS result = PCAProc::GainFitPoints(i, 0, peakBinVal, threshVal, hhpVal); if (result == 0) { fGFpeaQHS[i] = peakBinVal; fGFthrQHS[i] = threshVal; fGFhhpQHS[i] = hhpVal; } else { fGFpeaQHS[i] = -9999.; fGFthrQHS[i] = -9999.; fGFhhpQHS[i] = -9999.; } // QHL result = PCAProc::GainFitPoints(i, 1, peakBinVal, threshVal, hhpVal); if (result == 0) { fGFpeaQHL[i] = peakBinVal; fGFthrQHL[i] = threshVal; fGFhhpQHL[i] = hhpVal; } else { fGFpeaQHL[i] = -9999.; fGFthrQHL[i] = -9999.; fGFhhpQHL[i] = -9999.; } } } // Calculate mean, min and max hhp (for QHS) fMinhhp = 1e9; fMaxhhp = 0.0; fMeanhhp = 0.0; int numberOfSuccess = 0; int GFhhpQHSSize = fGFhhpQHS.size(); for (int t = 0; t < GFhhpQHSSize; t++) { if (fGFhhpQHS[t] > -9000) { if (fGFhhpQHS[t] > fMaxhhp) { fMaxhhp = fGFhhpQHS[t]; } if (fGFhhpQHS[t] < fMinhhp) { fMinhhp = fGFhhpQHS[t]; } fMeanhhp = fMeanhhp + fGFhhpQHS[t]; numberOfSuccess++; } } if (numberOfSuccess > 0) { fMeanhhp = fMeanhhp / static_cast(numberOfSuccess); } else { fMeanhhp = DS::INVALID; } } // Wrapping up // Full run quality checks, writes out general PCA status to PCA logfile PCAProc::CheckPCARun(); if (fDebugPCA) { // Write histograms to rootfile and close it PCAProc::ClosePCARoot(); } // Create the database files with the results PCAProc::PCATitles(); } // Clean up fPCARootFile->Delete(); } Processor::Result PCAProc::DSEvent(DS::Run&, DS::Entry& ds) { for (size_t iEV = 0; iEV < ds.GetEVCount(); iEV++) Event(ds, ds.GetEV(iEV)); return OK; } Processor::Result PCAProc::Event(DS::Entry& ds, DS::EV& ev) { // Essentially all this is going to do is loop through all of // the PMTs of the event and compile the hit time and 
charge // for each of them in all events. // The magic will happen in the destructor (Yikes!) for now. if (ev.GetTrigType() != fTrigType) return OK; // Require EXTASYNC trigger TVector3 PMTDir; TVector3 PMTPos; TVector3 LEDPos; TVector3 injectedPos; // lightpath instance is needed to retrieve the transittime // for the refracted path const DS::Calib& calib = ds.GetCalib(); DU::LightPathCalculator lightPath = DU::Utility::Get()->GetLightPathCalculator(); const DU::ChanHWStatus& CHS = DU::Utility::Get()->GetChanHWStatus(); // Some checks to be sure that this is an event that we want to use: // Move these checks to the before-run-stuff once the calibration info // goes into the database. For now, check if the runcheck was done already // to avoid doing these run-level checks for every event if (fRunCheckOK != 1) { if (fNSource == 1) { // Laserball double laserballPosCheck = 0.0; double injectedLambda = 0.0; // 1) Make sure that this is a laserball run // Check the name of the source FIXME PHIL if (calib.GetSourceName().compare("Laserball") != 0) { fIsValid = 0; // This is not a PCA laserball run, warn and abort PCAProc::Die("", 3, 0, 0); } // 2) Check the position of the laserball, if it is not central, then // die... no PCA calibrations with non-central laserball! 
laserballPosCheck = TMath::Sqrt(TMath::Power(injectedPos.X(), 2) + TMath::Power(injectedPos.Y(), 2) + TMath::Power(injectedPos.Z(), 2)); if (laserballPosCheck > fMaxDistFromCenter) { fIsValid = 0; // This is not a central PCA laserball run, warn and abort PCAProc::Die("", 4, 0, laserballPosCheck); } // 3) Check the wavelength of the laserball injectedLambda = calib.GetMode(); // wavelength if (!(injectedLambda == fLaserballWL)) { fIsValid = 0; // This is not a 505nm laserball run, warn and abort PCAProc::Die("", 5, fLaserballWL, injectedLambda); } } if ((fNSource > 1) && fIsMC) { if (calib.GetSourceName().substr(0, 2).compare("FT") != 0) { fIsValid = 0; // This is not a PCA LED run, warn and abort PCAProc::Die("", 6, 0, 0); } // Also figure out what the lednumber, position and direction is fLEDnr = calib.GetID(); fLEDPos.SetX(fLEDPositionX[fLEDnr]); fLEDPos.SetY(fLEDPositionY[fLEDnr]); fLEDPos.SetZ(fLEDPositionZ[fLEDnr]); fLEDDir.SetX(fLEDDirX[fLEDnr]); fLEDDir.SetY(fLEDDirY[fLEDnr]); fLEDDir.SetZ(fLEDDirZ[fLEDnr]); fUsedLEDs.push_back(fLEDnr); std::cout << "LED number is " << fLEDnr << std::endl; } fRunCheckOK = 1; } // Calculating the transittime from the source position to the PMTs is slow, // therefore, do it at the start of the run. For now, Check if we already // calculated all the transitimes before continuing // Also calculate all the viewing angles here. 
if (fRunCalcTransitTimes != 1) { double distInInnerAV = 0.0; double distInAV = 0.0; double distInWater = 0.0; for (int i = 0; i < fTotPMTs; i++) { // First, set the PMT position PMTPos.SetX(fTubePositionX[i]); PMTPos.SetY(fTubePositionY[i]); PMTPos.SetZ(fTubePositionZ[i]); // Set the direction of this PMT PMTDir.SetX(fTubeDirX[i]); PMTDir.SetY(fTubeDirY[i]); PMTDir.SetZ(fTubeDirZ[i]); if (fNSource == 1) { // LASER // Get the angle TVector3 straightPath = injectedPos - PMTPos; fViewingAngles[i][0] = TMath::RadToDeg() * TMath::ACos(-1 * straightPath.Dot(PMTDir) / (straightPath.Mag() * PMTDir.Mag())); // FIXME FREIJA: add position from DB file here when available injectedPos = calib.GetPos(); double lambda = calib.GetMode(); lightPath.CalcByPosition(injectedPos, PMTPos, RAT::util::WavelengthToEnergy(lambda * CLHEP::nm), 30.0); distInInnerAV = lightPath.GetDistInInnerAV(); distInAV = lightPath.GetDistInAV(); distInWater = lightPath.GetDistInWater(); fTransitTimes[i][0] = DU::Utility::Get()->GetGroupVelocity() .CalcByDistance(distInInnerAV, distInAV, distInWater, (h_Planck * c_light / (lambda * CLHEP::nm))); } else { // TELLIE if (fLEDPos.X() != DS::INVALID) { // Get the angle TVector3 straightPath = PMTPos - fLEDPos; fViewingAngles[i][fLEDnr] = TMath::RadToDeg() * TMath::ACos(straightPath.Dot(fLEDDir) / (straightPath.Mag() * fLEDDir.Mag())); double lambda = fLEDWavelength[fLEDnr]; lightPath.CalcByPosition(fLEDPos, PMTPos, RAT::util::WavelengthToEnergy(lambda * CLHEP::nm), 30.0); distInInnerAV = lightPath.GetDistInInnerAV(); distInAV = lightPath.GetDistInAV(); distInWater = lightPath.GetDistInWater(); fTransitTimes[i][fLEDnr] = DU::Utility::Get()->GetGroupVelocity() .CalcByDistance(distInInnerAV, distInAV, distInWater, RAT::util::WavelengthToEnergy(lambda * 1.0e-6)); } else { fTransitTimes[i][fLEDnr] = DS::INVALID; } } } // We have calculated the transittimes, // so no need to do this anymore for this run. 
fRunCalcTransitTimes = 1; } // Find out how many PMTs were hit in this event // Store this information to figure out occupancy later int numberCalPMTs = ev.GetCalPMTs().GetCount(); if (numberCalPMTs < fNHitsMin) fNHitsMin = numberCalPMTs; if (numberCalPMTs > fNHitsMax) fNHitsMax = numberCalPMTs; // Loop over all these hit PMTs and extract time and QHS QHL for each hit for (int involvPMT = 0; involvPMT < numberCalPMTs; involvPMT++) { const DS::PMTCal& pmtCal = ev.GetCalPMTs().GetPMT(involvPMT); int PMTID = pmtCal.GetID(); // Set the position of this PMT PMTPos.SetX(fTubePositionX[PMTID]); PMTPos.SetY(fTubePositionY[PMTID]); PMTPos.SetZ(fTubePositionZ[PMTID]); // Do some checks before we decide that this is a hit we want to use: // 0) Was the channel online? if (!CHS.IsTubeOnline(PMTID)) continue; // 1) Is this a normal PMT? if (fTubeStatus[PMTID] != 1) continue; // 2) Check the ECA calibration status word // Use a costum ECA mask, just excluding the hits that were set to -9998 if (fBits->AndWords(pmtCal.GetStatus().GetULong64_t(0), 4194310)) { // Skip over this PMT hit since something was // fishy with the ECA calibration continue; } // 3) Viewing angle // Direction of the path of light from source to PMT: // For LASER if ((fNSource == 1) && (std::abs(fViewingAngles[PMTID][0]) > 180)) { // This should not catch a single hit at this point continue; } // For TELLIE - this should be < fSelectAngle (as defined by user) // in order to get rid of the reflections if ((fNSource > 1) && (std::abs(fViewingAngles[PMTID][fLEDnr]) > fSelectAngle)) { // This will eliminate a lot of hits continue; } // 4) For TELLIE - we need to make sure we have the position/direction for // this fiber. 
This will eliminate all hits from this fiber if ((fNSource > 1) && (fLEDPositionX[fLEDnr] == DS::INVALID)) continue; // Now that we are sure that this is a hit we want to use, // fill in the vectors // Get the ECA calibrated time and charges for this PMT hit double PMTTime = pmtCal.GetTime(); double qhs = pmtCal.GetQHS(); double qhl = pmtCal.GetQHL(); if (fNSource == 1) { // Keep track of min and max QHS and QHL, will come in handy later! if (qhs < fQHSMin[PMTID][0]) fQHSMin[PMTID][0] = qhs; if (qhs > fQHSMax[PMTID][0]) fQHSMax[PMTID][0] = qhs; if (qhl < fQHLMin[PMTID][0]) fQHLMin[PMTID][0] = qhl; if (qhl > fQHLMax[PMTID][0]) fQHLMax[PMTID][0] = qhl; // Fill the QHS and QHL vectors for this PMT fQHSArray[PMTID][0].push_back(qhs); fQHLArray[PMTID][0].push_back(qhl); // Fill the time vector, corrected for the transittime fTArray[PMTID][0].push_back(PMTTime - fTransitTimes[PMTID][0]); } else { // Keep track of min and max QHS and QHL, will come in handy later! if (qhs < fQHSMin[PMTID][fLEDnr]) fQHSMin[PMTID][fLEDnr] = qhs; if (qhs > fQHSMax[PMTID][fLEDnr]) fQHSMax[PMTID][fLEDnr] = qhs; if (qhl < fQHLMin[PMTID][fLEDnr]) fQHLMin[PMTID][fLEDnr] = qhl; if (qhl > fQHLMax[PMTID][fLEDnr]) fQHLMax[PMTID][fLEDnr] = qhl; // Fill the QHS and QHL vectors for this PMT fQHSArray[PMTID][fLEDnr].push_back(qhs); fQHLArray[PMTID][fLEDnr].push_back(qhl); // Fill the time vector, corrected for the transittime fTArray[PMTID][fLEDnr].push_back(PMTTime - fTransitTimes[PMTID][fLEDnr]); // Keep track of how many hits by each LED in each PMT fLEDCheckRATid[PMTID][fLEDnr]++; } } return OK; } int PCAProc::WalkFitPoints(const int PMTNum, const int LEDNum, std::vector& interX, std::vector& interY, std::vector& interXErr, std::vector& interYErr, std::vector& interFit) { // This one determines the time walk in each channel. 
// idea is that the first solution will be done with interpolations points // as in SNO better than that would be a functional form fit to each walk // spectrum from the PMT even better than that would be a predictive // functional form which takes the threshold and predicts the walk from that // Get the total number of hits for this channel int vectorSize = fQHSArray[PMTNum][LEDNum].size(); double rmsTotal = 0.0; double QHSMin = 1e9, QHSMax = 0.0; double maxWinInt = 0.0; double timeWindowLow = 0.0; double timeWindowHigh = 0.0; double chargeWindowLow = 0.0; double chargeWindowHigh = 0.0; double newHistRangeSum = 0.0; double histRangeSum = 0.0; // Sliding window size in ns used to find the peak, // see for example discussion // in J. Cameron thesis page 60. We might need to change // this in scintillator. double timeWindow = 8.5; vector nPointsInThisBin; nPointsInThisBin.resize(fTotIntPoints); // These might come in handy later... // vector binGradient; // binGradient.resize(fTotIntPoints); vector binVariance; binVariance.resize(fTotIntPoints); vector binRms; binRms.resize(fTotIntPoints); double pointGap = 0.0; int newUpIndex = 0; int newDownIndex = 0; int upIndex = 0; int downIndex = 0; int window_charge_max_bin = 0; int maxWinIntBin = 0; // Ensure the 0.5ns resolution required int timeBins = static_cast((fTimeMaxVal - fTimeMinVal) * 2); int timeWindowBins = static_cast(timeWindow * 2); int bin = 0; // vectors for the MultiCalc function vector > YValsVector; YValsVector.resize(fTotIntPoints); vector > XValsVector; XValsVector.resize(fTotIntPoints); TH1D *tempTimeHist; TH1D *tempChargeHist; int nvar = 0; float var = 0.0; // make sure all of these are set to default for (int k = 0; k < fTotIntPoints; k++) { nPointsInThisBin[k] = 0; } interFit[0] = -9999.; interFit[1] = -9999.; interFit[2] = -9999.; interFit[3] = -9999.; // let's also just be sure these are empty... 
interX.clear(); interXErr.clear(); interY.clear(); interYErr.clear(); // find the max and min of the QHS values, // luckily we saved them while in the eventloop QHSMax = fQHSMax[PMTNum][LEDNum]; QHSMin = fQHSMin[PMTNum][LEDNum]; // Set up the necessary temporary histograms tempTimeHist = new TH1D("tempTimeHist", "Time Histogram", timeBins, fTimeMinVal, fTimeMaxVal); // Do not underestimate the importance of binning here! tempChargeHist = new TH1D("tempChargeHist", "Charge Histogram", (QHSMax - QHSMin), QHSMin, QHSMax); // Fill time histograms for (int j = 0; j < vectorSize; j++) { tempTimeHist->Fill(fTArray[PMTNum][LEDNum].at(j)); } // find the bin/time of the prompt peak in the time histogram. // This is done by finding the sliding 8.5ns section that // has the largest integral. Each bin is 0.5ns, so 8.5ns = 17 bins. for (int winPlace = 1; winPlace < (timeBins - timeWindowBins); winPlace++) { if (tempTimeHist->Integral(winPlace, winPlace + timeWindowBins) > maxWinInt) { // the maximum winning bin is in fact the bin that is // in the middle of the timewindow maxWinIntBin = winPlace + ((timeWindowBins + 1) / 2); maxWinInt = tempTimeHist->Integral(winPlace, winPlace + timeWindowBins); } // Quickly tried fitting a Gaussian here. // It did not improve precision due to the big spread in peak shapes. 
} // The data used in the analysis will be within 10ns of the peak time // We might need to go to a smaller timewindow when in scintillator phase timeWindowLow = tempTimeHist->GetBinCenter(maxWinIntBin) - 10.0; timeWindowHigh = tempTimeHist->GetBinCenter(maxWinIntBin) + 10.0; for (int j = 0; j < vectorSize; j++) { // Fill a temporary charge histogram if ((fTArray[PMTNum][LEDNum].at(j) > timeWindowLow) && (fTArray[PMTNum][LEDNum].at(j) < timeWindowHigh)) { tempChargeHist->Fill(fQHSArray[PMTNum][LEDNum].at(j)); } // keep track of the number of hits that arrive after the prompt peak if (fTArray[PMTNum][LEDNum].at(j) > timeWindowHigh) { fTMore[PMTNum][LEDNum]++; } } // now that the prompt peak in the time spectrum is found, // figure out the smallest charge range containing 70% of the data // Get the bin number of the maximum window_charge_max_bin = tempChargeHist->GetMaximumBin(); histRangeSum += tempChargeHist->GetBinContent(window_charge_max_bin); upIndex = window_charge_max_bin; downIndex = window_charge_max_bin; double prevHistRangeSum = 0.0; while (histRangeSum < 0.7 * tempChargeHist->Integral() && prevHistRangeSum != histRangeSum) { prevHistRangeSum = histRangeSum; if (((tempChargeHist->GetBinContent(upIndex + 1) >= tempChargeHist->GetBinContent(downIndex - 1)) || (downIndex == 1)) && (upIndex != (QHSMax - QHSMin))) { upIndex++; histRangeSum += tempChargeHist->GetBinContent(upIndex); } else { downIndex--; histRangeSum += tempChargeHist->GetBinContent(downIndex); } } // The range downIndex to upIndex now contains ~70% of the data // Drop anything more than 50 cap above upIndex. 
for (int i = 0; i < tempChargeHist->GetNbinsX(); i++) { if (tempChargeHist->GetBinCenter(i) > tempChargeHist->GetBinCenter(upIndex) + 50) { tempChargeHist->SetBinContent(i, 0); } } // Go through the trouble of finding the range that contains // 90% of the data again newHistRangeSum = 0.0; newHistRangeSum += tempChargeHist->GetBinContent(window_charge_max_bin); newUpIndex = window_charge_max_bin; newDownIndex = window_charge_max_bin; // This is a while loop.. hmm.. it could be infinite.. // Set up a counter tp catch this int infiniteLoopCheck = 0; double lastNewHistRangeSum = 0.; while (newHistRangeSum < 0.9 * tempChargeHist->Integral() && infiniteLoopCheck < 10) { if (newHistRangeSum == lastNewHistRangeSum) infiniteLoopCheck++; lastNewHistRangeSum = newHistRangeSum; if (((tempChargeHist->GetBinContent(newUpIndex + 1) >= tempChargeHist->GetBinContent(newDownIndex - 1)) || (newDownIndex == 1)) && (newUpIndex != (QHSMax - QHSMin))) { newUpIndex++; newHistRangeSum += tempChargeHist->GetBinContent(newUpIndex); } else { newDownIndex--; newHistRangeSum += tempChargeHist->GetBinContent(newDownIndex); } } chargeWindowLow = tempChargeHist->GetBinCenter(newDownIndex); chargeWindowHigh = tempChargeHist->GetBinCenter(newUpIndex); // determine for each bin: // the x value: the average charge of the bin // the y value: the median of the values in that bin // The first fPeakIntPoints bins of the peak are small bins, // the width is pointGap, the next two bins have a width of pointGap*2 // The fTailIntPoints bins of the tail are large bins, // width is defined by the tailgap array // the sections in (newUpIndex, newDownIndex) pointGap = (chargeWindowHigh - chargeWindowLow) / static_cast(fPeakIntPoints + 2 * fDropIntPoints); bin = 0; for (int k = 0; k < vectorSize; k++) { // Keep track of how many hits are outside the // [chargeWindowLow,chargeWindowHigh] region // These values will be used to determine FOut and FLate fractions if ((fQHSArray[PMTNum][LEDNum].at(k) < chargeWindowLow) 
|| (fTArray[PMTNum][LEDNum].at(k) < timeWindowLow)) { fTQHSLess[PMTNum][LEDNum]++; } // Make sure the data is in the prompt time peak if ((fTArray[PMTNum][LEDNum].at(k) > timeWindowLow) && (fTArray[PMTNum][LEDNum].at(k) < timeWindowHigh)) { // The peak is divided in (fPeakIntPoints+2*fDropIntPoints) sections // The first fPeakIntPoints are kept, // the last ones are merged to form 2 sections if ((fQHSArray[PMTNum][LEDNum].at(k) > chargeWindowLow) && (fQHSArray[PMTNum][LEDNum].at(k) < chargeWindowHigh)) { bin = static_cast((fQHSArray[PMTNum][LEDNum].at(k) - chargeWindowLow) / pointGap); // For the first fPeakIntPoints sections if (bin < fPeakIntPoints) { YValsVector[bin].push_back(fTArray[PMTNum][LEDNum].at(k)); XValsVector[bin].push_back(fQHSArray[PMTNum][LEDNum].at(k)); nPointsInThisBin[bin]++; } else if (bin < (fPeakIntPoints + 2 * fDropIntPoints)) { int dropbin = static_cast((bin - (fPeakIntPoints - 1)) / 2.0) + fPeakIntPoints; YValsVector[dropbin].push_back(fTArray[PMTNum][LEDNum] .at(k)); XValsVector[dropbin].push_back(fQHSArray[PMTNum][LEDNum] .at(k)); nPointsInThisBin[dropbin]++; } } // The tail is the section above chargeWindowHigh. We use the // fTailgap vector to define #fTailIntPoints more regions. for (int h = 0; h < fTailIntPoints; h++) { if ((fQHSArray[PMTNum][LEDNum].at(k) > chargeWindowHigh + fTailGap[h]) && (fQHSArray[PMTNum][LEDNum].at(k) < (chargeWindowHigh + fTailGap[h + 1]))) { YValsVector[fPeakIntPoints + fDropIntPoints + h] .push_back(fTArray[PMTNum][LEDNum].at(k)); XValsVector[fPeakIntPoints + fDropIntPoints + h] .push_back(fQHSArray[PMTNum][LEDNum].at(k)); nPointsInThisBin[fPeakIntPoints + fDropIntPoints + h]++; } } } } // The data is now divided in fTotIntPoints regions. // Now we can extract the interpolation point for each region. 
for (int j = 0; j < fTotIntPoints; j++) { interXErr[j] = 0.0; // Require at least fMinPointsPerBin points in all bins if (nPointsInThisBin[j] > fMinPointsPerBin) { // First, determine the x value as the average charge of the regions double averageCharge = 0.0; for (int i = 0; i < nPointsInThisBin[j]; i++) { averageCharge += XValsVector[j][i]; } averageCharge /= nPointsInThisBin[j]; interX[j] = averageCharge; // Second, determine the y value (time) by calculating the median PCAProc::MultiCalc(2, YValsVector[j], XValsVector[j]); interY[j] = fMedian; interYErr[j] = fSig; // binGradient[j]=fGrad; binVariance[j] = fVar; if (binVariance[j] > 0) { binRms[j] = TMath::Sqrt(binVariance[j]); } interYErr[j] = binRms[j]; } else { interY[j] = DS::INVALID; interYErr[j] = DS::INVALID; interX[j] = DS::INVALID; interXErr[j] = DS::INVALID; } } // Implement estimate of global weighted rms, as done in SNO. // Knuths online variance calculation technique is used here, // see "Note on a Method for Calculating Corrected Sums of // Squares and Products" by B. P. Welford in Technometrics , // Vol. 4, No. 3 (Aug., 1962), pp. 419-420 to be convinced. 
for (int ibin = 1; ibin < fTotIntPoints; ibin++) { if (binRms[ibin] > 0) { nvar = nvar + nPointsInThisBin[ibin]; var = var + static_cast(nPointsInThisBin[ibin]) * (TMath::Power(binRms[ibin], 2) - var) / static_cast(nvar); } } rmsTotal = -9999.; if ((nvar > 1) && (var > 0)) { rmsTotal = TMath::Sqrt(var); } fTWRms[PMTNum][LEDNum] = rmsTotal; // In order to do the high-charge tail fit, only the last // interpolation point can be -9999 (missing) if ((interX[fTotIntPoints - 2] > -9000) && (interX[fTotIntPoints - 3] > -9000) && (interX[fTotIntPoints - 4] > -9000)) { // Need to fill these arrays to feed to TGraphErrors double *xarray = new double[fTotIntPoints]; double *xerrarray = new double[fTotIntPoints]; double *yarray = new double[fTotIntPoints]; double *yerrarray = new double[fTotIntPoints]; for (int i = 0; i < fTotIntPoints; i++) { xarray[i] = interX[i]; yarray[i] = interY[i]; xerrarray[i] = interXErr[i]; yerrarray[i] = interYErr[i]; } // Use TGraphErrors for the linear fit TGraphErrors *test = new TGraphErrors(fTotIntPoints, xarray, yarray, xerrarray, yerrarray); TF1 *f1; // If we have all interpolation points in the tail, fit the last 4 if (interX[fTotIntPoints - 1] > -9000) { f1 = new TF1("f1", "pol1", xarray[fTotIntPoints - 4] - 10, xarray[fTotIntPoints - 1] + 10); } else { f1 = new TF1("f1", "pol1", xarray[fTotIntPoints - 4] - 10, xarray[fTotIntPoints - 2] + 10); } // Here is where we perform the actual linear fit test->Fit("f1", "Rq"); interFit[0] = f1->GetParameter(0); // intercept interFit[1] = f1->GetParError(0); // intercept error interFit[2] = f1->GetParameter(1); // gradient interFit[3] = f1->GetParError(1); // gradient error // Clean up delete[] xarray; delete[] yarray; delete[] xerrarray; delete[] yerrarray; delete test; delete f1; xarray = NULL; yarray = NULL; xerrarray = NULL; yerrarray = NULL; test = NULL; f1 = NULL; } else { // High Q tail was not fitted: flag this PMT as such! 
fPMTTWMask[PMTNum] = fBits->SetBit(fPMTTWMask[PMTNum], 9); // Set the fit parameters to undefined interFit[0] = -9999.; interFit[1] = -9999.; interFit[2] = -9999.; interFit[3] = -9999.; } // Clean up tempTimeHist->Delete(); tempChargeHist->Delete(); tempTimeHist = NULL; tempChargeHist = NULL; return 0; } int PCAProc::GainFitPoints(int PMTNum, int chargeFlag, double& peakBinVal, double& threshVal, double& hhpVal) { // Set up the gain calculation as it was done in SNO this can be // modified later when/if we think up something cooler. // Make sure these are 0 peakBinVal = 0.0; threshVal = 0.0; hhpVal = 0.0; int peakBin = 0; int spanbins = 0; int window = 0; int vectorSize = 0; int QMax = 0; int QMin = 1e9; int nhitsInWindow = 0; int peakWidth = 0; double maxBinSum = 0.0; double binSum = 0.0; double peakBin_height_val = 0.0; double lowhp_peak_avg = 0.0; std::vector localChargeVector; // First fill the ChargeVector with the QHS or QHL values // Combine all sources to take advantage of maximum statistics if (chargeFlag == 0) { for (int g = 0; g < fNSource; g++) { localChargeVector.insert(localChargeVector.end(), fQHSArray[PMTNum][g].begin(), fQHSArray[PMTNum][g].end()); if (fQHSMax[PMTNum][g] > QMax) { QMax = fQHSMax[PMTNum][g]; } if (fQHSMin[PMTNum][g] < QMin) { QMin = fQHSMin[PMTNum][g]; } } } else if (chargeFlag == 1) { for (int g = 0; g < fNSource; g++) { localChargeVector.insert(localChargeVector.end(), fQHLArray[PMTNum][g].begin(), fQHLArray[PMTNum][g].end()); if (fQHLMax[PMTNum][g] > QMax) QMax = fQHLMax[PMTNum][g]; if (fQHLMin[PMTNum][g] < QMin) QMin = fQHLMin[PMTNum][g]; } } else { return -1; } // Set up the temporary charge histogram, for now with 1 bin per ADC bin vectorSize = localChargeVector.size(); TH1D *tempChargeHist = new TH1D("tempChargeHist", "Charge Histogram", (QMax - QMin), QMin, QMax); for (int j = 0; j < vectorSize; j++) { tempChargeHist->Fill(localChargeVector.at(j)); } // First identify the "span" as in J.Cameron's thesis: the 100 count wide 
// sliding window that contains the most hits for (int j = 0; j < tempChargeHist->GetNbinsX() - 100; j++) { binSum = 0.0; for (int k = 0; k < 100; k++) { binSum += tempChargeHist->GetBinContent(j + k); } if (binSum > maxBinSum) { maxBinSum = binSum; peakBin = j; nhitsInWindow = tempChargeHist->Integral(j, j + 100); } } // This first window needs to contain at least 100 hits, if not: // flag the channel accordingly if (nhitsInWindow < 100) { // reminder: chargeFlag=0 for QHS and =1 for QHL fPMTGFMask[PMTNum] = fBits->SetBit(fPMTGFMask[PMTNum], 4 + chargeFlag); } // Slide windows of increasing sizes until we found the smallest one that // contains 65% of the data. // Start with a width of 20 bins for (int width = 20; width < 100; width++) { for (int j = peakBin; j < peakBin + 100 - width; j++) { binSum = 0.0; for (int k = 0; k < width; k++) { binSum += tempChargeHist->GetBinContent(j + k); } if (binSum > 0.65 * maxBinSum) { peakWidth = width - 1; width = 100; peakBin = j; break; } } } spanbins = peakWidth; int peakbinwidth = static_cast(spanbins * 0.4); // 0.4: as defined in J. Cameron's thesis // 1) Find peak // Peak is found by using sliding window technique, // the width of the window is peakbinwidth peakBin = 0; maxBinSum = 0.0; int maxbinnumber = tempChargeHist->GetNbinsX() - peakbinwidth; for (int j = 0; j < maxbinnumber; j++) { binSum = 0.0; for (int k = 0; k < peakbinwidth; k++) { binSum += tempChargeHist->GetBinContent(j + k); } if (binSum > maxBinSum) { maxBinSum = binSum; peakBin = j; } } int peakbinplace = peakBin + static_cast(peakbinwidth * 0.5); // Get the peak position peakBinVal = tempChargeHist->GetBinCenter(peakBin + (peakbinwidth * 0.5)); peakBin_height_val = maxBinSum / static_cast(peakbinwidth); // Special case for peaks that are at low cap: make sure we are not // fitting a noise peak. 
if (peakBinVal < 5) { // Use TSpectrum to find the peak candidates TSpectrum *s = new TSpectrum(5); int nfound = s->Search(tempChargeHist, 5, "new"); #if ROOT_VERSION_CODE >= ROOT_VERSION(6, 0, 0) double *xpeaks = s->GetPositionX(); #else float *xpeaks = s->GetPositionX(); #endif if ((nfound > 1) && (xpeaks[1] > peakBinVal + 5)) { peakBin = 0; maxBinSum = 0.0; for (int j = tempChargeHist->FindBin(xpeaks[0]) + 5; j < tempChargeHist->GetNbinsX() - peakbinwidth; j++) { binSum = 0.0; for (int k = 0; k < peakbinwidth; k++) { binSum += tempChargeHist->GetBinContent(j + k); } if (binSum > maxBinSum) { maxBinSum = binSum; peakBin = j; } } peakbinplace = peakBin + static_cast(peakbinwidth * 0.5); peakBinVal = tempChargeHist->GetBinCenter(peakBin + (peakbinwidth * 0.5)); peakBin_height_val = maxBinSum / static_cast(peakbinwidth); fPMTGFMask[PMTNum] = fBits->SetBit(fPMTGFMask[PMTNum], 22); } s->Delete(); s = NULL; } // 2) Find HHP for (int k = peakBin; k < tempChargeHist->GetNbinsX() - peakbinwidth; k++) { // Find when the sum over the peakbinwidth bins drops // To 1/2 of the sum over peakbinwidth bins of the max window binSum = 0.0; for (int m = 0; m < peakbinwidth; m++) { binSum += tempChargeHist->GetBinContent(k + m); } if (binSum < (0.5 * maxBinSum)) { // The high half point hhpVal = tempChargeHist->GetBinCenter(k + (peakbinwidth * 0.5)); break; } } // 3) Use the SNO method to find the threshold int new_avg_sum = 0; if (spanbins < 40) { window = 1; } else if (spanbins < 80) { window = 2; } else { window = 3; } for (int k = static_cast(peakbinplace); k > window; k--) { binSum = 0.0; int temp_low_edge = 0; temp_low_edge = k - (static_cast(window / 2.0)); for (int m = 0; m < window; m++) { binSum += tempChargeHist->GetBinContent(static_cast(temp_low_edge + m)); } if ((binSum / static_cast(window)) < (0.5 * peakBin_height_val)) { lowhp_peak_avg = 0.0; for (int n = k; n < peakbinplace; n++) { lowhp_peak_avg += tempChargeHist->GetBinContent(n); } lowhp_peak_avg /= 
static_cast(peakbinplace - k); new_avg_sum = 0; for (int o = k; o > window; o--) { new_avg_sum = 0; temp_low_edge = o - (static_cast(window / 2.0)); for (int p = 0; p < window; p++) { new_avg_sum += static_cast (tempChargeHist->GetBinContent(temp_low_edge + p)); } if ((new_avg_sum / static_cast(window)) < 0.5 * lowhp_peak_avg) { threshVal = o; o = window; k = window; break; } } } } threshVal = tempChargeHist->GetBinCenter(threshVal); // The threshold // Clean up tempChargeHist->Delete(); tempChargeHist = NULL; return 0; } int PCAProc::Bootstrap() { // The idea of the bootstrap routine is to figure out a constant time // offset for LEDs so that the extracted interpolation points can be // corrected for it. This is still in test-phase, things will get // interesting once data starts pouring in. // Set up local vectors and other variables // nLEDs x nPMTs : arrival time for all LED PMT combinations std::vector > tDelta; // nLEDs x nPMTs : errors on the above std::vector > tDeltaErr; std::vector > PMTHitBy; // nPMTs std::vector > overlapMatrix; // nLEDs x nLEDs std::vector > overlapErrMatrix; // nLEDs x nLEDs // nLEDs x nLEDs x number of overlaps std::vector > > overlapVector; // nLEDs x nLEDs x number of overlaps std::vector > > overlapVectorErr; std::vector > overlapCountMatrix; // nLEDs x nLEDs std::vector calLEDs; std::vector diffTransitTime; std::vector diffLEDTime; double tempDelta = 0.0; // Resize the vectors PMTHitBy.resize(fTotPMTs); overlapMatrix.resize(fNSource); overlapErrMatrix.resize(fNSource); overlapVector.resize(fNSource); overlapVectorErr.resize(fNSource); overlapCountMatrix.resize(fNSource); tDelta.resize(fNSource); tDeltaErr.resize(fNSource); fTOffsetArray.resize(fNSource); fTOffsetErrArray.resize(fNSource); for (int j = 0; j < fNSource; j++) { overlapMatrix[j].resize(fNSource); overlapErrMatrix[j].resize(fNSource); overlapVector[j].resize(fNSource); overlapVectorErr[j].resize(fNSource); overlapCountMatrix[j].resize(fNSource); 
tDelta[j].resize(fTotPMTs); tDeltaErr[j].resize(fTotPMTs); fTOffsetArray[j] = 0.0; fTOffsetErrArray[j] = 0.0; } for (int i = 0; i < fTotPMTs; i++) { for (int j = 0; j < fNSource; j++) { tDelta[j][i] = 0.0; tDeltaErr[j][i] = 0.0; } } for (int i = 0; i < fNSource; i++) { for (int j = 0; j < fNSource; j++) { overlapCountMatrix[i][j] = 0; overlapMatrix[i][j] = 0; overlapErrMatrix[i][j] = 0; } } for (int i = 0; i < fTotPMTs; i++) PMTHitBy[i].clear(); // Find the position of the prompt time peak for each // PMT/LED combination that has enough hits // to be stored as fTDelta[LED#][PMT#] for (int i = 0; i < fTotPMTs; i++) { for (int j = 0; j < fNSource; j++) { tempDelta = 0.0; int tArraySize = fTArray[i][j].size(); if ((fLEDCheckRATid[i][j] != 0) && (tArraySize > fMinNPointsBoot)) { // List of LEDs (j) that caused a hit in PMT #i PMTHitBy[i].push_back(j); TH1D *tempTimeHist = new TH1D("tempTimeHist", "Time Histogram", static_cast((fTimeMaxVal - fTimeMinVal) * 2), fTimeMinVal, fTimeMaxVal); for (int k = 0; k < tArraySize; k++) { tempTimeHist->Fill(fTArray[i][j].at(k)); tempDelta += fTArray[i][j].at(k); } double max = tempTimeHist->GetBinCenter(tempTimeHist->GetMaximumBin()); TF1 *gaussian = new TF1("gaussian", "gaus", max - 8, max + 8); tempTimeHist->Fit(gaussian, "Rq"); tDelta[j][i] = tempDelta / static_cast(fTArray[i][j].size()); tDelta[j][i] = gaussian->GetParameter(1); tDeltaErr[j][i] = gaussian->GetParError(1); tempDelta = 0.0; tempTimeHist->Delete(); gaussian->Delete(); tempTimeHist = NULL; gaussian = NULL; } else { tDelta[j][i] = DS::INVALID; } } } // Set up some histograms that will be written out to the rootfile TH1D *offsetHist = new TH1D("offsetHist", "Offset Histogram", 1000, -20, +20); TH1D *singleoffsetHist = new TH1D("singleoffsetHist", "Offset Histogram", 1000, -10, +10); TH1D *singleoffsetErrHist = new TH1D("singleoffseterrHist", "Offset Error Histogram", 1000, -10, +10); TH2D *transitHist = new TH2D("transitHist", "transit Histogram", 1000, -50, +50, 1000, 
-50, 50); TH1D *absOffsetHist = new TH1D("absOffsetHist", "absolute offset Histogram", 200, -10, +10); TH1D *absOffsetErrHist = new TH1D("absOffsetErrHist", "error absolute offset Histogram", 200, -10, +10); TH2D *finalOffsetHist = new TH2D("finalOffsetHist", "final offset Histogram", 115, 0, 114, 100, -5, 5); TGraph2D *ledCoverage = new TGraph2D(); ledCoverage->SetName("ledCoverage"); // Separate the hits on the PMT to which came from which LEDs // in order to determine into which overlap category they are placed for (int i = 0; i < fTotPMTs; i++) { TVector3 PMTPos; PMTPos.SetX(fTubePositionX[i]); PMTPos.SetY(fTubePositionY[i]); PMTPos.SetZ(fTubePositionZ[i]); TVector2 PMTPosFlat; // Use the FlatMap function to get the 2D position RAT::SphereToIcosahedron(PMTPos, PMTPosFlat, 1, 2.12); ledCoverage->SetPoint(i, PMTPosFlat.X(), PMTPosFlat.Y(), PMTHitBy[i].size()); if (PMTHitBy[i].size() > 1) { // Proceed if this PMT received hits from more // than 1 LED. for (unsigned int primaryLED = 0; primaryLED < PMTHitBy[i].size(); primaryLED++) { for (unsigned int secondaryLED = primaryLED + 1; secondaryLED < PMTHitBy[i].size(); secondaryLED++) { // Get the LED IDs of the primary and secondary LEDs int LEDID1 = PMTHitBy[i].at(primaryLED); int LEDID2 = PMTHitBy[i].at(secondaryLED); // This PMT has registered hits from both LEDs and // the offset between the LEDs can be determined. The // resultant offset is recorded to // fOverlapVector[primaryLED#][secondaryLED#]. 
// Select overlaps that have a small difference in // transittime as determined by the user if (abs(fTransitTimes[i][LEDID1] - fTransitTimes[i][LEDID2]) < fSelectTTransit) { overlapVector[LEDID1][LEDID2].push_back(tDelta[LEDID1][i] - tDelta[LEDID2][i]); overlapVectorErr[LEDID1][LEDID2].push_back(tDeltaErr[LEDID1][i] + tDeltaErr[LEDID2][i]); overlapCountMatrix[LEDID1][LEDID2]++; diffTransitTime.push_back(fTransitTimes[i][LEDID1] - fTransitTimes[i][LEDID2]); diffLEDTime.push_back(tDelta[LEDID1][i] - tDelta[LEDID2][i]); if (fDebugPCA) { offsetHist->Fill(tDelta[LEDID1][i] - tDelta[LEDID2][i]); // Check the correlation between difference // in transittime and LEDtime offsets transitHist->Fill(tDelta[LEDID1][i] - tDelta[LEDID2][i], fTransitTimes[i][LEDID1] - fTransitTimes[i][LEDID2]); } } } } } } // Calculate Pearson correlation coefficient, from Numerical Recipes int n = diffLEDTime.size(); double r = 0.0; double syy = 0.0, sxy = 0.0, sxx = 0.0, ay = 0.0, ax = 0.0; for (int j = 0; j < n; j++) { // Find the means ax += diffLEDTime[j]; ay += diffTransitTime[j]; } ax /= n; ay /= n; for (int j = 0; j < n; j++) { // Compute the correlation coefficient double xt = diffLEDTime[j] - ax; double yt = diffTransitTime[j] - ay; sxx += xt * xt; syy += yt * yt; sxy += xt * yt; } r = sxy / (TMath::Sqrt(sxx * syy) + tiny); if (abs(r) > fMaxCorr) { fOverallMask = fBits->SetBit(fOverallMask, 17); return -1; // Bootstrapping failed... we are clearly not controlling // the transit time systematic effect! This means that the // PCA TW failed since we cannot correct for the LED time offsets. } // Calculate the overlap between two LEDs. // For now.. Just take the median. 
for (int i = 0; i < fNSource; i++) { for (int j = i; j < fNSource; j++) { overlapMatrix[i][j] = 0.0; overlapErrMatrix[i][j] = 0.0; if (overlapVector[i][j].size() > 20) { // Make sure there are enough datapoint PCAProc::MultiCalc(1, overlapVector[i][j], overlapVector[i][j]); overlapMatrix[i][j] = fMedian; overlapErrMatrix[i][j] = fSig; overlapMatrix[j][i] = -1 * overlapMatrix[i][j]; overlapErrMatrix[j][i] = overlapErrMatrix[i][j]; } else { overlapMatrix[i][j] = 0.0; overlapErrMatrix[i][j] = 0.0; } if (fDebugPCA) { singleoffsetHist->Fill(overlapMatrix[i][j]); singleoffsetErrHist->Fill(overlapErrMatrix[i][j]); } } } // Now we have a fNSource x fNSource matrix that holds all the // extracted offsets and we also have the errors. This is now a // classic graph problem: we need to find the lowest-cost (error) // way to bootstrap all LED times. This means: pick // a reference LED and find the lowest-cost tree. This can be done with the // Dijkstra algoritm. Find shortest path solution for all LEDs as start point, // then pick the one with the most resolved LEDs and smallest global error. 
std::vector tOffsetArrayInter; // nLEDs - intermediate std::vector tOffsetErrArrayInter; // nLEDs - intermediate tOffsetArrayInter.resize(fNSource); tOffsetErrArrayInter.resize(fNSource); for (int i = 0; i < fNSource; i++) { tOffsetArrayInter[i] = 0.0; tOffsetErrArrayInter[i] = 0.0; } int maxleds = 0; double minerror = 90000.0; int winningled = 0; // Loop over all LEDs // Keep track of which LED is the best (lowest error) detail << " PCAProc::Bootstrap: Dijkstra results: " << newline; for (int i = 0; i < fNSource; i++) { // Now solve the min-error grapg using the Dijkstra algoritm std::vector dijkstraQuality = Dijkstra(overlapErrMatrix, overlapMatrix, i, tOffsetArrayInter, tOffsetErrArrayInter); detail << i << " -> " << dijkstraQuality[0] << " " << dijkstraQuality[1] << newline; if ((dijkstraQuality[1] >= maxleds) && (dijkstraQuality[0] < minerror)) { maxleds = dijkstraQuality[1]; minerror = dijkstraQuality[0]; detail << " " << maxleds << " " << minerror << newline; // Set the result to the shortest path tree for this LED fTOffsetArray = tOffsetArrayInter; fTOffsetErrArray = tOffsetErrArrayInter; // Keep track of which LED won winningled = i; } } detail << "PCAProc::Bootstrap: the winning LED is " << winningled << newline; // Need to fill these arrays to feed to TGraphErrors double *xarray = new double[fNSource]; double *xerrarray = new double[fNSource]; double *yarray = new double[fNSource]; double *yerrarray = new double[fNSource]; detail << " PCAProc::Bootstrap: Final results: " << newline; for (int i = 0; i < fNSource; i++) { if (fTOffsetArray[i] > -100) { xarray[i] = i; yarray[i] = fTOffsetArray[i]; xerrarray[i] = 0.0; yerrarray[i] = fTOffsetErrArray[i]; if (i == winningled) { xarray[i] = i; yarray[i] = 0.0; xerrarray[i] = 0.0; yerrarray[i] = 0.0; } detail << i << " -> " << fTOffsetArray[i] << " " << fTOffsetErrArray[i] << newline; } else { xarray[i] = i; yarray[i] = 0.0; xerrarray[i] = 0.0; yerrarray[i] = 0.0; } } TGraphErrors *finalGraph = new 
TGraphErrors(fNSource, xarray, yarray, xerrarray, yerrarray); finalGraph->SetName("ledOffsets"); if (fDebugPCA) { fHists.Beautify(); fHists.WriteSingleHist(offsetHist, fPCARootFile); fHists.WriteSingleHist(transitHist, fPCARootFile); fHists.WriteSingleHist(singleoffsetHist, fPCARootFile); fHists.WriteSingleHist(singleoffsetErrHist, fPCARootFile); fHists.WriteSingleHist(absOffsetHist, fPCARootFile); fHists.WriteSingleHist(absOffsetErrHist, fPCARootFile); fHists.WriteSingleHist(finalOffsetHist, fPCARootFile); fHists.WriteSingleHist(finalGraph, fPCARootFile); fHists.WriteSingleHist(ledCoverage, fPCARootFile); } // Clean up delete[] xarray; delete[] yarray; delete[] xerrarray; delete[] yerrarray; xarray = NULL; yarray = NULL; xerrarray = NULL; yerrarray = NULL; // Remember that fTOffsetArray gives the offset from each LED // when compared to LED #0 *yay* if (fVerbosePCA) { detail << "PCAProc::Bootstrap(): Ended up calibrating " << static_cast(calLEDs.size() - 1) << "LEDs\n" << endl; detail << "PCAProc: Done in Bootstrap()\n" << endl; } offsetHist->Delete(); transitHist->Delete(); singleoffsetHist->Delete(); absOffsetHist->Delete(); return 0; } std::vectorPCAProc::Dijkstra(std::vector >& LEDErrmatrix, std::vector >& LEDmatrix, int startLED, std::vector & tOffsetArrayInter, std::vector & tOffsetErrArrayInter) { int src = startLED; // Which LED do we want to start with? // vector for lowest error from src to i double *dist = new double[fNSource]; // vector for corresponding lowest error time-offsets from src to i double *offset = new double[fNSource]; // vector for lowest propagated error from src to i double *error = new double[fNSource]; // Set to true is LED i is included in path. 
bool *sptSet = new bool[fNSource]; // Return two values that reflect the global quality of the solution std::vector quality; quality.resize(2); quality[0] = 0.0; quality[1] = 0.0; // Initialize all errors as INFINITE and all stpSet[] as false for (int i = 0; i < fNSource; i++) { dist[i] = INT_MAX; offset[i] = INT_MAX; error[i] = INT_MAX; sptSet[i] = false; // no LEDs have been included } // Distance of source vertex from itself is always 0 dist[src] = 0; offset[src] = 0; error[src] = 0; // Find shortest path for all vertices for (int count = 0; count < fNSource - 1; count++) { // Pick the minimum distance vertex from the set of vertices not // yet processed. u is always equal to src in first iteration. // Initialize min value int min = INT_MAX; int u = 0; for (int v = 0; v < fNSource; v++) { if ((sptSet[v] == false) && (dist[v] <= min)) { min = dist[v]; u = v; } } // Mark the picked vertex as processed sptSet[u] = true; // Update dist value of the adjacent vertices of the picked vertex. 
for (int v = 0; v < fNSource; v++) { // Update dist[v] only if is not in sptSet, there is an edge from // u to v, and total weight of path from src to v through u is // smaller than current value of dist[v] if (!sptSet[v] && LEDErrmatrix[u][v] && (dist[u] != INT_MAX) && (dist[u] + LEDErrmatrix[u][v] < dist[v])) { dist[v] = dist[u] + LEDErrmatrix[u][v]; offset[v] = offset[u] + LEDmatrix[u][v]; error[v] = TMath::Sqrt(TMath::Power(error[u], 2) + TMath::Power(LEDErrmatrix[u][v], 2)); } } } // Parse the results for (int i = 0; i < fNSource; i++) { tOffsetArrayInter[i] = -1 * offset[i]; tOffsetErrArrayInter[i] = error[i]; if (tOffsetErrArrayInter[i] != INT_MAX) { quality[0] = quality[0] + error[i]; quality[1]++; } } quality[0] = quality[0] / quality[1]; if (quality[0] == 0) { quality[0] = INT_MAX; } delete[] dist; delete[] sptSet; delete[] offset; delete[] error; offset = NULL; error = NULL; dist = NULL; sptSet = NULL; // Calculate the quality factor return quality; } bool PCAProc::FileExists(const std::string& filename) { struct stat test; if (stat(filename.c_str(), &test) != -1) { return true; } else { return false; } } void PCAProc::InitPCARoot(const std::string& rootfilename) { fPCARootName = rootfilename; TString hold = fPCARootName; // Open root file fPCARootFile = new TFile(hold, "RECREATE"); // Initialise required histograms fHists.Beautify(); fHists.InitialiseHists(fDoTWPCA, fDoGFPCA); } void PCAProc::ClosePCARoot() { detail << "PCAProc::ClosePCARoot: Writing to root file" << endl; // Write hists to file fPCARootFile->cd(); fHists.Beautify(); fHists.SetAxes(fDoTWPCA, fDoGFPCA); fHists.WriteHists(fDoTWPCA, fDoGFPCA, fPCARootFile); detail << "PCAProc::ClosePCARoot: Closing root file" << endl; fPCARootFile->Close(); } void PCAProc::PCATitles() { if (fDoTWPCA) { int packedinteger = 0; double datapoint1 = 0.0; double datapoint2 = 0.0; std::vector intStatusTW; std::vector interPoints; std::vector interRMS; // TW just write out the interpolation points for the moment: 
// 10 for each PMT + the intercept and gradient for the high-Q tail // Use the PCA Packer to pack the (Q,T) and (I,R) // datapoints into single integer for (int n = 0; n < fTotPMTs; ++n) { intStatusTW.push_back(static_cast(fPMTTWMask[n])); } for (int n = 0; n < fTotPMTs; n++) { for (int t = 0; t < fTotIntPoints; t++) { // Pack the data-point into an integer: packedinteger = 0; datapoint1 = fTWinterXQHS[n][fSourceSelected[n]][t]; datapoint2 = fTWinterYQHS[n][fSourceSelected[n]][t]; int result = fPCABits.PCAPacker(PCABits::PACK, PCABits::QTINTERPOLATION, packedinteger, datapoint1, datapoint2); if (result > -2) { interPoints.push_back(packedinteger); } else { PCAProc::Die("", 1, 0, 0); } } // Add the intercept and gradient of the high Q tail fit packedinteger = 0; datapoint1 = fTWinterI[n][fSourceSelected[n]]; datapoint2 = fTWinterR[n][fSourceSelected[n]]; int result = fPCABits.PCAPacker(PCABits::PACK, PCABits::GRADIENT_INTERCEPT, packedinteger, datapoint1, datapoint2); if (result > -2) interPoints.push_back(packedinteger); else PCAProc::Die("", 1, 0, 0); } for (int n = 0; n < fTotPMTs; n++) { for (int t = 0; t < fTotIntPoints; t++) { interRMS.push_back(fTWinterYErrQHS[n][fSourceSelected[n]][t]); } } // Record the results in the database file RAT::DBTable TWtable("PCA_TW"); TWtable.SetPassNumber(-2); TWtable.SetI("tw_npoints", fTotIntPoints); TWtable.SetI("is_sno", 0); TWtable.SetIArray("PCATW_status", intStatusTW); TWtable.SetIArray("twinter", interPoints); TWtable.SetDArray("twinterrms", interRMS); TWtable.SaveAs(fPCATWDBName); } // End of if( fDoTWPCA ) if (fDoGFPCA) { std::vector intStatusGF; RAT::DBTable GFtable("PCA_GF"); // GF write out the charge peak, the threshold and // the hhp for both QHS and QHL GFtable.SetPassNumber(-2); GFtable.SetD("mean_hhp", fMeanhhp); GFtable.SetD("min_hhp", fMinhhp); GFtable.SetD("max_hhp", fMaxhhp); for (int n = 0; n < fTotPMTs; ++n) { intStatusGF.push_back(static_cast(fPMTGFMask[n])); } GFtable.SetIArray("PCAGF_status", 
intStatusGF); GFtable.SetDArray("QHS_threshold", fGFthrQHS); GFtable.SetDArray("QHS_peak", fGFpeaQHS); GFtable.SetDArray("QHS_hhp", fGFhhpQHS); GFtable.SetDArray("QHL_threshold", fGFthrQHL); GFtable.SetDArray("QHL_peak", fGFpeaQHL); GFtable.SetDArray("QHL_hhp", fGFhhpQHL); GFtable.SaveAs(fPCAGFDBName); } // End of if( fDoGFPCA ) } void PCAProc::CheckOccupancy() { bool online = false; int countvalid = 0; int countabnormal = 0; int Ntubes = fTubeStatus.size(); double MinOcc = 1e9; double MaxOcc = 0; const DU::ChanHWStatus& channelHardwareStatus = DU::Utility::Get()->GetChanHWStatus(); // Grabbing the number of online normal tubes for this run for (int np = 0; np < Ntubes; np++) { // If it's a normal tube, check online status and // add to the sum and count if (fTubeStatus[np] == 1) { online = channelHardwareStatus.IsTubeOnline(np); if (online) { ++countvalid; // This is an online, normal tube } } if (fTubeStatus[np] != 1) { online = channelHardwareStatus.IsTubeOnline(np); if (online) { ++countabnormal; // This is an online, abnormal tube } } } if (fDebugPCA) { detail << "PCAProc::CheckOccupancy: Number of online normal PMTs is " << countvalid << endl; detail << "PCAProc::CheckOccupancy: Number of online abnormal PMTs is " << countabnormal << endl; } // Now figure out the minimum and maximum occupancy for this run, // for online, normal tubes only MinOcc = 100 * (fNHitsMin / (static_cast(countvalid))); MaxOcc = 100 * (fNHitsMax / (static_cast(countvalid))); if (fVerbosePCA) { detail << "PCAProc::CheckOccupancy: Minimum Occupancy is " << MinOcc << endl; detail << "PCAProc::CheckOccupancy: Maximum Occupancy is " << MaxOcc << endl; } // warn if occupancy is too high: this could mean multi-photon hits! 
  // Average occupancy above 10% risks multi-photon hits: warn but continue.
  if (((MinOcc + MaxOcc) / 2.0) > 10.0) {
    PCAProc::DontDie("", 2, MinOcc, MaxOcc);
  }
}

// Read every user-configurable PCA setting from the PCA_GENERATION RATDB
// bank into member variables: calibration type, verbosity, interpolation
// binning, bootstrap parameters, and all per-channel/run-level failure
// limits. Called once, for the first run only.
void PCAProc::ReadInUserInput() {
  // Get PCA user-defined flags
  // See PCA_GENERATION.ratdb for details about each specific variable
  DBLinkPtr PCAbank = DB::Get()->GetLink("PCA_GENERATION");
  fPCAflag = PCAbank->GetI("pca_flag");  // this will always be 0 (SNO method)
  // for now, later we will add other, more optimized PCA processes
  fPCAtype = PCAbank->GetI("pca_type");
  // pca_type: 0 = both TW and GF, 1 = TW only, 2 = GF only
  fDoTWPCA = false;
  fDoGFPCA = false;
  if ((fPCAtype == 0) || (fPCAtype == 1)) {
    fDoTWPCA = true;
  }
  if ((fPCAtype == 0) || (fPCAtype == 2)) {
    fDoGFPCA = true;
  }
  // Need to fix how to find out if this is a laserball or a LED run
  // .. for now.. have it as user input...
  fPCASource = PCAbank->GetI("pca_source");
  // Check selected verbosity/debug-level and print out message
  fExtaSyncDelay = PCAbank->GetD("extasyncDelay");
  fTimeMinVal = PCAbank->GetD("timeMinVal");
  fTimeMaxVal = PCAbank->GetD("timeMaxVal");
  // Verbosity: 0 = quiet, 1 = verbose, 2 = verbose + debug histograms
  fVerbosePCA = false;
  fDebugPCA = false;
  if (PCAbank->GetI("pca_verbosity") == 1) {
    fVerbosePCA = true;
  }
  if (PCAbank->GetI("pca_verbosity") == 2) {
    fVerbosePCA = true;
    fDebugPCA = true;
  }
  if (fVerbosePCA) {
    detail << "PCAProc::ReadInUserInput: Running PCA in verbose mode" << endl;
  } else {
    detail << "PCAProc::ReadInUserInput: Running PCA in non-verbose mode" << endl;
  }
  if (fDebugPCA) {
    detail << "PCAProc::ReadInUserInput: Running PCA in debugging mode" << endl;
  } else {
    detail << "PCAProc::ReadInUserInput: Running PCA in non-debugging mode" << endl;
  }
  // Retrieve the required number of interpolation points in each region
  fPeakIntPoints = PCAbank->GetI("peakintpoints");
  fDropIntPoints = PCAbank->GetI("dropintpoints");
  fTailIntPoints = PCAbank->GetI("tailintpoints");
  // Calculate the total number of interpolation points
  fTotIntPoints = fPeakIntPoints + fDropIntPoints + fTailIntPoints;
  fTailGap = PCAbank->GetDArray("tailgap");
  fMinPointsPerBin = PCAbank->GetI("minpointsperbin");
  fMinHitsPerChan = PCAbank->GetI("low_occ_lim");
  fTrigType = PCAbank->GetI("trig_type");
  fMaxNumFiber = PCAbank->GetI("max_num_fiber");
  fMaxDistFromCenter = PCAbank->GetD("max_dist_from_center");
  fLaserballWL = PCAbank->GetD("laser_wl");
  fSelectAngle = PCAbank->GetD("select_angle");
  // For bootstrapping
  fMaxCorr = PCAbank->GetD("tw_corr_max");
  fMaxErrLEDTime = PCAbank->GetD("tw_err_max");
  // Minimum required hits for a PMT/LED pair
  fMinNPointsBoot = PCAbank->GetI("min_npoints_boot");
  // Maximum difference in transittime for a PMT(LED1,LED2) combination
  fSelectTTransit = PCAbank->GetD("select_ttransit");
  // Get the user-set limits for flagging a channel as bad from data-base
  // See PCA_GENERATION.ratdb for details about each specific variable
  // -- per-channel limits (difference w.r.t. previous run, absolute bounds)
  fPeakQHSDiffLim = PCAbank->GetI("peakQHS_diff_lim");
  fPeakQHLDiffLim = PCAbank->GetI("peakQHL_diff_lim");
  fThreshQHSDiffLim = PCAbank->GetI("threshQHS_diff_lim");
  fThreshQHLDiffLim = PCAbank->GetI("threshQHL_diff_lim");
  fHhpQHSDiffLim = PCAbank->GetI("hhpQHS_diff_lim");
  fHhpQHLDiffLim = PCAbank->GetI("hhpQHL_diff_lim");
  fThreshQHSLimhigh = PCAbank->GetI("threshQHS_limhigh");
  fThreshQHLLimhigh = PCAbank->GetI("threshQHL_limhigh");
  fThreshQHSLimlow = PCAbank->GetI("threshQHS_limlow");
  fThreshQHLLimlow = PCAbank->GetI("threshQHL_limlow");
  // -- run-level limits: how many flagged channels are tolerated before the
  //    overall run status bit is raised in CheckPCARun()
  fMaxHhpQHSDiff = PCAbank->GetI("max_hhpQHS_diff");
  fMaxPeakQHSDiff = PCAbank->GetI("max_peakQHS_diff");
  fMaxThreshQHSDiff = PCAbank->GetI("max_threshQHS_diff");
  fMaxHhpQHLDiff = PCAbank->GetI("max_hhpQHL_diff");
  fMaxPeakQHLDiff = PCAbank->GetI("max_peakQHL_diff");
  fMaxThreshQHLDiff = PCAbank->GetI("max_threshQHL_diff");
  fMaxThreshQHSHigh = PCAbank->GetI("max_threshQHS_high");
  fMaxThreshQHLHigh = PCAbank->GetI("max_threshQHL_high");
  fMaxThreshQHSLow = PCAbank->GetI("max_threshQHS_low");
  fMaxThreshQHLLow = PCAbank->GetI("max_threshQHL_low");
  fMaxNPWLow = PCAbank->GetI("max_NPW_low");
  fMaxFailed = PCAbank->GetI("tw_max_failed_int");
  fMaxOffline = PCAbank->GetI("max_offline");
  fMaxZeroOcc = PCAbank->GetI("max_zero_occ");
  fMaxLowOcc = PCAbank->GetI("max_low_occ");
  fMaxRmsHigh = PCAbank->GetI("max_rms_high");
  fMaxRmsVeryHigh = PCAbank->GetI("max_rms_very_high");
  fMaxTWnoHighQFit = PCAbank->GetI("max_no_highq_fit");
  fNMaxTStepCut = PCAbank->GetI("n_max_tstep");
  fNMaxFLateCut = PCAbank->GetI("n_max_flate");
  fNMaxFOutCut = PCAbank->GetI("n_max_fout");
  fNMaxIntPointMiss = PCAbank->GetI("n_max_miss");
  fNTWMaxGrad = PCAbank->GetI("n_tw_max_grad");
  fNTWMinGrad = PCAbank->GetI("n_tw_min_grad");
  // -- per-channel TW cut values
  fRmsHigh = PCAbank->GetD("rms_high");
  fRmsVeryHigh = PCAbank->GetD("rms_very_high");
  fMaxTStep = PCAbank->GetD("max_tstep");
  fMaxFLate = PCAbank->GetD("max_flate");
  fMaxFOut = PCAbank->GetD("max_fout");
  fMaxGrad = PCAbank->GetD("tw_max_grad");
  fMinGrad = PCAbank->GetD("tw_min_grad");
}

// Quality-check the extracted PCA constants channel-by-channel, set the
// per-PMT and overall status masks, and (in debug mode) write diagnostic
// histograms and a status table.
void PCAProc::CheckPCARun() {
  // Set up some counters
  int countFailedInt = 0;   // failed interpolation points for one channel
  int countFailedTW = 0;    // channels failing any TW check
  int countFailedGF = 0;    // channels failing any GF check
  // Per-quantity counters: d* = difference w.r.t. database too large,
  // l* = value outside absolute limits
  int dQHSTH = 0;
  int dQHSPK = 0;
  int dQHSHP = 0;
  int dQHLTH = 0;
  int dQHLPK = 0;
  int dQHLHP = 0;
  int lQHSTHhigh = 0;
  int lQHLTHhigh = 0;
  int lQHSTHlow = 0;
  int lQHLTHlow = 0;
  int lNPW = 0;
  // Channel-availability counters
  int nOffline = 0;
  int nZeroOcc = 0;
  int nLowOcc = 0;
  // TW-quality counters
  int nRmsHigh = 0;
  int nRmsVeryHigh = 0;
  int nTWnoHighQFit = 0;
  int nMaxFLate = 0;
  int nMaxFOut = 0;
  int nMaxTStep = 0;
  int nMaxMiss = 0;
  int nMaxLGrad = 0;
  int nMaxHGrad = 0;
  // Names of the histograms that will be written out to the output file.
  // For failed PMTs only.
ostringstream oss; string failedHistName; // Array of flags to mark PMTs that will have their histograms // written out to the output rootfile std::vector makeHists; makeHists.resize(fTotPMTs); // 1) Compare TH, PK, HP values to the previous run // histogram the difference between the database // values and the extracted ones, for (int k = 0; k < fTotPMTs; k++) { // Loop over all channels makeHists[k] = 0; // By default, do not output histograms // for this PMT int nHitsPerPMT = 0; // In both GF and TW case, // fill the histogram that shows the hits per PMT for (int i = 0; i < fNSource; i++) { nHitsPerPMT = nHitsPerPMT + fQHSArray[k][i].size(); } if (fDebugPCA) { fHists.fPCAhitsPerPMT->Fill(nHitsPerPMT); } // Check the number of offline channels if (fBits->TestBit(fPMTGFMask[k], 1) || fBits->TestBit(fPMTTWMask[k], 1)) nOffline++; // Check the number of online, zero occupancy channels if (fBits->TestBit(fPMTGFMask[k], 2) || fBits->TestBit(fPMTTWMask[k], 2)) nZeroOcc++; // Check the number of online, low occupancy channels if (fBits->TestBit(fPMTGFMask[k], 3) || fBits->TestBit(fPMTTWMask[k], 3)) nLowOcc++; // Compare these results to the previous ones if (fDoGFPCA) { if (!(fBits->TestBit(fPMTGFMask[k], 0)) && !(fBits->TestBit(fPMTGFMask[k], 4)) && !(fBits->TestBit(fPMTGFMask[k], 5))) { if (fDebugPCA) { // 2D fHists.fPCAdiffPEAKQHS->Fill(k, fGFpeaQHSdb[k] - fGFpeaQHS[k]); fHists.fPCAdiffTHRESHQHS->Fill(k, fGFthrQHSdb[k] - fGFthrQHS[k]); fHists.fPCAdiffHHPQHS->Fill(k, fGFhhpQHSdb[k] - fGFhhpQHS[k]); fHists.fPCAdiffPEAKQHL->Fill(k, fGFpeaQHLdb[k] - fGFpeaQHL[k]); fHists.fPCAdiffTHRESHQHL->Fill(k, fGFthrQHLdb[k] - fGFthrQHL[k]); fHists.fPCAdiffHHPQHL->Fill(k, fGFhhpQHLdb[k] - fGFhhpQHL[k]); // 1D fHists.fPCAdiffPEAKQHS1d->Fill(fGFpeaQHSdb[k] - fGFpeaQHS[k]); fHists.fPCAdiffTHRESHQHS1d->Fill(fGFthrQHSdb[k] - fGFthrQHS[k]); fHists.fPCAdiffHHPQHS1d->Fill(fGFhhpQHSdb[k] - fGFhhpQHS[k]); fHists.fPCAdiffPEAKQHL1d->Fill(fGFpeaQHLdb[k] - fGFpeaQHL[k]); 
fHists.fPCAdiffTHRESHQHL1d->Fill(fGFthrQHLdb[k] - fGFthrQHL[k]); fHists.fPCAdiffHHPQHL1d->Fill(fGFhhpQHLdb[k] - fGFhhpQHL[k]); } // compare only if this channel did not fail already // Quality checks if (fGFthrQHS[k] > fThreshQHSLimhigh) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 20); lQHSTHhigh++; makeHists[k] = 1; } if (fGFthrQHL[k] > fThreshQHLLimhigh) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 21); lQHLTHhigh++; makeHists[k] = 1; } if (fBits->TestBit(fPMTGFMask[k], 22)) lNPW++; if (fGFthrQHS[k] < fThreshQHSLimlow) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 23); lQHLTHlow++; makeHists[k] = 1; } if (fGFthrQHL[k] < fThreshQHLLimlow) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 24); lQHLTHlow++; makeHists[k] = 1; } if (abs(fGFthrQHSdb[k] - fGFthrQHS[k]) > fThreshQHSDiffLim) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 10); dQHSTH++; makeHists[k] = 1; } if (abs(fGFpeaQHSdb[k] - fGFpeaQHS[k]) > fPeakQHSDiffLim) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 11); dQHSPK++; makeHists[k] = 1; } if (abs(fGFhhpQHSdb[k] - fGFhhpQHS[k]) > fHhpQHSDiffLim) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 12); dQHSHP++; makeHists[k] = 1; } if (abs(fGFthrQHLdb[k] - fGFthrQHL[k]) > fThreshQHLDiffLim) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 13); dQHLTH++; makeHists[k] = 1; } if (abs(fGFpeaQHLdb[k] - fGFpeaQHL[k]) > fPeakQHLDiffLim) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 14); dQHLPK++; makeHists[k] = 1; } if (abs(fGFhhpQHLdb[k] - fGFhhpQHL[k]) > fHhpQHLDiffLim) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 15); dQHLHP++; makeHists[k] = 1; } if (fPMTGFMask[k] != 0) { fPMTGFMask[k] = fBits->SetBit(fPMTGFMask[k], 0); countFailedGF++; } } } // end of if fDoGFPCA // Now check the TW results if (fDoTWPCA) { // Check the following flags only if the channel has not failed yet if (!(fBits->TestBit(fPMTTWMask[k], 0))) { if (fDebugPCA) { fHists.fRMSHist->Fill(fTWRms[k][fSourceSelected[k]]); fHists.fFlateHist->Fill(fTMore[k][fSourceSelected[k]] / static_cast 
(fQHSArray[k][fSourceSelected[k]] .size())); fHists.fFoutHist->Fill(fTQHSLess[k][fSourceSelected[k]] / static_cast (fQHSArray[k][fSourceSelected[k]] .size())); } // These are warning flags // channels are bad until checked manually. // 4) Check if channel RMS is very high if (fTWRms[k][fSourceSelected[k]] > fRmsVeryHigh) { fPMTTWMask[k] = fBits->SetBit(fPMTTWMask[k], 7); nRmsVeryHigh++; makeHists[k] = 1; } // 5) Check if channel RMS is too high if (fTWRms[k][fSourceSelected[k]] > fRmsHigh) { fPMTTWMask[k] = fBits->SetBit(fPMTTWMask[k], 8); nRmsHigh++; makeHists[k] = 1; } // 6) Count how many channels did not // get their high Q tail fitted if (fBits->TestBit(fPMTTWMask[k], 9)) nTWnoHighQFit++; // 7) Check the TStep // TStep is the difference in Y (time) between // two interpolation points for (int j = 0; j < fTotIntPoints - 1; j++) { if (fDebugPCA) { fHists.fTStepHist ->Fill(fTWinterYQHS[k][fSourceSelected[k]][j + 1] - fTWinterYQHS[k][fSourceSelected[k]][j]); } if (fTWinterYQHS[k][fSourceSelected[k]][j + 1] - fTWinterYQHS[k][fSourceSelected[k]][j] > fMaxTStep) { fPMTTWMask[k] = fBits->SetBit(fPMTTWMask[k], 10); nMaxTStep++; makeHists[k] = 1; } } // 8) Check the FLate: Fraction of hits that are late if ((fTMore[k][fSourceSelected[k]] / static_cast (fQHSArray[k][fSourceSelected[k]].size())) > fMaxFLate) { fPMTTWMask[k] = fBits->SetBit(fPMTTWMask[k], 11); nMaxFLate++; makeHists[k] = 1; } // 9) Check out the Fout: Fraction of hits that are early // or have low charge if ((fTQHSLess[k][fSourceSelected[k]] / static_cast (fQHSArray[k][fSourceSelected[k]].size())) > fMaxFOut) { fPMTTWMask[k] = fBits->SetBit(fPMTTWMask[k], 12); nMaxFOut++; makeHists[k] = 1; } // 10) Check if the extracted gradient is positive! 
if ((fTWinterR[k][fSourceSelected[k]] > fMaxGrad) && !(fBits->TestBit(fPMTTWMask[k], 9))) { fPMTTWMask[k] = fBits->SetBit(fPMTTWMask[k], 13); nMaxHGrad++; makeHists[k] = 1; } // 11) Check if the extracted gradient is too steep if ((fTWinterR[k][fSourceSelected[k]] < fMinGrad) && !(fBits->TestBit(fPMTTWMask[k], 9))) { fPMTTWMask[k] = fBits->SetBit(fPMTTWMask[k], 14); nMaxLGrad++; makeHists[k] = 1; } // 12) Check the nummber of extracted interpolation points countFailedInt = 0; for (int i = 0; i < fTotIntPoints; i++) { if (fTWinterYQHS[k][fSourceSelected[k]][i] == -9999) { countFailedInt++; } } // 13) Check the number of extracted interpolation points if (countFailedInt > fMaxFailed) { fPMTTWMask[k] = fBits->SetBit(fPMTTWMask[k], 31); nMaxMiss++; makeHists[k] = 1; } // If any of the above flags are set for this channel, set bit 0 if (fPMTTWMask[k] != 0) { fPMTTWMask[k] = fBits->SetBit(fPMTTWMask[k], 0); countFailedTW++; } } // End of if( !(fBits->TestBit(fPMTTWMask[k],0)) ) } // End of if( fDoTWPCA ) // If the channel failed any of the flags (except channels is offline // (1) or zero occupancy (2) or low occupancy (3)) // Make some plots and safe them to the output rootfile, // only if we are in debugging mode if (makeHists[k] && fDebugPCA) { // Set up the output histograms TH2D *tempTWHist; TH1D *tempChargeHistQHS; TH1D *tempChargeHistQHL; TH1D *tempChargeHistTeca; oss.str(""); oss << "PMT-" << k << " TW"; failedHistName = oss.str(); tempTWHist = new TH2D(failedHistName.c_str(), failedHistName.c_str(), (fQHSMax[k][fSourceSelected[k]] - fQHSMin[k][fSourceSelected[k]]), fQHSMin[k][fSourceSelected[k]], fQHSMax[k][fSourceSelected[k]], (fTimeMaxVal - fTimeMinVal) / 0.5, fTimeMinVal, fTimeMaxVal); oss.str(""); oss << "PMT-" << k << " QHS"; failedHistName = oss.str(); tempChargeHistQHS = new TH1D(failedHistName.c_str(), failedHistName.c_str(), (fQHSMax[k][fSourceSelected[k]] - fQHSMin[k][fSourceSelected[k]]), fQHSMin[k][fSourceSelected[k]], fQHSMax[k][fSourceSelected[k]]); 
oss.str(""); oss << "PMT-" << k << " QHL"; failedHistName = oss.str(); tempChargeHistQHL = new TH1D(failedHistName.c_str(), failedHistName.c_str(), (fQHLMax[k][fSourceSelected[k]] - fQHLMin[k][fSourceSelected[k]]), fQHLMin[k][fSourceSelected[k]], fQHLMax[k][fSourceSelected[k]]); oss.str(""); oss << "PMT-" << k << " Teca"; failedHistName = oss.str(); tempChargeHistTeca = new TH1D(failedHistName.c_str(), failedHistName.c_str(), (fTimeMaxVal - fTimeMinVal) / 2, fTimeMinVal, fTimeMaxVal); for (int g = 0; g < fNSource; g++) { int QHSArraysize = fQHSArray[k][g].size(); for (int j = 0; j < QHSArraysize; j++) { if (g == fSourceSelected[k]) { tempTWHist->Fill(fQHSArray[k][g].at(j), fTArray[k][g].at(j)); tempChargeHistTeca->Fill(fTArray[k][g].at(j)); } tempChargeHistQHS->Fill(fQHSArray[k][g].at(j)); tempChargeHistQHL->Fill(fQHLArray[k][g].at(j)); } } fHists.Beautify(); fHists.WriteSingleHist(tempChargeHistQHS, fPCARootFile); fHists.WriteSingleHist(tempChargeHistQHL, fPCARootFile); fHists.WriteSingleHist(tempChargeHistTeca, fPCARootFile); fHists.WriteSingleHist(tempTWHist, fPCARootFile); tempChargeHistQHS->Delete(); tempChargeHistQHS = NULL; tempChargeHistQHL->Delete(); tempChargeHistQHL = NULL; tempChargeHistTeca->Delete(); tempChargeHistTeca = NULL; tempTWHist->Delete(); tempTWHist = NULL; } } // End of loop over PMTs // Set up the general PCA flag // General flags // 1) Offline channels if (nOffline > fMaxOffline) { fOverallMask = fBits->SetBit(fOverallMask, 3); } // 2) Zero occupancy channels if (nZeroOcc > fMaxZeroOcc) { fOverallMask = fBits->SetBit(fOverallMask, 4); } // 3) Low occupancy channels if (nLowOcc > fMaxLowOcc) { fOverallMask = fBits->SetBit(fOverallMask, 5); } // 4) TW general flags if (nRmsHigh > fMaxRmsHigh) fOverallMask = fBits->SetBit(fOverallMask, 9); if (nRmsVeryHigh > fMaxRmsVeryHigh) fOverallMask = fBits->SetBit(fOverallMask, 8); if (nTWnoHighQFit > fMaxTWnoHighQFit) fOverallMask = fBits->SetBit(fOverallMask, 10); if (nMaxTStep > fNMaxTStepCut) 
fOverallMask = fBits->SetBit(fOverallMask, 11); if (nMaxFLate > fNMaxFLateCut) fOverallMask = fBits->SetBit(fOverallMask, 12); if (nMaxFOut > fNMaxFOutCut) fOverallMask = fBits->SetBit(fOverallMask, 13); if (nMaxMiss > fNMaxIntPointMiss) fOverallMask = fBits->SetBit(fOverallMask, 14); if (nMaxHGrad > fNTWMaxGrad) fOverallMask = fBits->SetBit(fOverallMask, 15); if (nMaxLGrad > fNTWMinGrad) fOverallMask = fBits->SetBit(fOverallMask, 16); // 5) GF general flags if (dQHSTH > fMaxThreshQHSDiff) fOverallMask = fBits->SetBit(fOverallMask, 20); if (dQHSPK > fMaxPeakQHSDiff) fOverallMask = fBits->SetBit(fOverallMask, 21); if (dQHSHP > fMaxHhpQHSDiff) fOverallMask = fBits->SetBit(fOverallMask, 22); if (dQHLTH > fMaxThreshQHLDiff) fOverallMask = fBits->SetBit(fOverallMask, 23); if (dQHLPK > fMaxPeakQHLDiff) fOverallMask = fBits->SetBit(fOverallMask, 24); if (dQHLHP > fMaxHhpQHLDiff) fOverallMask = fBits->SetBit(fOverallMask, 25); if (lQHSTHhigh > fMaxThreshQHSHigh) fOverallMask = fBits->SetBit(fOverallMask, 26); if (lQHLTHhigh > fMaxThreshQHLHigh) fOverallMask = fBits->SetBit(fOverallMask, 27); if (lNPW > fMaxNPWLow) fOverallMask = fBits->SetBit(fOverallMask, 28); if (lQHSTHlow > fMaxThreshQHSLow) fOverallMask = fBits->SetBit(fOverallMask, 29); if (lQHLTHlow > fMaxThreshQHLLow) fOverallMask = fBits->SetBit(fOverallMask, 30); // Set bit 7 to 0 to flag job completion fOverallMask = fBits->ClearBit(fOverallMask, 7); // Set the general flag if (!fDoTWPCA || ((fOverallMask & 1048448) != 0)) { fOverallMask = fBits->SetBit(fOverallMask, 1); fOverallMask = fBits->SetBit(fOverallMask, 0); } if (!fDoGFPCA || ((fOverallMask & 4293918720) != 0)) { fOverallMask = fBits->SetBit(fOverallMask, 2); fOverallMask = fBits->SetBit(fOverallMask, 0); } if (fDebugPCA) { // Write out a summary of all flags and limits to the PCA logfile // This output is needed as input to the PCA website.. only if we // have written out the rootfile - in debugmode! 
RAT::DBTable TWtable("PCA_Status"); TWtable.SetI("PCA_date", fDate); TWtable.SetI("PCATW_status", fOverallMask); TWtable.SetI("PCA_runnumber", fRunID); TWtable.SetI("PCA_gen_status", fOverallMask); // Make arrays of the limits static const int arr1[] = { 0, 0, 0, fMaxOffline, fMaxZeroOcc, fMaxLowOcc, fMaxRmsVeryHigh, fMaxRmsHigh, fMaxTWnoHighQFit, fNMaxTStepCut, fNMaxFLateCut, fMaxFOut, fMaxFOut, fMaxFailed, 0, 0, 0, 0, 0, fMaxThreshQHSDiff, fMaxPeakQHSDiff, fMaxHhpQHSDiff, fMaxThreshQHLDiff, fMaxPeakQHLDiff, fMaxHhpQHLDiff, fMaxThreshQHSHigh, fMaxThreshQHLHigh, fMaxNPWLow, fMaxThreshQHSLow, fMaxThreshQHLLow, 0 }; static const double arr2[] = { 0., 0., 0., 0., 0., 0., 0., fRmsVeryHigh, fRmsHigh, 0, fMaxTStep, fMaxFLate, fMaxFOut, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. }; static const int arr3[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, fThreshQHSDiffLim, fPeakQHSDiffLim, fHhpQHSDiffLim, fThreshQHLDiffLim, fPeakQHLDiffLim, fHhpQHLDiffLim, 0, 0, 0, 0, fThreshQHSLimhigh, fThreshQHLLimhigh, 0, fThreshQHSLimlow, fThreshQHLLimlow, 0, 0, 0, 0, 0, 0, 0 }; std::vector PCAGenLimits (arr1, arr1 + sizeof(arr1) / sizeof(arr1[0])); std::vector PCATWLimits (arr2, arr2 + sizeof(arr2) / sizeof(arr2[0])); std::vector PCAGFLimits (arr3, arr3 + sizeof(arr3) / sizeof(arr3[0])); // Write out these arrays to the logfile TWtable.SetIArray("PCA_gen_limits", PCAGenLimits); TWtable.SetDArray("PCA_TW_limits", PCATWLimits); TWtable.SetIArray("PCA_GF_limits", PCAGFLimits); TWtable.SaveAs(fPCALogName); } if (fVerbosePCA) { // Print run summary to screen fPCABits.InitialiseStatusWords(); if (fOverallMask != 0) { info << "PCAProc::CheckPCARun: PCA run-status word != 0:" << newline; for (int n = 0; n < 32; n++) { int bit = fBits->GetBits(fOverallMask, n, 1); if (bit != 0) { info << "PCA Bit " << n << " = " << bit << " : " << fPCABits.fOverallMask[n] << newline; } } } else { info << "PCAProc::CheckPCARun: PCA run-status word = 0 i.e. 
SUCCESS: a clean run" << newline; } } } // Handle non-fatal errors (adapted from ECAProc.cc) void PCAProc::DontDie(std::string message, int flag_err, float info1, float info2) { warn << newline; warn << BYELLOW << " PCA SETBACK: " << CLR << newline; warn << message << newline; if (flag_err == 2) { // Warn that the occupancy is too high warn << "The minimum occupancy is " << info1 << "%" << newline; warn << "The maximum occupancy is " << info2 << "%" << newline; warn << "This is too high to perform a correct gain calibration" << " since we are interested in the single pe peak " << newline; warn << "The output is not to be used for gain-calibrating data " << newline; // Set the general flag here fOverallMask = fBits->SetBit(fOverallMask, 6); warn << "Continuing anyway.. you have been warned " << newline; } if (flag_err == 4) { warn << "Bootstrapping failed. No PCA TW calibration is possible " << newline; warn << "Will be deleting the TW output .ratdb since it is junk anyway." << newline; warn << "Check transittime calculation and LED positions." << newline; warn << "Thank you for using PCA TELLIE calibrations!" << newline; } warn << newline; } // Handle fatal errors void PCAProc::Die(std::string message, int flag_err, float info1, float info2) { warn << BRED << " PCA PROBLEM: " << CLR << newline; warn << message << newline; if (flag_err == 1) { warn << "The packing of the data failed" << newline; } if (flag_err == 2) { warn << "TELLIE PCA currently not yet supported" << newline; } if (flag_err == 3) { warn << "Run " << info1 << " is not a laserball run" << newline; } if (flag_err == 4) { warn << "Run " << info1 << " is not a central laserball run, position is " << info2 << "mm from center" << newline; } if (flag_err == 5) { warn << "Wavelength mismatch: you asked for " << info1 << "nm, and the wavelength is" << info2 << "nm" << newline; } if (flag_err == 6) { warn << "This is not an LED run, but you specified that this is " << "a TELLIE PCA! 
" << newline; } if (flag_err == 7) { warn << "LED bootstrapping failed, oops! " << newline; } Log::Die("Encountered fatal problem and dying..."); } // Return the median and widths of the n values of vector arg. // This function is adapted from ECAProc.cc void PCAProc::MultiCalc(const int type, const std::vector& arg1, const std::vector& arg2) { fMedian = 0.0; fWidth = 0.0; fWidth2 = 0.0; fSig = 0.0; fVar = 0.0; if ((arg1.size() == 0) || (arg2.size() == 0)) { return; } if (arg2.size() != arg1.size()) { return; } if (!((type == 1) || (type == 2))) { return; } int n = arg1.size(); double *array1 = new double[n]; for (int i = 0; i < n; i++) { array1[i] = arg1[i]; } if (type == 2) { // Calculate gradient float qt = 0.0; float tt = 0.0; float t = 0.0; float q = 0.0; float qq = 0.0; for (int ia = 0; ia < n; ia++) { // Keep track of the sum of following quantities qt, tt, t, q, qq qt = qt + (arg1[ia] * arg2[ia]); tt = tt + (arg1[ia] * arg1[ia]); qq = qq + (arg2[ia] * arg2[ia]); q = q + arg2[ia]; t = t + arg1[ia]; } // Average qt, tt, t, q, qq qt = qt / n; qq = qq / n; tt = tt / n; t = t / n; q = q / n; // calculate the gradient, make sure that we are not dividing by 0 here if ((qq != (q * q)) && (n > 1)) { fGrad = (qt - (q * t)) / (qq - (q * q)); fVar = (n / (n - 1)) * ((tt - (t * t)) - 2 * fGrad * (qt - (q * t)) + (fGrad * fGrad) * (qq - (q * q))); } else { fGrad = -9999; fVar = -9999; } } if (n % 2 == 1) { // Odd number, return middle value fMedian = TMath::KOrdStat(n, array1, static_cast(n / 2)); } else { // Even number, return mean of middle 2 values fMedian = 0.5 * (TMath::KOrdStat(n, array1, static_cast(n / 2 - 1)) + TMath::KOrdStat(n, array1, static_cast(n / 2))); } // Return the width of the n values of vector arg // Span around median containing 50% of points fWidth = TMath::KOrdStat(n, array1, static_cast(0.75 * n)) - TMath::KOrdStat(n, array1, static_cast(0.25 * n)); if (fWidth == 0.0) { fWidth = (1.0 / 1.95) * (TMath::KOrdStat(n, array1, static_cast(0.9 * n)) - 
TMath::KOrdStat(n, array1, static_cast(0.1 * n))); } if (fWidth == 0.0) { fWidth = 1.0; } fWidth = static_cast(10 * fWidth * 1.48 / 2.0) / 10.0; if (fWidth < 0.5) fWidth = 0.5; // Return the 2nd width measure of the n values of vector arg // Span around median containing 20% of points fWidth2 = TMath::KOrdStat(n, array1, static_cast(0.7 * n)) - TMath::KOrdStat(n, array1, static_cast(0.4 * n)); // Estimate of 1 sigma width: // 40% of a gaussian's data lies inside 0.52*sigma of the range // plus for some reason estimating sigma from percentiles induces // a small bias...(?) // only makes sense if we have enough data if (n > 10) { fSig = TMath::KOrdStat(n, array1, static_cast(0.7 * n) - 1) - TMath::KOrdStat(n, array1, static_cast(0.3 * n) - 1); fSig = fSig * 1.0055 / (2.0 * 0.52); } else { fSig = -9999; } delete[] array1; array1 = NULL; } } // namespace RAT