#!/usr/bin/env python
"""
usage : double_bang_fit.py -f input_file

Double_bang_fit is a reconstruction program for double cascade events.

options:
  -f  -input_file    : Input file in any format aanet can read. default=/sps/km3net/users/tvaneede/simulations/official_tau_production/2020-10-14_first_try/mc/atm_neutrino/KM3NeT_-00000001_20171212/v5/reco/mcv5.gsg_nutau-CCHEDIS-shower_1e2-1e8GeV.sirene.jte.jchain.1.root
  -o  -output_file   : The output file, aanet format default=auto_pwd
  -d  -detector_file : det or detx. Set this to "auto" to get it from the header of the input file. default=/pbs/throng/km3net/detectors/KM3NeT_-00000001_20171212.detx
  -n                 : process this many events default=100000000
  -v                 : verbose (0,1 or 2) default=0
  -h                 : display this message default=False
  -i                 : start python prompt when done default=False
  -E                 : minimum MC energy default=1e5
  -e                 : elongation steps default=10
  -m                 : use MC hits default=True
  -s                 : use MC start position default=True
  -b                 : blurring of the PDF default=0.0
  -p                 : use first hit PDF default=True
  -X                 : shift starting position default=False
  -A                 : use amplitude information default=False
"""

print("double bang fit starts...")

import ROOT, os, sys, collections, array
from math import *
from copy import copy
from array import array
import numpy as np
import scipy.optimize

try:
    import aa
except ImportError:
    print("Failed to import aa; Make sure AADIR is in your PYTHONPATH!")
    sys.exit()

import rec
from rec import rec2
from datetime import datetime

options = aa.Options( __doc__, sys.argv[1:] )
print(options)

from ROOT import vector, Hit, Det, Evt, Trk, Vec, Pmt, EventFile, Timer, TFile, TGraph, TH1D, TH3D, angle_between, TTree, gROOT
# from ROOT import DigitalShowerPdf, MestShowerPdf, ShowerFit

#-----------------------------------------------------
# Fancy-smancy setting of members
#-----------------------------------------------------
# for T in Trk, DigitalShowerPdf, MestShowerPdf, ShowerFit : T.set = aa.__set

#--------------------------------------------------
# Time slewing
#--------------------------------------------------
aa.include("/pbs/throng/km3net/src/Jpp/master/software/JTrigger/JGetRiseTime.hh")
slew = ROOT.JTRIGGER.JGetRiseTime()

#--------------------------------------------------
# Constants
#--------------------------------------------------
water_index = 1.3800851282
dndl        = 0.0298
c_light     = 299792458*1E-9           # speed of light in m/ns
v_light     = c_light/(water_index)    # light velocity in water

jshowerfile = ["/sps/km3net/users/jseneca/jpp/data/J14p.dat",
               "/sps/km3net/users/jseneca/jpp/data/J13p.dat"]

min_residual = -100
max_residual = 900

if options.m:
    R_bg = 0       # MC hits: no background rate
else:
    R_bg = 7e3

#-----------------------------------------
# input & output file
#-----------------------------------------
if options.o.startswith('auto'):
    # base output file on input file
    outfile = options.f + "." + "doublebang" + ".root"
    if options.o == "auto_pwd":
        outfile = outfile.split("/")[-1]
else:
    outfile = options.o

print(" input file  =", options.f)
print(" output file =", outfile)
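# Note on the naming above (illustrative): with the default "-o auto_pwd" an
# input ".../some_file.root" becomes "some_file.root.doublebang.root" in the
# current working directory, while any -o value not starting with "auto" is
# used verbatim as the output path.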
f = ROOT.EventFile( options.f )
f.set_output( outfile )
f.autosave = False    # manually decide which events to write

#-----------------------------------------------
# detector file
#-----------------------------------------------
if options.d == "auto":
    det = Det( f )
else:
    det = Det( options.d )

det.prnt()

if det.doms.size() == 0:
    print("no detector -> exiting.")
    sys.exit(1)

# For mc-analysis, we need the coord-origin. If it is missing, we
# use the cg of the detector and hope for the best.
if f.header.have_line("coord_origin"):
    coord_origin = f.header.coord_origin()
else:
    coord_origin = det.get_cg()

#-----------------------------------------------
# Add our info to the header and write it.
#-----------------------------------------------
f.header["vertex_fit_detector"] = options.d
f.output_file.WriteObject( f.header, "Head" )
f.output_file.ls()

#-----------------------------------------------
# Helper functions
#-----------------------------------------------
def find_lepton(PDG_number, mc_trks):
    """Return the index of the first mc track with the given PDG code, or None."""
    i = 0
    for track in mc_trks:
        if track.type == PDG_number:
            return i
        i += 1
    return None

def is_contained(pos):
    """Containment cut: the position must be at least 91 m inside the
    cylinder r < 530 m, 0 < z < 677 m."""
    r = sqrt(pos.x**2 + pos.y**2)
    if pos.z <= (677 - 91) and pos.z >= (0 + 91) and r <= (530 - 91):
        return True
    else:
        return False

def max_dist_func( det, hits, pos ):
    """ Return the radius of a sphere that contains almost all hits. """
    rmax, fmax = 800, 2.0
    rmin, fmin = 300, 0.2
    f = ROOT.hit_to_dom_ratio( det, hits, pos, 200 )
    if f == 0:   # no doms in radius
        return rmax
    f = log10(f)
    if f < fmin:
        return rmin
    return rmin + (f - fmin) * (rmax - rmin) / fmax

def visible_E(evt):
    """Sum the visible MC energy of the two cascades: skip neutrinos
    (PDG 12, 14, 16) and the tau (15); tracks with mother_id 2 or 4 are
    counted as the second (decay) shower, everything else as the first."""
    E_shower1 = 0
    E_shower2 = 0
    for t in evt.mc_trks:
        if t.type == 16 or t.type == 14 or t.type == 12 or t.type == 15:
            continue
        if (t.mother_id == 4 or t.mother_id == 2):
            E_shower2 += t.E
        else:
            E_shower1 += t.E
    return E_shower1, E_shower2

#--------------------------------------------------
# setup the ShowerPdf
#--------------------------------------------------
print("load pdf")

# pdf4d = JppShowerPdf( jshowerfile[0],
#                       jshowerfile[1],
#                       options.b,      # blurring of pdf
#                       min_residual,
#                       max_residual,
#                       R_bg,           # background rate
#                       options.v )     # verbosity

L = ROOT.CascadeLikelihood( options.p,     # first hit probability
                            options.b,     # sigma blurring of pdf
                            options.e,     # energy elongation steps
                            R_bg,
                            min_residual,
                            max_residual,
                            0.0,
                            1 )            # verbosity

print("k40: ", L.pdf.get_k40_npe(1) )
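#--------------------------------------------------
# Illustrative sketch (not used by the fit below): the minimizer works on a
# flat parameter vector x = [vx, vy, vz, t0, tau_len, theta, phi, E1, E2].
# The decay cascade is fully determined by the first cascade plus the tau
# flight length; this assumed helper just spells out the relation that the
# fit code builds inline.
#--------------------------------------------------
def second_cascade_from_first(pos1, dir1, t1, tau_len):
    """Position and time of the decay cascade for a given first-cascade
    vertex, common direction and tau flight length (tau assumed to travel
    at ~c; c_light is in m/ns)."""
    pos2 = pos1 + dir1 * tau_len      # decay vertex along the tau direction
    t2   = t1 + tau_len / c_light     # vertex time plus tau time of flight
    return pos2, t2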
#--------------------------------------------------
# Let's go
#--------------------------------------------------
iproc = 0

for evt in f:

    with Timer("event"):

        # minimum mc neutrino energy requirement.
        if options.E > 0 and evt.neutrino() and evt.neutrino().E < options.E:
            continue

        if is_contained(evt.mc_trks[0].pos) != True:
            continue

        tau_index = find_lepton(15, evt.mc_trks)
        nu  = evt.mc_trks[0]
        tau = evt.mc_trks[tau_index]
        tau_decay_pos = tau.pos + tau.dir * tau.len

        if is_contained( tau.pos + tau.dir*tau.len ) != True:
            continue

        # if tau.len < 10: continue

        # count actually processed (not skipped) events
        iproc += 1
        if iproc > options.n:
            print('requested number of events reached')
            break

        # if options.v > 0 :
        print(" ============= event ", evt.id, " =============")
        print("iproc", iproc)

        # E_shower1 = nu.E - tau.E
        # E_shower2 = tau.E     # old

        print("nu.E", nu.E)
        print("tau.E", tau.E)
        print("tau.len", tau.len)
        print("tau.dir", tau.dir)

        E_shower1, E_shower2 = visible_E(evt)
        print("E_shower1", E_shower1)
        print("E_shower2", E_shower2)

        # for t in evt.mc_trks:
        #     print(t.id, t.mother_id, t.type, t.E, t.t, t.len, t.pos, (t.pos - evt.mc_trks[1].pos).len() )

        with Timer("calibration"):
            det.apply(evt)

        start_trk = Trk()
        if options.s:
            start_trk = nu
        else:
            start_trk = best_reco_track(evt, 101)

        with Timer("hit_selection"):
            if options.m:
                hits = evt.mc_hits
                max_dist = max_dist_func( det, hits, start_trk.pos )
                max_dist = 1e10   # effectively no distance cut
            else:
                hits = evt.hits
                for hit in hits:
                    hit.t -= slew( hit.tot )
                max_dist = max_dist_func( det, hits, start_trk.pos )
                max_dist = 1e10   # effectively no distance cut

            if not options.m and options.s:
                offset = evt.mc_t - evt.t.AsDouble()*1e9
                for hit in evt.hits:
                    hit.t -= offset

            vertex_hits = ROOT.select_hits( hits, start_trk, min_residual, max_residual, max_dist, options.p )

        with Timer("vertex fit"):

            pmts = ROOT.vector(Pmt)()
            for i, pmt in det.pmts:
                pmts.push_back( pmt )

            print("vertex hits", len(vertex_hits))
            L.set_event( pmts, vertex_hits )

            tau.dir.normalize()
            theta_start = acos(tau.dir.z)
            phi_start   = atan2( tau.dir.y, tau.dir.x )

            print("dir", tau.dir)
            print("theta", theta_start)
            print("phi", phi_start)

            # starting point
            if options.X:
                x = np.array([start_trk.pos.x + 1.0, start_trk.pos.y + 1.0, start_trk.pos.z + 1.0,
                              start_trk.t + 1.0, tau.len*1.3,
                              theta_start + 3*pi/180, phi_start + 3*pi/180,
                              E_shower1*0.7, E_shower2*1.3])
                # x = np.array([start_trk.pos.x + 1.0, start_trk.pos.y + 1.0, start_trk.pos.z + 1.0, start_trk.t + 1.0, tau.len*1.3, theta_start + 3*pi/180, phi_start + 3*pi/180])
            else:
                x = np.array([start_trk.pos.x, start_trk.pos.y, start_trk.pos.z,
                              start_trk.t, tau.len,
                              theta_start, phi_start,
                              E_shower1, E_shower2])
                # x = np.array([start_trk.pos.x, start_trk.pos.y, start_trk.pos.z, start_trk.t, tau.len, theta_start, phi_start])

            print("starting point", x, "shifted", options.X)

            T1_start = Trk()
            T1_start.dir = Vec().set_angles( x[5], x[6] )
            T1_start.pos = ROOT.Vec(x[0], x[1], x[2])
            T1_start.t   = x[3]
            T1_start.E   = x[7]
            # T1_start.E = E_shower1
            T1_start.len = x[4]
            T1_start.rec_type = 55510

            T2_start = Trk()
            T2_start.dir = Vec().set_angles( x[5], x[6] )
            T2_start.pos = T1_start.pos + T1_start.dir * x[4]
            T2_start.t   = x[3] + x[4]/c_light
            T2_start.E   = x[8]
            # T2_start.E = E_shower2
            T2_start.rec_type = 55520

            # track to be fitted
            test_trk1 = Trk()
            # test_trk1.dir = tau.dir
            # test_trk1.E = E_shower1
            test_trk2 = Trk()
            # test_trk2.dir = tau.dir
            # test_trk2.E = E_shower2

            # with Timer("test"):
            #     print( -L.eval_double( test_trk1, test_trk2 ) )
            # Timer.report()
            # sys.exit()

            # function to be minimized
            def mf( x ):
                with Timer("mx"):
                    # function found in rec/JppShowerPdf
                    test_trk1.pos = ROOT.Vec( x[0], x[1], x[2] )
                    test_trk1.dir = Vec().set_angles( x[5], x[6] )
                    test_trk1.t   = x[3]
                    test_trk1.E   = x[7]

                    test_trk2.pos = test_trk1.pos + test_trk1.dir*x[4]
                    test_trk2.dir = Vec().set_angles( x[5], x[6] )
                    test_trk2.t   = x[3] + x[4]/c_light
                    test_trk2.E   = x[8]

                    # return pdf4d.eval_L_vertex( test_trk, vertex_hits, options.p, False, options.e )
                    return -L.eval_double( test_trk1, test_trk2, options.A )
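            # Note on the minimization below (descriptive only): mf returns the
            # negative of the CascadeLikelihood value, so minimizing it with the
            # derivative-free Nelder-Mead simplex maximizes the likelihood over
            # the 9 free parameters. 'maxfev' caps the number of mf evaluations;
            # the scipy result object provides res.x (best-fit parameters),
            # res.nfev and res.nit, which are stored in the fitinf vectors below.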
            lik_start = -mf(x)

            startTime = datetime.now()
            res = scipy.optimize.minimize( mf, x, method='Nelder-Mead', options={'maxfev': 36000} )
            fitting_time = (datetime.now() - startTime).total_seconds()
            print(res)

            T1 = Trk()
            T1.dir = Vec().set_angles( res.x[5], res.x[6] )
            T1.pos = ROOT.Vec(res.x[0], res.x[1], res.x[2])
            T1.t   = res.x[3]
            T1.E   = res.x[7]
            # T1.E = E_shower1
            T1.len = res.x[4]
            T1.rec_type = 5551
            T1.lik = -mf(res.x)
            T1.fitinf.push_back( 0 )    # start
            T1.fitinf.push_back(len(vertex_hits))
            T1.fitinf.push_back(res.nfev)
            T1.fitinf.push_back(res.nit)
            T1.fitinf.push_back(fitting_time)

            T2 = Trk()
            T2.dir = Vec().set_angles( res.x[5], res.x[6] )
            T2.pos = T1.pos + T1.dir * res.x[4]
            T2.t   = res.x[3] + res.x[4]/c_light
            T2.E   = res.x[8]
            # T2.E = E_shower2
            T2.rec_type = 5552
            T2.lik = -mf(res.x)
            T2.fitinf.push_back( 0 )    # start
            T2.fitinf.push_back(len(vertex_hits))
            T2.fitinf.push_back(res.nfev)
            T2.fitinf.push_back(res.nit)
            T2.fitinf.push_back(fitting_time)

            # print("T1.pos", T1.pos)
            # print("T1.dir", T1.dir)
            # print("T1.t", T1.t)
            # print("T1.E", T1.E)
            # print("T2.pos", T2.pos)
            # print("T2.dir", T2.dir)
            # print("T2.t", T2.t)
            # print("T2.E", T2.E)

            # T = Trk().set( id = 100,
            #                pos = Vec(res.x[0], res.x[1], res.x[2]),
            #                dir = start_trk.dir,
            #                t = res.x[3],
            #                E = start_trk.E,
            #                rec_type = 1010,
            #                comment = "vertex_fit" )

            evt.trks.push_back(T1)
            evt.trks.push_back(T2)
            evt.trks.push_back(T1_start)
            evt.trks.push_back(T2_start)

            # before
            d1 = start_trk.pos - tau.pos
            d1_along = d1.dot( tau.dir )
            d1_perp  = (d1 - d1_along * tau.dir).len()

            d2 = (start_trk.pos + tau.dir*tau.len) - tau_decay_pos
            d2_along = d2.dot( tau.dir )
            d2_perp  = (d2 - d2_along * tau.dir).len()

            print("---before---")
            print("d1_along_start", d1_along)
            print("d1_perp_start", d1_perp)
            print("d2_along_start", d2_along)
            print("d2_perp_start", d2_perp)
            print("len", tau.len)
            print("lik start", lik_start)

            # after
            d1_fit = T1.pos - tau.pos
            d1_fit_along = d1_fit.dot( tau.dir )
            d1_fit_perp  = (d1_fit - d1_fit_along * tau.dir).len()

            d2_fit = T2.pos - tau_decay_pos
            d2_fit_along = d2_fit.dot( tau.dir )
            d2_fit_perp  = (d2_fit - d2_fit_along * tau.dir).len()

            print("----after----")
            print("d1_along", d1_fit_along)
            print("d1_perp", d1_fit_perp)
            print("d2_along", d2_fit_along)
            print("d2_perp", d2_fit_perp)
            print("len", res.x[4])
            print("lik end", L.eval_double( T1, T2, options.A ))
            print("fitted time", res.x[3])
            print("E1fit/E1true", res.x[7]/E_shower1)
            print("E2fit/E2true", res.x[8]/E_shower2)
            print("angle w nu",  ROOT.angle_between(nu.dir,  T1.dir)*180/pi)
            print("angle w tau", ROOT.angle_between(tau.dir, T1.dir)*180/pi)
            print("the fit took", fitting_time)

        f.write()
        evt.trks.clear()

Timer.report()

f.output_file.Write()   # todo: is this really needed?

del f
print("bye")
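# ----------------------------------------------------------------------
# Reading the output back (illustrative sketch, not executed here): the fitted
# cascades are stored per event with rec_type 5551 (first shower) and 5552
# (decay shower), the un-fitted starting points with rec_type 55510 / 55520.
# A separate analysis script could do something like:
#
#   import aa, ROOT
#   fin = ROOT.EventFile("<input>.doublebang.root")   # file written above
#   for evt in fin:
#       fits = [t for t in evt.trks if t.rec_type in (5551, 5552)]
#       # ... inspect t.pos, t.dir, t.E, t.len, t.lik, t.fitinf ...
# ----------------------------------------------------------------------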