#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""CERN@school cluster-processing script.

Reads every ``*.txt`` frame file (tab-separated ``x y C`` pixel rows) from
the input directory, runs the cluster ("kluster") finder on each frame,
writes an image for every non-gamma cluster, and dumps the collected
cluster properties to ``klusters.json`` in the output directory.
"""

# Import the code needed to manage files.
import os, glob

#...for parsing the arguments.
import argparse

#...for the logging.
import logging as lg

# Import the JSON library.
import json

#...for processing the datasets.
from cernatschool.dataset import Dataset

#...for kluster finding.
from cernatschool.kluster import KlusterFinder

from helpers import getKlusterPropertiesJson

from visualisation import makeKlusterImage


if __name__ == "__main__":

    print("*")
    print("*==================================*")
    print("* CERN@school - cluster processing *")
    print("*==================================*")

    # Get the datafile path from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument("inputPath", help="Path to the input dataset.")
    parser.add_argument("outputPath", help="The path for the output files.")
    parser.add_argument("-v", "--verbose", help="Increase output verbosity", action="store_true")
    args = parser.parse_args()

    ## The path to the input frame files.
    datapath = args.inputPath

    ## The output path.
    outputpath = args.outputPath

    # Set the logging level: DEBUG with --verbose, INFO otherwise.
    level = lg.DEBUG if args.verbose else lg.INFO

    # Configure the logging.
    lg.basicConfig(filename='log_process-frames.txt', filemode='w', level=level)

    print("*")
    print("* Input path : '%s'" % (datapath))
    print("*")

    ## The number of klusters found in all.
    n_klusters = 0

    ## The number of non-gammas.
    n_non_gammas = 0

    ## The cluster property JSON entries (non-gamma clusters only).
    klusters = []

    # Loop over the frames found in the input path.
    lg.info("* Found datafiles:")
    for frame in sorted(glob.glob(os.path.join(datapath, "*.txt"))):
        lg.info("*--> '%s'" % (frame))

        # Read the contents of the frame (assuming x y C format).
        # Use a context manager so the file is closed even on error.
        with open(frame, "r") as f:
            lines = f.readlines()

        ## The pixel dictionary for the frame, keyed by 256*y + x.
        pixels = {}

        # Loop over the lines and extract the pixel information.
        for l in lines:
            vals = l.strip().split("\t")
            # 256 x 256 sensor: flatten (x, y) into a single integer key.
            pixels[256 * int(vals[1]) + int(vals[0])] = int(vals[2])

        # Find the clusters.
        kf = KlusterFinder(pixels, 256, 256, False)

        # Count the clusters.
        n_klusters += kf.getNumberOfKlusters()
        n_non_gammas += kf.getNumberOfNonGammas()

        ## The base of the cluster ID (from the frame name).
        frameid = os.path.basename(frame).split(".")[0]
        lg.info("* | Frame ID = '%s'." % (frameid))

        # Loop over the clusters found in this frame.
        for i, kl in enumerate(kf.getListOfKlusters()):

            ## The kluster ID.
            klusterid = frameid + "_k%05d" % (i)
            lg.info("* |--> Kluster ID: '%s'." % (klusterid))

            # Only keep (and visualise) the non-gamma clusters.
            if not kl.isGamma():
                klusters.append(getKlusterPropertiesJson(klusterid, kl))
                makeKlusterImage(klusterid, kl, outputpath)

        lg.info("* |")

    lg.info("*")
    lg.info("* Number of klusters found : %d" % (n_klusters))
    lg.info("* Number of non-gammas found : %d" % (n_non_gammas))
    lg.info("*")

    # Write out the collected cluster properties as JSON.
    with open(os.path.join(outputpath, "klusters.json"), "w") as jf:
        json.dump(klusters, jf)