import numpy as N
import pickle

from neurospy import ndview
from neurospy.pyhrf.tools.io import readImageWithAims
from soma import aims
from datamind.ml.func import glm
from datamind.ml import dimred
from datamind.data import fMRI_cog_simul

Writer_Volume_FLOAT = aims.aimssip.Writer_Volume_FLOAT


#############################################################################################################
# COMMON FUNCTIONS
############################################################################################################

def anova(X, Y, score_type, **kwargs):
    '''
    Return the scores of the ANOVA test.

    INPUT:
    X : array of shape (n, m) where n is the number of observations and m is the number of features
    Y : array of shape (n, p) where n is the number of observations and p is the number of regressors
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores of the ANOVA test
    '''
    lr = dimred.UnivRanking(func=glm.SimpleLinRegFstat(), **kwargs)
    lr.fit(X, Y)
    if score_type == 'p':
        data = lr.getPvalues()
    elif score_type == 'z':
        data = lr.getZvalues()
    else:
        data = lr.getStats()
    if len(kwargs) != 0:
        # zero out the features that did not pass the requested threshold
        ind = lr.getSelectedFeatures()
        data[N.setdiff1d(range(data.shape[0]), N.array(ind))] = 0.0
    # Alternative implementation, kept for reference:
    '''
    lr = glm.SimpleLinRegFstat()
    data = lr.eval(X, Y)
    if score_type == 'p':
        data = lr.pvalue()
    elif score_type == 'z':
        data = lr.zvalue()
    data = N.reshape(data, (data.shape[1]))
    '''
    return data


def student_test(X, Y, score_type, **kwargs):
    '''
    Return the scores of the Student test.

    INPUT:
    X : array of shape (n, m) where n is the number of observations and m is the number of features
    Y : array of shape (n, p) where n is the number of observations and p is the number of regressors
    score_type : 'p' for p-value scores, 'z' for z-value scores, t scores by default
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores of the Student test
    '''
    py = N.linalg.pinv(Y)
    beta = N.transpose(N.dot(py, X))
    error = X - N.dot(Y, N.transpose(beta))
    dof = float(N.shape(Y)[0] - 1)
    s2 = N.sum(error ** 2, 0) / dof
    s2 = N.sqrt(s2)
    nvB = N.sqrt(N.diag(N.linalg.inv(N.dot(N.transpose(Y), Y))))
    data = (N.transpose(beta) / s2) / nvB
    import scipy.stats as SS
    if score_type == 'p':
        data = SS.t.cdf(data, dof)
    elif score_type == 'z':
        p = SS.t.cdf(data, dof)
        data = N.minimum(10., N.maximum(-10., (SS.norm.ppf(p))))
    if len(kwargs) != 0:
        # keep only the scores above the requested threshold
        i = N.nonzero(data >= kwargs[score_type])[0]
        data = data[i]
    return data[0, :]


def load_data(file_name):
    '''
    Return the data registered in the file containing the contrast maps and the chosen target.

    INPUT:
    file_name : name of the file where the data are registered

    OUTPUT:
    db_data : data registered in the file containing the contrast maps and the chosen target
    '''
    fic = open(file_name, 'rb')
    db_data = pickle.load(fic)
    fic.close()
    return db_data


def read_image(db_data, image_name, corrected_mask=None):
    '''
    Show the scores map (if corrected_mask is given, the scores of interest get the value 2).

    INPUT:
    db_data : data
    image_name : path of the image to read
    corrected_mask : threshold for the scores of interest (sets the scores of interest to 2)
    '''
    flatmask = db_data['is_kept_voxels_1D']
    image = readImageWithAims(image_name)[0]
    maskNR = N.reshape(flatmask, image.shape).astype(int)
    if corrected_mask is not None:
        maskNR[N.bitwise_and(image > 0, image <= corrected_mask)] = 2
    ndview.view(image, mask=maskNR, maskName='mask')
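
# A minimal, illustrative sketch of calling the tests above directly (not executed
# here; the array shapes follow the docstrings, and the data is purely synthetic):
'''
X_demo = N.random.randn(20, 500)      # 20 subjects x 500 voxels (fake contrast maps)
Y_demo = N.random.randn(20, 1)        # one behavioural regressor
t_scores = student_test(X_demo, Y_demo, 't')   # raw t scores (default branch)
z_scores = student_test(X_demo, Y_demo, 'z')   # z-transformed scores
# anova() relies on the datamind package; with it installed the call is analogous:
# f_scores = anova(X_demo, Y_demo, 'f', pval=1.e-3)
'''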
def stat_map(db_data, score_type, target_threshold=None, **kwargs):
    '''
    Return the scores values obtained with an ANOVA and the inter-subjects mask
    (activated voxels in over half of the subjects).

    INPUT:
    db_data : data
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores values obtained with an ANOVA
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    '''
    X = db_data['db_X']
    Y = db_data['db_Y']
    if target_threshold:
        # binarise the target around target_threshold
        Y[Y > target_threshold] = N.nan
        Y[Y <= target_threshold] = 0.0
        Y[N.isnan(Y)] = 1.0
    flatmask = db_data['is_kept_voxels_1D']
    #data = anova(X, Y, score_type, **kwargs)
    data = student_test(X, Y, score_type, **kwargs)
    return data, flatmask


def stat_map_RFX(db_data, score_type, **kwargs):
    '''
    Return the scores values obtained with a Student test for the mean effect and the
    inter-subjects mask (activated voxels in over half of the subjects).

    INPUT:
    db_data : data
    score_type : 'p' for p-value scores, 'z' for z-value scores, 't' for t scores by default
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores values obtained with a Student test
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    '''
    X = db_data['db_X']
    Y = N.ones((X.shape[0], 1))
    flatmask = db_data['is_kept_voxels_1D']
    data = student_test(X, Y, score_type, **kwargs)
    return data, flatmask


def read_data(db_data, target_threshold=None):
    '''
    Return the contrast maps, the target data and the inter-subjects mask
    (activated voxels in over half of the subjects).

    INPUT:
    db_data : data
    target_threshold : if given, the target is binarised around this value

    OUTPUT:
    X : contrast maps, ie functional data
    Y : target data
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    '''
    X = db_data['db_X']
    Y = db_data['db_Y']
    flatmask = db_data['is_kept_voxels_1D']
    if target_threshold:
        Y[Y > target_threshold] = N.nan
        Y[Y <= target_threshold] = 0.0
        Y[N.isnan(Y)] = 1.0
    return X, Y, flatmask


def sum_anova_map(X, Y, score_type, nbFolds=1, **kwargs):
    '''
    Create a brain image where the features (ie voxels) values are the number of times
    the features are selected (ie their ANOVA scores pass the requested threshold).

    INPUT:
    X : array of shape (n, m) where n is the number of subjects and m the number of features (functional data)
    Y : array of shape (n, 1) where n is the number of subjects (target)
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    nbFolds : number of folds of the cross-validation
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : features (ie voxels) values representing the number of times the features are selected
    '''
    data = N.zeros(X.shape[1])
    import datamind.ml.resampling as resample
    kfolds = resample.KFolds(X.shape[0], nbFolds)
    for train_i, test_i in kfolds:
        Xi = X[train_i]
        Yi = Y[train_i]
        datai = anova(Xi, Yi, score_type, **kwargs)
        datai[N.nonzero(datai != 0)[0]] = 1.0
        data += datai
    return data
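
# Sketch of the expected db_data layout and of stat_map / stat_map_RFX on it (not
# executed; the dictionary keys are the ones read above, the values are synthetic):
'''
db_demo = {'db_X': N.random.randn(20, 500),       # contrast maps (subjects x voxels)
           'db_Y': N.random.randn(20, 1),         # target (e.g. behavioural score)
           'is_kept_voxels_1D': N.ones(500)}      # inter-subjects mask, flattened
z_scores, flatmask = stat_map(db_demo, 'z')                      # regression against the target
z_scores_bin, _ = stat_map(db_demo, 'z', target_threshold=0.7)   # binarised target
t_mean, _ = stat_map_RFX(db_demo, 't')                           # one-sample (mean effect) test
'''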
def load_old_image(contrast, flatmask):
    '''
    Load a template image; it should be sampled like the images that were used to create the parcels.

    INPUT:
    contrast : number of the functional contrast
    flatmask : flattened mask of the voxels to keep (all voxels if None)

    OUTPUT:
    image_data : data image
    array_img : flattened array view of the image, reset to zero
    iz : indices of the voxels to fill
    '''
    R = aims.Reader()
    old_image = R.read("/neurospin/lnao/Pmad/Localizer_Database/Localizer/bru2894/functional/fMRI/spm_analysis/con_%04d.img" % contrast)
    image_data = old_image  # we take an old image to create a new image in aims
    ref_dim = (image_data.getSizeX(), image_data.getSizeY(), image_data.getSizeZ())
    # Warning: the aims array data is ordered (dimz, dimy, dimx)
    if flatmask is not None:
        iz = N.nonzero(flatmask > 0)
    else:
        iz = N.nonzero(N.ones(ref_dim[0] * ref_dim[1] * ref_dim[2]))
    iz = N.reshape(iz, N.size(iz))
    array_img = image_data.arraydata()
    array_img = N.reshape(array_img, (ref_dim[0] * ref_dim[1] * ref_dim[2]))
    for i in range(ref_dim[0] * ref_dim[1] * ref_dim[2]):
        array_img[i] = 0.0
    return image_data, array_img, iz


def write_image(image_name, image_data):
    '''
    Write an image.

    INPUT:
    image_name : name (and path) of the image
    image_data : data image
    '''
    image_write = image_name
    #W = Writer_Volume_FLOAT(image_write)
    print "array_img:", N.nonzero(image_data)[0]
    print "image_name:", image_name
    W = aims.Writer()
    W.write(image_data, image_write)


#############################################################################################################
# VOXELS FUNCTIONS
############################################################################################################

def load_data_voxels(target, contrast):
    '''
    Return the data registered in the file containing the contrast maps and the chosen target.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast

    OUTPUT:
    db_data : data registered in the file containing the contrast maps and the chosen target
    '''
    return load_data(Swd + 'voxels/%04d/db_%s_%d.pic' % (contrast, target, contrast))


def read_image_voxels(target, contrast, score_type, image_name=None, corrected_mask=None):
    '''
    Show the scores map (if corrected_mask is given, the scores of interest get the value 2).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : path of the image to read
    corrected_mask : threshold for the scores of interest (sets the scores of interest to 2)
    '''
    db_data = load_data_voxels(target, contrast)
    if image_name is None:
        image_name = Swd + "%cmap_%s_%04d.img" % (score_type, target, contrast)
    read_image(db_data, image_name, corrected_mask)


def stat_map_voxels(target, contrast, score_type, target_threshold, **kwargs):
    '''
    Return the scores values obtained with an ANOVA and the inter-subjects mask
    (activated voxels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores values obtained with an ANOVA
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    '''
    db_data = load_data_voxels(target, contrast)
    return stat_map(db_data, score_type, target_threshold, **kwargs)
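
# Sketch of the low-level image round-trip used by the write_image_* helpers below
# (not executed; the template volume read by load_old_image only exists on the lab
# filesystem, and the scores here are synthetic):
'''
image_data, array_img, iz = load_old_image(29, None)   # blank volume shaped like contrast 29
scores = N.random.randn(iz.size)                        # fake score per voxel
j = 0
for i in iz:
    array_img[i] = scores[j]
    j = j + 1
write_image('/tmp/demo_map.img', image_data)
'''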
def stat_map_RFX_voxels(target, contrast, score_type, **kwargs):
    '''
    Return the scores values obtained with a Student test for the mean effect and the
    inter-subjects mask (activated voxels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 't' for t scores by default
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores values obtained with a Student test
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    '''
    db_data = load_data_voxels(target, contrast)
    return stat_map_RFX(db_data, score_type, **kwargs)


def read_data_voxels(target, contrast, target_threshold):
    '''
    Return the contrast maps, the target data and the inter-subjects mask
    (activated voxels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    target_threshold : if given, the target is binarised around this value

    OUTPUT:
    X : contrast maps, ie functional data
    Y : target data
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    '''
    db_data = load_data_voxels(target, contrast)
    return read_data(db_data, target_threshold)


def anova_map_voxels(target, contrast, score_type, image_name=None, target_threshold=None, **kwargs):
    '''
    Create the scores map, ie the brain image of the scores values obtained with an ANOVA.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : name of the image to create
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    data, flatmask = stat_map_voxels(target, contrast, score_type, target_threshold, **kwargs)
    if image_name is None:
        image_name = Swd + "%cmap_%s_%04d.img" % (score_type, target, contrast)
    write_image_voxels(image_name, contrast, flatmask, data)


def map_RFX_voxels(target, contrast, score_type, image_name=None, **kwargs):
    '''
    Create the scores map for the mean effect, ie the brain image of the scores values
    obtained with a Student test.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 't' for t scores by default
    image_name : name of the image to create
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    data, flatmask = stat_map_RFX_voxels(target, contrast, score_type, **kwargs)
    if image_name is None:
        image_name = Swd + "%cmap_%s_%04d.img" % (score_type, target, contrast)
    write_image_voxels(image_name, contrast, flatmask, data)


def sum_anova_map_voxels(target, contrast, score_type, image_name=None, nbFolds=1, target_threshold=None, **kwargs):
    '''
    Create a brain image where the voxels values are the number of times the voxels are
    selected (ie their ANOVA scores pass the requested threshold).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : name of the image to create
    nbFolds : number of folds of the cross-validation
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    X, Y, flatmask = read_data_voxels(target, contrast, target_threshold)
    data = sum_anova_map(X, Y, score_type, nbFolds, **kwargs)
    if image_name is None:
        image_name = Swd + "sum_%cmap_%s_%04d.img" % (score_type, target, contrast)
    write_image_voxels(image_name, contrast, flatmask, data)
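
# Sketch of the voxel-level pipeline defined above (not executed; it assumes the
# pickled databases under Swd + 'voxels/' are available and writes under Swd):
'''
# z map of the 'calcul' target for contrast 29
anova_map_voxels('calcul', 29, 'z')
# stability map: how often each voxel passes pval <= 1.e-3 across 10 folds
sum_anova_map_voxels('calcul', 29, 'p', nbFolds = 10, pval = 1.e-3)
'''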
def write_image_voxels(image_name, contrast, flatmask, data):
    '''
    Create the scores map, ie the brain image of the scores values obtained with an ANOVA.

    INPUT:
    image_name : path of the image to create
    contrast : number of the functional contrast
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    data : scores values obtained with an ANOVA
    '''
    image_data, array_img, iz = load_old_image(contrast, flatmask)
    # scatter the scores back into the in-mask voxels of the template volume
    j = 0
    for i in iz:
        array_img[i] = data[j]
        j = j + 1
    print "array_img:", type(array_img)
    print "array_img:", array_img[N.nonzero(array_img)[0]]
    write_image(image_name, image_data)


#############################################################################################################
# VOXELS SMOOTHED 10mm FUNCTIONS
############################################################################################################

def load_data_voxels_smooth(target, contrast):
    '''
    Return the data registered in the file containing the contrast maps and the chosen target.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast

    OUTPUT:
    db_data : data registered in the file containing the contrast maps and the chosen target
    '''
    return load_data(Swd + 'voxels/%04d/db_smooth_%s_%d.pic' % (contrast, target, contrast))


def read_image_voxels_smooth(target, contrast, score_type, image_name=None, corrected_mask=None):
    '''
    Show the scores map (if corrected_mask is given, the scores of interest get the value 2).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : path of the image to read
    corrected_mask : threshold for the scores of interest (sets the scores of interest to 2)
    '''
    db_data = load_data_voxels_smooth(target, contrast)
    if image_name is None:
        image_name = Swd + "%cmap_smooth_%s_%04d.img" % (score_type, target, contrast)
    read_image(db_data, image_name, corrected_mask)


def stat_map_voxels_smooth(target, contrast, score_type, target_threshold, **kwargs):
    '''
    Return the scores values obtained with an ANOVA and the inter-subjects mask
    (activated smoothed voxels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores values obtained with an ANOVA
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    '''
    db_data = load_data_voxels_smooth(target, contrast)
    return stat_map(db_data, score_type, target_threshold, **kwargs)


def stat_map_RFX_voxels_smooth(target, contrast, score_type, **kwargs):
    '''
    Return the scores values obtained with a Student test for the mean effect and the
    inter-subjects mask (activated smoothed voxels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 't' for t scores by default
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores values obtained with a Student test
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    '''
    db_data = load_data_voxels_smooth(target, contrast)
    return stat_map_RFX(db_data, score_type, **kwargs)
def read_data_voxels_smooth(target, contrast, target_threshold):
    '''
    Return the contrast maps, the target data and the inter-subjects mask
    (activated smoothed voxels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    target_threshold : if given, the target is binarised around this value

    OUTPUT:
    X : contrast maps, ie functional data
    Y : target data
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    '''
    db_data = load_data_voxels_smooth(target, contrast)
    return read_data(db_data, target_threshold)


def anova_map_voxels_smooth(target, contrast, score_type, image_name=None, target_threshold=None, **kwargs):
    '''
    Create the scores map, ie the brain image of the scores values obtained with an ANOVA.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : name of the image to create
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    data, flatmask = stat_map_voxels_smooth(target, contrast, score_type, target_threshold, **kwargs)
    if image_name is None:
        image_name = Swd + "%cmap_smooth_%s_%04d.img" % (score_type, target, contrast)
    write_image_voxels_smooth(image_name, contrast, flatmask, data)


def map_RFX_voxels_smooth(target, contrast, score_type, image_name=None, **kwargs):
    '''
    Create the scores map for the mean effect, ie the brain image of the scores values
    obtained with a Student test.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 't' for t scores by default
    image_name : name of the image to create
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    data, flatmask = stat_map_RFX_voxels_smooth(target, contrast, score_type, **kwargs)
    if image_name is None:
        image_name = Swd + "%cmap_smooth_%s_%04d.img" % (score_type, target, contrast)
    write_image_voxels_smooth(image_name, contrast, flatmask, data)


def sum_anova_map_voxels_smooth(target, contrast, score_type, image_name=None, nbFolds=1, target_threshold=None, **kwargs):
    '''
    Create a brain image where the smoothed voxels values are the number of times the
    voxels are selected (ie their ANOVA scores pass the requested threshold).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : name of the image to create
    nbFolds : number of folds of the cross-validation
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    X, Y, flatmask = read_data_voxels_smooth(target, contrast, target_threshold)
    data = sum_anova_map(X, Y, score_type, nbFolds, **kwargs)
    if image_name is None:
        image_name = Swd + "sum_%cmap_smooth_%s_%04d.img" % (score_type, target, contrast)
    write_image_voxels_smooth(image_name, contrast, flatmask, data)
def load_old_image_smooth(contrast, flatmask):
    '''
    Load a smoothed template image; it should be sampled like the images that were used
    to create the parcels.

    INPUT:
    contrast : number of the functional contrast
    flatmask : flattened mask of the voxels to keep (all voxels if None)

    OUTPUT:
    image_data : data image
    array_img : flattened array view of the image, reset to zero
    iz : indices of the voxels to fill
    '''
    R = aims.Reader()
    old_image = R.read("/neurospin/lnao/Pmad/Localizer_Database/Localizer/bru2894/functional/fMRI/spm_analysis/scon_%04d.img" % contrast)
    image_data = old_image  # we take an old image to create a new image in aims
    ref_dim = (image_data.getSizeX(), image_data.getSizeY(), image_data.getSizeZ())
    # Warning: the aims array data is ordered (dimz, dimy, dimx)
    if flatmask is not None:
        iz = N.nonzero(flatmask > 0)
    else:
        iz = N.nonzero(N.ones(ref_dim[0] * ref_dim[1] * ref_dim[2]))
    iz = N.reshape(iz, N.size(iz))
    array_img = image_data.arraydata()
    array_img = N.reshape(array_img, (ref_dim[0] * ref_dim[1] * ref_dim[2]))
    for i in range(ref_dim[0] * ref_dim[1] * ref_dim[2]):
        array_img[i] = 0.0
    return image_data, array_img, iz


def write_image_voxels_smooth(image_name, contrast, flatmask, data):
    '''
    Create the scores map, ie the brain image of the scores values obtained with an ANOVA.

    INPUT:
    image_name : path of the image to create
    contrast : number of the functional contrast
    flatmask : inter-subjects mask (activated voxels in over half of the subjects)
    data : scores values obtained with an ANOVA
    '''
    image_data, array_img, iz = load_old_image_smooth(contrast, flatmask)
    j = 0
    for i in iz:
        array_img[i] = data[j]
        j = j + 1
    write_image(image_name, image_data)


#############################################################################################################
# PARCELS FUNCTIONS (Trivial)
############################################################################################################

def load_data_parcels(target, contrast):
    '''
    Return the data registered in the file containing the contrast maps and the chosen target.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast

    OUTPUT:
    db_data : data registered in the file containing the contrast maps and the chosen target
    '''
    return load_data(Swd + 'parcels/%04d/db_parcels_%s_%d.pic' % (contrast, target, contrast))


def read_image_parcels(target, contrast, score_type, image_name=None, corrected_mask=None):
    '''
    Show the scores map (if corrected_mask is given, the scores of interest get the value 2).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : path of the image to read
    corrected_mask : threshold for the scores of interest (sets the scores of interest to 2)
    '''
    db_data = load_data_parcels(target, contrast)
    if image_name is None:
        image_name = Swd + "%cmap_parcels_%s_%04d.img" % (score_type, target, contrast)
    read_image(db_data, image_name, corrected_mask)


def stat_map_parcels(target, contrast, score_type, target_threshold=None, **kwargs):
    '''
    Return the scores values obtained with an ANOVA and the inter-subjects mask
    (activated parcels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores values obtained with an ANOVA
    flatmask : inter-subjects mask (activated parcels in over half of the subjects)
    '''
    db_data = load_data_parcels(target, contrast)
    return stat_map(db_data, score_type, target_threshold, **kwargs)
def stat_map_RFX_parcels(target, contrast, score_type, **kwargs):
    '''
    Return the scores values obtained with a Student test for the mean effect and the
    inter-subjects mask (activated parcels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 't' for t scores by default
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores values obtained with a Student test
    flatmask : inter-subjects mask (activated parcels in over half of the subjects)
    '''
    db_data = load_data_parcels(target, contrast)
    return stat_map_RFX(db_data, score_type, **kwargs)


def read_data_parcels(target, contrast, target_threshold=None):
    '''
    Return the contrast maps, the target data and the inter-subjects mask
    (activated parcels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    target_threshold : if given, the target is binarised around this value

    OUTPUT:
    X : contrast maps, ie functional data
    Y : target data
    flatmask : inter-subjects mask (activated parcels in over half of the subjects)
    '''
    db_data = load_data_parcels(target, contrast)
    return read_data(db_data, target_threshold)


def anova_map_parcels(target, contrast, score_type, image_name=None, target_threshold=None, **kwargs):
    '''
    Create the scores map, ie the brain image of the scores values obtained with an ANOVA.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : name of the image to create
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    data, flatmask = stat_map_parcels(target, contrast, score_type, target_threshold, **kwargs)
    if image_name is None:
        image_name = Swd + "%cmap_parcels_%s_%04d.img" % (score_type, target, contrast)
    write_image_parcels(image_name, contrast, flatmask, data)


def map_RFX_parcels(target, contrast, score_type, image_name=None, **kwargs):
    '''
    Create the scores map for the mean effect, ie the brain image of the scores values
    obtained with a Student test.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 't' for t scores by default
    image_name : name of the image to create
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    data, flatmask = stat_map_RFX_parcels(target, contrast, score_type, **kwargs)
    if image_name is None:
        image_name = Swd + "%cmap_parcels_%s_%04d.img" % (score_type, target, contrast)
    write_image_parcels(image_name, contrast, flatmask, data)


def sum_anova_map_parcels(target, contrast, score_type, image_name=None, nbFolds=1, target_threshold=None, **kwargs):
    '''
    Create a brain image where the parcels values are the number of times the parcels are
    selected (ie their ANOVA scores pass the requested threshold).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : name of the image to create
    nbFolds : number of folds of the cross-validation
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    X, Y, flatmask = read_data_parcels(target, contrast, target_threshold)
    data = sum_anova_map(X, Y, score_type, nbFolds, **kwargs)
    if image_name is None:
        image_name = Swd + "sum_%cmap_parcels_%s_%04d.img" % (score_type, target, contrast)
    write_image_parcels(image_name, contrast, flatmask, data)
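
# Sketch of the parcel-level counterparts (not executed; besides the pickled parcel
# databases it assumes the Parcel.pic description used by write_image_parcels below):
'''
# F map over parcels for the 'calcul' target, contrast 29
anova_map_parcels('calcul', 29, 'F')
# same data, mean effect across subjects (RFX)
map_RFX_parcels('calcul', 29, 'z')
'''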
def write_image_parcels(image_name, contrast, flatmask, data):
    '''
    Create the scores map, ie the brain image of the scores values obtained with an ANOVA.

    INPUT:
    image_name : path of the image to create
    contrast : number of the functional contrast
    flatmask : inter-subjects mask (activated parcels in over half of the subjects)
    data : scores values obtained with an ANOVA (one value per parcel)
    '''
    import cPickle
    import os.path
    picname = os.path.join(Swd, "Parcel.pic")
    Pa = cPickle.load(open(picname))
    image_data, array_img, iz = load_old_image(contrast, flatmask)
    ref_dim = (image_data.getSizeX(), image_data.getSizeY(), image_data.getSizeZ())
    # rebuild a voxel mask from the parcel description
    flatmask = image_data.arraydata()[0]
    flatmask[:, :, :] = 0
    flatmask[Pa.ijk[:, 0], Pa.ijk[:, 1], Pa.ijk[:, 2]] = 1
    flatmask = N.reshape(flatmask, (ref_dim[0] * ref_dim[1] * ref_dim[2]))
    # Warning: the aims array data is ordered (dimz, dimy, dimx)
    iz = N.nonzero(flatmask)
    iz = N.reshape(iz, N.size(iz))
    tlabs = Pa.group_labels
    array_img = image_data.arraydata()
    array_img = N.reshape(array_img, (ref_dim[0] * ref_dim[1] * ref_dim[2]))
    for i in range(ref_dim[0] * ref_dim[1] * ref_dim[2]):
        array_img[i] = 0.0
    # each voxel receives the score of the parcel it belongs to
    j = 0
    for i in iz:
        array_img[i] = data[tlabs[j]]
        j = j + 1
    write_image(image_name, image_data)


#############################################################################################################
# PARCELS FUNCTIONS (DEV)
############################################################################################################

def load_data_parcels_dev(target, contrast):
    '''
    Return the data registered in the file containing the contrast maps and the chosen target.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast

    OUTPUT:
    db_data : data registered in the file containing the contrast maps and the chosen target
    '''
    return load_data(Swd + 'parcels/%04d/db_parcels_dev_%s_%d.pic' % (contrast, target, contrast))


def read_image_parcels_dev(target, contrast, score_type, image_name=None, corrected_mask=None):
    '''
    Show the scores map (if corrected_mask is given, the scores of interest get the value 2).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : path of the image to read
    corrected_mask : threshold for the scores of interest (sets the scores of interest to 2)
    '''
    db_data = load_data_parcels_dev(target, contrast)
    if image_name is None:
        image_name = Swd + "%cmap_parcels_dev_%s_%04d.img" % (score_type, target, contrast)
    read_image(db_data, image_name, corrected_mask)


def stat_map_parcels_dev(target, contrast, score_type, target_threshold=None, **kwargs):
    '''
    Return the scores values obtained with an ANOVA and the inter-subjects mask
    (activated parcels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores values obtained with an ANOVA
    flatmask : inter-subjects mask (activated parcels in over half of the subjects)
    '''
    db_data = load_data_parcels_dev(target, contrast)
    return stat_map(db_data, score_type, target_threshold, **kwargs)


def stat_map_RFX_parcels_dev(target, contrast, score_type, **kwargs):
    '''
    Return the scores values obtained with a Student test for the mean effect and the
    inter-subjects mask (activated parcels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 't' for t scores by default
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)

    OUTPUT:
    data : scores values obtained with a Student test
    flatmask : inter-subjects mask (activated parcels in over half of the subjects)
    '''
    db_data = load_data_parcels_dev(target, contrast)
    return stat_map_RFX(db_data, score_type, **kwargs)
def read_data_parcels_dev(target, contrast, target_threshold=None):
    '''
    Return the contrast maps, the target data and the inter-subjects mask
    (activated parcels in over half of the subjects).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    target_threshold : if given, the target is binarised around this value

    OUTPUT:
    X : contrast maps, ie functional data
    Y : target data
    flatmask : inter-subjects mask (activated parcels in over half of the subjects)
    '''
    db_data = load_data_parcels_dev(target, contrast)
    return read_data(db_data, target_threshold)


def anova_map_parcels_dev(target, contrast, score_type, image_name=None, target_threshold=None, **kwargs):
    '''
    Create the scores map, ie the brain image of the scores values obtained with an ANOVA.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : name of the image to create
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    data, flatmask = stat_map_parcels_dev(target, contrast, score_type, target_threshold, **kwargs)
    if image_name is None:
        image_name = Swd + "%cmap_parcels_dev_%s_%04d.img" % (score_type, target, contrast)
    write_image_parcels_dev(image_name, contrast, flatmask, data)


def map_RFX_parcels_dev(target, contrast, score_type, image_name=None, **kwargs):
    '''
    Create the scores map for the mean effect, ie the brain image of the scores values
    obtained with a Student test.

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 't' for t scores by default
    image_name : name of the image to create
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    data, flatmask = stat_map_RFX_parcels_dev(target, contrast, score_type, **kwargs)
    if image_name is None:
        image_name = Swd + "%cmap_parcels_dev_%s_%04d.img" % (score_type, target, contrast)
    write_image_parcels_dev(image_name, contrast, flatmask, data)


def sum_anova_map_parcels_dev(target, contrast, score_type, image_name=None, nbFolds=1, target_threshold=None, **kwargs):
    '''
    Create a brain image where the parcels values are the number of times the parcels are
    selected (ie their ANOVA scores pass the requested threshold).

    INPUT:
    target : name of the target
    contrast : number of the functional contrast
    score_type : 'p' for p-value scores, 'z' for z-value scores, 'f' for Fisher scores by default
    image_name : name of the image to create
    nbFolds : number of folds of the cross-validation
    target_threshold : if given, the target is binarised around this value
    kwargs : contains threshold values: dim (dimension), pval (p-value), zval (z-value)
    '''
    X, Y, flatmask = read_data_parcels_dev(target, contrast, target_threshold)
    data = sum_anova_map(X, Y, score_type, nbFolds, **kwargs)
    if image_name is None:
        image_name = Swd + "sum_%cmap_parcels_dev_%s_%04d.img" % (score_type, target, contrast)
    write_image_parcels_dev(image_name, contrast, flatmask, data)
def write_image_parcels_dev(image_name, contrast, flatmask, data):
    '''
    Create the scores map, ie the brain image of the scores values obtained with an ANOVA.

    INPUT:
    image_name : path of the image to create
    contrast : number of the functional contrast
    flatmask : inter-subjects mask (activated parcels in over half of the subjects)
    data : scores values obtained with an ANOVA (one value per parcel)
    '''
    import cPickle
    import os.path
    picname = os.path.join(Swd, "Parcel_dev.pic")
    Pa = cPickle.load(open(picname))
    image_data, array_img, iz = load_old_image(contrast, flatmask)
    ref_dim = (image_data.getSizeX(), image_data.getSizeY(), image_data.getSizeZ())
    # rebuild a voxel mask from the parcel description
    flatmask = image_data.arraydata()[0]
    flatmask[:, :, :] = 0
    flatmask[Pa.ijk[:, 0], Pa.ijk[:, 1], Pa.ijk[:, 2]] = 1
    flatmask = N.reshape(flatmask, (ref_dim[0] * ref_dim[1] * ref_dim[2]))
    # Warning: the aims array data is ordered (dimz, dimy, dimx)
    iz = N.nonzero(flatmask)
    iz = N.reshape(iz, N.size(iz))
    tlabs = Pa.group_labels
    array_img = image_data.arraydata()
    array_img = N.reshape(array_img, (ref_dim[0] * ref_dim[1] * ref_dim[2]))
    for i in range(ref_dim[0] * ref_dim[1] * ref_dim[2]):
        array_img[i] = 0.0
    # each voxel receives the score of the parcel it belongs to
    j = 0
    for i in iz:
        array_img[i] = data[tlabs[j]]
        j = j + 1
    write_image(image_name, image_data)


def fonberroni_correction(target, contrast, alpha=0.05, threshold=None):
    '''
    Bonferroni correction for multiple comparisons.
    The p-value reported by a single degree-of-freedom test is not corrected for multiple
    comparisons, so a correction must be applied to account for the number of tests
    performed. To obtain the Bonferroni-adjusted p-value, multiply the uncorrected p-value
    by the total number of comparisons; if the result exceeds 1.0, the corrected p-value
    is reported as 1.0.
    (pval <= alpha/nbtests <=> pval*nbtests <= alpha; and if pval*nbtests >= 1, then
    pval*nbtests >= alpha whatever alpha is.)
    '''
    print "Bonferroni correction for target %s and contrast %i and alpha %f " % (target, contrast, alpha)
    # a threshold binarises the target before computing the p map
    if threshold is not None:
        data, flatmask = stat_map_voxels(target, contrast, 'p', threshold)
    else:
        data, flatmask = stat_map_voxels(target, contrast, 'p', None)
    nbvox = data.shape[0]
    # Bonferroni adjustment: multiply by the number of tests and cap at 1.0
    data = N.minimum(data * nbvox, 1.0)
    if threshold is not None:
        image_name = "./images/bonferroni_pmap_%s_%04d_bin.img" % (target, contrast)
    else:
        image_name = "./images/bonferroni_pmap_%s_%04d.img" % (target, contrast)
    write_image_voxels(image_name, contrast, flatmask, data)
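
# Sketch of applying the Bonferroni helper (not executed; the output directory
# ./images/ is assumed to exist):
'''
fonberroni_correction('calcul', 29)                     # corrected p map, raw target
fonberroni_correction('calcul', 29, threshold = 0.7)    # corrected p map, binarised target
'''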
# Working directories used by the loaders above
Swd = '/volatile/Damon/group_analysis/'
Swd_thirion = '/volatile/Cecilia/database/'


################################################################################################
# Examples of use :
################################################################################################

'''
#Create the Anova RFX map of the fMRI parcels data for the contrast 29
map_RFX_parcels('calcul', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/fmap_parcels_RFX_0029.img")

#Create the Anova map of the fMRI voxels data for the contrast 29 related to binary scores obtained in calcul tasks
anova_map_voxels('calcul', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/fmap_calculBin_0029.img", target_threshold = 0.7)

#Create the Anova map of the fMRI voxels data for the contrast 29 related to binary scores obtained in calcul tasks.
#This map contains the number of times features are selected (pval <= 1.e-3) in a ten-fold cross-validation
sum_anova_map_voxels('calcul', 29, 'p', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sum_pmap_calculBin_0029_pval1e-3.img", nbFolds = 10, target_threshold = 0.7, pval = 1.e-3)

#Create the Anova map of the fMRI voxels data for the contrast 29 related to scores obtained in calcul tasks
#This map contains the number of times features are selected (pval <= 1.e-3) in a ten-fold cross-validation
sum_anova_map_voxels('calcul', 29, 'p', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sum_pmap__calcul_0029_pval1e-3.img", nbFolds = 10, pval = 1.e-3)

#Create the Anova map of the fMRI parcels data for the contrast 29 related to scores obtained in calcul tasks
#This map contains the features selected (pval <= 1.e-3)
anova_map_parcels('calcul', 29, 'p', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/fmap_calcul_parcels_0029_pval1e-3.img", pval = 1.e-3)

#Create the Anova map of the fMRI parcels dev data for the contrast 29 related to binary scores obtained in calcul tasks
anova_map_parcels_dev('calcul', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/fmap_parcels_dev_calculBin_0029.img", target_threshold = 0.7)
'''

map_RFX_voxels('calcul', 31, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/zmap_RFX_0031.ima")

#Create the Anova map of the fMRI voxels data for the contrast 29 related to scores obtained in calcul tasks
#This map contains the features selected (pval <= 1.e-2)
'''
anova_map_voxels_smooth('calcul', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calcul/voxels/0029/fmap_smooth_calcul_0029.ima")
anova_map_voxels_smooth('tache3D', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/tache3D/voxels/0029/fmap_smooth_tache3D_0029.ima")
anova_map_voxels_smooth('sexe', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/voxels/0029/fmap_smooth_sex_0029.ima")
anova_map_voxels_smooth('calcul', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calcul/voxels/0029/zmap_smooth_calcul_0029_zval3.ima", zval = 3.09)
anova_map_voxels_smooth('tache3D', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/tache3D/voxels/0029/zmap_smooth_tache3D_0029_zval3.ima", zval = 3.09)
anova_map_voxels_smooth('sexe', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/voxels/0029/zmap_smooth_sex_0029_zval3.ima", zval = 3.09)
'''

'''
anova_map_voxels_smooth('calcul', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calculBin/voxels/0029/fmap_smooth_calculBin_0029.ima", target_threshold = 0.7)
anova_map_voxels_smooth('lateralisation', 21, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lateralisation/voxels/0021/fmap_smooth_lateralisation_0021.ima")
anova_map_voxels_smooth('age', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/age/voxels/0029/fmap_smooth_age_0029.ima")
anova_map_voxels_smooth('lecture_ok', 21, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/voxels/0021/fmap_smooth_lecture_ok_0021.ima")
anova_map_voxels_smooth('lecture_ok', 31, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/voxels/0031/fmap_smooth_lecture_ok_0031.ima")
anova_map_voxels_smooth('calcul', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calculBin/voxels/0029/zmap_smooth_calculBin_0029_zval3.ima", target_threshold = 0.7, zval = 3.09)
anova_map_voxels_smooth('lateralisation', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lateralisation/voxels/0021/zmap_smooth_lateralisation_0021_zval3.ima", zval = 3.09)
anova_map_voxels_smooth('age', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/age/voxels/0029/zmap_smooth_age_0029_zval3.ima", zval = 3.09)
anova_map_voxels_smooth('lecture_ok', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/voxels/0021/zmap_smooth_lecture_ok_0021_zval3.ima", zval = 3.09)
anova_map_voxels_smooth('lecture_ok', 31, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/voxels/0031/zmap_smooth_lecture_ok_0031_zval3.ima", zval = 3.09)
'''

'''
anova_map_voxels('calcul', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calcul/voxels/0029/fmap_calcul_0029.img")
anova_map_parcels('calcul', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calcul/parcels/0029/fmap_parcels_calcul_0029.img")
anova_map_parcels_dev('calcul', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calcul/parcels/0029/fmap_parcels_dev_calcul_0029.img")
anova_map_voxels('lateralisation', 21, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lateralisation/voxels/0021/fmap_lateralisation_0021.img")
anova_map_parcels('lateralisation', 21, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lateralisation/parcels/0021/fmap_parcels_lateralisation_0021.img")
anova_map_parcels_dev('lecture_ok', 21, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lateralisation/parcels/0021/fmap_parcels_dev_lateralisation_0021.img")
anova_map_parcels('lecture_ok', 31, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/parcels/0031/fmap_parcels_lecture_ok_0031.img")
anova_map_parcels_dev('lecture_ok', 31, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/parcels/0031/fmap_parcels_dev_lecture_ok_0031.img")
anova_map_parcels('lecture_ok', 21, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/parcels/0021/fmap_parcels_lecture_ok_0021.img")
anova_map_parcels_dev('lecture_ok', 21, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/parcels/0021/fmap_parcels_dev_lecture_ok_0021.img")
anova_map_voxels('sexe', 21, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/voxels/0021/fmap_sex_0021.img")
anova_map_parcels('sexe', 21, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/parcels/0021/fmap_parcels_sex_0021.img")
anova_map_parcels_dev('sexe', 21, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/parcels/0021/fmap_parcels_dev_sex_0021.img")
anova_map_voxels('sexe', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/voxels/0029/fmap_sex_0029.img")
anova_map_parcels('sexe', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/parcels/0029/fmap_parcels_sex_0029.img")
anova_map_parcels_dev('sexe', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/parcels/0029/fmap_parcels_dev_sex_0029.img")
anova_map_parcels('tache3D', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/tache3D/parcels/0029/fmap_parcels_tache3D_0029.img")
anova_map_parcels_dev('tache3D', 29, 'F', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/tache3D/parcels/0029/fmap_parcels_dev_tache3D_0029.img")
'''

'''
anova_map_voxels('calcul', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calcul/voxels/0029/zmap_calcul_0029_zval3.ima", zval = 3.09)
anova_map_voxels('calcul', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calcul/voxels/0029/zmap_calcul_0029_zval3.ima", zval = 3.09)
anova_map_parcels('calcul', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calcul/parcels/0029/zmap_parcels_calcul_0029_zval3.ima", zval = 3.09)
anova_map_parcels_dev('calcul', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calcul/parcels/0029/zmap_parcels_dev_calcul_0029_zval3.ima", zval = 3.09)
anova_map_voxels('tache3D', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/tache3D/voxels/0029/zmap_tache3D_0029_zval3.ima", zval = 3.09)
anova_map_parcels('tache3D', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/tache3D/parcels/0029/zmap_parcels_tache3D_0029_zval3.ima", zval = 3.09)
anova_map_parcels_dev('tache3D', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/tache3D/parcels/0029/zmap_parcels_dev_tache3D_0029_zval3.ima", zval = 3.09)
anova_map_voxels('sexe', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/voxels/0029/zmap_sex_0029_zval3.ima", zval = 3.09)
anova_map_parcels('sexe', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/parcels/0029/zmap_parcels_sex_0029_zval3.ima", zval = 3.09)
anova_map_parcels_dev('sexe', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/parcels/0029/zmap_parcels_dev_sex_0029_zval3.ima", zval = 3.09)
anova_map_voxels('calcul', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calculBin/voxels/0029/zmap_calculBin_0029_zval3.ima", target_threshold = 0.7, zval = 3.09)
anova_map_parcels('calcul', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calculBin/parcels/0029/zmap_parcels_calculBin_0029_zval3.ima", target_threshold = 0.7, zval = 3.09)
anova_map_parcels_dev('calcul', 29, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/calculBin/parcels/0029/zmap_parcels_dev_calculBin_0029_zval3.ima", target_threshold = 0.7, zval = 3.09)
anova_map_voxels('lecture_ok', 31, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/voxels/0031/zmap_lecture_ok_0031_zval3.ima", zval = 3.09)
anova_map_parcels('lecture_ok', 31, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/parcels/0031/zmap_parcels_lecture_ok_0031_zval3.ima", zval = 3.09)
anova_map_parcels_dev('lecture_ok', 31, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/parcels/0031/zmap_parcels_dev_lecture_ok_0031_zval3.ima", zval = 3.09)
anova_map_voxels('lecture_ok', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/voxels/0021/zmap_lecture_ok_0021_zval3.ima", zval = 3.09)
anova_map_parcels('lecture_ok', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/parcels/0021/zmap_parcels_lecture_ok_0021_zval3.ima", zval = 3.09)
anova_map_parcels_dev('lecture_ok', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lecture_ok/parcels/0021/zmap_parcels_dev_lecture_ok_0021_zval3.ima", zval = 3.09)
anova_map_voxels('lateralisation', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lateralisation/voxels/0021/zmap_lateralisation_0021_zval3.ima", zval = 3.09)
anova_map_parcels('lateralisation', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lateralisation/parcels/0021/zmap_parcels_lateralisation_0021_zval3.ima", zval = 3.09)
anova_map_parcels_dev('lecture_ok', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/lateralisation/parcels/0021/zmap_parcels_dev_lateralisation_0021_zval3.ima", zval = 3.09)
anova_map_voxels('sexe', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/voxels/0021/zmap_sex_0021_zval3.ima", zval = 3.09)
anova_map_parcels('sexe', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/parcels/0021/zmap_parcels_sex_0021_zval3.ima", zval = 3.09)
anova_map_parcels_dev('sexe', 21, 'z', image_name= "/neurospin/LNAO/Pmad/Cecilia/maps/sex/parcels/0021/zmap_parcels_dev_sex_0021_zval3.ima", zval = 3.09)
'''