diff --git a/LabGym/__init__.py b/LabGym/__init__.py
index 2c429dc..4b1e640 100644
--- a/LabGym/__init__.py
+++ b/LabGym/__init__.py
@@ -19,7 +19,7 @@
-__version__='2.7.0'
+__version__='2.7.1'
diff --git a/LabGym/analyzebehavior.py b/LabGym/analyzebehavior.py
index 1b2ff95..b419a6e 100644
--- a/LabGym/analyzebehavior.py
+++ b/LabGym/analyzebehavior.py
@@ -83,6 +83,7 @@ def __init__(self):
		self.pattern_images={}
		self.event_probability={}
		self.all_behavior_parameters={}
+		self.log=[]

	def prepare_analysis(self,
@@ -111,7 +112,9 @@ def prepare_analysis(self,
		):

		print('Preparation started...')
+		self.log.append('Preparation started...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		self.path_to_video=path_to_video
		self.basename=os.path.basename(self.path_to_video)
@@ -145,12 +148,15 @@ def prepare_analysis(self,
		capture.release()

		print('Video fps: '+str(self.fps))
+		self.log.append('Video fps: '+str(self.fps))
		print('The original video framesize: '+str(int(frame.shape[0]))+' X '+str(int(frame.shape[1])))
+		self.log.append('The original video framesize: '+str(int(frame.shape[0]))+' X '+str(int(frame.shape[1])))

		if self.framewidth is not None:
			self.frameheight=int(frame.shape[0]*self.framewidth/frame.shape[1])
			self.background=cv2.resize(frame,(self.framewidth,self.frameheight),interpolation=cv2.INTER_AREA)
			print('The resized video framesize: '+str(self.frameheight)+' X '+str(self.framewidth))
+			self.log.append('The resized video framesize: '+str(self.frameheight)+' X '+str(self.framewidth))
		else:
			self.background=frame
		framesize=min(self.background.shape[0],self.background.shape[1])
@@ -175,6 +181,7 @@ def prepare_analysis(self,
			es_start=self.t
		constants=estimate_constants(self.path_to_video,self.delta,self.animal_number,framewidth=self.framewidth,frameheight=self.frameheight,stable_illumination=stable_illumination,ex_start=ex_start,ex_end=ex_end,t=es_start,duration=self.duration,animal_vs_bg=self.animal_vs_bg,path_background=path_background,kernel=self.kernel)
		self.animal_area=constants[4]
+		self.log.append('The area of single animal is: '+str(self.animal_area)+'.')
		self.background=constants[0]
		self.background_low=constants[1]
		self.background_high=constants[2]
@@ -211,6 +218,7 @@ def prepare_analysis(self,
			self.pattern_images[i]=[np.zeros((self.dim_conv,self.dim_conv,3),dtype='uint8')]*self.total_analysis_framecount

		print('Preparation completed!')
+		self.log.append('Preparation completed!')

	def track_animal(self,frame_count_analyze,contours,centers,heights,inners=None,blobs=None):
@@ -275,7 +283,9 @@ def acquire_information(self,background_free=True,black_background=True):
		# black_background: whether to set background black

		print('Acquiring information in each frame...')
+		self.log.append('Acquiring information in each frame...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		capture=cv2.VideoCapture(self.path_to_video)
@@ -314,7 +324,9 @@
			if (frame_count_analyze+1)%1000==0:
				print(str(frame_count_analyze+1)+' frames processed...')
+				self.log.append(str(frame_count_analyze+1)+' frames processed...')
				print(datetime.datetime.now())
+				self.log.append(str(datetime.datetime.now()))

			if self.framewidth is not None:
				frame=cv2.resize(frame,(self.framewidth,self.frameheight),interpolation=cv2.INTER_AREA)
@@ -357,6 +369,7 @@ def acquire_information(self,background_free=True,black_background=True):
		capture.release()

		print('Information acquisition completed!')
+		self.log.append('Information acquisition completed!')

	def acquire_information_interact_basic(self,background_free=True,black_background=True):
@@ -365,7 +378,9 @@ def acquire_information_interact_basic(self,background_free=True,black_backgroun
		# black_background: whether to set background black

		print('Acquiring information in each frame...')
+		self.log.append('Acquiring information in each frame...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		self.register_counts={}
		self.register_counts[0]=None
@@ -416,7 +431,9 @@ def acquire_information_interact_basic(self,background_free=True,black_backgroun
			if (frame_count_analyze+1)%1000==0:
				print(str(frame_count_analyze+1)+' frames processed...')
+				self.log.append(str(frame_count_analyze+1)+' frames processed...')
				print(datetime.datetime.now())
+				self.log.append(str(datetime.datetime.now()))

			if self.framewidth is not None:
				frame=cv2.resize(frame,(self.framewidth,self.frameheight),interpolation=cv2.INTER_AREA)
@@ -473,12 +490,15 @@ def acquire_information_interact_basic(self,background_free=True,black_backgroun
			self.animal_centers[0]=self.animal_centers[0][:len(self.all_time)]

		print('Information acquisition completed!')
+		self.log.append('Information acquisition completed!')

	def craft_data(self):

		print('Crafting data...')
+		self.log.append('Crafting data...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		lengths=[]
		length=len(self.all_time)
@@ -521,6 +541,7 @@ def craft_data(self):
			self.pattern_images[i]=self.pattern_images[i][:length]

		print('Data crafting completed!')
+		self.log.append('Data crafting completed!')

	def categorize_behaviors(self,path_to_categorizer,uncertain=0,min_length=None):
@@ -530,7 +551,9 @@ def categorize_behaviors(self,path_to_categorizer,uncertain=0,min_length=None):
		# min_length: the minimum length (in frames) a behavior should last, can be used to filter out the brief false positives

		print('Categorizing behaviors...')
+		self.log.append('Categorizing behaviors...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		IDs=list(self.pattern_images.keys())
@@ -618,6 +641,7 @@ def categorize_behaviors(self,path_to_categorizer,uncertain=0,min_length=None):
			i+=1

		print('Behavioral categorization completed!')
+		self.log.append('Behavioral categorization completed!')

	def annotate_video(self,behavior_to_include,show_legend=True,interact_all=False):
@@ -627,7 +651,9 @@ def annotate_video(self,behavior_to_include,show_legend=True,interact_all=False)
		# interact_all: whether is the interactive basic mode

		print('Annotating video...')
+		self.log.append('Annotating video...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		text_scl=max(0.5,round((self.background.shape[0]+self.background.shape[1])/1080,1))
		text_tk=max(1,round((self.background.shape[0]+self.background.shape[1])/540))
@@ -764,6 +790,7 @@ def annotate_video(self,behavior_to_include,show_legend=True,interact_all=False)
		cv2.imwrite(os.path.join(self.results_path,'Trajectory.jpg'),self.background)

		print('Video annotation completed!')
+		self.log.append('Video annotation completed!')

	def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
@@ -863,7 +890,7 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
							if normalize_distance:
								calibrator=math.sqrt(self.animal_area)
								distance_traveled=distance_traveled/calibrator
-							speed=distance_traveled/(self.length/self.fps)
+							self.all_behavior_parameters[behavior_name]['speed'][i][n]=distance_traveled/(self.length/self.fps)
							end_center=self.animal_centers[i][n]
							if end_center is not None:
								displacements=[]
@@ -877,6 +904,12 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
									displacement=displacement/calibrator
								velocity=displacement/((self.length-np.argmax(displacements))/self.fps)
								self.all_behavior_parameters[behavior_name]['velocity'][i][n]=velocity
+								start_center=self.animal_centers[i][n-1]
+								if start_center is not None:
+									dt=math.dist(end_center,start_center)
+									if normalize_distance:
+										dt=dt/calibrator
+									self.all_behavior_parameters[behavior_name]['distance'][i]+=dt
							velocities_max=[]
							velocities_min=[]
							for v in self.all_behavior_parameters[behavior_name]['velocity'][i][n-self.length+1:n+1]:
@@ -892,8 +925,6 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
							if np.argmax(velocities_max)!=np.argmin(velocities_min):
								t=abs(np.argmax(velocities_max)-np.argmin(velocities_min))/self.fps
								self.all_behavior_parameters[behavior_name]['acceleration'][i][n]=(vmax-vmin)/t
-							self.all_behavior_parameters[behavior_name]['distance'][i]+=distance_traveled
-							self.all_behavior_parameters[behavior_name]['speed'][i][n]=speed

						if '3 areal parameters' in parameter_to_analyze:
							mask=np.zeros_like(self.background)
@@ -969,7 +1000,7 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
						if normalize_distance:
							calibrator=math.sqrt(self.animal_area)
							distance_traveled=distance_traveled/calibrator
-						speed=distance_traveled/(self.length/self.fps)
+						self.all_behavior_parameters['speed'][i][n]=distance_traveled/(self.length/self.fps)
						end_center=self.animal_centers[i][n]
						if end_center is not None:
							displacements=[]
@@ -983,6 +1014,13 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
								displacement=displacement/calibrator
							velocity=displacement/((self.length-np.argmax(displacements))/self.fps)
							self.all_behavior_parameters['velocity'][i][n]=velocity
+							start_center=self.animal_centers[i][n-1]
+							if start_center is not None:
+								dt=math.dist(end_center,start_center)
+								if normalize_distance:
+									dt=dt/calibrator
+								self.all_behavior_parameters['distance'][i]+=dt
+
						velocities_max=[]
						velocities_min=[]
						for v in self.all_behavior_parameters['velocity'][i][n-self.length+1:n+1]:
@@ -998,8 +1036,6 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
						if np.argmax(velocities_max)!=np.argmin(velocities_min):
							t=abs(np.argmax(velocities_max)-np.argmin(velocities_min))/self.fps
							self.all_behavior_parameters['acceleration'][i][n]=(vmax-vmin)/t
-						self.all_behavior_parameters['distance'][i]+=distance_traveled
-						self.all_behavior_parameters['speed'][i][n]=speed

					if '3 areal parameters' in parameter_to_analyze:
						mask=np.zeros_like(self.background)
@@ -1063,14 +1099,19 @@ def export_results(self,normalize_distance=True,parameter_to_analyze=[]):
		# parameter_to_analyze: the behavior parameters that are selected in the analysis

		print('Quantifying behaviors...')
+		self.log.append('Quantifying behaviors...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		self.analyze_parameters(normalize_distance=normalize_distance,parameter_to_analyze=parameter_to_analyze)

		print('Behavioral quantification completed!')
+		self.log.append('Behavioral quantification completed!')

		print('Exporting results...')
+		self.log.append('Exporting results...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))
		if self.categorize_behavior:
			events_df=pd.DataFrame(self.event_probability,index=self.all_time)
@@ -1135,6 +1176,11 @@ def export_results(self,normalize_distance=True,parameter_to_analyze=[]):
			pd.concat(summary,axis=1).to_excel(os.path.join(self.results_path,'all_summary.xlsx'),float_format='%.2f',index_label='ID/parameter')

		print('All results exported in: '+str(self.results_path))
+		self.log.append('All results exported in: '+str(self.results_path))
+		self.log.append('Analysis completed!')
+		if len(self.log)>0:
+			with open(os.path.join(self.results_path,'Analysis log.txt'),'w') as analysis_log:
+				analysis_log.write('\n'.join(str(i) for i in self.log))

	def generate_data(self,background_free=True,black_background=True,skip_redundant=1):
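The quantification hunks above change how 'distance' accumulates: the old code added the whole sliding-window sum distance_traveled on every frame n, so each step was counted once per overlapping window, while the patched code adds only the single step between the centers at frames n-1 and n. A minimal standalone sketch of the corrected accumulation, using hypothetical toy centers rather than LabGym data:

import math

# Hypothetical per-frame animal centers; each consecutive pair is a 3-4-5 step.
centers=[(0,0),(3,4),(6,8),(9,12),(12,16)]

distance=0.0
for n in range(1,len(centers)):
	end_center=centers[n]
	start_center=centers[n-1]
	if end_center is not None and start_center is not None:
		# One step per frame, mirroring the patched '+=math.dist(end_center,start_center)'.
		distance+=math.dist(end_center,start_center)

print(distance)  # 20.0: every step counted exactly once

In the patch itself the step is additionally divided by calibrator=math.sqrt(animal area) when normalize_distance is enabled.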
diff --git a/LabGym/analyzebehavior_dt.py b/LabGym/analyzebehavior_dt.py
index 54d82bc..6010066 100644
--- a/LabGym/analyzebehavior_dt.py
+++ b/LabGym/analyzebehavior_dt.py
@@ -90,6 +90,7 @@ def __init__(self):
		self.animal_present={}
		self.temp_frames=None
		self.social_distance=0
+		self.log=[]

	def prepare_analysis(self,
@@ -115,7 +116,9 @@ def prepare_analysis(self,
		):

		print('Preparation started...')
+		self.log.append('Preparation started...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		self.detector=Detector()
		self.detector.load(path_to_detector,animal_kinds)
@@ -156,12 +159,15 @@ def prepare_analysis(self,
		capture.release()

		print('Video fps: '+str(self.fps))
+		self.log.append('Video fps: '+str(self.fps))
		print('The original video framesize: '+str(int(frame.shape[0]))+' X '+str(int(frame.shape[1])))
+		self.log.append('The original video framesize: '+str(int(frame.shape[0]))+' X '+str(int(frame.shape[1])))

		if self.framewidth is not None:
			self.frameheight=int(frame.shape[0]*self.framewidth/frame.shape[1])
			self.background=cv2.resize(frame,(self.framewidth,self.frameheight),interpolation=cv2.INTER_AREA)
			print('The resized video framesize: '+str(self.frameheight)+' X '+str(self.framewidth))
+			self.log.append('The resized video framesize: '+str(self.frameheight)+' X '+str(self.framewidth))
		else:
			self.background=frame
		self.temp_frames=deque(maxlen=self.length)
@@ -232,6 +238,7 @@ def prepare_analysis(self,
			self.kernel=11

		print('Preparation completed!')
+		self.log.append('Preparation completed!')

	def track_animal(self,frame_count_analyze,animal_name,contours,centers,heights,inners=None,blobs=None):
@@ -680,7 +687,9 @@ def acquire_information(self,batch_size=1,background_free=True,black_background=
		# black_background: whether to set background black

		print('Acquiring information in each frame...')
+		self.log.append('Acquiring information in each frame...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		capture=cv2.VideoCapture(self.path_to_video)
		batch=[]
@@ -709,7 +718,9 @@ def acquire_information(self,batch_size=1,background_free=True,black_background=
			if (frame_count_analyze+1)%1000==0:
				print(str(frame_count_analyze+1)+' frames processed...')
+				self.log.append(str(frame_count_analyze+1)+' frames processed...')
				print(datetime.datetime.now())
+				self.log.append(str(datetime.datetime.now()))

			if self.framewidth is not None:
				frame=cv2.resize(frame,(self.framewidth,self.frameheight),interpolation=cv2.INTER_AREA)
@@ -733,8 +744,10 @@ def acquire_information(self,batch_size=1,background_free=True,black_background=
		for animal_name in self.animal_kinds:
			print('The area of '+str(animal_name)+' is: '+str(self.animal_area[animal_name])+'.')
+			self.log.append('The area of '+str(animal_name)+' is: '+str(self.animal_area[animal_name])+'.')

		print('Information acquisition completed!')
+		self.log.append('Information acquisition completed!')

	def acquire_information_interact_basic(self,batch_size=1,background_free=True,black_background=True):
@@ -744,7 +757,9 @@ def acquire_information_interact_basic(self,batch_size=1,background_free=True,bl
		# black_background: whether to set background black

		print('Acquiring information in each frame...')
+		self.log.append('Acquiring information in each frame...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		name=self.animal_kinds[0]
		self.register_counts={}
@@ -792,7 +807,9 @@ def acquire_information_interact_basic(self,batch_size=1,background_free=True,bl
			if (frame_count_analyze+1)%1000==0:
				print(str(frame_count_analyze+1)+' frames processed...')
+				self.log.append(str(frame_count_analyze+1)+' frames processed...')
				print(datetime.datetime.now())
+				self.log.append(str(datetime.datetime.now()))

			if self.framewidth is not None:
				frame=cv2.resize(frame,(self.framewidth,self.frameheight),interpolation=cv2.INTER_AREA)
@@ -904,12 +921,15 @@ def acquire_information_interact_basic(self,batch_size=1,background_free=True,bl
			self.animal_centers[name][0]=self.animal_centers[name][0][:length]

		print('Information acquisition completed!')
+		self.log.append('Information acquisition completed!')

	def craft_data(self):

		print('Crafting data...')
+		self.log.append('Crafting data...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		for animal_name in self.animal_kinds:
@@ -958,6 +978,7 @@ def craft_data(self):
				self.pattern_images[animal_name][i]=self.pattern_images[animal_name][i][:length]

		print('Data crafting completed!')
+		self.log.append('Data crafting completed!')

	def categorize_behaviors(self,path_to_categorizer,uncertain=0,min_length=None):
@@ -967,7 +988,9 @@ def categorize_behaviors(self,path_to_categorizer,uncertain=0,min_length=None):
		# min_length: the minimum length (in frames) a behavior should last, can be used to filter out the brief false positives

		print('Categorizing behaviors...')
+		self.log.append('Categorizing behaviors...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		categorizer=load_model(path_to_categorizer)
@@ -1063,6 +1086,7 @@ def categorize_behaviors(self,path_to_categorizer,uncertain=0,min_length=None):
			i+=1

		print('Behavioral categorization completed!')
+		self.log.append('Behavioral categorization completed!')

	def correct_identity(self,specific_behaviors):
@@ -1070,7 +1094,9 @@ def correct_identity(self,specific_behaviors):
		# specific_behaviors: the sex / identity specific behaviors

		print('Initiating behavior-guided identity correction...')
+		self.log.append('Initiating behavior-guided identity correction...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		for animal_name in self.animal_kinds:
@@ -1104,6 +1130,7 @@ def correct_identity(self,specific_behaviors):
					self.all_behavior_parameters[animal_name][behavior_name]['probability'][i][idx]=temp

		print('Identity correction completed!')
+		self.log.append('Identity correction completed!')

	def annotate_video(self,animal_to_include,behavior_to_include,show_legend=True):
@@ -1113,7 +1140,9 @@ def annotate_video(self,animal_to_include,behavior_to_include,show_legend=True):
		# show_legend: whether to show the legend of behavior names in video frames

		print('Annotating video...')
+		self.log.append('Annotating video...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		text_scl=max(0.5,round((self.background.shape[0]+self.background.shape[1])/1080,1))
		text_tk=max(1,round((self.background.shape[0]+self.background.shape[1])/540))
@@ -1260,6 +1289,7 @@ def annotate_video(self,animal_to_include,behavior_to_include,show_legend=True):
		cv2.imwrite(os.path.join(self.results_path,'Trajectory_black.jpg'),background)

		print('Video annotation completed!')
+		self.log.append('Video annotation completed!')

	def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
@@ -1358,7 +1388,7 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
								if normalize_distance:
									calibrator=math.sqrt(self.animal_area[animal_name])
									distance_traveled=distance_traveled/calibrator
-								speed=distance_traveled/(self.length/self.fps)
+								self.all_behavior_parameters[animal_name][behavior_name]['speed'][i][n]=distance_traveled/(self.length/self.fps)
								end_center=self.animal_centers[animal_name][i][n]
								if end_center is not None:
									displacements=[]
@@ -1372,6 +1402,12 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
										displacement=displacement/calibrator
									velocity=displacement/((self.length-np.argmax(displacements))/self.fps)
									self.all_behavior_parameters[animal_name][behavior_name]['velocity'][i][n]=velocity
+									start_center=self.animal_centers[animal_name][i][n-1]
+									if start_center is not None:
+										dt=math.dist(end_center,start_center)
+										if normalize_distance:
+											dt=dt/calibrator
+										self.all_behavior_parameters[animal_name][behavior_name]['distance'][i]+=dt
								velocities_max=[]
								velocities_min=[]
								for v in self.all_behavior_parameters[animal_name][behavior_name]['velocity'][i][n-self.length+1:n+1]:
@@ -1387,8 +1423,6 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
								if np.argmax(velocities_max)!=np.argmin(velocities_min):
									t=abs(np.argmax(velocities_max)-np.argmin(velocities_min))/self.fps
									self.all_behavior_parameters[animal_name][behavior_name]['acceleration'][i][n]=(vmax-vmin)/t
-								self.all_behavior_parameters[animal_name][behavior_name]['distance'][i]+=distance_traveled
-								self.all_behavior_parameters[animal_name][behavior_name]['speed'][i][n]=speed

							if '3 areal parameters' in parameter_to_analyze:
								mask=np.zeros_like(self.background)
@@ -1464,7 +1498,7 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
							if normalize_distance:
								calibrator=math.sqrt(self.animal_area[animal_name])
								distance_traveled=distance_traveled/calibrator
-							speed=distance_traveled/(self.length/self.fps)
+							self.all_behavior_parameters[animal_name]['speed'][i][n]=distance_traveled/(self.length/self.fps)
							end_center=self.animal_centers[animal_name][i][n]
							if end_center is not None:
								displacements=[]
@@ -1478,6 +1512,12 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
									displacement=displacement/calibrator
								velocity=displacement/((self.length-np.argmax(displacements))/self.fps)
								self.all_behavior_parameters[animal_name]['velocity'][i][n]=velocity
+								start_center=self.animal_centers[animal_name][i][n-1]
+								if start_center is not None:
+									dt=math.dist(end_center,start_center)
+									if normalize_distance:
+										dt=dt/calibrator
+									self.all_behavior_parameters[animal_name]['distance'][i]+=dt
							velocities_max=[]
							velocities_min=[]
							for v in self.all_behavior_parameters[animal_name]['velocity'][i][n-self.length+1:n+1]:
@@ -1493,8 +1533,6 @@ def analyze_parameters(self,normalize_distance=True,parameter_to_analyze=[]):
							if np.argmax(velocities_max)!=np.argmin(velocities_min):
								t=abs(np.argmax(velocities_max)-np.argmin(velocities_min))/self.fps
								self.all_behavior_parameters[animal_name]['acceleration'][i][n]=(vmax-vmin)/t
-							self.all_behavior_parameters[animal_name]['distance'][i]+=distance_traveled
-							self.all_behavior_parameters[animal_name]['speed'][i][n]=speed

						if '3 areal parameters' in parameter_to_analyze:
							mask=np.zeros_like(self.background)
@@ -1558,14 +1596,19 @@ def export_results(self,normalize_distance=True,parameter_to_analyze=[]):
		# parameter_to_analyze: the behavior parameters that are selected in the analysis

		print('Quantifying behaviors...')
+		self.log.append('Quantifying behaviors...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		self.analyze_parameters(normalize_distance=normalize_distance,parameter_to_analyze=parameter_to_analyze)

		print('Behavioral quantification Completed!')
+		self.log.append('Behavioral quantification Completed!')

		print('Exporting results...')
+		self.log.append('Exporting results...')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		for animal_name in self.animal_kinds:
@@ -1632,6 +1675,12 @@ def export_results(self,normalize_distance=True,parameter_to_analyze=[]):
			pd.concat(summary,axis=1).to_excel(os.path.join(self.results_path,animal_name+'_all_summary.xlsx'),float_format='%.2f',index_label='ID/parameter')

		print('All results exported in: '+str(self.results_path))
+		self.log.append('All results exported in: '+str(self.results_path))
+		self.log.append('Analysis completed!')
+		if len(self.log)>0:
+			with open(os.path.join(self.results_path,'Analysis log.txt'),'w') as analysis_log:
+				analysis_log.write('\n'.join(str(i) for i in self.log))
+

	def generate_data(self,background_free=True,black_background=True,skip_redundant=1):
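Both analysis modules now share one logging pattern: every user-facing print is mirrored into self.log, and the accumulated lines are flushed once to 'Analysis log.txt' next to the exported results. A condensed sketch of that pattern follows; the class and method names are hypothetical stand-ins, not LabGym's API:

import os

class LoggingSketch:
	# Hypothetical stand-in for AnalyzeAnimal / AnalyzeAnimalDetector.

	def __init__(self):
		self.log=[]

	def report(self,message):
		# Mirror every console message into the in-memory log.
		print(message)
		self.log.append(message)

	def export_results(self,results_path):
		self.report('All results exported in: '+str(results_path))
		self.log.append('Analysis completed!')
		if len(self.log)>0:
			# Single flush at the end of the run, as in both export_results hunks.
			with open(os.path.join(results_path,'Analysis log.txt'),'w') as analysis_log:
				analysis_log.write('\n'.join(str(i) for i in self.log))

Keeping the log in memory and writing it once avoids holding a file handle open across the long video-processing loops.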
diff --git a/LabGym/categorizer.py b/LabGym/categorizer.py
index 669f10d..c6d53f8 100644
--- a/LabGym/categorizer.py
+++ b/LabGym/categorizer.py
@@ -54,6 +54,7 @@ def __init__(self):
		self.extension_image=('.png','.PNG','.jpeg','.JPEG','.jpg','.JPG','.tiff','.TIFF','.bmp','.BMP') # the image formats that LabGym can accept
		self.extension_video=('.avi','.mpg','.wmv','.mp4','.mkv','.m4v','.mov') # the video formats that LabGym can accept
		self.classnames=None # the behavior category names in the trained Categorizer
+		self.log=[]

	def rename_label(self,file_path,new_path,resize=None):
@@ -283,7 +284,9 @@ def build_data(self,path_to_animations,dim_tconv=0,dim_conv=64,channel=1,time_st
					for diff in range(time_step-frames_length):
						frames.append(np.zeros_like(original_frame))
					print('Inconsistent duration of animation detected at: '+str(i)+'.')
+					self.log.append('Inconsistent duration of animation detected at: '+str(i)+'.')
					print('Zero padding has been used, which may decrease the training accuracy.')
+					self.log.append('Zero padding has been used, which may decrease the training accuracy.')

				for frame in frames:
@@ -421,7 +424,9 @@ def build_data(self,path_to_animations,dim_tconv=0,dim_conv=64,channel=1,time_st
					amount+=1
					if amount%10000==0:
						print('The augmented example amount: '+str(amount))
+						self.log.append('The augmented example amount: '+str(amount))
						print(datetime.datetime.now())
+						self.log.append(str(datetime.datetime.now()))

		if dim_tconv!=0:
			animations=np.array(animations,dtype='float32')/255.0
@@ -838,6 +843,7 @@ def train_pattern_recognizer(self,data_path,model_path,out_path=None,dim=64,chan
		inputs=Input(shape=(dim,dim,channel))

		print('Training the Categorizer w/ only Pattern Recognizer using the behavior examples in: '+str(data_path))
+		self.log.append('Training the Categorizer w/ only Pattern Recognizer using the behavior examples in: '+str(data_path))

		files=[i for i in os.listdir(data_path) if i.endswith(self.extension_image)]
@@ -862,6 +868,7 @@ def train_pattern_recognizer(self,data_path,model_path,out_path=None,dim=64,chan
		else:
			print('Found behavior names: '+str(self.classnames))
+			self.log.append('Found behavior names: '+str(self.classnames))

		if include_bodyparts:
			inner_code=0
@@ -889,13 +896,17 @@ def train_pattern_recognizer(self,data_path,model_path,out_path=None,dim=64,chan
		(train_files,test_files,y1,y2)=train_test_split(path_files,labels,test_size=0.2,stratify=labels)

		print('Perform augmentation for the behavior examples...')
+		self.log.append('Perform augmentation for the behavior examples...')
		print('This might take hours or days, depending on the capacity of your computer.')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		print('Start to augment training examples...')
+		self.log.append('Start to augment training examples...')
		_,trainX,trainY=self.build_data(train_files,dim_tconv=0,dim_conv=dim,channel=channel,time_step=time_step,aug_methods=aug_methods,background_free=background_free,black_background=black_background,behavior_mode=behavior_mode)
		trainY=lb.fit_transform(trainY)
		print('Start to augment validation examples...')
+		self.log.append('Start to augment validation examples...')
		if augvalid:
			_,testX,testY=self.build_data(test_files,dim_tconv=0,dim_conv=dim,channel=channel,time_step=time_step,aug_methods=aug_methods,background_free=background_free,black_background=black_background,behavior_mode=behavior_mode)
		else:
@@ -909,10 +920,15 @@
		testY_tensor=tf.convert_to_tensor(testY)

		print('Training example shape : '+str(trainX.shape))
+		self.log.append('Training example shape : '+str(trainX.shape))
		print('Training label shape : '+str(trainY.shape))
+		self.log.append('Training label shape : '+str(trainY.shape))
		print('Validation example shape : '+str(testX.shape))
+		self.log.append('Validation example shape : '+str(testX.shape))
		print('Validation label shape : '+str(testY.shape))
+		self.log.append('Validation label shape : '+str(testY.shape))
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		if trainX.shape[0]<5000:
			batch_size=8
@@ -938,6 +954,7 @@
		model.save(model_path)

		print('Trained Categorizer saved in: '+str(model_path))
+		self.log.append('Trained Categorizer saved in: '+str(model_path))

		try:
@@ -969,6 +986,9 @@
			if out_path is not None:
				plt.savefig(os.path.join(out_path,'training_history.png'))
				print('Training reports saved in: '+str(out_path))
+				if len(self.log)>0:
+					with open(os.path.join(out_path,'Training log.txt'),'w') as training_log:
+						training_log.write('\n'.join(str(i) for i in self.log))
			plt.close('all')

		except:
@@ -1002,6 +1022,7 @@ def train_animation_analyzer(self,data_path,model_path,out_path=None,dim=64,chan
		inputs=Input(shape=(time_step,dim,dim,channel))

		print('Training the Categorizer w/o Pattern Recognizer using the behavior examples in: '+str(data_path))
+		self.log.append('Training the Categorizer w/o Pattern Recognizer using the behavior examples in: '+str(data_path))

		files=[i for i in os.listdir(data_path) if i.endswith(self.extension_video)]
@@ -1026,6 +1047,7 @@ def train_animation_analyzer(self,data_path,model_path,out_path=None,dim=64,chan
		else:
			print('Found behavior names: '+str(self.classnames))
+			self.log.append('Found behavior names: '+str(self.classnames))

		if include_bodyparts:
			inner_code=0
@@ -1049,13 +1071,17 @@ def train_animation_analyzer(self,data_path,model_path,out_path=None,dim=64,chan
		(train_files,test_files,y1,y2)=train_test_split(path_files,labels,test_size=0.2,stratify=labels)

		print('Perform augmentation for the behavior examples...')
+		self.log.append('Perform augmentation for the behavior examples...')
		print('This might take hours or days, depending on the capacity of your computer.')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		print('Start to augment training examples...')
+		self.log.append('Start to augment training examples...')
		trainX,_,trainY=self.build_data(train_files,dim_tconv=dim,dim_conv=dim,channel=channel,time_step=time_step,aug_methods=aug_methods,background_free=background_free,black_background=black_background,behavior_mode=behavior_mode)
		trainY=lb.fit_transform(trainY)
		print('Start to augment validation examples...')
+		self.log.append('Start to augment validation examples...')
		if augvalid:
			testX,_,testY=self.build_data(test_files,dim_tconv=dim,dim_conv=dim,channel=channel,time_step=time_step,aug_methods=aug_methods,background_free=background_free,black_background=black_background,behavior_mode=behavior_mode)
		else:
@@ -1069,10 +1095,15 @@ def train_animation_analyzer(self,data_path,model_path,out_path=None,dim=64,chan
		testY_tensor=tf.convert_to_tensor(testY)

		print('Training example shape : '+str(trainX.shape))
+		self.log.append('Training example shape : '+str(trainX.shape))
		print('Training label shape : '+str(trainY.shape))
+		self.log.append('Training label shape : '+str(trainY.shape))
		print('Validation example shape : '+str(testX.shape))
+		self.log.append('Validation example shape : '+str(testX.shape))
		print('Validation label shape : '+str(testY.shape))
+		self.log.append('Validation label shape : '+str(testY.shape))
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		if trainX.shape[0]<5000:
			batch_size=8
@@ -1099,6 +1130,7 @@
		model.save(model_path)

		print('Trained Categorizer saved in: '+str(model_path))
+		self.log.append('Trained Categorizer saved in: '+str(model_path))

		try:
@@ -1130,6 +1162,9 @@
			if out_path is not None:
				plt.savefig(os.path.join(out_path,'training_history.png'))
				print('Training reports saved in: '+str(out_path))
+				if len(self.log)>0:
+					with open(os.path.join(out_path,'Training log.txt'),'w') as training_log:
+						training_log.write('\n'.join(str(i) for i in self.log))
			plt.close('all')

		except:
@@ -1158,6 +1193,7 @@ def train_combnet(self,data_path,model_path,out_path=None,dim_tconv=32,dim_conv=
		# social_distance: a threshold (folds of size of a single animal) on whether to include individuals that are not main character in behavior examples

		print('Training Categorizer with both Animation Analyzer and Pattern Recognizer using the behavior examples in: '+str(data_path))
+		self.log.append('Training Categorizer with both Animation Analyzer and Pattern Recognizer using the behavior examples in: '+str(data_path))

		files=[i for i in os.listdir(data_path) if i.endswith(self.extension_video)]
@@ -1182,6 +1218,7 @@ def train_combnet(self,data_path,model_path,out_path=None,dim_tconv=32,dim_conv=
		else:
			print('Found behavior names: '+str(self.classnames))
+			self.log.append('Found behavior names: '+str(self.classnames))

		if include_bodyparts:
			inner_code=0
@@ -1205,13 +1242,17 @@ def train_combnet(self,data_path,model_path,out_path=None,dim_tconv=32,dim_conv=
		(train_files,test_files,y1,y2)=train_test_split(path_files,labels,test_size=0.2,stratify=labels)

		print('Perform augmentation for the behavior examples...')
+		self.log.append('Perform augmentation for the behavior examples...')
		print('This might take hours or days, depending on the capacity of your computer.')
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		print('Start to augment training examples...')
+		self.log.append('Start to augment training examples...')
		train_animations,train_pattern_images,trainY=self.build_data(train_files,dim_tconv=dim_tconv,dim_conv=dim_conv,channel=channel,time_step=time_step,aug_methods=aug_methods,background_free=background_free,black_background=black_background,behavior_mode=behavior_mode)
		trainY=lb.fit_transform(trainY)
		print('Start to augment validation examples...')
+		self.log.append('Start to augment validation examples...')
		if augvalid:
			test_animations,test_pattern_images,testY=self.build_data(test_files,dim_tconv=dim_tconv,dim_conv=dim_conv,channel=channel,time_step=time_step,aug_methods=aug_methods,background_free=background_free,black_background=black_background,behavior_mode=behavior_mode)
		else:
@@ -1227,10 +1268,15 @@ def train_combnet(self,data_path,model_path,out_path=None,dim_tconv=32,dim_conv=
		testY_tensor=tf.convert_to_tensor(testY)

		print('Training example shape : '+str(train_animations.shape)+', '+str(train_pattern_images.shape))
+		self.log.append('Training example shape : '+str(train_animations.shape)+', '+str(train_pattern_images.shape))
		print('Training label shape : '+str(trainY.shape))
+		self.log.append('Training label shape : '+str(trainY.shape))
		print('Validation example shape : '+str(test_animations.shape)+', '+str(test_pattern_images.shape))
+		self.log.append('Validation example shape : '+str(test_animations.shape)+', '+str(test_pattern_images.shape))
		print('Validation label shape : '+str(testY.shape))
+		self.log.append('Validation label shape : '+str(testY.shape))
		print(datetime.datetime.now())
+		self.log.append(str(datetime.datetime.now()))

		if train_animations.shape[0]<5000:
			batch_size=8
@@ -1253,6 +1299,7 @@
		model.save(model_path)

		print('Trained Categorizer saved in: '+str(model_path))
+		self.log.append('Trained Categorizer saved in: '+str(model_path))

		try:
@@ -1284,6 +1331,9 @@
			if out_path is not None:
				plt.savefig(os.path.join(out_path,'training_history.png'))
				print('Training reports saved in: '+str(out_path))
+				if len(self.log)>0:
+					with open(os.path.join(out_path,'Training log.txt'),'w') as training_log:
+						training_log.write('\n'.join(str(i) for i in self.log))
			plt.close('all')

		except:
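One design point in the categorizer hunks: in all three training methods the 'Training log.txt' write sits inside the report-plotting try/except, under if out_path is not None:, so the log is flushed only when an output path is given and the matplotlib report succeeds. If that coupling is unwanted, one alternative is to flush the log before plotting; the helper below is a hypothetical sketch, not part of the patch:

import os

def flush_training_log(log,out_path):
	# Hypothetical helper: write the accumulated training log before any
	# plotting, so a matplotlib failure cannot also skip the log flush.
	if out_path is not None and len(log)>0:
		with open(os.path.join(out_path,'Training log.txt'),'w') as training_log:
			training_log.write('\n'.join(str(i) for i in log))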