From fa9004096cd43275720c76a1be6dfc2ee60c2feb Mon Sep 17 00:00:00 2001 From: shenshan Date: Fri, 14 May 2021 17:39:29 -0700 Subject: [PATCH 01/64] remove 2p terms and adapt with inscopix miniscope --- .../{imaging.py => miniscope.py} | 335 +++++++++++++----- 1 file changed, 241 insertions(+), 94 deletions(-) rename element_miniscope/{imaging.py => miniscope.py} (77%) diff --git a/element_miniscope/imaging.py b/element_miniscope/miniscope.py similarity index 77% rename from element_miniscope/imaging.py rename to element_miniscope/miniscope.py index f3360a9..ab1edc6 100644 --- a/element_miniscope/imaging.py +++ b/element_miniscope/miniscope.py @@ -28,7 +28,7 @@ def activate(imaging_schema_name, scan_schema_name=None, *, Upstream tables: + Session: parent table to Scan, typically identifying a recording session Functions: - + get_imaging_root_data_dir() -> str + + get_miniscope_root_data_dir() -> str Retrieve the root data directory - e.g. containing all subject/sessions data :return: a string for full path to the root data directory """ @@ -41,7 +41,7 @@ def activate(imaging_schema_name, scan_schema_name=None, *, global _linking_module _linking_module = linking_module - scan.activate(scan_schema_name, create_schema=create_schema, + scan.activate(scan_schema_name, create_schema=create_schema, create_tables=create_tables, linking_module=linking_module) schema.activate(imaging_schema_name, create_schema=create_schema, create_tables=create_tables, add_objects=_linking_module.__dict__) @@ -49,17 +49,221 @@ def activate(imaging_schema_name, scan_schema_name=None, *, # -------------- Functions required by the element-calcium-imaging -------------- -def get_imaging_root_data_dir() -> str: +def get_miniscope_daq_v3_files(key: dict) -> str: """ - get_imaging_root_data_dir() -> str + Retrieve the Miniscope DAQ V3 files associated with a given Scan + :param scan_key: key of a Scan + :return: Miniscope DAQ V3 files full file-path + """ + return _linking_module.get_miniscope_daq_v3_files(key) + + +def get_miniscope_root_data_dir() -> str: + """ + get_miniscope_root_data_dir() -> str Retrieve the root data directory - e.g. 
containing all subject/sessions data :return: a string for full path to the root data directory """ - return _linking_module.get_imaging_root_data_dir() + return _linking_module.get_miniscope_root_data_dir() # -------------- Table declarations -------------- +@schema +class AcquisitionSoftware(dj.Lookup): + definition = """ # Name of acquisition software + acq_software: varchar(24) + """ + contents = zip([ + 'Miniscope-DAQ-V3', + 'Inscopix nVoke']) + + +@schema +class Channel(dj.Lookup): + definition = """ # Recording channel + channel : tinyint # 0-based indexing + """ + contents = zip(range(5)) + + +@schema +class Recording(dj.Manual): + definition = """ + -> Session + recording_id: int + --- + -> Equipment + -> AcquisitionSoftware + recording_notes='' : varchar(4095) # free-notes + """ + + +@schema +class RecordingLocation(dj.Manual): + definition = """ + -> Recording + --- + -> Location + """ + + +@schema +class RecordingInfo(dj.Imported): + definition = """ # general data about recording + -> Recording + --- + nchannels : tinyint # number of channels + nplanes : int # number of recording planes + nframes : int # number of recorded frames + x=null : float # (um) 0 point in the motor coordinate system + y=null : float # (um) 0 point in the motor coordinate system + z=null : float # (um) 0 point in the motor coordinate system + fps : float # (Hz) frames per second - volumetric scan rate + gain=null : float # recording gain + spatial_downsample=1 : tinyint # e.g. 1, 2, 4, 8. 1 for no downsampling + temporal_downsample=1: tinyint # e.g. 1, 2, 4, 8. 1 for no downsampling + led_power : float # LED power used in the given recording + """ + + class Plane(dj.Part): + definition = """ # field-specific scan information + -> master + plane_id : int + --- + px_height=null : smallint # height in pixels + px_width=null : smallint # width in pixels + um_height=null : float # height in microns + um_width=null : float # width in microns + plane_z=null : float # (um) relative depth of the recording plane + """ + + class File(dj.Part): + definition = """ + -> master + recording_file_path: varchar(255) # filepath relative to root data directory + """ + + def make(self, key): + """ Read and store some scan meta information.""" + acq_software = (Recording & key).fetch1('acq_software') + + if acq_software == 'Miniscope-DAQ-V3': + # Parse image dimension and frame rate + import cv2 + recording_filepaths = get_miniscope_daq_v3_files(key) + video = cv2.VideoCapture(recording_filepaths[0]) + fps = video.get(cv2.CAP_PROP_FPS) # TODO: Verify this method extracts correct value + _, frame = video.read() + frame_size = np.shape(frame) + + # Parse number of frames from timestamp.dat file + with open(recording_filepaths[-1]) as f: + next(f) + nframes = sum(1 for line in f if int(line[0]) == 0) + + # Insert in RecordingInfo + self.insert1(dict(key, + nchannels=1, + nframes=nframes, + nplanes=1, + fps=fps)) + + # Insert Plane(s) + self.Plane.insert1(dict(key, + plane_id=0, + px_height=frame_size[0], + px_width=frame_size[1])) + + else: + raise NotImplementedError( + f'Loading routine not implemented for {acq_software} acquisition software') + + # Insert file(s) + root = pathlib.Path(get_miniscope_root_data_dir()) + recording_files = [pathlib.Path(f).relative_to(root).as_posix() for f in recording_filepaths] + self.File.insert([{**key, 'recording_file_path': f} for f in recording_files]) + + +@schema +class PreprocessingMethod(dj.Lookup): + definition = """ + preprocessing_method : varchar(12) + --- + 
preprocessing_method_desc='': varchar(255) + """ + contents = [ + ['inscopix', ''], + ['no preprocessing', '']] + + +@schema +class Preprocessing(dj.Manual): + definition = """ + -> RecordingInfo + --- + -> PreprocessingMethod + """ + + class Plane(dj.Part): + definition = """ + -> RecordingInfo.Plane + --- + trim_initial_frames=0 : boolean + cropping_imaging=0 : boolean + crop_area_left=0 : smallint # (pixels) + crop_area_top=0 : smallint # (pixels) + crop_area_px_width=null : smallint # (pixels) + crop_area_px_height=null : smallint # (pixels) + fix_defective_pixels=0 : boolean + spatial_downsample=1 : tinyint # spatial downsample with respect to the raw movie + temporal_downsampl=1 : tinyint # temporal downsample with respect to the raw movie + """ + + class File(dj.Part): + definition = """ + -> master + preprocessing_filepath : varchar(255) # filepath of preprocessed video + """ + + @classmethod + def create1_from_recording_info(self, key: dict): + """ + A convenient function to create a new corresponding "Preprocessing" entry for a particular + "RecordingInfo" entry. + """ + if key not in RecordingInfo(): + raise ValueError(f'No corresponding entry in RecordingInfo for: {key}') + + self.insert1(dict( + key, preprocessing_method='no preprocessing')) + + if RecordingInfo.Plane & key: + self.insert( + [dict(key, crop_area_width=plane['px_width'], + crop_area_height=plane['px_height']) + for plane in (RecordingInfo.Plane & key).fetch(as_dict=True)]) + + if RecordingInfo.File & key: + self.insert( + [dict(key, preprocessing_filepath=file['recording_filepath']) + for file in (RecordingInfo.File & key).fetch(as_dict=True)]) + + +@schema +class MotionCorrectionMethod(dj.Lookup): + definition = """ + motion_correction_method: char(32) + --- + motion_correction_method_desc='': varchar(1000) + """ + + contents = [ + ('inscopix', 'Motion correction running through Inscopix software with manual interaction'), + ('suite2p', 'Motion correction using auto processed Suite2p') + ('caiman', 'Motion correction using auto processed caiman') + ] + @schema class ProcessingMethod(dj.Lookup): @@ -78,7 +282,7 @@ class ProcessingParamSet(dj.Lookup): definition = """ paramset_idx: smallint --- - -> ProcessingMethod + -> ProcessingMethod paramset_desc: varchar(128) param_set_hash: uuid unique index (param_set_hash) @@ -86,7 +290,7 @@ class ProcessingParamSet(dj.Lookup): """ @classmethod - def insert_new_params(cls, processing_method: str, + def insert_new_params(cls, processing_method: str, paramset_idx: int, paramset_desc: str, params: dict): param_dict = {'processing_method': processing_method, 'paramset_idx': paramset_idx, @@ -106,30 +310,12 @@ def insert_new_params(cls, processing_method: str, cls.insert1(param_dict) -@schema -class CellCompartment(dj.Lookup): - definition = """ # cell compartments that can be imaged - cell_compartment : char(16) - """ - - contents = zip(['axon', 'soma', 'bouton']) - - -@schema -class MaskType(dj.Lookup): - definition = """ # possible classifications for a segmented mask - mask_type : varchar(16) - """ - - contents = zip(['soma', 'axon', 'dendrite', 'neuropil', 'artefact', 'unknown']) - - # -------------- Trigger a processing routine -------------- @schema class ProcessingTask(dj.Manual): definition = """ - -> scan.Scan + -> Preprocessing -> ProcessingParamSet --- processing_output_dir: varchar(255) # output directory of the processed scan relative to root data directory @@ -179,10 +365,10 @@ class Curation(dj.Manual): -> Processing curation_id: int --- - curation_time: 
datetime # time of generation of this set of curated results + curation_time: datetime # time of generation of this set of curated results curation_output_dir: varchar(255) # output directory of the curated results, relative to root data directory manual_curation: bool # has manual curation been performed on this result? - curation_note='': varchar(2000) + curation_note='': varchar(2000) """ def create1_from_processing_task(self, key, is_curated=False, curation_note=''): @@ -217,62 +403,29 @@ def create1_from_processing_task(self, key, is_curated=False, curation_note=''): @schema class MotionCorrection(dj.Imported): - definition = """ + definition = """ -> Processing --- - -> scan.Channel.proj(motion_correct_channel='channel') # channel used for motion correction in this processing task + -> Channel.proj(motion_correct_channel='channel') # channel used for motion correction in this processing task """ class RigidMotionCorrection(dj.Part): - definition = """ + definition = """ -> master --- outlier_frames=null : longblob # mask with true for frames with outlier shifts (already corrected) y_shifts : longblob # (pixels) y motion correction shifts x_shifts : longblob # (pixels) x motion correction shifts - z_shifts=null : longblob # (pixels) z motion correction shifts (z-drift) + z_shifts=null : longblob # (pixels) z motion correction shifts (z-drift) y_std : float # (pixels) standard deviation of y shifts across all frames x_std : float # (pixels) standard deviation of x shifts across all frames z_std=null : float # (pixels) standard deviation of z shifts across all frames """ - class NonRigidMotionCorrection(dj.Part): - """ - Piece-wise rigid motion correction - - tile the FOV into multiple 3D blocks/patches - """ - definition = """ - -> master - --- - outlier_frames=null : longblob # mask with true for frames with outlier shifts (already corrected) - block_height : int # (pixels) - block_width : int # (pixels) - block_depth : int # (pixels) - block_count_y : int # number of blocks tiled in the y direction - block_count_x : int # number of blocks tiled in the x direction - block_count_z : int # number of blocks tiled in the z direction - """ - - class Block(dj.Part): - definition = """ # FOV-tiled blocks used for non-rigid motion correction - -> master.NonRigidMotionCorrection - block_id : int - --- - block_y : longblob # (y_start, y_end) in pixel of this block - block_x : longblob # (x_start, x_end) in pixel of this block - block_z : longblob # (z_start, z_end) in pixel of this block - y_shifts : longblob # (pixels) y motion correction shifts for every frame - x_shifts : longblob # (pixels) x motion correction shifts for every frame - z_shifts=null : longblob # (pixels) x motion correction shifts for every frame - y_std : float # (pixels) standard deviation of y shifts across all frames - x_std : float # (pixels) standard deviation of x shifts across all frames - z_std=null : float # (pixels) standard deviation of z shifts across all frames - """ - class Summary(dj.Part): definition = """ # summary images for each field and channel after corrections -> master - -> scan.ScanInfo.Field + -> RecordingInfo.Plane --- ref_image=null : longblob # image used as alignment template average_image : longblob # mean of registered frames @@ -409,28 +562,22 @@ def make(self, key): @schema class Segmentation(dj.Computed): definition = """ # Different mask segmentations. 
- -> Curation - --- - -> MotionCorrection + -> MotionCorrection """ - @property - def key_source(self): - return Curation & MotionCorrection - class Mask(dj.Part): definition = """ # A mask produced by segmentation. -> master mask : smallint --- - -> scan.Channel.proj(segmentation_channel='channel') # channel used for segmentation + -> Channel.proj(segmentation_channel='channel') # channel used for segmentation mask_npix : int # number of pixels in ROIs mask_center_x=null : int # center x coordinate in pixel # TODO: determine why some masks don't have information, thus null required mask_center_y=null : int # center y coordinate in pixel mask_center_z=null : int # center z coordinate in pixel mask_xpix=null : longblob # x coordinates in pixels - mask_ypix=null : longblob # y coordinates in pixels - mask_zpix=null : longblob # z coordinates in pixels + mask_ypix=null : longblob # y coordinates in pixels + mask_zpix=null : longblob # z coordinates in pixels mask_weights : longblob # weights of the mask at the indices above """ @@ -484,11 +631,11 @@ def make(self, key): # infer "segmentation_channel" - from params if available, else from miniscope analysis loader params = (ProcessingParamSet * ProcessingTask & key).fetch1('params') - segmentation_channel = params.get('segmentation_channel', + segmentation_channel = params.get('segmentation_channel', loaded_miniscope_analysis.segmentation_channel) self.insert1(key) - self.Mask.insert([{**key, + self.Mask.insert([{**key, 'segmentation_channel': segmentation_channel, 'mask': mask['mask_id'], 'mask_npix': mask['mask_npix'], @@ -497,8 +644,8 @@ def make(self, key): 'mask_xpix': mask['mask_xpix'], 'mask_ypix': mask['mask_ypix'], 'mask_weights': mask['mask_weights']} - for mask in loaded_miniscope_analysis.masks], - ignore_extra_fields=True) + for mask in loaded_miniscope_analysis.masks], + ignore_extra_fields=True) else: raise NotImplementedError(f'Unknown/unimplemented method: {method}') @@ -547,7 +694,7 @@ class Trace(dj.Part): definition = """ -> master -> Segmentation.Mask - -> scan.Channel.proj(fluorescence_channel='channel') # the channel that this trace comes from + -> Channel.proj(fluorescence_channel='channel') # the channel that this trace comes from --- fluorescence : longblob # fluorescence trace associated with this mask neuropil_fluorescence=null : longblob # Neuropil fluorescence trace @@ -565,23 +712,23 @@ def make(self, key): loaded_caiman.segmentation_channel) self.insert1(key) - self.Trace.insert([{**key, + self.Trace.insert([{**key, 'mask': mask['mask_id'], 'fluorescence_channel': segmentation_channel, 'fluorescence': mask['inferred_trace']} for mask in loaded_caiman.masks]) - + elif method == 'mcgill_miniscope_analysis': loaded_miniscope_analysis = loaded_result # infer "segmentation_channel" - from params if available, else from miniscope analysis loader params = (ProcessingParamSet * ProcessingTask & key).fetch1('params') - segmentation_channel = params.get('segmentation_channel', + segmentation_channel = params.get('segmentation_channel', loaded_miniscope_analysis.segmentation_channel) self.insert1(key) - self.Trace.insert([{**key, - 'mask': mask['mask_id'], + self.Trace.insert([{**key, + 'mask': mask['mask_id'], 'fluorescence_channel': segmentation_channel, 'fluorescence': mask['raw_trace']} for mask in loaded_miniscope_analysis.masks]) @@ -596,9 +743,9 @@ class ActivityExtractionMethod(dj.Lookup): extraction_method: varchar(200) """ - contents = zip(['caiman_deconvolution', - 'caiman_dff', - 
'mcgill_miniscope_analysis_deconvolution', + contents = zip(['caiman_deconvolution', + 'caiman_dff', + 'mcgill_miniscope_analysis_deconvolution', 'mcgill_miniscope_analysis_dff']) @@ -614,7 +761,7 @@ class Trace(dj.Part): -> master -> Fluorescence.Trace --- - activity_trace: longblob # + activity_trace: longblob # """ @property @@ -647,7 +794,7 @@ def make(self, key): loaded_caiman.segmentation_channel) self.insert1(key) - self.Trace.insert([{**key, + self.Trace.insert([{**key, 'mask': mask['mask_id'], 'fluorescence_channel': segmentation_channel, 'activity_trace': mask[attr_mapper[key['extraction_method']]]} @@ -661,11 +808,11 @@ def make(self, key): # infer "segmentation_channel" - from params if available, else from miniscope analysis loader params = (ProcessingParamSet * ProcessingTask & key).fetch1('params') - segmentation_channel = params.get('segmentation_channel', + segmentation_channel = params.get('segmentation_channel', loaded_miniscope_analysis.segmentation_channel) self.insert1(key) - self.Trace.insert([{**key, + self.Trace.insert([{**key, 'mask': mask['mask_id'], 'fluorescence_channel': segmentation_channel, 'activity_trace': mask[attr_mapper[key['extraction_method']]]} @@ -693,7 +840,7 @@ def get_loader_result(key, table): method, output_dir = (ProcessingParamSet * table & key).fetch1( 'processing_method', _table_attribute_mapper[table.__name__]) - root_dir = pathlib.Path(get_imaging_root_data_dir()) + root_dir = pathlib.Path(get_miniscope_root_data_dir()) output_dir = root_dir / output_dir if method == 'caiman': From 7c046a94006244e183f01952ab4cbbc8665add28 Mon Sep 17 00:00:00 2001 From: shenshan Date: Mon, 17 May 2021 11:20:02 -0700 Subject: [PATCH 02/64] remove planes; preliminary separation of motion correction and segmentation --- element_miniscope/miniscope.py | 198 +++++++++++++-------------------- 1 file changed, 75 insertions(+), 123 deletions(-) diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py index ab1edc6..b19a5c7 100644 --- a/element_miniscope/miniscope.py +++ b/element_miniscope/miniscope.py @@ -114,11 +114,11 @@ class RecordingInfo(dj.Imported): -> Recording --- nchannels : tinyint # number of channels - nplanes : int # number of recording planes nframes : int # number of recorded frames - x=null : float # (um) 0 point in the motor coordinate system - y=null : float # (um) 0 point in the motor coordinate system - z=null : float # (um) 0 point in the motor coordinate system + px_height=null : smallint # height in pixels + px_width=null : smallint # width in pixels + um_height=null : float # height in microns + um_width=null : float # width in microns fps : float # (Hz) frames per second - volumetric scan rate gain=null : float # recording gain spatial_downsample=1 : tinyint # e.g. 1, 2, 4, 8. 
1 for no downsampling @@ -126,21 +126,11 @@ class RecordingInfo(dj.Imported): led_power : float # LED power used in the given recording """ - class Plane(dj.Part): - definition = """ # field-specific scan information - -> master - plane_id : int - --- - px_height=null : smallint # height in pixels - px_width=null : smallint # width in pixels - um_height=null : float # height in microns - um_width=null : float # width in microns - plane_z=null : float # (um) relative depth of the recording plane - """ - class File(dj.Part): definition = """ -> master + recording_file_id : smallint unsigned + --- recording_file_path: varchar(255) # filepath relative to root data directory """ @@ -166,14 +156,9 @@ def make(self, key): self.insert1(dict(key, nchannels=1, nframes=nframes, - nplanes=1, - fps=fps)) - - # Insert Plane(s) - self.Plane.insert1(dict(key, - plane_id=0, - px_height=frame_size[0], - px_width=frame_size[1])) + fps=fps, + px_height=frame_size[0], + px_width=frame_size[1])) else: raise NotImplementedError( @@ -186,121 +171,88 @@ def make(self, key): @schema -class PreprocessingMethod(dj.Lookup): +class MotionCorrectionMethod(dj.Lookup): definition = """ - preprocessing_method : varchar(12) + motion_correction_method: char(32) --- - preprocessing_method_desc='': varchar(255) + motion_correction_method_desc='': varchar(1000) """ - contents = [ - ['inscopix', ''], - ['no preprocessing', '']] + contents = zip(['mcgill_miniscope_analysis', '']) @schema -class Preprocessing(dj.Manual): +class MotionCorretionParamSet(dj.Lookup): definition = """ - -> RecordingInfo + motion_correction_paramset_idx : smallint --- - -> PreprocessingMethod + -> MotionCorrectionMethod + motion_correction_paramset_desc='' : varchar(128) + motion_correction_paramset_hash : uuid + unique index (motion_correction_paramset_hash) + motion_correction_params : longblob # dictionary of all motion correction parameters """ - class Plane(dj.Part): - definition = """ - -> RecordingInfo.Plane - --- - trim_initial_frames=0 : boolean - cropping_imaging=0 : boolean - crop_area_left=0 : smallint # (pixels) - crop_area_top=0 : smallint # (pixels) - crop_area_px_width=null : smallint # (pixels) - crop_area_px_height=null : smallint # (pixels) - fix_defective_pixels=0 : boolean - spatial_downsample=1 : tinyint # spatial downsample with respect to the raw movie - temporal_downsampl=1 : tinyint # temporal downsample with respect to the raw movie - """ - - class File(dj.Part): - definition = """ - -> master - preprocessing_filepath : varchar(255) # filepath of preprocessed video - """ - @classmethod - def create1_from_recording_info(self, key: dict): - """ - A convenient function to create a new corresponding "Preprocessing" entry for a particular - "RecordingInfo" entry. 
-        """
-        if key not in RecordingInfo():
-            raise ValueError(f'No corresponding entry in RecordingInfo for: {key}')
-
-        self.insert1(dict(
-            key, preprocessing_method='no preprocessing'))
-
-        if RecordingInfo.Plane & key:
-            self.insert(
-                [dict(key, crop_area_width=plane['px_width'],
-                      crop_area_height=plane['px_height'])
-                 for plane in (RecordingInfo.Plane & key).fetch(as_dict=True)])
-
-        if RecordingInfo.File & key:
-            self.insert(
-                [dict(key, preprocessing_filepath=file['recording_filepath'])
-                 for file in (RecordingInfo.File & key).fetch(as_dict=True)])
    def insert_new_params(cls, motion_correction_method: str,
                          motion_correction_paramset_idx: int,
                          motion_correction_paramset_desc: str,
                          motion_correction_params: dict):
        param_dict = {'motion_correction_method': motion_correction_method,
                      'motion_correction_paramset_idx': motion_correction_paramset_idx,
                      'motion_correction_paramset_desc': motion_correction_paramset_desc,
                      'motion_correction_params': motion_correction_params,
                      'motion_correction_paramset_hash': dict_to_uuid(motion_correction_params)}
        q_param = cls & {'motion_correction_paramset_hash':
                         param_dict['motion_correction_paramset_hash']}

        if q_param:  # If the specified param-set already exists
            pname = q_param.fetch1('motion_correction_paramset_idx')
            if pname == motion_correction_paramset_idx:  # If the existed set has the same name: job done
                return
            else:  # If not same name: human error, trying to add the same paramset with a different name
                raise dj.DataJointError(
                    'The specified param-set already exists - name: {}'.format(pname))
        else:
            cls.insert1(param_dict)


@schema
class SegmentationMethod(dj.Lookup):
    definition = """
    segmentation_method : char(32)
    ---
    segmentation_method_desc='': varchar(128)
    """
    contents = zip(['mcgill_miniscope_analysis', ''])


@schema
class SegmentationParamSet(dj.Lookup):
    definition = """
    segmentation_paramset_idx : smallint
    ---
    -> SegmentationMethod
    segmentation_paramset_desc='' : varchar(128)
    segmentation_paramset_hash : uuid
    unique index (segmentation_paramset_hash)
    segmentation_params : longblob  # dictionary of all segmentation parameters
    """
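    # Usage sketch for registering a parameter set (values below are
    # illustrative placeholders, not parameters shipped with this commit):
    #   SegmentationParamSet.insert_new_params(
    #       segmentation_method='mcgill_miniscope_analysis',
    #       segmentation_idx=0,
    #       segmentation_paramset_desc='default MiniscopeAnalysis parameters',
    #       segmentation_params={})
    # dict_to_uuid hashes the params dict so duplicate sets are rejected.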
    @classmethod
    def insert_new_params(cls, segmentation_method: str,
                          segmentation_idx: int,
                          segmentation_paramset_desc: str,
                          segmentation_params: dict):
        param_dict = {'segmentation_processing_method': segmentation_processing_method,
                      'segmentation_paramset_idx': segmentation_paramset_idx,
                      'segmentation_paramset_desc': segmentation_paramset_desc,
                      'segmentation_params': segmentation_params,
                      'segmentation_paramset_hash': dict_to_uuid(segmentation_params)}
        q_param = cls & {'segmentation_paramset_hash':
                         param_dict['segmentation_paramset_hash']}

        if q_param:  # If the specified param-set already exists
            pname = q_param.fetch1('segmentation_paramset_idx')
            if pname == segmentation_paramset_idx:  # If the existed set has the same name: job done
                return
            else:  # If not same name: human error, trying to add the same paramset with different name
                raise dj.DataJointError(
                    'The specified param-set already exists - name: {}'.format(pname))
        else:
            cls.insert1(param_dict)


# -------------- Trigger a processing routine --------------

@schema
class ProcessingTask(dj.Manual):
    definition = """
    # Manual table marking a processing task to be triggered or manually processed
    -> RecordingInfo
    processing_task_idx : smallint  # processing task
    ---
    -> MotionCorrectionParamSet
    -> RoiExtractionParamSet
    processing_output_motion_correction_dir: varchar(255) # relative directory of motion relative to the root data directory
    processing_segmentation_output_dir: varchar(255) # relative directory of roi extraction result respect to root directory
    motion_correction_task_mode='load': enum('load', 'trigger') # 'load': load existing motion correction results, 'trigger': trigger motion correction procedure
    segmentation_task_mode='load': enum('load', 'trigger') # 'load': load existing roi extraction results, 'trigger': trigger
    """


@schema
class Processing(dj.Computed):
    definition = """
    -> ProcessingTask
    ---
    processing_time     : datetime  # time of generation of this set of processed, segmented results
    package_version=''  : varchar(16)
    """

-    # Run processing only on Scan with ScanInfo inserted
-    @property
-    def key_source(self):
-        return ProcessingTask & scan.ScanInfo
-
    def make(self, key):
        task_mode = (ProcessingTask & key).fetch1('task_mode')
        method, loaded_result = get_loader_result(key, ProcessingTask)

class Curation(dj.Manual):
    -> Processing
    curation_id: int
    ---
    curation_time: datetime  # time of generation of this set of curated results
-    curation_output_dir: varchar(255)  # output directory of the curated results, relative to root data directory
+    curation_output_motion_correction_dir: varchar(255) # relative directory of motion relative to the root data directory
+    curation_segmentation_output_dir: varchar(255) # relative directory of roi extraction result respect to root directory
    manual_curation: bool  # has manual curation been performed on this result?
curation_note='': varchar(2000) """ @@ -404,7 +357,7 @@ def create1_from_processing_task(self, key, is_curated=False, curation_note=''): @schema class MotionCorrection(dj.Imported): definition = """ - -> Processing + -> Curation --- -> Channel.proj(motion_correct_channel='channel') # channel used for motion correction in this processing task """ @@ -425,7 +378,6 @@ class RigidMotionCorrection(dj.Part): class Summary(dj.Part): definition = """ # summary images for each field and channel after corrections -> master - -> RecordingInfo.Plane --- ref_image=null : longblob # image used as alignment template average_image : longblob # mean of registered frames From e53376ffa7f9d9c5fbb73d75b4319a5eebfa725a Mon Sep 17 00:00:00 2001 From: shenshan Date: Mon, 17 May 2021 12:22:44 -0700 Subject: [PATCH 03/64] Fix typos --- element_miniscope/miniscope.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py index b19a5c7..4e4827a 100644 --- a/element_miniscope/miniscope.py +++ b/element_miniscope/miniscope.py @@ -181,7 +181,7 @@ class MotionCorrectionMethod(dj.Lookup): @schema -class MotionCorretionParamSet(dj.Lookup): +class MotionCorrectionParamSet(dj.Lookup): definition = """ motion_correction_paramset_idx : smallint --- @@ -194,7 +194,7 @@ class MotionCorretionParamSet(dj.Lookup): @classmethod def insert_new_params(cls, motion_correction_method: str, - motion_correction_parameterset_idx: int, + motion_correction_paramset_idx: int, motion_correction_paramset_desc: str, motion_correction_params: dict): param_dict = {'motion_correction_method': motion_correction_method, @@ -243,7 +243,7 @@ def insert_new_params(cls, segmentation_method: str, segmentation_idx: int, segmentation_paramset_desc: str, segmentation_params: dict): - param_dict = {'segmentation_processing_method': segmentation_processing_method, + param_dict = {'segmentation_method': segmentation_method, 'segmentation_paramset_idx': segmentation_paramset_idx, 'segmentation_paramset_desc': segmentation_paramset_desc, 'segmentation_params': segmentation_params, From 79caca6569e15fd7b542fd7eac123d9ea626157f Mon Sep 17 00:00:00 2001 From: shenshan Date: Mon, 17 May 2021 14:19:45 -0700 Subject: [PATCH 04/64] Update element_miniscope/miniscope.py Co-authored-by: Kabilar Gunalan --- element_miniscope/miniscope.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py index 4e4827a..a125efe 100644 --- a/element_miniscope/miniscope.py +++ b/element_miniscope/miniscope.py @@ -167,7 +167,7 @@ def make(self, key): # Insert file(s) root = pathlib.Path(get_miniscope_root_data_dir()) recording_files = [pathlib.Path(f).relative_to(root).as_posix() for f in recording_filepaths] - self.File.insert([{**key, 'recording_file_path': f} for f in recording_files]) + self.File.insert([{**key, 'recording_file_id': i, 'recording_file_path': f} for i, f in enumerate(recording_files)]) @schema From 02792d9cbf117e9c6309980218c062a9fafc1cf2 Mon Sep 17 00:00:00 2001 From: shenshan Date: Mon, 17 May 2021 18:21:33 -0700 Subject: [PATCH 05/64] Change a few table definitions and remove scan --- element_miniscope/miniscope.py | 35 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 21 deletions(-) diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py index 4e4827a..e9056a9 100644 --- a/element_miniscope/miniscope.py +++ b/element_miniscope/miniscope.py @@ -7,26 +7,24 @@ import 
importlib import inspect -from . import scan - schema = dj.schema() _linking_module = None -def activate(imaging_schema_name, scan_schema_name=None, *, +def activate(miniscope_schema_name, *, create_schema=True, create_tables=True, linking_module=None): """ - activate(imaging_schema_name, *, scan_schema_name=None, create_schema=True, create_tables=True, linking_module=None) - :param imaging_schema_name: schema name on the database server to activate the `imaging` module - :param scan_schema_name: schema name on the database server to activate the `scan` module - - may be omitted if the `scan` module is already activated + activate(miniscope_schema_name, *, create_schema=True, create_tables=True, linking_module=None) + :param miniscope_schema_name: schema name on the database server to activate the `miniscope` module :param create_schema: when True (default), create schema in the database if it does not yet exist. :param create_tables: when True (default), create tables in the database if they do not yet exist. :param linking_module: a module name or a module containing the - required dependencies to activate the `imaging` module: + required dependencies to activate the `miniscope` module: Upstream tables: - + Session: parent table to Scan, typically identifying a recording session + + Session: parent table to Recording, typically identifying a recording session + + Equipment: Reference table for Recording, specifying the equipment used for the acquisition of this miniscope recording + + Location: Reference table for RecordingLocation, specifying the brain location where this miniscope recording is acquired Functions: + get_miniscope_root_data_dir() -> str Retrieve the root data directory - e.g. containing all subject/sessions data @@ -41,21 +39,19 @@ def activate(imaging_schema_name, scan_schema_name=None, *, global _linking_module _linking_module = linking_module - scan.activate(scan_schema_name, create_schema=create_schema, - create_tables=create_tables, linking_module=linking_module) schema.activate(imaging_schema_name, create_schema=create_schema, create_tables=create_tables, add_objects=_linking_module.__dict__) # -------------- Functions required by the element-calcium-imaging -------------- -def get_miniscope_daq_v3_files(key: dict) -> str: +def get_miniscope_daq_v3_files(recording_key: dict) -> str: """ - Retrieve the Miniscope DAQ V3 files associated with a given Scan - :param scan_key: key of a Scan + Retrieve the Miniscope DAQ V3 files associated with a given Recording + :param recording_key: key of a Recording :return: Miniscope DAQ V3 files full file-path """ - return _linking_module.get_miniscope_daq_v3_files(key) + return _linking_module.get_miniscope_daq_v3_files(recording_key) def get_miniscope_root_data_dir() -> str: @@ -119,10 +115,9 @@ class RecordingInfo(dj.Imported): px_width=null : smallint # width in pixels um_height=null : float # height in microns um_width=null : float # width in microns - fps : float # (Hz) frames per second - volumetric scan rate + fps : float # (Hz) frames per second gain=null : float # recording gain spatial_downsample=1 : tinyint # e.g. 1, 2, 4, 8. 1 for no downsampling - temporal_downsample=1: tinyint # e.g. 1, 2, 4, 8. 
1 for no downsampling led_power : float # LED power used in the given recording """ @@ -135,7 +130,7 @@ class File(dj.Part): """ def make(self, key): - """ Read and store some scan meta information.""" + """ Read and store some meta information.""" acq_software = (Recording & key).fetch1('acq_software') if acq_software == 'Miniscope-DAQ-V3': @@ -240,7 +235,7 @@ class SegmentationParamSet(dj.Lookup): @classmethod def insert_new_params(cls, segmentation_method: str, - segmentation_idx: int, + segmentation_paramset_idx: int, segmentation_paramset_desc: str, segmentation_params: dict): param_dict = {'segmentation_method': segmentation_method, @@ -526,10 +521,8 @@ class Mask(dj.Part): mask_npix : int # number of pixels in ROIs mask_center_x=null : int # center x coordinate in pixel # TODO: determine why some masks don't have information, thus null required mask_center_y=null : int # center y coordinate in pixel - mask_center_z=null : int # center z coordinate in pixel mask_xpix=null : longblob # x coordinates in pixels mask_ypix=null : longblob # y coordinates in pixels - mask_zpix=null : longblob # z coordinates in pixels mask_weights : longblob # weights of the mask at the indices above """ From 6712f790c5a667f8829941e0b0e059f11ea5809e Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Wed, 19 May 2021 00:34:26 -0600 Subject: [PATCH 06/64] Delete CaImAn loader --- element_miniscope/readers/caiman_loader.py | 187 --------------------- 1 file changed, 187 deletions(-) delete mode 100644 element_miniscope/readers/caiman_loader.py diff --git a/element_miniscope/readers/caiman_loader.py b/element_miniscope/readers/caiman_loader.py deleted file mode 100644 index cde3e6d..0000000 --- a/element_miniscope/readers/caiman_loader.py +++ /dev/null @@ -1,187 +0,0 @@ -import h5py -import caiman as cm -import scipy -import numpy as np -from datetime import datetime -import os -import pathlib -from tqdm import tqdm - - -_required_hdf5_fields = ['/motion_correction/reference_image', - '/motion_correction/correlation_image', - '/motion_correction/average_image', - '/motion_correction/max_image', - '/estimates/A'] - - -class CaImAn: - """ - Parse the CaImAn output file - Expecting the following objects: - - 'dims': - - 'dview': - - 'estimates': Segmentations and traces - - 'mmap_file': - - 'params': Input parameters - - 'remove_very_bad_comps': - - 'skip_refinement': - - 'motion_correction': Motion correction shifts and summary images - CaImAn results doc: https://caiman.readthedocs.io/en/master/Getting_Started.html#result-variables-for-2p-batch-analysis - """ - - def __init__(self, caiman_dir): - # ---- Search and verify CaImAn output file exists ---- - caiman_dir = pathlib.Path(caiman_dir) - if not caiman_dir.exists(): - raise FileNotFoundError('CaImAn directory not found: {}'.format(caiman_dir)) - - for fp in caiman_dir.glob('*.hdf5'): - with h5py.File(fp, 'r') as h5f: - if all(s in h5f for s in _required_hdf5_fields): - self.caiman_fp = fp - - # ---- Initialize CaImAn's results ---- - self.cnmf = cm.source_extraction.cnmf.cnmf.load_CNMF(self.caiman_fp) - self.params = self.cnmf.params - - self.h5f = h5py.File(self.caiman_fp, 'r') - self.motion_correction = self.h5f['motion_correction'] - self._masks = None - - # ---- Metainfo ---- - self.creation_time = datetime.fromtimestamp(os.stat(self.caiman_fp).st_ctime) - self.curation_time = datetime.fromtimestamp(os.stat(self.caiman_fp).st_ctime) - - @property - def masks(self): - if self._masks is None: - self._masks = self.extract_masks() - return self._masks - - 
@property - def alignment_channel(self): - return 0 # hard-code to channel index 0 - - @property - def segmentation_channel(self): - return 0 # hard-code to channel index 0 - - def extract_masks(self): - if self.params.motion['is3D']: - raise NotImplemented('CaImAn mask extraction for volumetric data not yet implemented') - - comp_contours = cm.utils.visualization.get_contours( - self.cnmf.estimates.A, self.cnmf.dims) - - masks = [] - for comp_idx, comp_contour in enumerate(comp_contours): - ind, _, weights = scipy.sparse.find(self.cnmf.estimates.A[:, comp_idx]) - if self.cnmf.params.motion['is3D']: - xpix, ypix, zpix = np.unravel_index(ind, self.cnmf.dims, order='F') - center_x, center_y, center_z = comp_contour['CoM'].astype(int) - else: - xpix, ypix = np.unravel_index(ind, self.cnmf.dims, order='F') - center_x, center_y = comp_contour['CoM'].astype(int) - center_z = 0 - zpix = np.full(len(weights), center_z) - - masks.append({'mask_id': comp_contour['neuron_id'], - 'mask_npix': len(weights), 'mask_weights': weights, - 'mask_center_x': center_x, - 'mask_center_y': center_y, - 'mask_center_z': center_z, - 'mask_xpix': xpix, 'mask_ypix': ypix, 'mask_zpix': zpix, - 'inferred_trace': self.cnmf.estimates.C[comp_idx, :], - 'dff': self.cnmf.estimates.F_dff[comp_idx, :], - 'spikes': self.cnmf.estimates.S[comp_idx, :]}) - return masks - - -def save_mc(mc, caiman_fp, is3D): - """ - DataJoint Imaging Element - CaImAn Integration - Run these commands after the CaImAn analysis has completed. - This will save the relevant motion correction data into the '*.hdf5' file. - Please do not clear variables from memory prior to running these commands. - The motion correction (mc) object will be read from memory. - - 'mc' : CaImAn motion correction object - 'caiman_fp' : CaImAn output (*.hdf5) file path - - 'shifts_rig' : Rigid transformation x and y shifts per frame - 'x_shifts_els' : Non rigid transformation x shifts per frame per block - 'y_shifts_els' : Non rigid transformation y shifts per frame per block - """ - - # Load motion corrected mmap image - mc_image = cm.load(mc.mmap_file, is3D=is3D) - - # Compute motion corrected summary images - average_image = np.mean(mc_image, axis=0) - max_image = np.max(mc_image, axis=0) - - # Compute motion corrected correlation image - correlation_image = cm.local_correlations(mc_image.transpose((1, 2, 3, 0) - if is3D else (1, 2, 0))) - correlation_image[np.isnan(correlation_image)] = 0 - - # Compute mc.coord_shifts_els - grid = [] - if is3D: - for _, _, _, x, y, z, _ in cm.motion_correction.sliding_window_3d( - mc_image[0, :, :, :], mc.overlaps, mc.strides): - grid.append([x, x + mc.overlaps[0] + mc.strides[0], - y, y + mc.overlaps[1] + mc.strides[1], - z, z + mc.overlaps[2] + mc.strides[2]]) - else: - for _, _, x, y, _ in cm.motion_correction.sliding_window( - mc_image[0, :, :], mc.overlaps, mc.strides): - grid.append([x, x + mc.overlaps[0] + mc.strides[0], - y, y + mc.overlaps[1] + mc.strides[1]]) - - # Open hdf5 file and create 'motion_correction' group - h5f = h5py.File(caiman_fp, 'r+') - h5g = h5f.require_group("motion_correction") - - # Write motion correction shifts and motion corrected summary images to hdf5 file - if mc.pw_rigid: - h5g.require_dataset("x_shifts_els", shape=np.shape(mc.x_shifts_els), - data=mc.x_shifts_els, - dtype=mc.x_shifts_els[0][0].dtype) - h5g.require_dataset("y_shifts_els", shape=np.shape(mc.y_shifts_els), - data=mc.y_shifts_els, - dtype=mc.y_shifts_els[0][0].dtype) - if is3D: - h5g.require_dataset("z_shifts_els", 
shape=np.shape(mc.z_shifts_els), - data=mc.z_shifts_els, - dtype=mc.z_shifts_els[0][0].dtype) - - h5g.require_dataset("coord_shifts_els", shape=np.shape(grid), - data=grid, dtype=type(grid[0][0])) - - # For CaImAn, reference image is still a 2D array even for the case of 3D - # Assume that the same ref image is used for all the planes - reference_image = np.tile(mc.total_template_els, (1, 1, correlation_image.shape[-1]))\ - if is3D else mc.total_template_els - else: - h5g.require_dataset("shifts_rig", shape=np.shape(mc.shifts_rig), - data=mc.shifts_rig, dtype=mc.shifts_rig[0].dtype) - h5g.require_dataset("coord_shifts_rig", shape=np.shape(grid), - data=grid, dtype=type(grid[0][0])) - reference_image = np.tile(mc.total_template_rig, (1, 1, correlation_image.shape[-1]))\ - if is3D else mc.total_template_rig - - h5g.require_dataset("reference_image", shape=np.shape(reference_image), - data=reference_image, - dtype=reference_image.dtype) - h5g.require_dataset("correlation_image", shape=np.shape(correlation_image), - data=correlation_image, - dtype=correlation_image.dtype) - h5g.require_dataset("average_image", shape=np.shape(average_image), - data=average_image, dtype=average_image.dtype) - h5g.require_dataset("max_image", shape=np.shape(max_image), - data=max_image, dtype=max_image.dtype) - - # Close hdf5 file - h5f.close() From f9ec391b1ac66d5dbe280a5ad8e692e5381c0a86 Mon Sep 17 00:00:00 2001 From: shenshan Date: Fri, 21 May 2021 14:51:25 -0700 Subject: [PATCH 07/64] Update element_miniscope/miniscope.py Co-authored-by: Kabilar Gunalan --- element_miniscope/miniscope.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py index 0252908..7ad9478 100644 --- a/element_miniscope/miniscope.py +++ b/element_miniscope/miniscope.py @@ -267,7 +267,7 @@ class ProcessingTask(dj.Manual): processing_task_idx : smallint # processing task --- -> MotionCorrectionParamSet - -> RoiExtractionParamSet + -> SegmentationParamSet processing_output_motion_correction_dir: varchar(255) # relative directory of motion relative to the root data directory processing_segmentation_output_dir: varchar(255) # relative directory of roi extraction result respect to root directory motion_correction_task_mode='load': enum('load', 'trigger') # 'load': load existing motion correction results, 'trigger': trigger motion correction procedure From 59137e22c92ca86d5d605f1ee6483641e6e41ca0 Mon Sep 17 00:00:00 2001 From: shenshan Date: Fri, 21 May 2021 14:51:57 -0700 Subject: [PATCH 08/64] Update element_miniscope/miniscope.py Co-authored-by: Kabilar Gunalan --- element_miniscope/miniscope.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py index 7ad9478..3774b48 100644 --- a/element_miniscope/miniscope.py +++ b/element_miniscope/miniscope.py @@ -268,7 +268,7 @@ class ProcessingTask(dj.Manual): --- -> MotionCorrectionParamSet -> SegmentationParamSet - processing_output_motion_correction_dir: varchar(255) # relative directory of motion relative to the root data directory + processing_motion_correction_output_dir: varchar(255) # relative directory of motion relative to the root data directory processing_segmentation_output_dir: varchar(255) # relative directory of roi extraction result respect to root directory motion_correction_task_mode='load': enum('load', 'trigger') # 'load': load existing motion correction results, 'trigger': trigger motion correction procedure 
segmentation_task_mode='load': enum('load', 'trigger') # 'load': load existing roi extraction results, 'trigger': trigger From 74e8f712432701c27a5317897ad16c93c0961b02 Mon Sep 17 00:00:00 2001 From: shenshan Date: Fri, 21 May 2021 14:52:08 -0700 Subject: [PATCH 09/64] Update element_miniscope/miniscope.py Co-authored-by: Kabilar Gunalan --- element_miniscope/miniscope.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py index 3774b48..fabb09a 100644 --- a/element_miniscope/miniscope.py +++ b/element_miniscope/miniscope.py @@ -313,7 +313,7 @@ class Curation(dj.Manual): curation_id: int --- curation_time: datetime # time of generation of this set of curated results - curation_output_motion_correction_dir: varchar(255) # relative directory of motion relative to the root data directory + curation_motion_correction_output_dir: varchar(255) # relative directory of motion relative to the root data directory curation_segmentation_output_dir: varchar(255) # relative directory of roi extraction result respect to root directory manual_curation: bool # has manual curation been performed on this result? curation_note='': varchar(2000) From c0cdfe4dc3db861ba6436faf906eae764d5ae2eb Mon Sep 17 00:00:00 2001 From: shenshan Date: Fri, 21 May 2021 14:55:58 -0700 Subject: [PATCH 10/64] add back non-rigid motion correction --- element_miniscope/miniscope.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py index 0252908..03229df 100644 --- a/element_miniscope/miniscope.py +++ b/element_miniscope/miniscope.py @@ -370,6 +370,22 @@ class RigidMotionCorrection(dj.Part): z_std=null : float # (pixels) standard deviation of z shifts across all frames """ + class NonRigidMotionCorrection(dj.Part): + """ + Piece-wise rigid motion correction + """ + definition = """ + -> master + --- + outlier_frames=null : longblob # mask with true for frames with outlier shifts (already corrected) + block_height : int # (pixels) + block_width : int # (pixels) + block_depth : int # (pixels) + block_count_y : int # number of blocks tiled in the y direction + block_count_x : int # number of blocks tiled in the x direction + block_count_z : int # number of blocks tiled in the z direction + """ + class Summary(dj.Part): definition = """ # summary images for each field and channel after corrections -> master From 4faa0f9767315711cf9c28938ac8af7ab70c91d6 Mon Sep 17 00:00:00 2001 From: shenshan Date: Fri, 21 May 2021 14:57:43 -0700 Subject: [PATCH 11/64] Roi extraction -> segmentation --- element_miniscope/miniscope.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py index e579a1e..5c91838 100644 --- a/element_miniscope/miniscope.py +++ b/element_miniscope/miniscope.py @@ -269,9 +269,9 @@ class ProcessingTask(dj.Manual): -> MotionCorrectionParamSet -> SegmentationParamSet processing_motion_correction_output_dir: varchar(255) # relative directory of motion relative to the root data directory - processing_segmentation_output_dir: varchar(255) # relative directory of roi extraction result respect to root directory + processing_segmentation_output_dir: varchar(255) # relative directory of segmentation result respect to root directory motion_correction_task_mode='load': enum('load', 'trigger') # 'load': load existing motion correction results, 'trigger': trigger motion correction procedure - 
segmentation_task_mode='load': enum('load', 'trigger') # 'load': load existing roi extraction results, 'trigger': trigger + segmentation_task_mode='load': enum('load', 'trigger') # 'load': load existing segmentation results, 'trigger': trigger """ @@ -314,7 +314,7 @@ class Curation(dj.Manual): --- curation_time: datetime # time of generation of this set of curated results curation_motion_correction_output_dir: varchar(255) # relative directory of motion relative to the root data directory - curation_segmentation_output_dir: varchar(255) # relative directory of roi extraction result respect to root directory + curation_segmentation_output_dir: varchar(255) # relative directory of segmentation result respect to root directory manual_curation: bool # has manual curation been performed on this result? curation_note='': varchar(2000) """ @@ -534,7 +534,7 @@ class Mask(dj.Part): mask : smallint --- -> Channel.proj(segmentation_channel='channel') # channel used for segmentation - mask_npix : int # number of pixels in ROIs + mask_npix : int # number of pixels in this mask mask_center_x=null : int # center x coordinate in pixel # TODO: determine why some masks don't have information, thus null required mask_center_y=null : int # center y coordinate in pixel mask_xpix=null : longblob # x coordinates in pixels From 3a2116861df6c35c70acffb5599077dcb61ec9e4 Mon Sep 17 00:00:00 2001 From: shenshan Date: Fri, 23 Jul 2021 10:42:07 -0700 Subject: [PATCH 12/64] remove scan module --- element_miniscope/miniscope.py | 16 +++ element_miniscope/scan.py | 185 --------------------------------- 2 files changed, 16 insertions(+), 185 deletions(-) delete mode 100644 element_miniscope/scan.py diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py index 5c91838..02a4b7f 100644 --- a/element_miniscope/miniscope.py +++ b/element_miniscope/miniscope.py @@ -386,6 +386,22 @@ class NonRigidMotionCorrection(dj.Part): block_count_z : int # number of blocks tiled in the z direction """ + class Block(dj.Part): + definition = """ # FOV-tiled blocks used for non-rigid motion correction + -> master.NonRigidMotionCorrection + block_id : int + --- + block_y : longblob # (y_start, y_end) in pixel of this block + block_x : longblob # (x_start, x_end) in pixel of this block + block_z : longblob # (z_start, z_end) in pixel of this block + y_shifts : longblob # (pixels) y motion correction shifts for every frame + x_shifts : longblob # (pixels) x motion correction shifts for every frame + z_shifts=null : longblob # (pixels) x motion correction shifts for every frame + y_std : float # (pixels) standard deviation of y shifts across all frames + x_std : float # (pixels) standard deviation of x shifts across all frames + z_std=null : float # (pixels) standard deviation of z shifts across all frames + """ + class Summary(dj.Part): definition = """ # summary images for each field and channel after corrections -> master diff --git a/element_miniscope/scan.py b/element_miniscope/scan.py deleted file mode 100644 index c37cf51..0000000 --- a/element_miniscope/scan.py +++ /dev/null @@ -1,185 +0,0 @@ -import datajoint as dj -import pathlib -import importlib -import inspect -import numpy as np - -schema = dj.schema() - -_linking_module = None - - -def activate(scan_schema_name, *, create_schema=True, create_tables=True, linking_module=None): - """ - activate(scan_schema_name, *, create_schema=True, create_tables=True, linking_module=None) - :param scan_schema_name: schema name on the database server to activate the `scan` module 
- :param create_schema: when True (default), create schema in the database if it does not yet exist. - :param create_tables: when True (default), create tables in the database if they do not yet exist. - :param linking_module: a module name or a module containing the - required dependencies to activate the `scan` module: - Upstream tables: - + Session: parent table to Scan, typically identifying a recording session - + Equipment: Reference table for Scan, specifying the equipment used for the acquisition of this scan - + Location: Reference table for ScanLocation, specifying the brain location where this scan is acquired - Functions: - + get_imaging_root_data_dir() -> str - Retrieve the full path for the root data directory (e.g. the mounted drive) - :return: a string with full path to the root data directory - + get_miniscope_daq_v3_file(scan_key: dict) -> str - Retrieve the Miniscope DAQ V3 files associated with a given Scan - :param scan_key: key of a Scan - :return: Miniscope DAQ V3 files full file-path - """ - - if isinstance(linking_module, str): - linking_module = importlib.import_module(linking_module) - assert inspect.ismodule(linking_module),\ - "The argument 'dependency' must be a module's name or a module" - - global _linking_module - _linking_module = linking_module - - schema.activate(scan_schema_name, create_schema=create_schema, - create_tables=create_tables, add_objects=_linking_module.__dict__) - - -# ---------------- Functions required by the element-miniscope ---------------- - - -def get_imaging_root_data_dir() -> str: - """ - Retrieve the full path for the root data directory (e.g. the mounted drive) - :return: a string with full path to the root data directory - """ - return _linking_module.get_imaging_root_data_dir() - -def get_miniscope_daq_v3_files(scan_key: dict) -> str: - """ - Retrieve the Miniscope DAQ V3 files associated with a given Scan - :param scan_key: key of a Scan - :return: Miniscope DAQ V3 files full file-path - """ - return _linking_module.get_miniscope_daq_v3_files(scan_key) - - -# ----------------------------- Table declarations ----------------------------- - - -@schema -class AcquisitionSoftware(dj.Lookup): - definition = """ # Name of acquisition software - acq_software: varchar(24) - """ - contents = zip(['Miniscope-DAQ-V3']) - - -@schema -class Channel(dj.Lookup): - definition = """ # Recording channel - channel : tinyint # 0-based indexing - """ - contents = zip(range(5)) - - -# ------------------------------------ Scan ------------------------------------ - - -@schema -class Scan(dj.Manual): - definition = """ - -> Session - scan_id: int - --- - -> Equipment - -> AcquisitionSoftware - scan_notes='' : varchar(4095) # free-notes - """ - - -@schema -class ScanLocation(dj.Manual): - definition = """ - -> Scan - --- - -> Location - """ - - -@schema -class ScanInfo(dj.Imported): - definition = """ # general data about the reso/meso scans - -> Scan - --- - nfields : tinyint # number of fields - nchannels : tinyint # number of channels - ndepths : int # Number of scanning depths (planes) - nframes : int # number of recorded frames - nrois : tinyint # number of regions of interest - x=null : float # (um) 0 point in the motor coordinate system - y=null : float # (um) 0 point in the motor coordinate system - z=null : float # (um) 0 point in the motor coordinate system - fps : float # (Hz) frames per second - volumetric scan rate - """ - - class Field(dj.Part): - definition = """ # field-specific scan information - -> master - field_idx : int - --- - 
px_height : smallint # height in pixels - px_width : smallint # width in pixels - um_height=null : float # height in microns - um_width=null : float # width in microns - field_x=null : float # (um) center of field in the motor coordinate system - field_y=null : float # (um) center of field in the motor coordinate system - field_z=null : float # (um) relative depth of field - delay_image=null : longblob # (ms) delay between the start of the scan and pixels in this field - """ - - class ScanFile(dj.Part): - definition = """ - -> master - file_path: varchar(255) # filepath relative to root data directory - """ - - def make(self, key): - """ Read and store some scan meta information.""" - acq_software = (Scan & key).fetch1('acq_software') - - if acq_software == 'Miniscope-DAQ-V3': - # Parse image dimension and frame rate - import cv2 - scan_filepaths = get_miniscope_daq_v3_files(key) - video = cv2.VideoCapture(scan_filepaths[0]) - fps = video.get(cv2.CAP_PROP_FPS) # TODO: Verify this method extracts correct value - _, frame = video.read() - frame_size = np.shape(frame) - - # Parse number of frames from timestamp.dat file - with open(scan_filepaths[-1]) as f: - next(f) - nframes = sum(1 for line in f if int(line[0]) == 0) - - # Insert in ScanInfo - self.insert1(dict(key, - nfields=1, - nchannels=1, - nframes=nframes, - ndepths=1, - fps=fps, - nrois=0)) - - # Insert Field(s) - self.Field.insert1(dict(key, - field_idx=0, - px_height=frame_size[0], - px_width=frame_size[1])) - - else: - raise NotImplementedError( - f'Loading routine not implemented for {acq_software} acquisition software') - - # Insert file(s) - root = pathlib.Path(get_imaging_root_data_dir()) - scan_files = [pathlib.Path(f).relative_to(root).as_posix() for f in scan_filepaths] - self.ScanFile.insert([{**key, 'file_path': f} for f in scan_files]) From f6348ef1418cd9fdeeb24418788e642b193564ec Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sun, 26 Sep 2021 19:42:26 -0500 Subject: [PATCH 13/64] Update version --- element_miniscope/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/element_miniscope/version.py b/element_miniscope/version.py index cc61870..1173db0 100644 --- a/element_miniscope/version.py +++ b/element_miniscope/version.py @@ -1,2 +1,2 @@ """Package metadata""" -__version__ = '0.1.1' \ No newline at end of file +__version__ = '0.1.0a1' \ No newline at end of file From 3373b0fa4502bc5deb0a651ef454c5c5f88166a2 Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Sun, 26 Sep 2021 22:36:04 -0500 Subject: [PATCH 14/64] Move loader to `element_data_loader` --- .../readers/miniscope_analysis_loader.py | 88 ------------------- 1 file changed, 88 deletions(-) delete mode 100644 element_miniscope/readers/miniscope_analysis_loader.py diff --git a/element_miniscope/readers/miniscope_analysis_loader.py b/element_miniscope/readers/miniscope_analysis_loader.py deleted file mode 100644 index c35d189..0000000 --- a/element_miniscope/readers/miniscope_analysis_loader.py +++ /dev/null @@ -1,88 +0,0 @@ -import numpy as np -import h5py -from datetime import datetime -import os -import pathlib -import scipy.ndimage - -_required_mat_ms_fields = ['Options', - 'meanFrame', - 'CorrProj', - 'PeakToNoiseProj', - 'RawTraces', - 'FiltTraces', - 'DeconvolvedTraces'] - - -class MiniscopeAnalysis: - """ - Parse the Miniscope Analysis output files - Miniscope Analysis repository: https://github.com/etterguillaume/MiniscopeAnalysis - Expecting the following objects: - - 'SFP.mat': Spatial footprints of the cells found 
-    - 'ms.mat':
-        - 'ms[Options]': Parameters used to perform CNMFE.
-        - 'ms[meanFrame]':
-        - 'ms[CorrProj]': Correlation projection from the CNMFE. Displays which pixels are correlated together and suggests the location of your cells.
-        - 'ms[PeaktoNoiseProj]': Peak-to-noise ratio of the correlation projection. Gives you an idea of most prominent cells in your recording.
-        - 'ms[RawTraces]':
-        - 'ms[FiltTraces]':
-        - 'ms[DeconvolvedTraces]':
-    """
-
-    def __init__(self, miniscope_analysis_dir):
-        # ---- Search and verify Miniscope Analysis output file exists ----
-        miniscope_analysis_dir = pathlib.Path(miniscope_analysis_dir)
-        if not miniscope_analysis_dir.exists():
-            raise FileNotFoundError(f'Miniscope Analysis directory not found: {miniscope_analysis_dir}')
-
-        self.miniscope_fp_ms = f'{miniscope_analysis_dir}/ms.mat'
-        self.miniscope_fp_sfp = f'{miniscope_analysis_dir}/SFP.mat'
-        self.mat_ms = h5py.File(self.miniscope_fp_ms, 'r')
-        self.mat_sfp = h5py.File(self.miniscope_fp_sfp, 'r')
-
-        if not all(s in self.mat_ms['ms'] for s in _required_mat_ms_fields):
-            raise ValueError(f'Miniscope Analysis file {self.miniscope_fp_ms} does not have all required fields.')
-
-        # ---- Initialize Miniscope Analysis results ----
-        self.params = self.mat_ms['ms']['Options']
-        self.average_image = self.mat_ms['ms']['meanFrame'][...]
-        self.correlation_image = self.mat_ms['ms']['CorrProj'][...]
-        self._masks = None
-
-        # ---- Metainfo ----
-        self.creation_time = datetime.fromtimestamp(os.stat(self.miniscope_fp_ms).st_ctime)
-        self.curation_time = datetime.fromtimestamp(os.stat(self.miniscope_fp_ms).st_ctime)
-
-    @property
-    def masks(self):
-        if self._masks is None:
-            self._masks = self.extract_masks()
-        return self._masks
-
-    @property
-    def alignment_channel(self):
-        return 0  # hard-code to channel index 0
-
-    @property
-    def segmentation_channel(self):
-        return 0  # hard-code to channel index 0
-
-    def extract_masks(self):
-        masks = []
-        for i in range(int(self.mat_ms['ms']['numNeurons'][0,0])):
-            center_y, center_x = scipy.ndimage.measurements.center_of_mass(self.mat_sfp['SFP'][i,:,:])
-            xpix, ypix, weights = scipy.sparse.find(self.mat_sfp['SFP'][i,:,:])
-
-            masks.append({'mask_id': i,
-                          'mask_npix': len(weights),
-                          'mask_center_x': center_x,
-                          'mask_center_y': center_y,
-                          'mask_xpix': xpix,
-                          'mask_ypix': ypix,
-                          'mask_weights': weights,
-                          'raw_trace': self.mat_ms['ms']['RawTraces'][i,:],
-                          'dff': self.mat_ms['ms']['FiltTraces'][i,:],
-                          'spikes': self.mat_ms['ms']['DeconvolvedTraces'][i,:]})
-        return masks
-

From aa97bdc1377d3ca94a7d4e6b5c5974f4b7032197 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Sun, 26 Sep 2021 22:37:29 -0500
Subject: [PATCH 15/64] Remove directory

---
 element_miniscope/readers/__init__.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 element_miniscope/readers/__init__.py

diff --git a/element_miniscope/readers/__init__.py b/element_miniscope/readers/__init__.py
deleted file mode 100644
index e69de29..0000000
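For reference, the loader removed in the two patches above survives in `element_data_loader`. Its mask-extraction idea can be reproduced standalone; below is a minimal sketch, assuming a MATLAB v7.3 `SFP.mat` holding an `SFP` array shaped `[n_cells, height, width]` (the file path is illustrative):

    import h5py
    import scipy.ndimage
    import scipy.sparse

    def extract_masks_sketch(sfp_path):
        """Yield per-cell mask dictionaries from a CNMFE spatial-footprint file."""
        with h5py.File(sfp_path, 'r') as mat:
            sfp = mat['SFP'][...]  # (n_cells, height, width) array
        for i, footprint in enumerate(sfp):
            # Weighted centroid of the footprint
            center_y, center_x = scipy.ndimage.center_of_mass(footprint)
            # Nonzero pixel coordinates and their weights
            xpix, ypix, weights = scipy.sparse.find(footprint)
            yield {'mask_id': i, 'mask_npix': len(weights),
                   'mask_center_x': center_x, 'mask_center_y': center_y,
                   'mask_xpix': xpix, 'mask_ypix': ypix, 'mask_weights': weights}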
From ef9e423fc0bb4596e322ff91c61ff8b3935e2937 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Sun, 26 Sep 2021 22:39:16 -0500
Subject: [PATCH 16/64] Move loader to `element_data_loader`

---
 element_miniscope/miniscope.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py
index 02a4b7f..ecf726d 100644
--- a/element_miniscope/miniscope.py
+++ b/element_miniscope/miniscope.py
@@ -821,10 +821,10 @@ def get_loader_result(key, table):
         output_dir = root_dir / output_dir
 
     if method == 'caiman':
-        from .readers import caiman_loader
+        from element_data_loader import caiman_loader
         loaded_output = caiman_loader.CaImAn(output_dir)
     elif method == 'mcgill_miniscope_analysis':
-        from .readers import miniscope_analysis_loader
+        from element_data_loader import miniscope_analysis_loader
        loaded_output = miniscope_analysis_loader.MiniscopeAnalysis(output_dir)
     else:
         raise NotImplementedError('Unknown/unimplemented method: {}'.format(method))
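The `get_loader_result` helper resolves a processing entry to a loader object for the configured analysis method. A minimal usage sketch, assuming the element has been activated against a configured database and the key values are illustrative:

    # A minimal sketch, assuming `miniscope.activate(...)` has already been
    # called with a valid linking module; the restriction key is illustrative.
    from element_miniscope import miniscope

    key = {'subject': 'subject1', 'recording_id': 0, 'curation_id': 0}
    method, loaded = miniscope.get_loader_result(key, miniscope.Curation)
    if method == 'caiman':
        print(loaded.creation_time)  # completion time recorded by the loader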
From c94b5a2118fe9fe65a4bf038294348d6dcbbf1cf Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Sun, 26 Sep 2021 22:41:34 -0500
Subject: [PATCH 17/64] Clean up requirements

---
 requirements.txt | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index ee5c550..6a497d8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1 @@
-datajoint
-scipy
-pyfftw
-imreg_dft
-sklearn
-h5py
-opencv-python
-scanreader @ git+https://github.com/atlab/scanreader.git
\ No newline at end of file
+datajoint>=0.13.0
\ No newline at end of file

From 6c217067dd502855d28a4bc555bd9ec569908541 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Sun, 26 Sep 2021 22:51:00 -0500
Subject: [PATCH 18/64] Add timestamps & minor fixes

---
 element_miniscope/miniscope.py | 34 ++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py
index ecf726d..205d6d9 100644
--- a/element_miniscope/miniscope.py
+++ b/element_miniscope/miniscope.py
@@ -39,7 +39,7 @@ def activate(miniscope_schema_name, *,
     global _linking_module
     _linking_module = linking_module
 
-    schema.activate(imaging_schema_name, create_schema=create_schema,
+    schema.activate(miniscope_schema_name, create_schema=create_schema,
                     create_tables=create_tables, add_objects=_linking_module.__dict__)
 
 
@@ -72,6 +72,7 @@ class AcquisitionSoftware(dj.Lookup):
     """
     contents = zip([
         'Miniscope-DAQ-V3',
+        'Miniscope-DAQ-V4',
         'Inscopix nVoke'])
 
 
@@ -111,14 +112,15 @@ class RecordingInfo(dj.Imported):
     ---
     nchannels            : tinyint   # number of channels
     nframes              : int       # number of recorded frames
-    px_height=null : smallint # height in pixels
-    px_width=null  : smallint # width in pixels
-    um_height=null : float    # height in microns
-    um_width=null  : float    # width in microns
+    px_height=null       : smallint  # height in pixels
+    px_width=null        : smallint  # width in pixels
+    um_height=null       : float     # height in microns
+    um_width=null        : float     # width in microns
     fps                  : float     # (Hz) frames per second
     gain=null            : float     # recording gain
     spatial_downsample=1 : tinyint   # e.g. 1, 2, 4, 8. 1 for no downsampling
     led_power            : float     # LED power used in the given recording
+    time_stamps          : longblob  # time stamps of each frame
     """
 
     class File(dj.Part):
@@ -151,9 +153,9 @@ def make(self, key):
             self.insert1(dict(key,
                               nchannels=1,
                               nframes=nframes,
-                              fps=fps,
-                              px_height=frame_size[0],
-                              px_width=frame_size[1]))
+                              px_height=frame_size[0],
+                              px_width=frame_size[1],
+                              fps=fps))
 
         else:
             raise NotImplementedError(
@@ -268,10 +270,10 @@ class ProcessingTask(dj.Manual):
     ---
     -> MotionCorrectionParamSet
     -> SegmentationParamSet
-    processing_motion_correction_output_dir: varchar(255)  # relative directory of motion relative to the root data directory
-    processing_segmentation_output_dir: varchar(255)  # relative directory of segmentation result respect to root directory
-    motion_correction_task_mode='load': enum('load', 'trigger')  # 'load': load existing motion correction results, 'trigger': trigger motion correction procedure
-    segmentation_task_mode='load': enum('load', 'trigger')  # 'load': load existing segmentation results, 'trigger': trigger
+    processing_motion_correction_output_dir : varchar(255)  # relative directory of motion relative to the root data directory
+    processing_segmentation_output_dir      : varchar(255)  # relative directory of segmentation result respect to root directory
+    motion_correction_task_mode='load'      : enum('load', 'trigger')  # 'load': load existing motion correction results, 'trigger': trigger motion correction procedure
+    segmentation_task_mode='load'           : enum('load', 'trigger')  # 'load': load existing segmentation results, 'trigger': trigger
     """
 
 
@@ -312,11 +314,11 @@ class Curation(dj.Manual):
     -> Processing
     curation_id: int
     ---
-    curation_time: datetime  # time of generation of this set of curated results
-    curation_motion_correction_output_dir: varchar(255)  # relative directory of motion relative to the root data directory
-    curation_segmentation_output_dir: varchar(255)  # relative directory of segmentation result respect to root directory
-    manual_curation: bool  # has manual curation been performed on this result?
-    curation_note='': varchar(2000)
+    curation_time                          : datetime      # time of generation of this set of curated results
+    curation_motion_correction_output_dir  : varchar(255)  # relative directory of motion relative to the root data directory
+    curation_segmentation_output_dir       : varchar(255)  # relative directory of segmentation result respect to root directory
+    manual_curation                        : bool          # has manual curation been performed on this result?
+    curation_note=''                       : varchar(2000)
     """
 
     def create1_from_processing_task(self, key, is_curated=False, curation_note=''):
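Patch 18 above stores per-frame timestamps as a `longblob`. A minimal sketch of retrieving and using them downstream, assuming `RecordingInfo` has been populated and that rows follow the Miniscope-DAQ convention of [frame number, time in ms, buffer index] (the restriction key and column layout are assumptions, not guaranteed by this patch):

    # A minimal sketch; the key and timestamp column layout are illustrative.
    import numpy as np
    from element_miniscope import miniscope

    fps, time_stamps = (miniscope.RecordingInfo
                        & {'subject': 'subject1', 'recording_id': 0}
                        ).fetch1('fps', 'time_stamps')
    frame_times_s = np.asarray(time_stamps)[:, 1] / 1000.0  # column 1: time in ms
    print(f'{len(frame_times_s)} frames at a nominal {fps} Hz')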
+    curation_note=''                       : varchar(2000)
     """
 
     def create1_from_processing_task(self, key, is_curated=False, curation_note=''):

From 4aeb097d5adc8175ca81a401aa80e56d814e9ebf Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Tue, 5 Oct 2021 10:24:50 -0500
Subject: [PATCH 19/64] Move populate routine from workflow to element

---
 element_miniscope/miniscope.py | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py
index 205d6d9..aa76873 100644
--- a/element_miniscope/miniscope.py
+++ b/element_miniscope/miniscope.py
@@ -833,13 +833,20 @@ def get_loader_result(key, table):
     return method, loaded_output
 
+def populate_all(display_progress=True):
 
-def dict_to_uuid(key):
-    """
-    Given a dictionary `key`, returns a hash string as UUID
-    """
-    hashed = hashlib.md5()
-    for k, v in sorted(key.items()):
-        hashed.update(str(k).encode())
-        hashed.update(str(v).encode())
-    return uuid.UUID(hex=hashed.hexdigest())
+    populate_settings = {'display_progress': display_progress, 'reserve_jobs': False, 'suppress_errors': False}
+
+    RecordingInfo.populate(**populate_settings)
+
+    Processing.populate(**populate_settings)
+
+    MotionCorrection.populate(**populate_settings)
+
+    Segmentation.populate(**populate_settings)
+
+    MaskClassification.populate(**populate_settings)
+
+    Fluorescence.populate(**populate_settings)
+
+    Activity.populate(**populate_settings)
\ No newline at end of file
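The `populate_all` routine added here simply chains the `populate` calls in dependency order. A usage sketch, assuming the element has been activated against a configured database:

    # A minimal sketch, assuming `miniscope.activate(...)` has already been run.
    from element_miniscope import miniscope

    # Runs every Imported/Computed table in order, with a progress bar
    miniscope.populate_all(display_progress=True)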
From 90833b2b770d3babc934f29be53079f830c97978 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Tue, 5 Oct 2021 10:25:12 -0500
Subject: [PATCH 20/64] Minor requirements update

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 6a497d8..fafe15e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1 @@
-datajoint>=0.13.0
\ No newline at end of file
+datajoint>=0.13
\ No newline at end of file

From a7a18de3f011d805f88fcb810c8962f8e5307055 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Mon, 18 Oct 2021 01:11:22 -0500
Subject: [PATCH 21/64] Add support for Miniscope DAQ V4

---
 element_miniscope/miniscope.py | 294 ++++++++++++---------------------
 element_miniscope/version.py   |   2 +-
 2 files changed, 110 insertions(+), 186 deletions(-)

diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py
index aa76873..fd2053c 100644
--- a/element_miniscope/miniscope.py
+++ b/element_miniscope/miniscope.py
@@ -2,10 +2,12 @@
 import numpy as np
 import pathlib
 from datetime import datetime
-import uuid
-import hashlib
 import importlib
 import inspect
+import cv2
+import json
+import csv
+from element_data_loader.utils import dict_to_uuid, find_full_path, find_root_directory
 
 schema = dj.schema()
 
@@ -22,12 +24,14 @@ def activate(miniscope_schema_name, *,
     :param linking_module: a module name or a module containing the
      required dependencies to activate the `miniscope` module:
      Upstream tables:
-         + Session: parent table to Recording, typically identifying a recording session
-         + Equipment: Reference table for Recording, specifying the equipment used for the acquisition of this miniscope recording
-         + Location: Reference table for RecordingLocation, specifying the brain location where this miniscope recording is acquired
+         + Session: parent table to Recording,
+           typically identifying a recording session
+         + Equipment: Reference table for Recording,
+           specifying the equipment used for the acquisition
      Functions:
-         + get_miniscope_root_data_dir() -> str
-             Retrieve the root data directory - e.g. containing all subject/sessions data
+         + get_miniscope_root_data_dir() -> list
+             Retrieve the root data directory
+             Contains all subject/sessions data
     :return: a string for full path to the root data directory
     """
@@ -43,32 +47,25 @@ def activate(miniscope_schema_name, *,
     global _linking_module
     _linking_module = linking_module
 
-    scan.activate(scan_schema_name, create_schema=create_schema,
-                  create_tables=create_tables, linking_module=linking_module)
     schema.activate(miniscope_schema_name, create_schema=create_schema,
                     create_tables=create_tables, add_objects=_linking_module.__dict__)
 
 
-# -------------- Functions required by the element-calcium-imaging --------------
+# Functions required by the element-miniscope ---------------------------------
 
-def get_miniscope_daq_v3_files(recording_key: dict) -> str:
+def get_miniscope_root_data_dir() -> list:
     """
-    Retrieve the Miniscope DAQ V3 files associated with a given Recording
-    :param recording_key: key of a Recording
-    :return: Miniscope DAQ V3 files full file-path
-    """
-    return _linking_module.get_miniscope_daq_v3_files(recording_key)
-
-
-def get_miniscope_root_data_dir() -> str:
-    """
-    get_miniscope_root_data_dir() -> str
-    Retrieve the root data directory - e.g. containing all subject/sessions data
-    :return: a string for full path to the root data directory
+    get_miniscope_root_data_dir() -> list
+    Retrieve the root data directory
+    Containing the raw miniscope recording files for all subject/sessions.
+    :return: a string for full path to the root data directory,
+     or list of strings for possible root data directories
     """
     return _linking_module.get_miniscope_root_data_dir()
 
 
-# -------------- Table declarations --------------
+# Experiment and analysis meta information -------------------------------------
 
 @schema
 class AcquisitionSoftware(dj.Lookup):
-    definition = """ # Name of acquisition software
-    acq_software: varchar(24)
+    definition = """
+    acquisition_software: varchar(24)
     """
     contents = zip([
         'Miniscope-DAQ-V3',
         'Miniscope-DAQ-V4',
         'Inscopix nVoke'])
 
 
 @schema
 class Channel(dj.Lookup):
-    definition = """ # Recording channel
+    definition = """
     channel     : tinyint  # 0-based indexing
     """
     contents = zip(range(5))
 
 
 @schema
 class Recording(dj.Manual):
     definition = """
     -> Session
     recording_id: int
     ---
     -> Equipment
     -> AcquisitionSoftware
-    recording_notes='' : varchar(4095)  # free-notes
+    recording_directory: varchar(255)  # relative to root data directory
+    recording_notes='' : varchar(4095) # free-notes
     """
 
 
 @schema
 class RecordingLocation(dj.Manual):
     definition = """
+    # Brain location where this miniscope recording is acquired
     -> Recording
     ---
     -> Location
     """
 
 
 @schema
 class RecordingInfo(dj.Imported):
-    definition = """ # general data about recording
+    definition = """
+    # Store metadata about recording
     -> Recording
     ---
     nchannels            : tinyint   # number of channels
     nframes              : int       # number of recorded frames
     px_height=null       : smallint  # height in pixels
     px_width=null        : smallint  # width in pixels
     um_height=null       : float     # height in microns
     um_width=null        : float     # width in microns
     fps                  : float     # (Hz) frames per second
     gain=null            : float     # recording gain
     spatial_downsample=1 : tinyint   # e.g. 1, 2, 4, 8. 1 for no downsampling
     led_power            : float     # LED power used in the given recording
     time_stamps          : longblob  # time stamps of each frame
     """
 
     class File(dj.Part):
         definition = """
         -> master
         recording_file_id : smallint unsigned
         ---
         recording_file_path: varchar(255)  # relative to root data directory
         """
 
     def make(self, key):
-        """ Read and store some meta information."""
-        acq_software = (Recording & key).fetch1('acq_software')
-
-        if acq_software == 'Miniscope-DAQ-V3':
+        # Search recording directory for miniscope raw files
+        acquisition_software, recording_directory = \
+            (Recording & key).fetch1('acquisition_software',
+                                     'recording_directory')
+
+        recording_path = find_full_path(get_miniscope_root_data_dir(),
+                                        recording_directory)
+
+        recording_filepaths = [file_path.as_posix() for file_path
+                               in recording_path.glob('*.avi')]
+        recording_metadata = list(recording_path.glob('*.json'))[0]
+        recording_timestamps = list(recording_path.glob('*.csv'))[0]
+
+        if not recording_filepaths:
+            raise FileNotFoundError(f'No .avi files found in {recording_directory}')
+        elif not recording_metadata.exists():
+            raise FileNotFoundError(f'No .json file found in {recording_directory}')
+        elif not recording_timestamps.exists():
+            raise FileNotFoundError(f'No .csv file found in {recording_directory}')
+
+        if acquisition_software == 'Miniscope-DAQ-V3':
             # Parse image dimension and frame rate
-            import cv2
-            recording_filepaths = get_miniscope_daq_v3_files(key)
             video = cv2.VideoCapture(recording_filepaths[0])
             fps = video.get(cv2.CAP_PROP_FPS)  # TODO: Verify this method extracts correct value
             _, frame = video.read()
             frame_size = np.shape(frame)
 
             # Parse number of frames from timestamp.dat file
             with open(recording_filepaths[-1]) as f:
                 next(f)
                 nframes = sum(1 for line in f if int(line[0]) == 0)
 
-            # Insert in RecordingInfo
-            self.insert1(dict(key,
-                              nchannels=1,
-                              nframes=nframes,
-                              px_height=frame_size[0],
-                              px_width=frame_size[1],
-                              fps=fps))
+            nchannels=1
+            px_height=frame_size[0]
+            px_width=frame_size[1]
+
+        elif acquisition_software == 'Miniscope-DAQ-V4':
+            with open(recording_metadata.as_posix()) as f:
+                metadata = json.loads(f.read())
+
+            with open(recording_timestamps, newline='') as f:
+                time_stamps = list(csv.reader(f, delimiter=','))
+
+            time_stamps = np.array([list(map(int, time_stamps[i]))
+                                    for i in range(1,len(time_stamps))])
+            nchannels = 1
+            nframes = len(time_stamps)
+            px_height = metadata['ROI']['height']
+            px_width = metadata['ROI']['width']
+            fps = int(metadata['frameRate'].replace('FPS',''))
+            gain = metadata['gain']
+            spatial_downsample = 1
+            led_power = metadata['led0']
 
         else:
             raise NotImplementedError(
-                f'Loading routine not implemented for {acq_software} acquisition software')
+                f'Loading routine not implemented for {acquisition_software}'
+                ' acquisition software')
+
+        # Insert in RecordingInfo
+        self.insert1(dict(key,
+                          nchannels=nchannels,
+                          nframes=nframes,
+                          px_height=px_height,
+                          px_width=px_width,
+                          fps=fps,
+                          # um_height=0,
+                          # um_width=0,
+                          gain=gain,
+                          spatial_downsample=spatial_downsample,
+                          led_power=led_power,
+                          time_stamps=time_stamps))
 
         # Insert file(s)
-        root = pathlib.Path(get_miniscope_root_data_dir())
-        recording_files = [pathlib.Path(f).relative_to(root).as_posix() for f in recording_filepaths]
-        self.File.insert([{**key, 'recording_file_id': i, 'recording_file_path': f} for i, f in enumerate(recording_files)])
+        recording_files = [pathlib.Path(f).relative_to(
+                               find_root_directory(
+                                   get_miniscope_root_data_dir(),
+                                   f)).as_posix()
+                           for f in recording_filepaths]
+
+        self.File.insert([{**key,
+                           'recording_file_id': i,
+                           'recording_file_path': f}
+                          for i, f in enumerate(recording_files)])
 
 
-@schema
-class MotionCorrectionMethod(dj.Lookup):
-    definition = """
-    motion_correction_method: char(32)
-    ---
-    motion_correction_method_desc='': varchar(1000)
-    """
-    contents = zip(['mcgill_miniscope_analysis', ''])
-
-
-@schema
-class MotionCorrectionParamSet(dj.Lookup):
-    definition = """
-    motion_correction_paramset_idx : smallint
-    ---
-    -> MotionCorrectionMethod
-    motion_correction_paramset_desc='' : varchar(128)
-    motion_correction_paramset_hash : uuid
-    unique index (motion_correction_paramset_hash)
-    motion_correction_params : longblob  # dictionary of all motion correction parameters
-    """
-
-    @classmethod
-    def insert_new_params(cls, motion_correction_method: str,
-                          motion_correction_paramset_idx: int,
-                          motion_correction_paramset_desc: str,
-                          motion_correction_params: dict):
-        param_dict = {'motion_correction_method': motion_correction_method,
-                      'motion_correction_paramset_idx': motion_correction_paramset_idx,
-                      'motion_correction_paramset_desc': motion_correction_paramset_desc,
-                      'motion_correction_params': motion_correction_params,
-                      'motion_correction_paramset_hash': dict_to_uuid(motion_correction_params)}
-        q_param = cls & {'motion_correction_param_set_hash':
-                         motion_correction_param_dict['motion_correction_paramset_hash']}
-
-        if q_param:  # If the specified param-set already exists
-            pname = q_param.fetch1('motion_correction_paramset_idx')
-            if pname == motion_correction_paramset_idx:  # If the existed set has the same name: job done
-                return
-            else:  # If not same name: human error, trying to add the same paramset with a different name
-                raise dj.DataJointError(
-                    'The specified param-set already exists - name: {}'.format(pname))
-        else:
-            cls.insert1(param_dict)
-
-
-@schema
-class SegmentationMethod(dj.Lookup):
-    definition = """
-    segmentation_method : char(32)
-    ---
-    segmentation_method_desc='': varchar(128)
-    """
-    contents = zip(['mcgill_miniscope_analysis', ''])
-
-
-@schema
-class SegmentationParamSet(dj.Lookup):
-    definition = """
-    segmentation_paramset_idx : smallint
-    ---
-    -> SegmentationMethod
-    segmentation_paramset_desc='' : varchar(128)
-    segmentation_paramset_hash : uuid
-    unique index (segmentation_paramset_hash)
-    segmentation_params : longblob  # dictionary of all motion correction parameters
-    """
-
-    @classmethod
-    def insert_new_params(cls, segmentation_method: str,
-                          segmentation_paramset_idx: int,
-                          segmentation_paramset_desc: str,
-                          segmentation_params: dict):
-        param_dict = {'segmentation_method': segmentation_method,
-                      'segmentation_paramset_idx': segmentation_paramset_idx,
-                      'segmentation_paramset_desc': segmentation_paramset_desc,
-                      'segmentation_params': segmentation_params,
-                      'segmentation_paramset_hash': dict_to_uuid(segmentation_params)}
-        q_param = cls & {'segmentation_param_set_hash':
-                         param_dict['segmentation_paramset_hash']}
-
-        if q_param:  # If the specified param-set already exists
-            pname = q_param.fetch1('segmentation_paramset_idx')
-            if pname == paramset_idx:  # If the existed set has the same name: job done
-                return
-            else:  # If not same name: human error, trying to add the same paramset with different name
-                raise dj.DataJointError(
-                    'The specified param-set already exists - name: {}'.format(pname))
-        else:
-            cls.insert1(param_dict)
 
 
-# -------------- Trigger a processing routine --------------
+# Trigger a processing routine -------------------------------------------------
 
 @schema
 class ProcessingTask(dj.Manual):
     definition = """
     ---
     -> MotionCorrectionParamSet
     -> SegmentationParamSet
     processing_motion_correction_output_dir : varchar(255)  # relative directory of motion relative to the root data directory
     processing_segmentation_output_dir      : varchar(255)  # relative directory of segmentation result respect to root directory
     motion_correction_task_mode='load'      : enum('load', 'trigger')  # 'load': load existing motion correction results, 'trigger': trigger motion correction procedure
     segmentation_task_mode='load'           : enum('load', 'trigger')  # 'load': load existing segmentation results, 'trigger': trigger
     """
 
-
 @schema
 class Processing(dj.Computed):
     definition = """
 
-@schema
-class Curation(dj.Manual):
-    definition = """
-    -> Processing
-    curation_id: int
-    ---
-    curation_time                          : datetime      # time of generation of this set of curated results
-    curation_motion_correction_output_dir  : varchar(255)  # relative directory of motion relative to the root data directory
-    curation_segmentation_output_dir       : varchar(255)  # relative directory of segmentation result respect to root directory
-    manual_curation                        : bool          # has manual curation been performed on this result?
-    curation_note=''                       : varchar(2000)
-    """
-
-    def create1_from_processing_task(self, key, is_curated=False, curation_note=''):
-        """
-        A convenient function to create a new corresponding "Curation" for a particular "ProcessingTask"
-        """
-        if key not in Processing():
-            raise ValueError(f'No corresponding entry in Processing available for: {key};'
-                             f' do `Processing.populate(key)`')
-
-        output_dir = (ProcessingTask & key).fetch1('processing_output_dir')
-        method, loaded_result = get_loader_result(key, ProcessingTask)
-
-        if method == 'caiman':
-            loaded_caiman = loaded_result
-            curation_time = loaded_caiman.creation_time
-        elif method == 'mcgill_miniscope_analysis':
-            loaded_miniscope_analysis = loaded_result
-            curation_time = loaded_miniscope_analysis.creation_time
-        else:
-            raise NotImplementedError('Unknown method: {}'.format(method))
-
-        # Synthesize curation_id
-        curation_id = dj.U().aggr(self & key, n='ifnull(max(curation_id)+1,1)').fetch1('n')
-        self.insert1({**key, 'curation_id': curation_id,
-                      'curation_time': curation_time, 'curation_output_dir': output_dir,
-                      'manual_curation': is_curated,
-                      'curation_note': curation_note})
-
-
-# -------------- Motion Correction --------------
+# Motion Correction ------------------------------------------------------------
 
 @schema
 class MotionCorrection(dj.Imported):
     definition = """
-    -> Curation
+    -> Processing
     ---
     -> Channel.proj(motion_correct_channel='channel')  # channel used for motion correction in this processing task
     """
@@ -660,8 +578,7 @@ def make(self, key):
             pass
 
 
-# -------------- Activity Trace --------------
-
+# Activity Trace ---------------------------------------------------------------
 
 @schema
 class Fluorescence(dj.Computed):
@@ -800,10 +717,8 @@ def make(self, key):
         else:
             raise NotImplementedError('Unknown/unimplemented method: {}'.format(method))
 
-# ---------------- HELPER FUNCTIONS ----------------
+# Helper Functions -------------------------------------------------------------
 
 _table_attribute_mapper = {'ProcessingTask': 'processing_output_dir',
                            'Curation': 'curation_output_dir'}
@@ -828,14 +745,20 @@ def get_loader_result(key, table):
     elif method == 'mcgill_miniscope_analysis':
         from element_data_loader import miniscope_analysis_loader
         loaded_output = miniscope_analysis_loader.MiniscopeAnalysis(output_dir)
+    elif method == 'minian':
+        from element_data_loader import minian_loader
+        loaded_output = minian_loader.MiniAn(output_dir)
     else:
         raise NotImplementedError('Unknown/unimplemented method: {}'.format(method))
 
     return method, loaded_output
 
+
 def populate_all(display_progress=True):
 
-    populate_settings = {'display_progress': display_progress, 'reserve_jobs': False, 'suppress_errors': False}
+    populate_settings = {'display_progress': display_progress,
+                         'reserve_jobs': False,
+                         'suppress_errors': False}
 
     RecordingInfo.populate(**populate_settings)
 
@@ -849,4 +772,5 @@ def populate_all(display_progress=True):
 
     Fluorescence.populate(**populate_settings)
 
-    Activity.populate(**populate_settings)
\ No newline at end of file
+    Activity.populate(**populate_settings)
+
diff --git a/element_miniscope/version.py b/element_miniscope/version.py
index 1173db0..523b6b7 100644
--- a/element_miniscope/version.py
+++ b/element_miniscope/version.py
@@ -1,2 +1,2 @@
 """Package metadata"""
-__version__ = '0.1.0a1'
\ No newline at end of file
+__version__ = '0.1.0a2'
\ No newline at end of file
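Patch 21 above distills a Miniscope DAQ V4 session folder into a handful of scalars. A minimal standalone sketch of the same parsing, assuming a folder with the V4 file names `metaData.json` and `timeStamps.csv` (the folder path is illustrative):

    # A minimal sketch; the session path is illustrative.
    import csv, json, pathlib
    import numpy as np

    session = pathlib.Path('/data/subject1/2021-10-18/miniscope')
    metadata = json.loads((session / 'metaData.json').read_text())

    with open(session / 'timeStamps.csv', newline='') as f:
        rows = list(csv.reader(f, delimiter=','))
    time_stamps = np.array([[int(v) for v in row] for row in rows[1:]])  # drop header

    fps = int(metadata['frameRate'].replace('FPS', ''))
    print(len(time_stamps), 'frames,',
          metadata['ROI']['height'], 'x', metadata['ROI']['width'], 'px at', fps, 'Hz')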
From 542037dbc8e796458c539e0708bb27589bd40c09 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Tue, 26 Oct 2021 15:29:20 -0500
Subject: [PATCH 22/64] Refactor to handle both V3 and V4 files

---
 element_miniscope/miniscope.py | 50 ++++++++++++++++++++--------------
 1 file changed, 29 insertions(+), 21 deletions(-)

diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py
index fd2053c..dcbf0fe 100644
--- a/element_miniscope/miniscope.py
+++ b/element_miniscope/miniscope.py
@@ -143,50 +143,60 @@ def make(self, key):
         recording_filepaths = [file_path.as_posix() for file_path
                                in recording_path.glob('*.avi')]
-        recording_metadata = list(recording_path.glob('*.json'))[0]
-        recording_timestamps = list(recording_path.glob('*.csv'))[0]
 
         if not recording_filepaths:
-            raise FileNotFoundError(f'No .avi files found in {recording_directory}')
-        elif not recording_metadata.exists():
-            raise FileNotFoundError(f'No .json file found in {recording_directory}')
-        elif not recording_timestamps.exists():
-            raise FileNotFoundError(f'No .csv file found in {recording_directory}')
-
+            raise FileNotFoundError(f'No .avi files found in '
+                                    f'{recording_directory}')
+
         if acquisition_software == 'Miniscope-DAQ-V3':
+            recording_timestamps = recording_path / 'timestamp.dat'
+            if not recording_timestamps.exists():
+                raise FileNotFoundError(f'No timestamp file found in '
+                                        f'{recording_directory}')
+
+            nchannels=1
 
             # Parse number of frames from timestamp.dat file
-            with open(recording_filepaths[-1]) as f:
+            with open(recording_timestamps) as f:
                 next(f)
                 nframes = sum(1 for line in f if int(line[0]) == 0)
 
-            nchannels=1
+            # Parse image dimension and frame rate
+            video = cv2.VideoCapture(recording_filepaths[0])
+            _, frame = video.read()
+            frame_size = np.shape(frame)
             px_height=frame_size[0]
             px_width=frame_size[1]
+            fps = video.get(cv2.CAP_PROP_FPS)  # TODO: Verify this method extracts correct value
 
         elif acquisition_software == 'Miniscope-DAQ-V4':
+            recording_metadata = list(recording_path.glob('*.json'))[0]
+            recording_timestamps = list(recording_path.glob('*.csv'))[0]
+
+            if not recording_metadata.exists():
+                raise FileNotFoundError(f'No .json file found in '
+                                        f'{recording_directory}')
+            if not recording_timestamps.exists():
+                raise FileNotFoundError(f'No timestamp (*.csv) file found in '
+                                        f'{recording_directory}')
+
             with open(recording_metadata.as_posix()) as f:
                 metadata = json.loads(f.read())
-
+
             with open(recording_timestamps, newline='') as f:
                 time_stamps = list(csv.reader(f, delimiter=','))
 
-            time_stamps = np.array([list(map(int, time_stamps[i]))
-                                    for i in range(1,len(time_stamps))])
             nchannels = 1
             nframes = len(time_stamps)
             px_height = metadata['ROI']['height']
             px_width = metadata['ROI']['width']
             fps = int(metadata['frameRate'].replace('FPS',''))
             gain = metadata['gain']
-            spatial_downsample = 1
+            spatial_downsample = 1  # TODO verify
             led_power = metadata['led0']
+            time_stamps = np.array([list(map(int, time_stamps[i]))
+                                    for i in range(1,len(time_stamps))])
         else:
             raise NotImplementedError(
                 f'Loading routine not implemented for {acquisition_software}'
                 ' acquisition software')
@@ -199,8 +209,6 @@ def make(self, key):
                           px_height=px_height,
                           px_width=px_width,
                           fps=fps,
-                          # um_height=0,
-                          # um_width=0,
                           gain=gain,
                           spatial_downsample=spatial_downsample,
                           led_power=led_power,
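For the DAQ V3 branch, the frame count comes from `timestamp.dat`, whose rows begin with the camera number. A minimal standalone sketch of that counting logic, using a slightly more defensive column split than the element's first-character check (the file path is illustrative):

    # A minimal sketch; the path and tab-delimited layout are assumptions
    # based on the Miniscope-DAQ-V3 timestamp.dat format read above.
    import pathlib

    timestamp_file = pathlib.Path('/data/subject1/session0/timestamp.dat')
    with open(timestamp_file) as f:
        next(f)  # skip the header row
        # Count rows recorded by camera 0, as in RecordingInfo.make
        nframes = sum(1 for line in f if int(line.split('\t')[0]) == 0)
    print(nframes, 'frames recorded by camera 0')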
From c9d2e5dfad85935a9d182a4a2de57467376944f4 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Wed, 23 Mar 2022 11:46:33 -0500
Subject: [PATCH 23/64] Update license

---
 LICENSE | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/LICENSE b/LICENSE
index d394fe3..2f92789 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2021 DataJoint NEURO
+Copyright (c) 2022 DataJoint
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

From fc7111ceaaca9bb685617c81ce7d8b992e087e3f Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Wed, 23 Mar 2022 11:54:37 -0500
Subject: [PATCH 24/64] Update package name

---
 element_miniscope/miniscope.py | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py
index dcbf0fe..b6db870 100644
--- a/element_miniscope/miniscope.py
+++ b/element_miniscope/miniscope.py
@@ -7,7 +7,7 @@
 import cv2
 import json
 import csv
-from element_data_loader.utils import dict_to_uuid, find_full_path, find_root_directory
+from element_interface.utils import dict_to_uuid, find_full_path, find_root_directory
 
 schema = dj.schema()
 
@@ -748,13 +747,13 @@ def get_loader_result(key, table):
         output_dir = root_dir / output_dir
 
     if method == 'caiman':
-        from element_data_loader import caiman_loader
+        from element_interface import caiman_loader
         loaded_output = caiman_loader.CaImAn(output_dir)
     elif method == 'mcgill_miniscope_analysis':
-        from element_data_loader import miniscope_analysis_loader
+        from element_interface import miniscope_analysis_loader
         loaded_output = miniscope_analysis_loader.MiniscopeAnalysis(output_dir)
     elif method == 'minian':
-        from element_data_loader import minian_loader
+        from element_interface import minian_loader
         loaded_output = minian_loader.MiniAn(output_dir)
     else:
         raise NotImplementedError('Unknown/unimplemented method: {}'.format(method))
@@ -780,5 +779,4 @@ def populate_all(display_progress=True):
 
     Fluorescence.populate(**populate_settings)
 
-    Activity.populate(**populate_settings)
-
+    Activity.populate(**populate_settings)
\ No newline at end of file
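With the loaders now resolved from `element-interface`, the CaImAn loader can also be used directly. A minimal sketch, assuming `element-interface` is installed and the directory holds a completed CaImAn run (the path is illustrative; `creation_time` and `masks` are the loader attributes referenced elsewhere in this module):

    # A minimal sketch; the output directory is illustrative.
    from element_interface import caiman_loader

    loaded = caiman_loader.CaImAn('/data/subject1/session0/caiman')
    print(loaded.creation_time)  # run completion time
    masks = loaded.masks         # per-cell spatial footprints and traces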
+ echo "RELEASE_NAME=${RELEASE_NAME}" + echo "BODY=${BODY}" + echo "PRERELEASE=${PRERELEASE}" + echo "DRAFT=${DRAFT}" + build: + needs: test-changelog + runs-on: ubuntu-latest + strategy: + matrix: + include: + - py_ver: 3.8 + distro: alpine + image: djbase + env: + PY_VER: ${{matrix.py_ver}} + DISTRO: ${{matrix.distro}} + IMAGE: ${{matrix.image}} + DOCKER_CLIENT_TIMEOUT: "120" + COMPOSE_HTTP_TIMEOUT: "120" + steps: + - uses: actions/checkout@v2 + - name: Compile image + run: | + export PKG_NAME=$(python3 -c "print([p for p in __import__('setuptools').find_packages() if '.' not in p][0])") + export PKG_VERSION=$(cat ${PKG_NAME}/version.py | awk -F\' '/__version__ = / {print $2}') + export HOST_UID=$(id -u) + docker-compose -f docker-compose-build.yaml up --exit-code-from element --build + IMAGE=$(docker images --filter "reference=datajoint/${PKG_NAME}*" \ + --format "{{.Repository}}") + TAG=$(docker images --filter "reference=datajoint/${PKG_NAME}*" --format "{{.Tag}}") + docker save "${IMAGE}:${TAG}" | \ + gzip > "image-${PKG_NAME}-${PKG_VERSION}-py${PY_VER}-${DISTRO}.tar.gz" + echo "PKG_NAME=${PKG_NAME}" >> $GITHUB_ENV + echo "PKG_VERSION=${PKG_VERSION}" >> $GITHUB_ENV + - name: Add image artifact + uses: actions/upload-artifact@v2 + with: + name: image-${{env.PKG_NAME}}-${{env.PKG_VERSION}}-py${{matrix.py_ver}}-${{matrix.distro}} + path: "image-${{env.PKG_NAME}}-${{env.PKG_VERSION}}-py${{matrix.py_ver}}-\ + ${{matrix.distro}}.tar.gz" + retention-days: 1 + - if: matrix.py_ver == '3.8' && matrix.distro == 'alpine' + name: Add pip artifacts + uses: actions/upload-artifact@v2 + with: + name: pip-${{env.PKG_NAME}}-${{env.PKG_VERSION}} + path: dist + retention-days: 1 + publish-release: + if: github.event_name == 'push' + needs: build + runs-on: ubuntu-latest + env: + TWINE_USERNAME: ${{secrets.twine_username}} + TWINE_PASSWORD: ${{secrets.twine_password}} + outputs: + release_upload_url: ${{steps.create_gh_release.outputs.upload_url}} + steps: + - uses: actions/checkout@v2 + - name: Determine package version + run: | + PKG_NAME=$(python3 -c "print([p for p in __import__('setuptools').find_packages() if '.' 
not in p][0])") + SDIST_PKG_NAME=$(echo ${PKG_NAME} | sed 's|_|-|g') + PKG_VERSION=$(cat ${PKG_NAME}/version.py | awk -F\' '/__version__ = / {print $2}') + echo "PKG_NAME=${PKG_NAME}" >> $GITHUB_ENV + echo "PKG_VERSION=${PKG_VERSION}" >> $GITHUB_ENV + echo "SDIST_PKG_NAME=${SDIST_PKG_NAME}" >> $GITHUB_ENV + - name: Get changelog entry + id: changelog_reader + uses: guzman-raphael/changelog-reader-action@v5 + with: + path: ./CHANGELOG.md + version: ${{env.PKG_VERSION}} + - name: Create GH release + id: create_gh_release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + with: + tag_name: ${{steps.changelog_reader.outputs.version}} + release_name: Release ${{steps.changelog_reader.outputs.version}} + body: ${{steps.changelog_reader.outputs.changes}} + prerelease: ${{steps.changelog_reader.outputs.status == 'prereleased'}} + draft: ${{steps.changelog_reader.outputs.status == 'unreleased'}} + - name: Fetch image artifact + uses: actions/download-artifact@v2 + with: + name: image-${{env.PKG_NAME}}-${{env.PKG_VERSION}}-py3.8-alpine + - name: Fetch pip artifacts + uses: actions/download-artifact@v2 + with: + name: pip-${{env.PKG_NAME}}-${{env.PKG_VERSION}} + path: dist + - name: Publish pip release + run: | + export HOST_UID=$(id -u) + docker load < "image-${{env.PKG_NAME}}-${PKG_VERSION}-py3.8-alpine.tar.gz" + docker-compose -f docker-compose-build.yaml run \ + -e TWINE_USERNAME=${TWINE_USERNAME} -e TWINE_PASSWORD=${TWINE_PASSWORD} element \ + sh -lc "pip install twine && python -m twine upload dist/*" + - name: Determine pip artifact paths + run: | + echo "PKG_WHEEL_PATH=$(ls dist/${PKG_NAME}-*.whl)" >> $GITHUB_ENV + echo "PKG_SDIST_PATH=$(ls dist/${SDIST_PKG_NAME}-*.tar.gz)" >> $GITHUB_ENV + - name: Upload pip wheel asset to release + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + with: + upload_url: ${{steps.create_gh_release.outputs.upload_url}} + asset_path: ${{env.PKG_WHEEL_PATH}} + asset_name: pip-${{env.PKG_NAME}}-${{env.PKG_VERSION}}.whl + asset_content_type: application/zip + - name: Upload pip sdist asset to release + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + with: + upload_url: ${{steps.create_gh_release.outputs.upload_url}} + asset_path: ${{env.PKG_SDIST_PATH}} + asset_name: pip-${{env.SDIST_PKG_NAME}}-${{env.PKG_VERSION}}.tar.gz + asset_content_type: application/gzip diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..c144a40 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,15 @@ +ARG PY_VER +ARG DISTRO +ARG IMAGE +ARG PKG_NAME +ARG PKG_VERSION + +FROM datajoint/${IMAGE}:py${PY_VER}-${DISTRO} +COPY --chown=anaconda:anaconda ./requirements.txt ./setup.py \ + /main/ +COPY --chown=anaconda:anaconda ./${PKG_NAME} /main/${PKG_NAME} +RUN \ + cd /main && \ + pip install . && \ + rm -R /main/* +WORKDIR /main diff --git a/docker-compose-build.yaml b/docker-compose-build.yaml new file mode 100644 index 0000000..81984c7 --- /dev/null +++ b/docker-compose-build.yaml @@ -0,0 +1,26 @@ +# PY_VER=3.8 IMAGE=djbase DISTRO=alpine PKG_NAME=$(python -c "print([p for p in __import__('setuptools').find_packages() if '.' not in p][0])") PKG_VERSION=$(cat ${PKG_NAME}/version.py | awk -F\' '/__version__/ {print $2}') HOST_UID=$(id -u) docker-compose -f docker-compose-build.yaml up --exit-code-from element --build +# +# Intended for updating dependencies and docker image. +# Used to build release artifacts. +version: "2.4" +services: + element: + build: + context: . 
+      args:
+        - PY_VER
+        - DISTRO
+        - IMAGE
+        - PKG_NAME
+        - PKG_VERSION
+    image: datajoint/${PKG_NAME}:${PKG_VERSION}
+    user: ${HOST_UID}:anaconda
+    volumes:
+      - .:/main
+    command:
+      - sh
+      - -lc
+      - |
+        set -e
+        rm -R build dist *.egg-info || echo "No prev build"
+        python setup.py bdist_wheel sdist

From 31c2cac8852a998a154d61303b2b65f0b4a6e14f Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Wed, 23 Mar 2022 14:33:08 -0500
Subject: [PATCH 26/64] Add issue templates

---
 .github/ISSUE_TEMPLATE/bug_report.md      | 39 ++++++++++++++++
 .github/ISSUE_TEMPLATE/config.yml         |  5 ++
 .github/ISSUE_TEMPLATE/feature_request.md | 57 +++++++++++++++++++++++
 3 files changed, 101 insertions(+)
 create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md
 create mode 100644 .github/ISSUE_TEMPLATE/config.yml
 create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md

diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..31fe9fc
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,39 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: 'bug'
+assignees: ''
+
+---
+
+## Bug Report
+
+### Description
+
+A clear and concise description of the overall operation that was being performed
+when the error occurred.
+
+### Reproducibility
+Include:
+- OS (WIN | MACOS | Linux)
+- DataJoint Element Version
+- MySQL Version
+- MySQL Deployment Strategy (local-native | local-docker | remote)
+- Minimum number of steps to reliably reproduce the issue
+- Complete error stack as a result of evaluating the above steps
+
+### Expected Behavior
+A clear and concise description of what you expected to happen.
+
+### Screenshots
+If applicable, add screenshots to help explain your problem.
+
+### Additional Research and Context
+Add any additional research or context that was conducted in creating this report.
+
+For example:
+- Related GitHub issues and PR's either within this repository or in other relevant
+  repositories.
+- Specific links to specific lines or a focus within source code.
+- Relevant summary of Maintainers development meetings, milestones, projects, etc.
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..d31fbac
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+  - name: DataJoint Contribution Guideline
+    url: https://docs.datajoint.org/python/community/02-Contribute.html
+    about: Please make sure to review the DataJoint Contribution Guidelines
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..1f2b784
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,57 @@
+---
+name: Feature request
+about: Suggest an idea for a new feature
+title: ''
+labels: 'enhancement'
+assignees: ''
+
+---
+
+## Feature Request
+
+### Problem
+
+A clear and concise description how this idea has manifested and the context. Elaborate
+on the need for this feature and/or what could be improved. Ex. I'm always frustrated
+when [...]
+
+### Requirements
+
+A clear and concise description of the requirements to satisfy the new feature. Detail
+what you expect from a successful implementation of the feature. Ex. When using this
+feature, it should [...]
+
+### Justification
+
+Provide the key benefits in making this a supported feature. Ex. Adding support for this
+feature would ensure [...]
+
+### Alternative Considerations
+
+Do you currently have a work-around for this? Provide any alternative solutions or
+features you've considered.
+
+### Related Errors
+Add any errors as a direct result of not exposing this feature.
+
+Please include steps to reproduce provided errors as follows:
+- OS (WIN | MACOS | Linux)
+- DataJoint Element Version
+- MySQL Version
+- MySQL Deployment Strategy (local-native | local-docker | remote)
+- Minimum number of steps to reliably reproduce the issue
+- Complete error stack as a result of evaluating the above steps
+
+### Screenshots
+If applicable, add screenshots to help explain your feature.
+
+### Additional Research and Context
+Add any additional research or context that was conducted in creating this feature request.
+
+For example:
+- Related GitHub issues and PR's either within this repository or in other relevant
+  repositories.
+- Specific links to specific lines or a focus within source code.
+- Relevant summary of Maintainers development meetings, milestones, projects, etc.
+- Any additional supplemental web references or links that would further justify this
+  feature request.

From f725dd3ae167a6b6ed412d57a471e591b4285d69 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Wed, 23 Mar 2022 15:21:56 -0500
Subject: [PATCH 27/64] Remove MiniscopeAnalysis

---
 element_miniscope/miniscope.py | 95 ++--------------------------------
 1 file changed, 5 insertions(+), 90 deletions(-)

diff --git a/element_miniscope/miniscope.py b/element_miniscope/miniscope.py
index b6db870..39a4e17 100644
--- a/element_miniscope/miniscope.py
+++ b/element_miniscope/miniscope.py
@@ -67,9 +67,8 @@ class AcquisitionSoftware(dj.Lookup):
     definition = """
     acquisition_software: varchar(24)
     """
-    contents = zip([
-        'Miniscope-DAQ-V3',
-        'Miniscope-DAQ-V4'])
+    contents = zip(['Miniscope-DAQ-V3',
+                    'Miniscope-DAQ-V4'])
 
 
@@ -260,9 +259,6 @@ def make(self, key):
             if method == 'caiman':
                 loaded_caiman = loaded_result
                 key = {**key, 'processing_time': loaded_caiman.creation_time}
-            elif method == 'mcgill_miniscope_analysis':
-                loaded_miniscope_analysis = loaded_result
-                key = {**key, 'processing_time': loaded_miniscope_analysis.creation_time}
             else:
                 raise NotImplementedError('Unknown method: {}'.format(method))
         elif task_mode == 'trigger':
@@ -445,20 +441,6 @@ def make(self, key):
                                    'max_image'][...][np.newaxis, ...])]
             self.Summary.insert(summary_images)
 
-        elif method == 'mcgill_miniscope_analysis':
-            loaded_miniscope_analysis = loaded_result
-
-            # TODO: add motion correction and block data
-
-            # -- summary images --
-            mc_key = (scan.ScanInfo.Field * ProcessingTask & key).fetch1('KEY')
-            summary_images = {**mc_key,
-                              'average_image': loaded_miniscope_analysis.average_image,
-                              'correlation_image': loaded_miniscope_analysis.correlation_image}
-
-            self.insert1({**key, 'motion_correct_channel': loaded_miniscope_analysis.alignment_channel})
-            self.Summary.insert1(summary_images)
-
         else:
             raise NotImplementedError('Unknown/unimplemented method: {}'.format(method))
 
@@ -530,27 +512,6 @@ def make(self, key):
                              ignore_extra_fields=True, allow_direct_insert=True)
 
-        elif method == 'mcgill_miniscope_analysis':
-            loaded_miniscope_analysis = loaded_result
-
-            # infer "segmentation_channel" - from params if available, else from miniscope analysis loader
-            params = (ProcessingParamSet * ProcessingTask & key).fetch1('params')
-            segmentation_channel = params.get('segmentation_channel',
-                                              loaded_miniscope_analysis.segmentation_channel)
-
-            self.insert1(key)
-            self.Mask.insert([{**key,
-                               'segmentation_channel': segmentation_channel,
-                               'mask': mask['mask_id'],
-                               'mask_npix': mask['mask_npix'],
-                               'mask_center_x': mask['mask_center_x'],
-                               'mask_center_y': mask['mask_center_y'],
-                               'mask_xpix': mask['mask_xpix'],
-                               'mask_ypix': mask['mask_ypix'],
-                               'mask_weights': mask['mask_weights']}
-                              for mask in loaded_miniscope_analysis.masks],
-                             ignore_extra_fields=True)
-
         else:
             raise NotImplementedError(f'Unknown/unimplemented method: {method}')
 
@@ -561,8 +522,7 @@ class MaskClassificationMethod(dj.Lookup):
     mask_classification_method: varchar(48)
     """
 
-    contents = zip(['caiman_default_classifier',
-                    'miniscope_analysis_default_classifier'])
+    contents = zip(['caiman_default_classifier'])
 
 
 @schema
@@ -621,20 +581,6 @@ def make(self, key):
                                'fluorescence': mask['inferred_trace']}
                               for mask in loaded_caiman.masks])
 
-        elif method == 'mcgill_miniscope_analysis':
-            loaded_miniscope_analysis = loaded_result
-
-            # infer "segmentation_channel" - from params if available, else from miniscope analysis loader
-            params = (ProcessingParamSet * ProcessingTask & key).fetch1('params')
-            segmentation_channel = params.get('segmentation_channel',
-                                              loaded_miniscope_analysis.segmentation_channel)
-
-            self.insert1(key)
-            self.Trace.insert([{**key,
-                                'mask': mask['mask_id'],
-                                'fluorescence_channel': segmentation_channel,
-                                'fluorescence': mask['raw_trace']}
-                               for mask in loaded_miniscope_analysis.masks])
         else:
             raise NotImplementedError('Unknown/unimplemented method: {}'.format(method))
 
@@ -647,9 +593,7 @@ class ActivityExtractionMethod(dj.Lookup):
     """
 
     contents = zip(['caiman_deconvolution',
-                    'caiman_dff',
-                    'mcgill_miniscope_analysis_deconvolution',
-                    'mcgill_miniscope_analysis_dff'])
+                    'caiman_dff'])
 
 
 @schema
@@ -674,13 +618,7 @@ def key_source(self):
                              & 'processing_method = "caiman"'
                              & 'extraction_method LIKE "caiman%"')
 
-        miniscope_analysis_key_source = (Fluorescence * ActivityExtractionMethod
-                                         * ProcessingParamSet.proj('processing_method')
-                                         & 'processing_method = "mcgill_miniscope_analysis"'
-                                         & 'extraction_method LIKE "mcgill_miniscope_analysis%"')
-
-        # TODO: fix #caiman_key_source.proj() + miniscope_analysis_key_source.proj()
-        return miniscope_analysis_key_source.proj()
+        return caiman_key_source.proj()
 
     def make(self, key):
         method, loaded_result = get_loader_result(key, Curation)
@@ -703,23 +641,6 @@ def make(self, key):
                                 'activity_trace': mask[attr_mapper[key['extraction_method']]]}
                                for mask in loaded_caiman.masks])
 
-        elif method == 'mcgill_miniscope_analysis':
-            if key['extraction_method'] in ('mcgill_miniscope_analysis_deconvolution', 'mcgill_miniscope_analysis_dff'):
-                attr_mapper = {'mcgill_miniscope_analysis_deconvolution': 'spikes', 'mcgill_miniscope_analysis_dff': 'dff'}
-
-                loaded_miniscope_analysis = loaded_result
-
-                # infer "segmentation_channel" - from params if available, else from miniscope analysis loader
-                params = (ProcessingParamSet * ProcessingTask & key).fetch1('params')
-                segmentation_channel = params.get('segmentation_channel',
-                                                  loaded_miniscope_analysis.segmentation_channel)
-
-                self.insert1(key)
-                self.Trace.insert([{**key,
-                                    'mask': mask['mask_id'],
-                                    'fluorescence_channel': segmentation_channel,
-                                    'activity_trace': mask[attr_mapper[key['extraction_method']]]}
-                                   for mask in loaded_miniscope_analysis.masks])
         else:
             raise NotImplementedError('Unknown/unimplemented method: {}'.format(method))
 
@@ -749,12 +670,6 @@ def get_loader_result(key, table):
     if method == 'caiman':
         from element_interface import caiman_loader
        loaded_output = caiman_loader.CaImAn(output_dir)
-    elif method == 'mcgill_miniscope_analysis':
-        from element_interface import miniscope_analysis_loader
-        loaded_output = miniscope_analysis_loader.MiniscopeAnalysis(output_dir)
-    elif method == 'minian':
-        from element_interface import minian_loader
-        loaded_output = minian_loader.MiniAn(output_dir)
     else:
         raise NotImplementedError('Unknown/unimplemented method: {}'.format(method))
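With the MiniscopeAnalysis branches removed, CaImAn is the only supported loader. A minimal sketch of querying CaImAn-derived activity after population, assuming the schema above is activated and the restriction value is illustrative:

    # A minimal sketch; the extraction method restriction is illustrative.
    from element_miniscope import miniscope

    traces = (miniscope.Activity.Trace
              & {'extraction_method': 'caiman_deconvolution'}
              ).fetch('activity_trace')  # one deconvolved trace per mask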
caiman_loader.CaImAn(output_dir) - elif method == 'mcgill_miniscope_analysis': - from element_interface import miniscope_analysis_loader - loaded_output = miniscope_analysis_loader.MiniscopeAnalysis(output_dir) - elif method == 'minian': - from element_interface import minian_loader - loaded_output = minian_loader.MiniAn(output_dir) else: raise NotImplementedError('Unknown/unimplemented method: {}'.format(method)) From 8e5fe8df72b5c16006123aa3a730b163f99c27cf Mon Sep 17 00:00:00 2001 From: Kabilar Gunalan Date: Fri, 15 Apr 2022 17:47:04 -0500 Subject: [PATCH 28/64] Move background to elements.datajoint.org --- Background.md | 81 --------------------------------------------------- 1 file changed, 81 deletions(-) delete mode 100644 Background.md diff --git a/Background.md b/Background.md deleted file mode 100644 index c1895d1..0000000 --- a/Background.md +++ /dev/null @@ -1,81 +0,0 @@ -
- -# Miniscope Element - -## Description of modality, user population - -Miniature fluorescence microscopes (miniscopes) are a head-mounted calcium imaging full-frame video modality first introduced in 2005 by Mark Schnitzer's lab ([Flusberg et al., Optics Letters 2005](https://pubmed.ncbi.nlm.nih.gov/16190441/)). Due to their light weight, these miniscopes allow measuring the dynamic activity of populations of cortical neurons in freely behaving animals. In 2011, Inscopix Inc. was founded to support one-photon miniscopes as a commercial neuroscience research platform, providing proprietary hardware, acquisition software, and analysis software. Today, they estimate their active user base is 491 labs with a total of 1179 installs. An open-source alternative was launched by a UCLA team led by Daniel Aharoni and Peyman Golshani ([Cai et al., Nature 2016](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5063500/); [Aharoni and Hoogland, Frontiers in Cellular Neuroscience 2019](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6461004/)). In our conversation with Dr. Aharoni, he estimated about 700 labs currently using the UCLA system alone. The Inscopix user base is smaller but more established. Several two-photon miniscopes have been developed but lack widespread adoption likely due to the expensive hardware required for the two-photon excitation ([Helmchen et al., Neuron 2001](https://pubmed.ncbi.nlm.nih.gov/11580892/); [Zong et al., Nature Methods 2017](https://pubmed.ncbi.nlm.nih.gov/28553965/); [Aharoni and Hoogland, Frontiers in Cellular Neuroscience 2019](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6461004/)). Due to the low costs and ability to record during natural behaviors, one-photon miniscope imaging appears to be the fastest growing calcium imaging modality in the field today. In Year 1, we focused our efforts on supporting the UCLA platform due its fast growth and deficiency of standardization in acquisition and processing pipelines. In future phases, we will reach out to Inscopix to support their platform as well. - - -## Acquisition tools -Daniel Aharoni's lab has developed iterations of the UCLA Miniscope platform. Based on interviews, we have found labs using the two most recent versions including [Miniscope DAQ V3](http://miniscope.org/index.php/Information_on_the_(previous_Version_3)_Miniscope_platform) and [Miniscope DAQ V4](https://github.com/Aharoni-Lab/Miniscope-v4/wiki). Labs also use the Bonsai OpenEphys tool for data acquisition with the UCLA miniscope. Inscopix provides the Inscopix Data Acquisition Software (IDAS) for the nVista and nVoke systems. - -## Preprocessing tools -The preprocessing workflow for miniscope imaging includes denoising, motion correction, cell segmentation, and calcium event extraction (sometimes described as "deconvolution" or "spike inference"). 
For the UCLA Miniscopes, the following [analysis packages](https://github.com/Aharoni-Lab/Miniscope-v4/wiki/Analysis-Packages) are commonly used: - -(Package, Developer [Affiliation], Programming Language) - -+ [Miniscope Denoising](https://github.com/Aharoni-Lab/Miniscope-v4/wiki/Removing-Horizontal-Noise-from-Recordings), Daniel Aharoni (UCLA), Python -+ [NoRMCorre](https://github.com/flatironinstitute/NoRMCorre), Flatiron Institute, MATLAB -+ [CNMF-E](https://github.com/zhoupc/CNMF_E), Pengcheng Zhou (Liam Paninski’s Lab, Columbia University), MATLAB -+ [CaImAn](https://github.com/flatironinstitute/CaImAn), Flatiron Institute, Python -+ [miniscoPy](https://github.com/PeyracheLab/miniscoPy), Guillaume Viejo (Adrien Peyrache’s Lab, McGill University), Python -+ [MIN1PIPE](https://github.com/JinghaoLu/MIN1PIPE), Jinghao Lu (Fan Wang’s Lab, MIT), MATLAB -+ [CIAtah](https://github.com/bahanonu/calciumImagingAnalysis), Biafra Ahanonu, MATLAB -+ [MiniAn](https://github.com/DeniseCaiLab/minian), Phil Dong (Denise Cai's Lab, Mount Sinai), Python -+ [MiniscopeAnalysis](https://github.com/etterguillaume/MiniscopeAnalysis), Guillaume Etter (Sylvain Williams’ Lab, McGill University), MATLAB -+ [PIMPN](https://github.com/etterguillaume/PIMPN), Guillaume Etter (Sylvain Williams’ Lab, McGill University), Python -+ [CellReg](https://github.com/zivlab/CellReg), Liron Sheintuch (Yaniv Ziv’s Lab, Weizmann Institute of Science), MATLAB -+ Inscopix Data Processing Software (IDPS) -+ Inscopix Multimodal Image Registration and Analysis (MIRA) - -Based on interviews with UCLA and Inscopix miniscope users and developers, each research lab uses a different preprocessing workflow. These custom workflows are often closed source and not tracked with version control software. For the preprocessing tools that are open source, they are often developed by an individual during their training period and lack funding for long term maintenance. These factors result in a lack of standardization for miniscope preprocessing tools, which is a major obstacle to adoption for new labs. - - -## Precursor projects and interviews -Until recently, DataJoint had not been used for miniscope pipelines. However, labs we have contacted have been eager to engage and adopt DataJoint-based workflows in their labs. - -(Date, Person [Role], Lab, Institution) - -+ March 16, 2021, Niccoló Calcini (Postdoctoral scholar) & Antoine Adamantidis (Associate Professor), Antoine Adamantidis’ Lab, University of Bern -+ March 12, 2021, Biafra Ahanonu (Postdoctoral scholar), Allan Basbaum’s Lab, UCSF -+ March 1, 2021, Lukas Oesch (Postdoctoral scholar), Anne Churchland’s Lab, UCLA -+ February 25, 2021, Manolis Froudarakis (Assistant Professor), Manolis Froudarakis’ Lab, FORTH -+ February 22, 2021, Jinghao Lu (Doctoral student) & Vincent Prevosto (Research scientist), Fan Wang’s Lab, MIT -+ February 12, 2021, Guillaume Viejo (Postdoctoral scholar) & Adrien Peyrache (Assistant Professor), Adrien Peyrache’s Lab, McGill University -+ February 11, 2021, Daniel Aharoni (Assistant Professor), Daniel Aharoni’s Lab, UCLA -+ January 29, 2021, Pingping Zhao & Ronen Reshef (Postdoctoral scholars), Peyman Golshani’s Lab, UCLA - -## Pipeline Development - -With assistance from Peyman Golshani’s Lab (UCLA) we have added support for the UCLA Miniscope DAQ V3 acquisition tool and MiniscopeAnalysis preprocessing tool in `element-miniscope` and `workflow-miniscope`. They have provided example data for development, and will begin validating in March 2021. 
-
-Based on interviews, we are considering adding support for the tools listed below. The deciding factors include the number of users, long-term support, quality controls, and implementation in the Python programming language (so that the preprocessing tool can be triggered within the element; a sketch of this pattern appears at the end of this document).
-
-+ Acquisition tools
-  + Miniscope DAQ V4
-  + Inscopix Data Acquisition Software (IDAS)
-+ Preprocessing tools
-  + Inscopix Data Processing Software (IDPS)
-  + Inscopix Multimodal Image Registration and Analysis (MIRA)
-  + MiniAn
-  + CaImAn
-  + CNMF-E
-  + CellReg
-
-## Alpha release: Validation sites
-
-We have recruited the following teams as alpha users to jointly develop and validate the miniscope workflow in their experiments.
-
-(Lab [PI], Institution, Hosting arrangement, Start time, Experiment 0 target)
-
-+ Peyman Golshani, UCLA, djHub, January 2021, March 2021
-+ Anne Churchland, UCLA, Lab server, April 2021, May 2021
-+ Fan Wang, MIT, djHub, April 2021, May 2021
-+ Antoine Adamantidis, University of Bern, djHub, April 2021, June 2021
-+ Manolis Froudarakis, FORTH, Lab server, April 2021, June 2021
-
-## Beta release
-As the validation progresses, we expect to produce a beta version of the workflow that users can adopt independently by May 1, 2021.
-
-
\ No newline at end of file
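To make the Python-trigger criterion concrete, here is a hedged sketch of invoking one of the candidate tools, CaImAn's CNMF-E, in a form a workflow could call. It follows the pattern of CaImAn's one-photon demo; file names and parameter values are placeholders, and the element's actual processing tables may organize this differently.

```python
import caiman as cm
from caiman.motion_correction import MotionCorrect
from caiman.source_extraction.cnmf import cnmf, params


def run_cnmfe(avi_files, frame_rate):
    """Run motion correction and CNMF-E on a list of miniscope movies."""
    opts = params.CNMFParams(params_dict={
        'fr': frame_rate,           # imaging rate (Hz)
        'method_init': 'corr_pnr',  # one-photon (CNMF-E) initialization
        'K': None,                  # let the algorithm find the components
        'gSig': (3, 3),             # Gaussian half-width of a neuron (pixels)
        'gSiz': (13, 13),           # approximate neuron diameter (pixels)
        'center_psf': True,         # required for one-photon data
        'only_init': True,          # CNMF-E runs entirely in initialization
    })

    # Motion-correct the movies, then memory-map the corrected result
    mc = MotionCorrect(avi_files, **opts.get_group('motion'))
    mc.motion_correct(save_movie=True)
    fname_mmap = cm.save_memmap(mc.mmap_file, base_name='memmap_', order='C')
    Yr, dims, num_frames = cm.load_memmap(fname_mmap)
    images = Yr.T.reshape((num_frames,) + dims, order='F')

    # Source extraction: spatial footprints (A) and temporal traces (C)
    cnm = cnmf.CNMF(n_processes=1, params=opts)
    cnm.fit(images)
    return cnm.estimates.A, cnm.estimates.C


# Example call, e.g. from a processing table's make() method:
# masks, traces = run_cnmfe(['msCam1.avi'], frame_rate=30)
```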

From 6b734ff38a8d51207ba6f88f75a593d4bc620cc1 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Fri, 15 Apr 2022 17:48:05 -0500
Subject: [PATCH 29/64] Add collapsible section

---
 README.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/README.md b/README.md
index b84bd9e..074916a 100644
--- a/README.md
+++ b/README.md
@@ -29,6 +29,10 @@ repository for an example usage of `element-miniscope`.
 + `Location` as a dependency for `ScanLocation`
 
+## Table definitions
+
+<details>
+<summary>Click to expand details</summary>
+
 ### Scan
 + A `Session` (more specifically an experimental session) may have multiple scans, where each scan describes a complete 4D dataset (i.e. 3D volume over time) from one scanning session, typically from the moment of pressing the *start* button to pressing the *stop* button.
...
 + `Activity` - computed neuronal activity trace from fluorescence trace (e.g. spikes)
 
+</details>
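For orientation, here is a short sketch of how a user might query the tables summarized above once the pipeline has run. The key values and attribute names (e.g. `fluorescence`, `activity_trace`, `mask`) are illustrative assumptions, not the element's confirmed definitions.

```python
from element_miniscope import miniscope  # assumed import path

# Hypothetical key identifying one processed scan
key = {'subject': 'subject1',
       'session_datetime': '2022-04-15 12:00:00',
       'scan_id': 0}

# Fluorescence traces for all segmented masks in this scan
traces = (miniscope.Fluorescence.Trace & key).fetch('fluorescence')

# Deconvolved activity for a single mask (attribute names assumed)
activity = (miniscope.Activity.Trace & key & 'mask=0').fetch1('activity_trace')
```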
\ No newline at end of file

From 66b460305d6353ba7f8e7d632fc8a041fd45b2f8 Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Sat, 16 Apr 2022 11:12:52 -0500
Subject: [PATCH 30/64] Add citation section

---
 README.md | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 074916a..47e16fb 100644
--- a/README.md
+++ b/README.md
@@ -73,4 +73,18 @@ repository for an example usage of `element-miniscope`.
 + `Activity` - computed neuronal activity trace from fluorescence trace (e.g. spikes)
 
-</details>
\ No newline at end of file
+</details>
+
+## Citation
+
++ If your work uses DataJoint and DataJoint Elements, please cite the respective Research Resource Identifiers (RRIDs) and manuscripts.
+
++ DataJoint for Python or MATLAB
+  + Yatsenko D, Reimer J, Ecker AS, Walker EY, Sinz F, Berens P, Hoenselaar A, Cotton RJ, Siapas AS, Tolias AS. DataJoint: managing big scientific data using MATLAB or Python. bioRxiv. 2015 Jan 1:031658. doi: https://doi.org/10.1101/031658
+
+  + DataJoint ([RRID:SCR_014543](https://scicrunch.org/resolver/SCR_014543)) - DataJoint for < Python or MATLAB > (version < enter version number >)
+
++ DataJoint Elements
+  + Yatsenko D, Nguyen T, Shen S, Gunalan K, Turner CA, Guzman R, Sasaki M, Sitonic D, Reimer J, Walker EY, Tolias AS. DataJoint Elements: Data Workflows for Neurophysiology. bioRxiv. 2021 Jan 1. doi: https://doi.org/10.1101/2021.03.30.437358
+
+  + DataJoint Elements ([RRID:SCR_021894](https://scicrunch.org/resolver/SCR_021894)) - Element Miniscope (version < enter version number >)
\ No newline at end of file

From 5c738be79aeb7bea2c4cc8ca10f79757ca5f76ef Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Sun, 17 Apr 2022 10:12:17 -0500
Subject: [PATCH 31/64] Add links to elements.datajoint.org

---
 README.md | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index 47e16fb..f7fc126 100644
--- a/README.md
+++ b/README.md
@@ -10,14 +10,9 @@ with `Miniscope DAQ V3` acquisition system and `MiniscopeAnalysis` suite for ana
 any particular design of experiment session, thus assembling a fully functional calcium imaging workflow.
 
-+ See [Background](Background.md) for the background information and development timeline.
++ See the [Element Miniscope documentation](https://elements.datajoint.org/description/miniscope/) for the background information and development timeline.
 
-+ See [DataJoint Elements](https://github.com/datajoint/datajoint-elements) for descriptions of the other `elements` and `workflows` developed as part of this initiative.
-
-## Element usage
-
-+ See [workflow-miniscope](https://github.com/datajoint/workflow-miniscope)
-repository for an example usage of `element-miniscope`.
++ For more information on the DataJoint Elements project, please visit https://elements.datajoint.org. This work is supported by the National Institutes of Health.
 
 ## Element architecture

From b25ca4ca5aa8c267ce0c866d4c7d4745598b509a Mon Sep 17 00:00:00 2001
From: Kabilar Gunalan
Date: Mon, 18 Apr 2022 15:30:44 -0500
Subject: [PATCH 32/64] Update format

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index f7fc126..e876977 100644
--- a/README.md
+++ b/README.md
@@ -77,9 +77,9 @@ repository for an example usage of `element-miniscope`.
 + DataJoint for Python or MATLAB
   + Yatsenko D, Reimer J, Ecker AS, Walker EY, Sinz F, Berens P, Hoenselaar A, Cotton RJ, Siapas AS, Tolias AS. DataJoint: managing big scientific data using MATLAB or Python. bioRxiv. 2015 Jan 1:031658. doi: https://doi.org/10.1101/031658
 
-  + DataJoint ([RRID:SCR_014543](https://scicrunch.org/resolver/SCR_014543)) - DataJoint for < Python or MATLAB > (version < enter version number >)
+  + DataJoint ([RRID:SCR_014543](https://scicrunch.org/resolver/SCR_014543)) - DataJoint for `