From 3d92fb12ea9c68d4f63c16bc7b94a4edc11eb030 Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Thu, 10 Aug 2023 16:37:07 -0500 Subject: [PATCH 01/11] Update prairie_view_loader.py --- element_interface/prairie_view_loader.py | 172 ++++++++++++++++------- 1 file changed, 120 insertions(+), 52 deletions(-) diff --git a/element_interface/prairie_view_loader.py b/element_interface/prairie_view_loader.py index 841e87a..ee306cd 100644 --- a/element_interface/prairie_view_loader.py +++ b/element_interface/prairie_view_loader.py @@ -1,65 +1,90 @@ import pathlib +from pathlib import Path import xml.etree.ElementTree as ET from datetime import datetime - import numpy as np -def get_prairieview_metadata(ome_tif_filepath: str) -> dict: - """Extract metadata for scans generated by Prairie View acquisition software. +class PrairieViewMeta: - The Prairie View software generates one `.ome.tif` imaging file per frame - acquired. The metadata for all frames is contained in one .xml file. This - function locates the .xml file and generates a dictionary necessary to - populate the DataJoint `ScanInfo` and `Field` tables. Prairie View works - with resonance scanners with a single field. Prairie View does not support - bidirectional x and y scanning. ROI information is not contained in the - `.xml` file. All images generated using Prairie View have square dimensions(e.g. 512x512). + def __init__(self, prairieview_dir: str): + """Initialize PrairieViewMeta loader class - Args: - ome_tif_filepath: An absolute path to the .ome.tif image file. + Args: + prairieview_dir (str): string, absolute file path to directory containing PrairieView dataset + """ + # ---- Search and verify CaImAn output file exists ---- + # May return multiple xml files. Only need one that contains scan metadata. + self.prairieview_dir = Path(prairieview_dir) - Raises: - FileNotFoundError: No .xml file containing information about the acquired scan - was found at path in parent directory at `ome_tif_filepath`. + for file in self.prairieview_dir.glob("*.xml"): + xml_tree = ET.parse(file) + xml_root = xml_tree.getroot() + if xml_root.find(".//Sequence"): + self.xml_file = file + self._xml_root = xml_root + break + else: + raise FileNotFoundError( + f"No PrarieView metadata .xml file found at {prairieview_dir}" + ) - Returns: - metainfo: A dict mapping keys to corresponding metadata values fetched from the - .xml file. - """ + self._meta = None - # May return multiple xml files. Only need one that contains scan metadata. 
- xml_files_list = pathlib.Path(ome_tif_filepath).parent.glob("*.xml") + @property + def meta(self): + if self._meta is None: + self._meta = _extract_prairieview_metadata(self.xml_file) + return self._meta - for file in xml_files_list: - xml_tree = ET.parse(file) - xml_file = xml_tree.getroot() - if xml_file.find(".//Sequence"): - break - else: - raise FileNotFoundError( - f"No PrarieView metadata .xml file found at {pathlib.Path(ome_tif_filepath).parent}" - ) + def get_prairieview_files(self, plane_idx=None, channel=None): + if plane_idx is None: + if self.meta['num_planes'] > 1: + raise ValueError(f"Please specify 'plane_idx' - Plane indices: {self.meta['plane_indices']}") + else: + plane_idx = self.meta['plane_indices'][0] + else: + assert plane_idx in self.meta['plane_indices'], f"Invalid 'plane_idx' - Plane indices: {self.meta['plane_indices']}" + + if channel is None: + if self.meta['num_channels'] > 1: + raise ValueError(f"Please specify 'channel' - Channels: {self.meta['channels']}") + else: + plane_idx = self.meta['channels'][0] + else: + assert channel in self.meta['channels'], f"Invalid 'channel' - Channels: {self.meta['channels']}" + + frames = self._xml_root.findall(f".//Sequence/Frame/[@index='{plane_idx}']/File/[@channel='{channel}']") + return [f.attrib['filename'] for f in frames] + + +def _extract_prairieview_metadata(xml_filepath: str): + xml_filepath = Path(xml_filepath) + if not xml_filepath.exists(): + raise FileNotFoundError(f"{xml_filepath} does not exist") + xml_tree = ET.parse(xml_filepath) + xml_root = xml_tree.getroot() bidirectional_scan = False # Does not support bidirectional roi = 0 n_fields = 1 # Always contains 1 field - recording_start_time = xml_file.find(".//Sequence/[@cycle='1']").attrib.get("time") + recording_start_time = xml_root.find(".//Sequence/[@cycle='1']").attrib.get("time") # Get all channels and find unique values channel_list = [ int(channel.attrib.get("channel")) - for channel in xml_file.iterfind(".//Sequence/Frame/File/[@channel]") + for channel in xml_root.iterfind(".//Sequence/Frame/File/[@channel]") ] - n_channels = len(set(channel_list)) - n_frames = len(xml_file.findall(".//Sequence/Frame")) + channels = set(channel_list) + n_channels = len(channels) + n_frames = len(xml_root.findall(".//Sequence/Frame")) framerate = 1 / float( - xml_file.findall('.//PVStateValue/[@key="framePeriod"]')[0].attrib.get("value") + xml_root.findall('.//PVStateValue/[@key="framePeriod"]')[0].attrib.get("value") ) # rate = 1/framePeriod usec_per_line = ( float( - xml_file.findall(".//PVStateValue/[@key='scanLinePeriod']")[0].attrib.get( + xml_root.findall(".//PVStateValue/[@key='scanLinePeriod']")[0].attrib.get( "value" ) ) @@ -67,15 +92,15 @@ def get_prairieview_metadata(ome_tif_filepath: str) -> dict: ) # Convert from seconds to microseconds scan_datetime = datetime.strptime( - xml_file.attrib.get("date"), "%m/%d/%Y %I:%M:%S %p" + xml_root.attrib.get("date"), "%m/%d/%Y %I:%M:%S %p" ) total_scan_duration = float( - xml_file.findall(".//Sequence/Frame")[-1].attrib.get("relativeTime") + xml_root.findall(".//Sequence/Frame")[-1].attrib.get("relativeTime") ) pixel_height = int( - xml_file.findall(".//PVStateValue/[@key='pixelsPerLine']")[0].attrib.get( + xml_root.findall(".//PVStateValue/[@key='pixelsPerLine']")[0].attrib.get( "value" ) ) @@ -83,7 +108,7 @@ def get_prairieview_metadata(ome_tif_filepath: str) -> dict: pixel_width = pixel_height um_per_pixel = float( - xml_file.find( + xml_root.find( 
".//PVStateValue/[@key='micronsPerPixel']/IndexedValue/[@index='XAxis']" ).attrib.get("value") ) @@ -92,43 +117,45 @@ def get_prairieview_metadata(ome_tif_filepath: str) -> dict: # x and y coordinate values for the center of the field x_field = float( - xml_file.find( + xml_root.find( ".//PVStateValue/[@key='currentScanCenter']/IndexedValue/[@index='XAxis']" ).attrib.get("value") ) y_field = float( - xml_file.find( + xml_root.find( ".//PVStateValue/[@key='currentScanCenter']/IndexedValue/[@index='YAxis']" ).attrib.get("value") ) + if ( - xml_file.find( + xml_root.find( ".//Sequence/[@cycle='1']/Frame/PVStateShard/PVStateValue/[@key='positionCurrent']/SubindexedValues/[@index='ZAxis']" ) is None ): z_fields = np.float64( - xml_file.find( + xml_root.find( ".//PVStateValue/[@key='positionCurrent']/SubindexedValues/[@index='ZAxis']/SubindexedValue" ).attrib.get("value") ) n_depths = 1 + plane_indices = {0} assert z_fields.size == n_depths bidirection_z = False - else: bidirection_z = ( - xml_file.find(".//Sequence").attrib.get("bidirectionalZ") == "True" + xml_root.find(".//Sequence").attrib.get("bidirectionalZ") == "True" ) # One "Frame" per depth in the .xml file. Gets number of frames in first sequence planes = [ int(plane.attrib.get("index")) - for plane in xml_file.findall(".//Sequence/[@cycle='1']/Frame") + for plane in xml_root.findall(".//Sequence/[@cycle='1']/Frame") ] - n_depths = len(set(planes)) + plane_indices = set(planes) + n_depths = len(plane_indices) - z_controllers = xml_file.findall( + z_controllers = xml_root.findall( ".//Sequence/[@cycle='1']/Frame/[@index='1']/PVStateShard/PVStateValue/[@key='positionCurrent']/SubindexedValues/[@index='ZAxis']/SubindexedValue" ) @@ -137,13 +164,13 @@ def get_prairieview_metadata(ome_tif_filepath: str) -> dict: # must change depths. if len(z_controllers) > 1: z_repeats = [] - for controller in xml_file.findall( + for controller in xml_root.findall( ".//Sequence/[@cycle='1']/Frame/[@index='1']/PVStateShard/PVStateValue/[@key='positionCurrent']/SubindexedValues/[@index='ZAxis']/" ): z_repeats.append( [ float(z.attrib.get("value")) - for z in xml_file.findall( + for z in xml_root.findall( ".//Sequence/[@cycle='1']/Frame/PVStateShard/PVStateValue/[@key='positionCurrent']/SubindexedValues/[@index='ZAxis']/SubindexedValue/[@subindex='{0}']".format( controller.attrib.get("subindex") ) @@ -163,7 +190,7 @@ def get_prairieview_metadata(ome_tif_filepath: str) -> dict: else: z_fields = [ z.attrib.get("value") - for z in xml_file.findall( + for z in xml_root.findall( ".//Sequence/[@cycle='1']/Frame/PVStateShard/PVStateValue/[@key='positionCurrent']/SubindexedValues/[@index='ZAxis']/SubindexedValue/[@subindex='0']" ) ] @@ -195,6 +222,47 @@ def get_prairieview_metadata(ome_tif_filepath: str) -> dict: fieldY=y_field, fieldZ=z_fields, recording_time=recording_start_time, + channels=list(channels), + plane_indices=list(plane_indices), ) return metainfo + + +def get_prairieview_metadata(ome_tif_filepath: str) -> dict: + """Extract metadata for scans generated by Prairie View acquisition software. + + The Prairie View software generates one `.ome.tif` imaging file per frame + acquired. The metadata for all frames is contained in one .xml file. This + function locates the .xml file and generates a dictionary necessary to + populate the DataJoint `ScanInfo` and `Field` tables. Prairie View works + with resonance scanners with a single field. Prairie View does not support + bidirectional x and y scanning. ROI information is not contained in the + `.xml` file. 
All images generated using Prairie View have square dimensions(e.g. 512x512). + + Args: + ome_tif_filepath: An absolute path to the .ome.tif image file. + + Raises: + FileNotFoundError: No .xml file containing information about the acquired scan + was found at path in parent directory at `ome_tif_filepath`. + + Returns: + metainfo: A dict mapping keys to corresponding metadata values fetched from the + .xml file. + """ + + # May return multiple xml files. Only need one that contains scan metadata. + xml_files_list = pathlib.Path(ome_tif_filepath).parent.glob("*.xml") + + for file in xml_files_list: + xml_tree = ET.parse(file) + xml_file = xml_tree.getroot() + if xml_file.find(".//Sequence"): + break + else: + raise FileNotFoundError( + f"No PrarieView metadata .xml file found at {pathlib.Path(ome_tif_filepath).parent}" + ) + + return _extract_prairieview_metadata(file) From 701a5a443f3565b6c72737706007f61552b8c714 Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Wed, 30 Aug 2023 19:00:16 -0500 Subject: [PATCH 02/11] subprocess `shell` as input argument --- element_interface/dandi.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/element_interface/dandi.py b/element_interface/dandi.py index 01553f4..dbaeee9 100644 --- a/element_interface/dandi.py +++ b/element_interface/dandi.py @@ -13,6 +13,7 @@ def upload_to_dandi( api_key: str = None, sync: bool = False, existing: str = "refresh", + shell=True, # without this param, subprocess interprets first arg as file/dir ): """Upload NWB files to DANDI Archive @@ -38,25 +39,35 @@ def upload_to_dandi( working_directory, str(dandiset_id) ) # enforce str - dandiset_url = f"https://gui-staging.dandiarchive.org/#/dandiset/{dandiset_id}" if staging else f"https://dandiarchive.org/dandiset/{dandiset_id}/draft" + dandiset_url = ( + f"https://gui-staging.dandiarchive.org/#/dandiset/{dandiset_id}" + if staging + else f"https://dandiarchive.org/dandiset/{dandiset_id}/draft" + ) subprocess.run( - ["dandi", "download", "--download", "dandiset.yaml", "-o", working_directory, dandiset_url], - shell=True, + [ + "dandi", + "download", + "--download", + "dandiset.yaml", + "-o", + working_directory, + dandiset_url, + ], + shell=shell, ) subprocess.run( ["dandi", "organize", "-d", dandiset_directory, data_directory, "-f", "dry"], - shell=True, # without this param, subprocess interprets first arg as file/dir + shell=shell, # without this param, subprocess interprets first arg as file/dir ) subprocess.run( - ["dandi", "organize", "-d", dandiset_directory, data_directory], shell=True + ["dandi", "organize", "-d", dandiset_directory, data_directory], shell=shell ) - subprocess.run( - ["dandi", "validate", dandiset_directory], shell=True - ) + subprocess.run(["dandi", "validate", dandiset_directory], shell=shell) upload( paths=[dandiset_directory], From 29bc09b6d46a7cd40bae17e6962b6d074dccb2d6 Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Wed, 30 Aug 2023 19:29:20 -0500 Subject: [PATCH 03/11] version pin dandi --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 75d95e8..b18b774 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -dandi +dandi>=0.56.0 numpy From 26c38b6c43cdb282df48637890e148a715a00a6a Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Sun, 3 Sep 2023 11:21:31 -0500 Subject: [PATCH 04/11] Update dandi.py --- element_interface/dandi.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git 
a/element_interface/dandi.py b/element_interface/dandi.py index dbaeee9..078e58a 100644 --- a/element_interface/dandi.py +++ b/element_interface/dandi.py @@ -1,7 +1,6 @@ import os import subprocess -from dandi.download import download from dandi.upload import upload @@ -64,7 +63,18 @@ def upload_to_dandi( ) subprocess.run( - ["dandi", "organize", "-d", dandiset_directory, data_directory], shell=shell + [ + "dandi", + "organize", + "-d", + dandiset_directory, + data_directory, + "--required-field", + "subject_id", + "--required-field", + "session_id", + ], + shell=shell, ) subprocess.run(["dandi", "validate", dandiset_directory], shell=shell) From 1727705c636a5c48b99eeaf66bac4bbf9742a0cc Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Tue, 5 Sep 2023 22:16:28 -0500 Subject: [PATCH 05/11] add `validation` argument to dandi upload --- element_interface/dandi.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/element_interface/dandi.py b/element_interface/dandi.py index 078e58a..22486b3 100644 --- a/element_interface/dandi.py +++ b/element_interface/dandi.py @@ -12,6 +12,7 @@ def upload_to_dandi( api_key: str = None, sync: bool = False, existing: str = "refresh", + validation: str = "required", shell=True, # without this param, subprocess interprets first arg as file/dir ): """Upload NWB files to DANDI Archive @@ -27,6 +28,7 @@ def upload_to_dandi( sync (str, optional): If True, delete all files in archive that are not present in the local directory. existing (str, optional): see full description from `dandi upload --help` + validation (str, optional): [require|skip|ignore] see full description from `dandi upload --help` """ working_directory = working_directory or os.path.curdir @@ -84,4 +86,5 @@ def upload_to_dandi( dandi_instance="dandi-staging" if staging else "dandi", existing=existing, sync=sync, + validation=validation, ) From 6582a2dfd3a157d3910086063f0dc6101fd2670c Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Tue, 5 Sep 2023 22:24:18 -0500 Subject: [PATCH 06/11] remove dry run --- element_interface/dandi.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/element_interface/dandi.py b/element_interface/dandi.py index 22486b3..ef9fd13 100644 --- a/element_interface/dandi.py +++ b/element_interface/dandi.py @@ -59,11 +59,6 @@ def upload_to_dandi( shell=shell, ) - subprocess.run( - ["dandi", "organize", "-d", dandiset_directory, data_directory, "-f", "dry"], - shell=shell, # without this param, subprocess interprets first arg as file/dir - ) - subprocess.run( [ "dandi", From a2429940928f027a36b88095568b04849df6f649 Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Thu, 1 Feb 2024 13:39:05 -0600 Subject: [PATCH 07/11] feat(utils): decorator for results memoization for expensive function calls --- element_interface/utils.py | 69 +++++++++++++++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/element_interface/utils.py b/element_interface/utils.py index 14d4eee..c3832f4 100644 --- a/element_interface/utils.py +++ b/element_interface/utils.py @@ -5,7 +5,9 @@ import pathlib import sys import uuid - +import json +import pickle +from datetime import datetime from datajoint.utils import to_camel_case logger = logging.getLogger("datajoint") @@ -187,3 +189,68 @@ def __exit__(self, *args): logger.setLevel(self.prev_log_level) sys.stdout.close() sys.stdout = self._original_stdout + + +def memoized_result(parameters: dict, output_directory: str): + """ + This is a decorator factory designed to cache the results of a function based on its input 
parameters and the state of the output directory. + If the function is called with the same parameters and the output files in the directory remain unchanged, + it returns the cached results; otherwise, it executes the function and caches the new results along with metadata. + Conditions for robust usage: + - the "output_directory" is to store exclusively the resulting files generated by this function call only, not a shared space with other functions/processes + - the "parameters" passed to the decorator captures the true and uniqueness of the arguments to be used in the decorated function call + Args: + parameters: parameters that would identify a unique function call + output_directory: directory location for the output files + + Returns: a decorator to enable a function call to memoize/cached the resulting files + """ + + def decorator(func): + def wrapped(*args, **kwargs): + output_dir = _to_Path(output_directory) + input_hash = dict_to_uuid(parameters) + input_hash_fp = output_dir / f".{input_hash}.json" + # check if results already exist (from previous identical run) + output_dir_files_hash = dict_to_uuid( + { + f.relative_to(output_dir).as_posix(): f.stat().st_size + for f in output_dir.rglob("*") + if f.name != f".{input_hash}.json" + } + ) + if input_hash_fp.exists(): + with open(input_hash_fp, "r") as f: + meta = json.load(f) + if str(output_dir_files_hash) == meta["output_dir_files_hash"]: + logger.info(f"Existing results found, skip '{func.__name__}'") + with open(output_dir / f".{input_hash}_results.pickle", "rb") as f: + results = pickle.load(f) + return results + # no results - trigger the run + logger.info(f"No existing results found, calling '{func.__name__}'") + start_time = datetime.utcnow() + results = func(*args, **kwargs) + + with open(output_dir / f".{input_hash}_results.pickle", "wb") as f: + pickle.dump(results, f, protocol=pickle.HIGHEST_PROTOCOL) + + meta = { + "output_dir_files_hash": dict_to_uuid( + { + f.relative_to(output_dir).as_posix(): f.stat().st_size + for f in output_dir.rglob("*") + if f.name != f".{input_hash}.json" + } + ), + "start_time": start_time, + "completion_time": datetime.utcnow(), + } + with open(input_hash_fp, "w") as f: + json.dump(meta, f, default=str) + + return results + + return wrapped + + return decorator From c908bc21bfa506ba03a68c2e4892b934cb9defad Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Thu, 1 Feb 2024 16:24:58 -0600 Subject: [PATCH 08/11] Update requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index b18b774..65c4718 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ +datajoint dandi>=0.56.0 numpy From 81f1831695f8278291dd7083c9c6e308c5f357b1 Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Thu, 1 Feb 2024 16:25:50 -0600 Subject: [PATCH 09/11] Revert "Update requirements.txt" This reverts commit c908bc21bfa506ba03a68c2e4892b934cb9defad. --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 65c4718..b18b774 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,2 @@ -datajoint dandi>=0.56.0 numpy From 48d6aa7fb1168bfcb4db3256a3a1936aaf037c4d Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Fri, 2 Feb 2024 12:09:51 -0600 Subject: [PATCH 10/11] Revert "feat(utils): decorator for results memoization for expensive function calls" This reverts commit a2429940928f027a36b88095568b04849df6f649. 
--- element_interface/utils.py | 69 +------------------------------------- 1 file changed, 1 insertion(+), 68 deletions(-) diff --git a/element_interface/utils.py b/element_interface/utils.py index c3832f4..14d4eee 100644 --- a/element_interface/utils.py +++ b/element_interface/utils.py @@ -5,9 +5,7 @@ import pathlib import sys import uuid -import json -import pickle -from datetime import datetime + from datajoint.utils import to_camel_case logger = logging.getLogger("datajoint") @@ -189,68 +187,3 @@ def __exit__(self, *args): logger.setLevel(self.prev_log_level) sys.stdout.close() sys.stdout = self._original_stdout - - -def memoized_result(parameters: dict, output_directory: str): - """ - This is a decorator factory designed to cache the results of a function based on its input parameters and the state of the output directory. - If the function is called with the same parameters and the output files in the directory remain unchanged, - it returns the cached results; otherwise, it executes the function and caches the new results along with metadata. - Conditions for robust usage: - - the "output_directory" is to store exclusively the resulting files generated by this function call only, not a shared space with other functions/processes - - the "parameters" passed to the decorator captures the true and uniqueness of the arguments to be used in the decorated function call - Args: - parameters: parameters that would identify a unique function call - output_directory: directory location for the output files - - Returns: a decorator to enable a function call to memoize/cached the resulting files - """ - - def decorator(func): - def wrapped(*args, **kwargs): - output_dir = _to_Path(output_directory) - input_hash = dict_to_uuid(parameters) - input_hash_fp = output_dir / f".{input_hash}.json" - # check if results already exist (from previous identical run) - output_dir_files_hash = dict_to_uuid( - { - f.relative_to(output_dir).as_posix(): f.stat().st_size - for f in output_dir.rglob("*") - if f.name != f".{input_hash}.json" - } - ) - if input_hash_fp.exists(): - with open(input_hash_fp, "r") as f: - meta = json.load(f) - if str(output_dir_files_hash) == meta["output_dir_files_hash"]: - logger.info(f"Existing results found, skip '{func.__name__}'") - with open(output_dir / f".{input_hash}_results.pickle", "rb") as f: - results = pickle.load(f) - return results - # no results - trigger the run - logger.info(f"No existing results found, calling '{func.__name__}'") - start_time = datetime.utcnow() - results = func(*args, **kwargs) - - with open(output_dir / f".{input_hash}_results.pickle", "wb") as f: - pickle.dump(results, f, protocol=pickle.HIGHEST_PROTOCOL) - - meta = { - "output_dir_files_hash": dict_to_uuid( - { - f.relative_to(output_dir).as_posix(): f.stat().st_size - for f in output_dir.rglob("*") - if f.name != f".{input_hash}.json" - } - ), - "start_time": start_time, - "completion_time": datetime.utcnow(), - } - with open(input_hash_fp, "w") as f: - json.dump(meta, f, default=str) - - return results - - return wrapped - - return decorator From 20cf21ea86b00b9b7e31aeca0469e2ff26e37dce Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Wed, 22 May 2024 14:07:55 -0500 Subject: [PATCH 11/11] fix(suite2p_loader): allow loading suite2p results without ROI detection or trace extraction --- element_interface/suite2p_loader.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/element_interface/suite2p_loader.py b/element_interface/suite2p_loader.py index 
07dbff1..2e6884b 100644 --- a/element_interface/suite2p_loader.py +++ b/element_interface/suite2p_loader.py @@ -138,15 +138,6 @@ def __init__(self, suite2p_plane_dir: str): ) self.creation_time = datetime.fromtimestamp(ops_fp.stat().st_ctime) - iscell_fp = self.fpath / "iscell.npy" - if not iscell_fp.exists(): - raise FileNotFoundError( - 'No "iscell.npy" found. Invalid suite2p plane folder: {}'.format( - self.fpath - ) - ) - self.curation_time = datetime.fromtimestamp(iscell_fp.stat().st_ctime) - # -- Initialize attributes -- for s2p_type in _suite2p_ftypes: setattr(self, "_{}".format(s2p_type), None) @@ -160,6 +151,11 @@ def __init__(self, suite2p_plane_dir: str): # -- load core files -- + @property + def curation_time(self): + print("DeprecationWarning: 'curation_time' is deprecated, set to be the same as 'creation time', no longer reliable.") + return self.creation_time + @property def ops(self): if self._ops is None:
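
Usage sketch: a minimal, hedged example of driving the PrairieViewMeta loader introduced in PATCH 01/11. The dataset path below is hypothetical, and only metadata keys that appear in the patch ('num_planes', 'num_channels', 'plane_indices', 'channels') are referenced; this is a sketch of intended use, not part of the committed code.

    from element_interface.prairie_view_loader import PrairieViewMeta

    # Point the loader at a directory containing a PrairieView .xml metadata
    # file (path is hypothetical, for illustration only).
    pv = PrairieViewMeta("/data/subject01/scan01")

    # Metadata is parsed lazily from the .xml on first access of `meta`.
    meta = pv.meta
    print(meta["num_planes"], meta["num_channels"])

    # List the per-frame .ome.tif files for one plane/channel. Passing both
    # indices explicitly sidesteps the default-resolution branch used for
    # single-plane, single-channel scans.
    files = pv.get_prairieview_files(
        plane_idx=meta["plane_indices"][0],
        channel=meta["channels"][0],
    )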