diff --git a/README.md b/README.md index ab29ce14..bc2632cb 100644 --- a/README.md +++ b/README.md @@ -79,6 +79,6 @@ Ensure you stay in the `~/ProjectAeon/aeon_mecha` directory for the rest of the If you use this software, please cite it as below: -Sainsbury Wellcome Centre Foraging Behaviour Working Group. (2023). Aeon: An open-source platform to study the neural basis of ethological behaviours over naturalistic timescales, https://doi.org/10.5281/zenodo.8413142 +Sainsbury Wellcome Centre Foraging Behaviour Working Group. (2023). Aeon: An open-source platform to study the neural basis of ethological behaviours over naturalistic timescales, https://doi.org/10.5281/zenodo.8411157 -[![DOI](https://zenodo.org/badge/485512362.svg)](https://zenodo.org/badge/latestdoi/485512362) \ No newline at end of file +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.8411157.svg)](https://zenodo.org/doi/10.5281/zenodo.8411157) diff --git a/aeon/dj_pipeline/acquisition.py b/aeon/dj_pipeline/acquisition.py index 58b6f8f4..5057a86e 100644 --- a/aeon/dj_pipeline/acquisition.py +++ b/aeon/dj_pipeline/acquisition.py @@ -5,13 +5,13 @@ import numpy as np import pandas as pd -from aeon.analysis import utils as analysis_utils from aeon.io import api as io_api +from aeon.schema import schemas as aeon_schemas from aeon.io import reader as io_reader -from aeon.schema import dataset as aeon_schema +from aeon.analysis import utils as analysis_utils -from . import get_schema_name -from .utils import paths +from aeon.dj_pipeline import get_schema_name, lab, subject +from aeon.dj_pipeline.utils import paths logger = dj.logger schema = dj.schema(get_schema_name("acquisition")) @@ -22,19 +22,16 @@ "exp0.1-r0": "FrameTop", "social0-r1": "FrameTop", "exp0.2-r0": "CameraTop", - "oct1.0-r0": "CameraTop", - "social0.1-a3": "CameraTop", - "social0.1-a4": "CameraTop" } -_device_schema_mapping = { - "exp0.1-r0": aeon_schema.exp01, - "social0-r1": aeon_schema.exp01, - "exp0.2-r0": aeon_schema.exp02, - "oct1.0-r0": aeon_schema.octagon01, - "social0.1-a3": aeon_schema.social01, - "social0.1-a4": aeon_schema.social01 -} +# _device_schema_mapping = { +# "exp0.1-r0": aeon_schemas.exp01, +# "social0-r1": aeon_schemas.exp01, +# "exp0.2-r0": aeon_schemas.exp02, +# "oct1.0-r0": aeon_schemas.octagon01, +# "social0.1-a3": aeon_schemas.social01, +# "social0.1-a4": aeon_schemas.social01, +# } # ------------------- Type Lookup ------------------------ @@ -68,6 +65,15 @@ class EventType(dj.Lookup): ] +@schema +class DevicesSchema(dj.Lookup): + definition = """ + devices_schema_name: varchar(32) + """ + + contents = zip(aeon_schemas.__all__) + + # ------------------- Data repository/directory ------------------------ @@ -95,7 +101,7 @@ class DirectoryType(dj.Lookup): @schema class Experiment(dj.Manual): definition = """ - experiment_name: varchar(32) # e.g exp0-r0 + experiment_name: varchar(32) # e.g exp0-aeon3 --- experiment_start_time: datetime(6) # datetime of the start of this experiment experiment_description: varchar(1000) @@ -119,6 +125,13 @@ class Directory(dj.Part): directory_path: varchar(255) """ + class DevicesSchema(dj.Part): + definition = """ + -> master + --- + -> DevicesSchema + """ + @classmethod def get_data_directory(cls, experiment_key, directory_type="raw", as_posix=False): try: @@ -272,16 +285,29 @@ class Config(dj.Part): metadata_file_path: varchar(255) # path of the file, relative to the experiment repository """ + class DeviceType(dj.Part): + definition = """ # Device type(s) used in a particular acquisition epoch + -> 
master + device_type: varchar(36) + """ + @classmethod def ingest_epochs(cls, experiment_name, start=None, end=None): """Ingest epochs for the specified "experiment_name". Ingest only epochs that start in between the specified (start, end) time. If not specified, ingest all epochs. Note: "start" and "end" are datetime specified a string in the format: "%Y-%m-%d %H:%M:%S". """ - from .utils import streams_maker - from .utils.load_metadata import (extract_epoch_config, - ingest_epoch_metadata, - insert_device_types) + from aeon.dj_pipeline.utils import streams_maker + from aeon.dj_pipeline.utils.load_metadata import ( + extract_epoch_config, + ingest_epoch_metadata, + insert_device_types, + ) + + devices_schema = getattr( + aeon_schemas, + (Experiment.DevicesSchema & {"experiment_name": experiment_name}).fetch1("devices_schema_name"), + ) device_name = _ref_device_mapping.get(experiment_name, "CameraTop") @@ -309,7 +335,9 @@ def ingest_epochs(cls, experiment_name, start=None, end=None): if experiment_name != "exp0.1-r0": metadata_yml_filepath = epoch_dir / "Metadata.yml" if metadata_yml_filepath.exists(): - epoch_config = extract_epoch_config(experiment_name, metadata_yml_filepath) + epoch_config = extract_epoch_config( + experiment_name, devices_schema, metadata_yml_filepath + ) metadata_yml_filepath = epoch_config["metadata_file_path"] @@ -352,17 +380,24 @@ def ingest_epochs(cls, experiment_name, start=None, end=None): try: # Insert new entries for streams.DeviceType, streams.Device. insert_device_types( - _device_schema_mapping[epoch_key["experiment_name"]], + devices_schema, metadata_yml_filepath, ) # Define and instantiate new devices/stream tables under `streams` schema streams_maker.main() with cls.connection.transaction: # Insert devices' installation/removal/settings - ingest_epoch_metadata(experiment_name, metadata_yml_filepath) + epoch_device_types = ingest_epoch_metadata( + experiment_name, devices_schema, metadata_yml_filepath + ) + if epoch_device_types is not None: + cls.DeviceType.insert( + epoch_key | {"device_type": n} for n in epoch_device_types + ) epoch_list.append(epoch_key) except Exception as e: (cls.Config & epoch_key).delete_quick() + (cls.DeviceType & epoch_key).delete_quick() (cls & epoch_key).delete_quick() raise e @@ -452,6 +487,12 @@ def ingest_chunks(cls, experiment_name): epoch_end = (EpochEnd & epoch_key).fetch1("epoch_end") chunk_end = min(chunk_end, epoch_end) + if chunk_start in chunk_starts: + # handle cases where two chunks with identical start_time + # (starts in the same hour) but from 2 consecutive epochs + # using epoch_start as chunk_start in this case + chunk_start = epoch_start + # --- insert to Chunk --- chunk_key = {"experiment_name": experiment_name, "chunk_start": chunk_start} @@ -459,12 +500,6 @@ def ingest_chunks(cls, experiment_name): # skip over those already ingested continue - if chunk_start in chunk_starts: - # handle cases where two chunks with identical start_time - # (starts in the same hour) but from 2 consecutive epochs - # using epoch_start as chunk_start in this case - chunk_key["chunk_start"] = epoch_start - # chunk file and directory raw_data_dir, directory, repo_path = _match_experiment_directory( experiment_name, chunk_rep_file, raw_data_dirs @@ -527,7 +562,13 @@ def make(self, key): pd.Timestamp(chunk_end), ) else: - device = _device_schema_mapping[key["experiment_name"]].ExperimentalMetadata + devices_schema = getattr( + aeon_schemas, + (Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + 
"devices_schema_name" + ), + ) + device = devices_schema.ExperimentalMetadata subject_data = io_api.load( root=raw_data_dir.as_posix(), reader=device.SubjectState, @@ -578,7 +619,13 @@ def make(self, key): pd.Timestamp(chunk_end), ) else: - device = _device_schema_mapping[key["experiment_name"]].ExperimentalMetadata + devices_schema = getattr( + aeon_schemas, + (Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + device = devices_schema.ExperimentalMetadata subject_data = io_api.load( root=raw_data_dir.as_posix(), reader=device.SubjectState, @@ -617,7 +664,13 @@ def make(self, key): # Populate the part table raw_data_dir = Experiment.get_data_directory(key) - device = _device_schema_mapping[key["experiment_name"]].ExperimentalMetadata + devices_schema = getattr( + aeon_schemas, + (Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + device = devices_schema.ExperimentalMetadata try: # handles corrupted files - issue: https://github.com/SainsburyWellcomeCentre/aeon_mecha/issues/153 @@ -698,7 +751,14 @@ def make(self, key): raw_data_dir = Experiment.get_data_directory(key, directory_type=dir_type) - device = getattr(_device_schema_mapping[key["experiment_name"]], food_patch_description) + devices_schema = getattr( + aeon_schemas, + (Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + + device = getattr(devices_schema, food_patch_description) pellet_data = pd.concat( [ @@ -775,7 +835,14 @@ def make(self, key): raw_data_dir = Experiment.get_data_directory(key, directory_type=dir_type) - device = getattr(_device_schema_mapping[key["experiment_name"]], food_patch_description) + devices_schema = getattr( + aeon_schemas, + (Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + + device = getattr(devices_schema, food_patch_description) wheel_data = io_api.load( root=raw_data_dir.as_posix(), @@ -799,7 +866,14 @@ def get_wheel_data(cls, experiment_name, start, end, patch_name="Patch1", using_ key = {"experiment_name": experiment_name} raw_data_dir = Experiment.get_data_directory(key) - device = getattr(_device_schema_mapping[key["experiment_name"]], patch_name) + devices_schema = getattr( + aeon_schemas, + (Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + + device = getattr(devices_schema, patch_name) wheel_data = io_api.load( root=raw_data_dir.as_posix(), @@ -886,7 +960,14 @@ def make(self, key): food_patch_description = (ExperimentFoodPatch & key).fetch1("food_patch_description") raw_data_dir = Experiment.get_data_directory(key, directory_type=dir_type) - device = getattr(_device_schema_mapping[key["experiment_name"]], food_patch_description) + devices_schema = getattr( + aeon_schemas, + (Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + + device = getattr(devices_schema, food_patch_description) wheel_state = io_api.load( root=raw_data_dir.as_posix(), @@ -945,9 +1026,16 @@ def make(self, key): weight_scale_description = (ExperimentWeightScale & key).fetch1("weight_scale_description") + devices_schema = getattr( + aeon_schemas, + (Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + # in some epochs/chunks, the food patch device was mapped to "Nest" for device_name 
in (weight_scale_description, "Nest"): - device = getattr(_device_schema_mapping[key["experiment_name"]], device_name) + device = getattr(devices_schema, device_name) weight_data = io_api.load( root=raw_data_dir.as_posix(), reader=device.WeightRaw, @@ -987,9 +1075,16 @@ def make(self, key): raw_data_dir = Experiment.get_data_directory(key, directory_type=dir_type) weight_scale_description = (ExperimentWeightScale & key).fetch1("weight_scale_description") + devices_schema = getattr( + aeon_schemas, + (Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + # in some epochs/chunks, the food patch device was mapped to "Nest" for device_name in (weight_scale_description, "Nest"): - device = getattr(_device_schema_mapping[key["experiment_name"]], device_name) + device = getattr(devices_schema, device_name) weight_filtered = io_api.load( root=raw_data_dir.as_posix(), reader=device.WeightFiltered, @@ -1097,8 +1192,7 @@ def _load_legacy_subjectdata(experiment_name, data_dir, start, end): return subject_data if experiment_name == "social0-r1": - from aeon.dj_pipeline.create_experiments.create_socialexperiment_0 import \ - fixID + from aeon.dj_pipeline.create_experiments.create_socialexperiment_0 import fixID sessdf = subject_data.copy() sessdf = sessdf[~sessdf.id.str.contains("test")] diff --git a/aeon/dj_pipeline/create_experiments/device_type_mapper.json b/aeon/dj_pipeline/create_experiments/device_type_mapper.json index c4738ec4..848f0f3b 100644 --- a/aeon/dj_pipeline/create_experiments/device_type_mapper.json +++ b/aeon/dj_pipeline/create_experiments/device_type_mapper.json @@ -1 +1 @@ -{"VideoController": "CameraController", "CameraTop": "VideoSource", "CameraWest": "VideoSource", "CameraEast": "VideoSource", "CameraNorth": "VideoSource", "CameraSouth": "VideoSource", "CameraPatch1": "VideoSource", "CameraPatch2": "VideoSource", "CameraNest": "VideoSource", "AudioAmbient": "AudioSource", "Patch1": "UndergroundFeeder", "Patch2": "UndergroundFeeder", "WeightNest": "WeightScale", "TrackingTop": "PositionTracking", "ActivityCenter": "ActivityTracking", "ActivityArena": "ActivityTracking", "ActivityNest": "ActivityTracking", "ActivityPatch1": "ActivityTracking", "ActivityPatch2": "ActivityTracking", "InNest": "RegionTracking", "InPatch1": "RegionTracking", "InPatch2": "RegionTracking", "ArenaCenter": "DistanceFromPoint", "InArena": "InRange", "InCorridor": "InRange", "ClockSynchronizer": "Synchronizer", "Rfid": "Rfid Reader"} \ No newline at end of file +{"VideoController": "CameraController", "CameraTop": "SpinnakerVideoSource", "CameraWest": "SpinnakerVideoSource", "CameraEast": "SpinnakerVideoSource", "CameraNorth": "SpinnakerVideoSource", "CameraSouth": "SpinnakerVideoSource", "CameraPatch1": "SpinnakerVideoSource", "CameraPatch2": "SpinnakerVideoSource", "CameraNest": "SpinnakerVideoSource", "AudioAmbient": "AudioSource", "Patch1": "UndergroundFeeder", "Patch2": "UndergroundFeeder", "WeightNest": "WeightScale", "TrackingTop": "PositionTracking", "ActivityCenter": "ActivityTracking", "ActivityArena": "ActivityTracking", "ActivityNest": "ActivityTracking", "ActivityPatch1": "ActivityTracking", "ActivityPatch2": "ActivityTracking", "InNest": "RegionTracking", "InPatch1": "RegionTracking", "InPatch2": "RegionTracking", "ArenaCenter": "DistanceFromPoint", "InArena": "InRange", "InCorridor": "InRange", "ClockSynchronizer": "TimestampGenerator", "Rfid": "Rfid Reader", "CameraPatch3": "SpinnakerVideoSource", "Patch3": "UndergroundFeeder", 
"Nest": "WeightScale", "RfidNest1": "RfidReader", "RfidNest2": "RfidReader", "RfidGate": "RfidReader", "RfidPatch1": "RfidReader", "RfidPatch2": "RfidReader", "RfidPatch3": "RfidReader", "LightCycle": "EnvironmentCondition"} \ No newline at end of file diff --git a/aeon/dj_pipeline/streams.py b/aeon/dj_pipeline/streams.py index 0d0f8c58..49558b03 100644 --- a/aeon/dj_pipeline/streams.py +++ b/aeon/dj_pipeline/streams.py @@ -1,26 +1,22 @@ #---- DO NOT MODIFY ---- #---- THIS FILE IS AUTO-GENERATED BY `streams_maker.py` ---- -from uuid import UUID - +import re import datajoint as dj import pandas as pd +from uuid import UUID import aeon from aeon.dj_pipeline import acquisition, get_schema_name from aeon.io import api as io_api +from aeon.schema import schemas as aeon_schemas schema = dj.Schema(get_schema_name("streams")) @schema class StreamType(dj.Lookup): - """ - Catalog of all steam types for the different device types used across Project Aeon - One StreamType corresponds to one reader class in `aeon.io.reader` - The combination of `stream_reader` and `stream_reader_kwargs` should fully specify - the data loading routine for a particular device, using the `aeon.io.utils` - """ + """Catalog of all steam types for the different device types used across Project Aeon. One StreamType corresponds to one reader class in `aeon.io.reader`. The combination of `stream_reader` and `stream_reader_kwargs` should fully specify the data loading routine for a particular device, using the `aeon.io.utils`.""" definition = """ # Catalog of all stream types used across Project Aeon stream_type : varchar(20) @@ -35,9 +31,7 @@ class StreamType(dj.Lookup): @schema class DeviceType(dj.Lookup): - """ - Catalog of all device types used across Project Aeon - """ + """Catalog of all device types used across Project Aeon.""" definition = """ # Catalog of all device types used across Project Aeon device_type: varchar(36) @@ -61,6 +55,60 @@ class Device(dj.Lookup): """ +@schema +class RfidReader(dj.Manual): + definition = f""" + # rfid_reader placement and operation for a particular time period, at a certain location, for a given experiment (auto-generated with aeon_mecha-unknown) + -> acquisition.Experiment + -> Device + rfid_reader_install_time : datetime(6) # time of the rfid_reader placed and started operation at this position + --- + rfid_reader_name : varchar(36) + """ + + class Attribute(dj.Part): + definition = """ # metadata/attributes (e.g. FPS, config, calibration, etc.) associated with this experimental device + -> master + attribute_name : varchar(32) + --- + attribute_value=null : longblob + """ + + class RemovalTime(dj.Part): + definition = f""" + -> master + --- + rfid_reader_removal_time: datetime(6) # time of the rfid_reader being removed + """ + + +@schema +class SpinnakerVideoSource(dj.Manual): + definition = f""" + # spinnaker_video_source placement and operation for a particular time period, at a certain location, for a given experiment (auto-generated with aeon_mecha-unknown) + -> acquisition.Experiment + -> Device + spinnaker_video_source_install_time : datetime(6) # time of the spinnaker_video_source placed and started operation at this position + --- + spinnaker_video_source_name : varchar(36) + """ + + class Attribute(dj.Part): + definition = """ # metadata/attributes (e.g. FPS, config, calibration, etc.) 
associated with this experimental device + -> master + attribute_name : varchar(32) + --- + attribute_value=null : longblob + """ + + class RemovalTime(dj.Part): + definition = f""" + -> master + --- + spinnaker_video_source_removal_time: datetime(6) # time of the spinnaker_video_source being removed + """ + + @schema class UndergroundFeeder(dj.Manual): definition = f""" @@ -89,14 +137,14 @@ class RemovalTime(dj.Part): @schema -class VideoSource(dj.Manual): +class WeightScale(dj.Manual): definition = f""" - # video_source placement and operation for a particular time period, at a certain location, for a given experiment (auto-generated with aeon_mecha-unknown) + # weight_scale placement and operation for a particular time period, at a certain location, for a given experiment (auto-generated with aeon_mecha-unknown) -> acquisition.Experiment -> Device - video_source_install_time : datetime(6) # time of the video_source placed and started operation at this position + weight_scale_install_time : datetime(6) # time of the weight_scale placed and started operation at this position --- - video_source_name : varchar(36) + weight_scale_name : varchar(36) """ class Attribute(dj.Part): @@ -111,21 +159,145 @@ class RemovalTime(dj.Part): definition = f""" -> master --- - video_source_removal_time: datetime(6) # time of the video_source being removed + weight_scale_removal_time: datetime(6) # time of the weight_scale being removed """ @schema -class UndergroundFeederBeamBreak(dj.Imported): - definition = """# Raw per-chunk BeamBreak data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) - -> UndergroundFeeder - -> acquisition.Chunk - --- - sample_count: int # number of data points acquired from this stream for a given chunk - timestamps: longblob # (datetime) timestamps of BeamBreak data +class RfidReaderRfidEvents(dj.Imported): + definition = """ # Raw per-chunk RfidEvents data stream from RfidReader (auto-generated with aeon_mecha-unknown) + -> RfidReader + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of RfidEvents data + rfid: longblob + """ + + @property + def key_source(self): + f""" + Only the combination of Chunk and RfidReader with overlapping time + + Chunk(s) that started after RfidReader install time and ended before RfidReader remove time + + Chunk(s) that started after RfidReader install time for RfidReader that are not yet removed + """ + return ( + acquisition.Chunk * RfidReader.join(RfidReader.RemovalTime, left=True) + & 'chunk_start >= rfid_reader_install_time' + & 'chunk_start < IFNULL(rfid_reader_removal_time, "2200-01-01")' + ) + + def make(self, key): + chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( + "chunk_start", "chunk_end", "directory_type" + ) + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) + + device_name = (RfidReader & key).fetch1('rfid_reader_name') + + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + stream_reader = getattr(getattr(devices_schema, device_name), "RfidEvents") + + stream_data = io_api.load( + root=raw_data_dir.as_posix(), + reader=stream_reader, + start=pd.Timestamp(chunk_start), + end=pd.Timestamp(chunk_end), + ) + + self.insert1( + { + **key, + "sample_count": len(stream_data), + "timestamps": stream_data.index.values, + **{ + 
re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns + if not c.startswith("_") + }, + }, + ignore_extra_fields=True, + ) + + +@schema +class SpinnakerVideoSourceVideo(dj.Imported): + definition = """ # Raw per-chunk Video data stream from SpinnakerVideoSource (auto-generated with aeon_mecha-unknown) + -> SpinnakerVideoSource + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of Video data + hw_counter: longblob + hw_timestamp: longblob + """ + + @property + def key_source(self): + f""" + Only the combination of Chunk and SpinnakerVideoSource with overlapping time + + Chunk(s) that started after SpinnakerVideoSource install time and ended before SpinnakerVideoSource remove time + + Chunk(s) that started after SpinnakerVideoSource install time for SpinnakerVideoSource that are not yet removed """ - _stream_reader = aeon.io.reader.BitmaskEvent - _stream_detail = {'stream_type': 'BeamBreak', 'stream_reader': 'aeon.io.reader.BitmaskEvent', 'stream_reader_kwargs': {'pattern': '{pattern}_32', 'value': 34, 'tag': 'BeamBroken'}, 'stream_description': '', 'stream_hash': UUID('b14171e6-d27d-117a-ae73-a16c4b5fc8a2')} + return ( + acquisition.Chunk * SpinnakerVideoSource.join(SpinnakerVideoSource.RemovalTime, left=True) + & 'chunk_start >= spinnaker_video_source_install_time' + & 'chunk_start < IFNULL(spinnaker_video_source_removal_time, "2200-01-01")' + ) + + def make(self, key): + chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( + "chunk_start", "chunk_end", "directory_type" + ) + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) + + device_name = (SpinnakerVideoSource & key).fetch1('spinnaker_video_source_name') + + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + stream_reader = getattr(getattr(devices_schema, device_name), "Video") + + stream_data = io_api.load( + root=raw_data_dir.as_posix(), + reader=stream_reader, + start=pd.Timestamp(chunk_start), + end=pd.Timestamp(chunk_end), + ) + + self.insert1( + { + **key, + "sample_count": len(stream_data), + "timestamps": stream_data.index.values, + **{ + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns + if not c.startswith("_") + }, + }, + ignore_extra_fields=True, + ) + + +@schema +class UndergroundFeederBeamBreak(dj.Imported): + definition = """ # Raw per-chunk BeamBreak data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) + -> UndergroundFeeder + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of BeamBreak data + event: longblob + """ @property def key_source(self): @@ -135,8 +307,7 @@ def key_source(self): + Chunk(s) that started after UndergroundFeeder install time for UndergroundFeeder that are not yet removed """ return ( - acquisition.Chunk - * UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) + acquisition.Chunk * UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) & 'chunk_start >= underground_feeder_install_time' & 'chunk_start < IFNULL(underground_feeder_removal_time, "2200-01-01")' ) @@ -145,24 +316,21 @@ def make(self, key): chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( "chunk_start", "chunk_end", 
"directory_type" ) - raw_data_dir = acquisition.Experiment.get_data_directory( - key, directory_type=dir_type - ) + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) - device_name = (UndergroundFeeder & key).fetch1( - 'underground_feeder_name' - ) + device_name = (UndergroundFeeder & key).fetch1('underground_feeder_name') - stream = self._stream_reader( - **{ - k: v.format(**{k: device_name}) if k == "pattern" else v - for k, v in self._stream_detail["stream_reader_kwargs"].items() - } + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), ) + stream_reader = getattr(getattr(devices_schema, device_name), "BeamBreak") stream_data = io_api.load( root=raw_data_dir.as_posix(), - reader=stream, + reader=stream_reader, start=pd.Timestamp(chunk_start), end=pd.Timestamp(chunk_end), ) @@ -173,25 +341,25 @@ def make(self, key): "sample_count": len(stream_data), "timestamps": stream_data.index.values, **{ - c: stream_data[c].values - for c in stream.columns + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns if not c.startswith("_") }, - }, ignore_extra_fields=True + }, + ignore_extra_fields=True, ) @schema class UndergroundFeederDeliverPellet(dj.Imported): - definition = """# Raw per-chunk DeliverPellet data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) - -> UndergroundFeeder - -> acquisition.Chunk - --- - sample_count: int # number of data points acquired from this stream for a given chunk - timestamps: longblob # (datetime) timestamps of DeliverPellet data - """ - _stream_reader = aeon.io.reader.BitmaskEvent - _stream_detail = {'stream_type': 'DeliverPellet', 'stream_reader': 'aeon.io.reader.BitmaskEvent', 'stream_reader_kwargs': {'pattern': '{pattern}_35', 'value': 1, 'tag': 'TriggeredPellet'}, 'stream_description': '', 'stream_hash': UUID('c49dda51-2e38-8b49-d1d8-2e54ea928e9c')} + definition = """ # Raw per-chunk DeliverPellet data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) + -> UndergroundFeeder + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of DeliverPellet data + event: longblob + """ @property def key_source(self): @@ -201,8 +369,7 @@ def key_source(self): + Chunk(s) that started after UndergroundFeeder install time for UndergroundFeeder that are not yet removed """ return ( - acquisition.Chunk - * UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) + acquisition.Chunk * UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) & 'chunk_start >= underground_feeder_install_time' & 'chunk_start < IFNULL(underground_feeder_removal_time, "2200-01-01")' ) @@ -211,24 +378,21 @@ def make(self, key): chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( "chunk_start", "chunk_end", "directory_type" ) - raw_data_dir = acquisition.Experiment.get_data_directory( - key, directory_type=dir_type - ) + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) - device_name = (UndergroundFeeder & key).fetch1( - 'underground_feeder_name' - ) + device_name = (UndergroundFeeder & key).fetch1('underground_feeder_name') - stream = self._stream_reader( - **{ - k: v.format(**{k: device_name}) if k == "pattern" else v - for k, v in self._stream_detail["stream_reader_kwargs"].items() - } + devices_schema = 
getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), ) + stream_reader = getattr(getattr(devices_schema, device_name), "DeliverPellet") stream_data = io_api.load( root=raw_data_dir.as_posix(), - reader=stream, + reader=stream_reader, start=pd.Timestamp(chunk_start), end=pd.Timestamp(chunk_end), ) @@ -239,25 +403,27 @@ def make(self, key): "sample_count": len(stream_data), "timestamps": stream_data.index.values, **{ - c: stream_data[c].values - for c in stream.columns + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns if not c.startswith("_") }, - }, ignore_extra_fields=True + }, + ignore_extra_fields=True, ) @schema class UndergroundFeederDepletionState(dj.Imported): - definition = """# Raw per-chunk DepletionState data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) - -> UndergroundFeeder - -> acquisition.Chunk - --- - sample_count: int # number of data points acquired from this stream for a given chunk - timestamps: longblob # (datetime) timestamps of DepletionState data - """ - _stream_reader = aeon.schema.foraging._PatchState - _stream_detail = {'stream_type': 'DepletionState', 'stream_reader': 'aeon.schema.foraging._PatchState', 'stream_reader_kwargs': {'pattern': '{pattern}_State_*'}, 'stream_description': '', 'stream_hash': UUID('17c3e36f-3f2e-2494-bbd3-5cb9a23d3039')} + definition = """ # Raw per-chunk DepletionState data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) + -> UndergroundFeeder + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of DepletionState data + threshold: longblob + offset: longblob + rate: longblob + """ @property def key_source(self): @@ -267,8 +433,7 @@ def key_source(self): + Chunk(s) that started after UndergroundFeeder install time for UndergroundFeeder that are not yet removed """ return ( - acquisition.Chunk - * UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) + acquisition.Chunk * UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) & 'chunk_start >= underground_feeder_install_time' & 'chunk_start < IFNULL(underground_feeder_removal_time, "2200-01-01")' ) @@ -277,24 +442,21 @@ def make(self, key): chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( "chunk_start", "chunk_end", "directory_type" ) - raw_data_dir = acquisition.Experiment.get_data_directory( - key, directory_type=dir_type - ) + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) - device_name = (UndergroundFeeder & key).fetch1( - 'underground_feeder_name' - ) + device_name = (UndergroundFeeder & key).fetch1('underground_feeder_name') - stream = self._stream_reader( - **{ - k: v.format(**{k: device_name}) if k == "pattern" else v - for k, v in self._stream_detail["stream_reader_kwargs"].items() - } + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), ) + stream_reader = getattr(getattr(devices_schema, device_name), "DepletionState") stream_data = io_api.load( root=raw_data_dir.as_posix(), - reader=stream, + reader=stream_reader, start=pd.Timestamp(chunk_start), end=pd.Timestamp(chunk_end), ) @@ -305,25 +467,26 @@ def make(self, key): "sample_count": len(stream_data), "timestamps": stream_data.index.values, **{ - c: 
stream_data[c].values - for c in stream.columns + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns if not c.startswith("_") }, - }, ignore_extra_fields=True + }, + ignore_extra_fields=True, ) @schema class UndergroundFeederEncoder(dj.Imported): - definition = """# Raw per-chunk Encoder data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) - -> UndergroundFeeder - -> acquisition.Chunk - --- - sample_count: int # number of data points acquired from this stream for a given chunk - timestamps: longblob # (datetime) timestamps of Encoder data - """ - _stream_reader = aeon.io.reader.Encoder - _stream_detail = {'stream_type': 'Encoder', 'stream_reader': 'aeon.io.reader.Encoder', 'stream_reader_kwargs': {'pattern': '{pattern}_90_*'}, 'stream_description': '', 'stream_hash': UUID('f96b0b26-26f6-5ff6-b3c7-5aa5adc00c1a')} + definition = """ # Raw per-chunk Encoder data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) + -> UndergroundFeeder + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of Encoder data + angle: longblob + intensity: longblob + """ @property def key_source(self): @@ -333,8 +496,7 @@ def key_source(self): + Chunk(s) that started after UndergroundFeeder install time for UndergroundFeeder that are not yet removed """ return ( - acquisition.Chunk - * UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) + acquisition.Chunk * UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) & 'chunk_start >= underground_feeder_install_time' & 'chunk_start < IFNULL(underground_feeder_removal_time, "2200-01-01")' ) @@ -343,24 +505,21 @@ def make(self, key): chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( "chunk_start", "chunk_end", "directory_type" ) - raw_data_dir = acquisition.Experiment.get_data_directory( - key, directory_type=dir_type - ) + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) - device_name = (UndergroundFeeder & key).fetch1( - 'underground_feeder_name' - ) + device_name = (UndergroundFeeder & key).fetch1('underground_feeder_name') - stream = self._stream_reader( - **{ - k: v.format(**{k: device_name}) if k == "pattern" else v - for k, v in self._stream_detail["stream_reader_kwargs"].items() - } + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), ) + stream_reader = getattr(getattr(devices_schema, device_name), "Encoder") stream_data = io_api.load( root=raw_data_dir.as_posix(), - reader=stream, + reader=stream_reader, start=pd.Timestamp(chunk_start), end=pd.Timestamp(chunk_end), ) @@ -371,62 +530,58 @@ def make(self, key): "sample_count": len(stream_data), "timestamps": stream_data.index.values, **{ - c: stream_data[c].values - for c in stream.columns + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns if not c.startswith("_") }, - }, ignore_extra_fields=True + }, + ignore_extra_fields=True, ) @schema -class VideoSourcePosition(dj.Imported): - definition = """# Raw per-chunk Position data stream from VideoSource (auto-generated with aeon_mecha-unknown) - -> VideoSource - -> acquisition.Chunk - --- - sample_count: int # number of data points acquired from this stream for a given chunk - timestamps: longblob # (datetime) timestamps of Position data - """ - _stream_reader 
= aeon.io.reader.Position - _stream_detail = {'stream_type': 'Position', 'stream_reader': 'aeon.io.reader.Position', 'stream_reader_kwargs': {'pattern': '{pattern}_200_*'}, 'stream_description': '', 'stream_hash': UUID('d7727726-1f52-78e1-1355-b863350b6d03')} +class UndergroundFeederManualDelivery(dj.Imported): + definition = """ # Raw per-chunk ManualDelivery data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) + -> UndergroundFeeder + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of ManualDelivery data + manual_delivery: longblob + """ @property def key_source(self): f""" - Only the combination of Chunk and VideoSource with overlapping time - + Chunk(s) that started after VideoSource install time and ended before VideoSource remove time - + Chunk(s) that started after VideoSource install time for VideoSource that are not yet removed + Only the combination of Chunk and UndergroundFeeder with overlapping time + + Chunk(s) that started after UndergroundFeeder install time and ended before UndergroundFeeder remove time + + Chunk(s) that started after UndergroundFeeder install time for UndergroundFeeder that are not yet removed """ return ( - acquisition.Chunk - * VideoSource.join(VideoSource.RemovalTime, left=True) - & 'chunk_start >= video_source_install_time' - & 'chunk_start < IFNULL(video_source_removal_time, "2200-01-01")' + acquisition.Chunk * UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) + & 'chunk_start >= underground_feeder_install_time' + & 'chunk_start < IFNULL(underground_feeder_removal_time, "2200-01-01")' ) def make(self, key): chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( "chunk_start", "chunk_end", "directory_type" ) - raw_data_dir = acquisition.Experiment.get_data_directory( - key, directory_type=dir_type - ) + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) - device_name = (VideoSource & key).fetch1( - 'video_source_name' - ) + device_name = (UndergroundFeeder & key).fetch1('underground_feeder_name') - stream = self._stream_reader( - **{ - k: v.format(**{k: device_name}) if k == "pattern" else v - for k, v in self._stream_detail["stream_reader_kwargs"].items() - } + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), ) + stream_reader = getattr(getattr(devices_schema, device_name), "ManualDelivery") stream_data = io_api.load( root=raw_data_dir.as_posix(), - reader=stream, + reader=stream_reader, start=pd.Timestamp(chunk_start), end=pd.Timestamp(chunk_end), ) @@ -437,62 +592,120 @@ def make(self, key): "sample_count": len(stream_data), "timestamps": stream_data.index.values, **{ - c: stream_data[c].values - for c in stream.columns + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns if not c.startswith("_") }, - }, ignore_extra_fields=True + }, + ignore_extra_fields=True, ) @schema -class VideoSourceRegion(dj.Imported): - definition = """# Raw per-chunk Region data stream from VideoSource (auto-generated with aeon_mecha-unknown) - -> VideoSource - -> acquisition.Chunk - --- - sample_count: int # number of data points acquired from this stream for a given chunk - timestamps: longblob # (datetime) timestamps of Region data - """ - _stream_reader = aeon.schema.foraging._RegionReader - _stream_detail = {'stream_type': 
'Region', 'stream_reader': 'aeon.schema.foraging._RegionReader', 'stream_reader_kwargs': {'pattern': '{pattern}_201_*'}, 'stream_description': '', 'stream_hash': UUID('6c78b3ac-ffff-e2ab-c446-03e3adf4d80a')} +class UndergroundFeederMissedPellet(dj.Imported): + definition = """ # Raw per-chunk MissedPellet data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) + -> UndergroundFeeder + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of MissedPellet data + missed_pellet: longblob + """ @property def key_source(self): f""" - Only the combination of Chunk and VideoSource with overlapping time - + Chunk(s) that started after VideoSource install time and ended before VideoSource remove time - + Chunk(s) that started after VideoSource install time for VideoSource that are not yet removed + Only the combination of Chunk and UndergroundFeeder with overlapping time + + Chunk(s) that started after UndergroundFeeder install time and ended before UndergroundFeeder remove time + + Chunk(s) that started after UndergroundFeeder install time for UndergroundFeeder that are not yet removed """ return ( - acquisition.Chunk - * VideoSource.join(VideoSource.RemovalTime, left=True) - & 'chunk_start >= video_source_install_time' - & 'chunk_start < IFNULL(video_source_removal_time, "2200-01-01")' + acquisition.Chunk * UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) + & 'chunk_start >= underground_feeder_install_time' + & 'chunk_start < IFNULL(underground_feeder_removal_time, "2200-01-01")' ) def make(self, key): chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( "chunk_start", "chunk_end", "directory_type" ) - raw_data_dir = acquisition.Experiment.get_data_directory( - key, directory_type=dir_type + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) + + device_name = (UndergroundFeeder & key).fetch1('underground_feeder_name') + + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + stream_reader = getattr(getattr(devices_schema, device_name), "MissedPellet") + + stream_data = io_api.load( + root=raw_data_dir.as_posix(), + reader=stream_reader, + start=pd.Timestamp(chunk_start), + end=pd.Timestamp(chunk_end), + ) + + self.insert1( + { + **key, + "sample_count": len(stream_data), + "timestamps": stream_data.index.values, + **{ + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns + if not c.startswith("_") + }, + }, + ignore_extra_fields=True, + ) + + +@schema +class UndergroundFeederRetriedDelivery(dj.Imported): + definition = """ # Raw per-chunk RetriedDelivery data stream from UndergroundFeeder (auto-generated with aeon_mecha-unknown) + -> UndergroundFeeder + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of RetriedDelivery data + retried_delivery: longblob + """ + + @property + def key_source(self): + f""" + Only the combination of Chunk and UndergroundFeeder with overlapping time + + Chunk(s) that started after UndergroundFeeder install time and ended before UndergroundFeeder remove time + + Chunk(s) that started after UndergroundFeeder install time for UndergroundFeeder that are not yet removed + """ + return ( + acquisition.Chunk * 
UndergroundFeeder.join(UndergroundFeeder.RemovalTime, left=True) + & 'chunk_start >= underground_feeder_install_time' + & 'chunk_start < IFNULL(underground_feeder_removal_time, "2200-01-01")' ) - device_name = (VideoSource & key).fetch1( - 'video_source_name' + def make(self, key): + chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( + "chunk_start", "chunk_end", "directory_type" ) + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) - stream = self._stream_reader( - **{ - k: v.format(**{k: device_name}) if k == "pattern" else v - for k, v in self._stream_detail["stream_reader_kwargs"].items() - } + device_name = (UndergroundFeeder & key).fetch1('underground_feeder_name') + + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), ) + stream_reader = getattr(getattr(devices_schema, device_name), "RetriedDelivery") stream_data = io_api.load( root=raw_data_dir.as_posix(), - reader=stream, + reader=stream_reader, start=pd.Timestamp(chunk_start), end=pd.Timestamp(chunk_end), ) @@ -503,62 +716,122 @@ def make(self, key): "sample_count": len(stream_data), "timestamps": stream_data.index.values, **{ - c: stream_data[c].values - for c in stream.columns + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns if not c.startswith("_") }, - }, ignore_extra_fields=True + }, + ignore_extra_fields=True, ) @schema -class VideoSourceVideo(dj.Imported): - definition = """# Raw per-chunk Video data stream from VideoSource (auto-generated with aeon_mecha-unknown) - -> VideoSource - -> acquisition.Chunk - --- - sample_count: int # number of data points acquired from this stream for a given chunk - timestamps: longblob # (datetime) timestamps of Video data - """ - _stream_reader = aeon.io.reader.Video - _stream_detail = {'stream_type': 'Video', 'stream_reader': 'aeon.io.reader.Video', 'stream_reader_kwargs': {'pattern': '{pattern}_*'}, 'stream_description': '', 'stream_hash': UUID('f51c6174-e0c4-a888-3a9d-6f97fb6a019b')} +class WeightScaleWeightFiltered(dj.Imported): + definition = """ # Raw per-chunk WeightFiltered data stream from WeightScale (auto-generated with aeon_mecha-unknown) + -> WeightScale + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of WeightFiltered data + weight: longblob + stability: longblob + """ @property def key_source(self): f""" - Only the combination of Chunk and VideoSource with overlapping time - + Chunk(s) that started after VideoSource install time and ended before VideoSource remove time - + Chunk(s) that started after VideoSource install time for VideoSource that are not yet removed + Only the combination of Chunk and WeightScale with overlapping time + + Chunk(s) that started after WeightScale install time and ended before WeightScale remove time + + Chunk(s) that started after WeightScale install time for WeightScale that are not yet removed """ return ( - acquisition.Chunk - * VideoSource.join(VideoSource.RemovalTime, left=True) - & 'chunk_start >= video_source_install_time' - & 'chunk_start < IFNULL(video_source_removal_time, "2200-01-01")' + acquisition.Chunk * WeightScale.join(WeightScale.RemovalTime, left=True) + & 'chunk_start >= weight_scale_install_time' + & 'chunk_start < IFNULL(weight_scale_removal_time, "2200-01-01")' ) def make(self, key): chunk_start, 
chunk_end, dir_type = (acquisition.Chunk & key).fetch1( "chunk_start", "chunk_end", "directory_type" ) - raw_data_dir = acquisition.Experiment.get_data_directory( - key, directory_type=dir_type + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) + + device_name = (WeightScale & key).fetch1('weight_scale_name') + + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), ) + stream_reader = getattr(getattr(devices_schema, device_name), "WeightFiltered") - device_name = (VideoSource & key).fetch1( - 'video_source_name' + stream_data = io_api.load( + root=raw_data_dir.as_posix(), + reader=stream_reader, + start=pd.Timestamp(chunk_start), + end=pd.Timestamp(chunk_end), + ) + + self.insert1( + { + **key, + "sample_count": len(stream_data), + "timestamps": stream_data.index.values, + **{ + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns + if not c.startswith("_") + }, + }, + ignore_extra_fields=True, + ) + + +@schema +class WeightScaleWeightRaw(dj.Imported): + definition = """ # Raw per-chunk WeightRaw data stream from WeightScale (auto-generated with aeon_mecha-unknown) + -> WeightScale + -> acquisition.Chunk + --- + sample_count: int # number of data points acquired from this stream for a given chunk + timestamps: longblob # (datetime) timestamps of WeightRaw data + weight: longblob + stability: longblob + """ + + @property + def key_source(self): + f""" + Only the combination of Chunk and WeightScale with overlapping time + + Chunk(s) that started after WeightScale install time and ended before WeightScale remove time + + Chunk(s) that started after WeightScale install time for WeightScale that are not yet removed + """ + return ( + acquisition.Chunk * WeightScale.join(WeightScale.RemovalTime, left=True) + & 'chunk_start >= weight_scale_install_time' + & 'chunk_start < IFNULL(weight_scale_removal_time, "2200-01-01")' ) - stream = self._stream_reader( - **{ - k: v.format(**{k: device_name}) if k == "pattern" else v - for k, v in self._stream_detail["stream_reader_kwargs"].items() - } + def make(self, key): + chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( + "chunk_start", "chunk_end", "directory_type" + ) + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) + + device_name = (WeightScale & key).fetch1('weight_scale_name') + + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), ) + stream_reader = getattr(getattr(devices_schema, device_name), "WeightRaw") stream_data = io_api.load( root=raw_data_dir.as_posix(), - reader=stream, + reader=stream_reader, start=pd.Timestamp(chunk_start), end=pd.Timestamp(chunk_end), ) @@ -569,11 +842,12 @@ def make(self, key): "sample_count": len(stream_data), "timestamps": stream_data.index.values, **{ - c: stream_data[c].values - for c in stream.columns + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns if not c.startswith("_") }, - }, ignore_extra_fields=True + }, + ignore_extra_fields=True, ) diff --git a/aeon/dj_pipeline/tracking.py b/aeon/dj_pipeline/tracking.py index cf399805..0e3b4ec2 100644 --- a/aeon/dj_pipeline/tracking.py +++ b/aeon/dj_pipeline/tracking.py @@ -7,6 +7,7 @@ from aeon.dj_pipeline import acquisition, dict_to_uuid, get_schema_name, lab, qc, streams from aeon.io 
import api as io_api +from aeon.schema import schemas as aeon_schemas schema = dj.schema(get_schema_name("tracking")) @@ -226,88 +227,160 @@ def get_object_position( @schema -class VideoSourceTracking(dj.Imported): +class SLEAPTracking(dj.Imported): definition = """ # Tracked objects position data from a particular VideoSource for multi-animal experiment using the SLEAP tracking method per chunk -> acquisition.Chunk - -> streams.VideoSource + -> streams.SpinnakerVideoSource -> TrackingParamSet + --- + sample_count: int # number of data points acquired from this stream for a given chunk """ - class Point(dj.Part): + class PoseIdentity(dj.Part): definition = """ -> master - point_name: varchar(16) + identity_idx: smallint --- - point_x: longblob - point_y: longblob - point_likelihood: longblob + identity_name: varchar(16) + identity_likelihood: longblob + anchor_part: varchar(16) # the name of the point used as anchor node for this class """ - class Pose(dj.Part): + class Part(dj.Part): definition = """ - -> master - pose_name: varchar(16) - class: smallint + -> master.PoseIdentity + part_name: varchar(16) --- - class_likelihood: longblob - centroid_x: longblob - centroid_y: longblob - centroid_likelihood: longblob - pose_timestamps: longblob - point_collection=null: varchar(1000) # List of point names - """ - - class PointCollection(dj.Part): - definition = """ - -> master.Pose - -> master.Point + x: longblob + y: longblob + likelihood: longblob + timestamps: longblob """ @property def key_source(self): return ( - (acquisition.Chunk & "experiment_name='multianimal'") - * (streams.VideoSourcePosition & (streams.VideoSource & "video_source_name='CameraTop'")) + acquisition.Chunk + * ( + streams.SpinnakerVideoSource.join(streams.SpinnakerVideoSource.RemovalTime, left=True) + & "spinnaker_video_source_name='CameraTop'" + ) * (TrackingParamSet & "tracking_paramset_id = 1") + & "chunk_start >= spinnaker_video_source_install_time" + & 'chunk_start < IFNULL(spinnaker_video_source_removal_time, "2200-01-01")' ) # SLEAP & CameraTop def make(self, key): - from aeon.schema.social import Pose - - # chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( - # "chunk_start", "chunk_end", "directory_type" - # ) - # raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) - # This needs to be modified later - sleap_reader = Pose( - pattern="", - columns=["class", "class_confidence", "centroid_x", "centroid_y", "centroid_confidence"], + chunk_start, chunk_end, dir_type = (acquisition.Chunk & key).fetch1( + "chunk_start", "chunk_end", "directory_type" ) - tracking_file_path = "/ceph/aeon/aeon/data/processed/test-node1/1234567/2023-08-10T18-31-00/macentroid/test-node1_1234567_2023-08-10T18-31-00_macentroid.bin" # temp file path for testing + raw_data_dir = acquisition.Experiment.get_data_directory(key, directory_type=dir_type) - tracking_df = sleap_reader.read(Path(tracking_file_path)) + device_name = (streams.SpinnakerVideoSource & key).fetch1("spinnaker_video_source_name") - pose_list = [] - for part_name in ["body"]: - for class_id in tracking_df["class"].unique(): - class_df = tracking_df[tracking_df["class"] == class_id] + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), + ) + stream_reader = getattr(getattr(devices_schema, device_name), "Pose") - pose_list.append( + pose_data = io_api.load( + root=raw_data_dir.as_posix(), + reader=stream_reader, + 
start=pd.Timestamp(chunk_start), + end=pd.Timestamp(chunk_end), + ) + + if not len(pose_data): + self.insert1({**key, "sample_count": 0}) + return + + # Find the config file for the SLEAP model + try: + f = next( + raw_data_dir.glob( + f"**/**/{stream_reader.pattern}{io_api.chunk(chunk_start).strftime('%Y-%m-%dT%H-%M-%S')}*.{stream_reader.extension}" + ) + ) + except StopIteration: + raise FileNotFoundError(f"Unable to find HARP bin file for {key}") + else: + config_file = stream_reader.get_config_file( + stream_reader._model_root / Path(*Path(f.stem.replace("_", "/")).parent.parts[1:]) + ) + + # get bodyparts and classes + bodyparts = stream_reader.get_bodyparts(config_file) + anchor_part = bodyparts[0] # anchor_part is always the first one + class_names = stream_reader.get_class_names(config_file) + + # ingest parts and classes + sample_count = 0 + class_entries, part_entries = [], [] + for class_idx in set(pose_data["class"].values.astype(int)): + class_position = pose_data[pose_data["class"] == class_idx] + for part in set(class_position.part.values): + part_position = class_position[class_position.part == part] + part_entries.append( { **key, - "pose_name": part_name, - "class": class_id, - "class_likelihood": class_df["class_likelihood"].values, - "centroid_x": class_df["x"].values, - "centroid_y": class_df["y"].values, - "centroid_likelihood": class_df["part_likelihood"].values, - "pose_timestamps": class_df.index.values, - "point_collection": "", + "identity_idx": class_idx, + "part_name": part, + "timestamps": part_position.index.values, + "x": part_position.x.values, + "y": part_position.y.values, + "likelihood": part_position.part_likelihood.values, } ) + if part == anchor_part: + class_likelihood = part_position.class_likelihood.values + sample_count = len(part_position.index.values) + class_entries.append( + { + **key, + "identity_idx": class_idx, + "identity_name": class_names[class_idx], + "anchor_part": anchor_part, + "identity_likelihood": class_likelihood, + } + ) - self.insert1(key) - self.Pose.insert(pose_list) + self.insert1({**key, "sample_count": sample_count}) + self.Class.insert(class_entries) + self.Part.insert(part_entries) + + @classmethod + def get_object_position( + cls, + experiment_name, + subject_name, + start, + end, + camera_name="CameraTop", + tracking_paramset_id=1, + in_meter=False, + ): + table = ( + cls.Class.proj(part_name="anchor_part") * cls.Part * acquisition.Chunk.proj("chunk_end") + & {"experiment_name": experiment_name} + & {"tracking_paramset_id": tracking_paramset_id} + & (streams.SpinnakerVideoSource & {"spinnaker_video_source_name": camera_name}) + ) + + return _get_position( + table, + object_attr="class_name", + object_name=subject_name, + start_attr="chunk_start", + end_attr="chunk_end", + start=start, + end=end, + fetch_attrs=["timestamps", "x", "y", "likelihood"], + attrs_to_scale=["position_x", "position_y"], + scale_factor=pixel_scale if in_meter else 1, + ) # ---------- HELPER ------------------ diff --git a/aeon/dj_pipeline/utils/device_type_mapper.json b/aeon/dj_pipeline/utils/device_type_mapper.json new file mode 100644 index 00000000..7f041bd5 --- /dev/null +++ b/aeon/dj_pipeline/utils/device_type_mapper.json @@ -0,0 +1 @@ +{"ClockSynchronizer": "TimestampGenerator", "VideoController": "CameraController", "CameraTop": "SpinnakerVideoSource", "CameraWest": "SpinnakerVideoSource", "CameraEast": "SpinnakerVideoSource", "CameraNorth": "SpinnakerVideoSource", "CameraSouth": "SpinnakerVideoSource", "CameraNest": 
"SpinnakerVideoSource", "CameraPatch1": "SpinnakerVideoSource", "CameraPatch2": "SpinnakerVideoSource", "CameraPatch3": "SpinnakerVideoSource", "AudioAmbient": "AudioSource", "Patch1": "UndergroundFeeder", "Patch2": "UndergroundFeeder", "Patch3": "UndergroundFeeder", "Nest": "WeightScale", "RfidNest1": "RfidReader", "RfidNest2": "RfidReader", "RfidGate": "RfidReader", "RfidPatch1": "RfidReader", "RfidPatch2": "RfidReader", "RfidPatch3": "RfidReader", "LightCycle": "EnvironmentCondition"} \ No newline at end of file diff --git a/aeon/dj_pipeline/utils/load_metadata.py b/aeon/dj_pipeline/utils/load_metadata.py index 22ec4478..24289f0f 100644 --- a/aeon/dj_pipeline/utils/load_metadata.py +++ b/aeon/dj_pipeline/utils/load_metadata.py @@ -17,31 +17,16 @@ logger = dj.logger _weight_scale_rate = 100 _weight_scale_nest = 1 -_colony_csv_path = pathlib.Path("/ceph/aeon/aeon/colony/colony.csv") - - -def ingest_subject(colony_csv_path: pathlib.Path = _colony_csv_path) -> None: - """Ingest subject information from the colony.csv file.""" - logger.warning("The use of 'colony.csv' is deprecated starting Nov 2023", DeprecationWarning) - - colony_df = pd.read_csv(colony_csv_path, skiprows=[1, 2]) - colony_df.rename(columns={"Id": "subject"}, inplace=True) - colony_df["sex"] = "U" - colony_df["subject_birth_date"] = "2021-01-01" - colony_df["subject_description"] = "" - subject.Subject.insert(colony_df, skip_duplicates=True, ignore_extra_fields=True) - acquisition.Experiment.Subject.insert( - (subject.Subject * acquisition.Experiment).proj(), skip_duplicates=True - ) +_aeon_schemas = ["social01"] def insert_stream_types(): - """Insert into streams.streamType table all streams in the dataset schema.""" - from aeon.schema import dataset + """Insert into streams.streamType table all streams in the aeon schemas.""" + from aeon.schema import schemas as aeon_schemas streams = dj.VirtualModule("streams", streams_maker.schema_name) - schemas = [v for v in dataset.__dict__.values() if isinstance(v, DotMap)] + schemas = [getattr(aeon_schemas, aeon_schema) for aeon_schema in _aeon_schemas] for schema in schemas: stream_entries = get_stream_entries(schema) @@ -49,20 +34,26 @@ def insert_stream_types(): q_param = streams.StreamType & {"stream_hash": entry["stream_hash"]} if q_param: # If the specified stream type already exists pname = q_param.fetch1("stream_type") - if pname != entry["stream_type"]: + if pname == entry["stream_type"]: + continue + else: # If the existed stream type does not have the same name: # human error, trying to add the same content with different name raise dj.DataJointError(f"The specified stream type already exists - name: {pname}") - - streams.StreamType.insert(stream_entries, skip_duplicates=True) + else: + streams.StreamType.insert1(entry) -def insert_device_types(schema: DotMap, metadata_yml_filepath: Path): - """Use dataset.schema and metadata.yml to insert into streams.DeviceType and streams.Device. Only insert device types that were defined both in the device schema (e.g., exp02) and Metadata.yml. It then creates new device tables under streams schema.""" +def insert_device_types(device_schema: DotMap, metadata_yml_filepath: Path): + """ + Use aeon.schema.schemas and metadata.yml to insert into streams.DeviceType and streams.Device. + Only insert device types that were defined both in the device schema (e.g., exp02) and Metadata.yml. + It then creates new device tables under streams schema. 
+ """ streams = dj.VirtualModule("streams", streams_maker.schema_name) - device_info: dict[dict] = get_device_info(schema) - device_type_mapper, device_sn = get_device_mapper(schema, metadata_yml_filepath) + device_info: dict[dict] = get_device_info(device_schema) + device_type_mapper, device_sn = get_device_mapper(device_schema, metadata_yml_filepath) # Add device type to device_info. Only add if device types that are defined in Metadata.yml device_info = { @@ -126,7 +117,7 @@ def insert_device_types(schema: DotMap, metadata_yml_filepath: Path): streams.Device.insert(new_devices) -def extract_epoch_config(experiment_name: str, metadata_yml_filepath: str) -> dict: +def extract_epoch_config(experiment_name: str, devices_schema, metadata_yml_filepath: str) -> dict: """Parse experiment metadata YAML file and extract epoch configuration. Args: @@ -141,7 +132,7 @@ def extract_epoch_config(experiment_name: str, metadata_yml_filepath: str) -> di epoch_config: dict = ( io_api.load( str(metadata_yml_filepath.parent), - acquisition._device_schema_mapping[experiment_name].Metadata, + devices_schema.Metadata, ) .reset_index() .to_dict("records")[0] @@ -157,7 +148,8 @@ def extract_epoch_config(experiment_name: str, metadata_yml_filepath: str) -> di json.dumps(epoch_config["metadata"]["Devices"], default=lambda x: x.__dict__, indent=4) ) - if isinstance(devices, list): # In exp02, it is a list of dict. In presocial. It's a dict of dict. + # Maintain backward compatibility - In exp02, it is a list of dict. From presocial onward, it's a dict of dict. + if isinstance(devices, list): devices: dict = {d.pop("Name"): d for d in devices} # {deivce_name: device_config} return { @@ -170,7 +162,7 @@ def extract_epoch_config(experiment_name: str, metadata_yml_filepath: str) -> di } -def ingest_epoch_metadata(experiment_name, metadata_yml_filepath): +def ingest_epoch_metadata(experiment_name, devices_schema, metadata_yml_filepath): """Make entries into device tables.""" streams = dj.VirtualModule("streams", streams_maker.schema_name) @@ -180,7 +172,7 @@ def ingest_epoch_metadata(experiment_name, metadata_yml_filepath): experiment_key = {"experiment_name": experiment_name} metadata_yml_filepath = pathlib.Path(metadata_yml_filepath) - epoch_config = extract_epoch_config(experiment_name, metadata_yml_filepath) + epoch_config = extract_epoch_config(experiment_name, devices_schema, metadata_yml_filepath) previous_epoch = (acquisition.Experiment & experiment_key).aggr( acquisition.Epoch & f'epoch_start < "{epoch_config["epoch_start"]}"', @@ -192,10 +184,10 @@ def ingest_epoch_metadata(experiment_name, metadata_yml_filepath): # if identical commit -> no changes return - schema = acquisition._device_schema_mapping[experiment_name] - device_type_mapper, _ = get_device_mapper(schema, metadata_yml_filepath) + device_type_mapper, _ = get_device_mapper(devices_schema, metadata_yml_filepath) # Insert into each device table + epoch_device_types = [] device_list = [] device_removal_list = [] @@ -204,7 +196,16 @@ def ingest_epoch_metadata(experiment_name, metadata_yml_filepath): device_sn = device_config.get("SerialNumber", device_config.get("PortName")) device_key = {"device_serial_number": device_sn} + if not (streams.Device & device_key): + logger.warning( + f"Device {device_name} (serial number: {device_sn}) is not yet registered in streams.Device. Skipping..." 
+ ) + # skip if this device (with a serial number) is not yet inserted in streams.Device + continue + device_list.append(device_key) + epoch_device_types.append(table.__name__) + table_entry = { "experiment_name": experiment_name, **device_key, @@ -221,7 +222,7 @@ def ingest_epoch_metadata(experiment_name, metadata_yml_filepath): for attribute_name, attribute_value in device_config.items() ] - """Check if this camera is currently installed. If the same camera serial number is currently installed check for any changes in configuration. If not, skip this""" + """Check if this device is currently installed. If the same device serial number is currently installed check for any changes in configuration. If not, skip this""" current_device_query = table - table.RemovalTime & experiment_key & device_key if current_device_query: @@ -256,6 +257,7 @@ def ingest_epoch_metadata(experiment_name, metadata_yml_filepath): ], } ) + epoch_device_types.remove(table.__name__) # Insert into table. table.insert1(table_entry, skip_duplicates=True) @@ -277,6 +279,8 @@ def ingest_epoch_metadata(experiment_name, metadata_yml_filepath): if device_removal(device_type, device_entry): table.RemovalTime.insert1(device_entry) + return set(epoch_device_types) + # region Get stream & device information def get_stream_entries(schema: DotMap) -> list[dict]: @@ -354,6 +358,7 @@ def _get_class_path(obj): "aeon.io.reader", "aeon.schema.foraging", "aeon.schema.octagon", + "aeon.schema.social", ]: device_info[device_name]["stream_type"].append(stream_type) device_info[device_name]["stream_reader"].append(_get_class_path(stream_obj)) @@ -418,7 +423,7 @@ def get_device_mapper(schema: DotMap, metadata_yml_filepath: Path): ) # Store the mapper dictionary here - filename = Path(__file__).parent.parent / "create_experiments/device_type_mapper.json" + filename = Path(__file__).parent.parent / "utils/device_type_mapper.json" device_type_mapper = {} # {device_name: device_type} device_sn = {} # {device_name: device_sn} diff --git a/aeon/dj_pipeline/utils/streams_maker.py b/aeon/dj_pipeline/utils/streams_maker.py index 20c58880..d333387e 100644 --- a/aeon/dj_pipeline/utils/streams_maker.py +++ b/aeon/dj_pipeline/utils/streams_maker.py @@ -9,6 +9,7 @@ import aeon from aeon.dj_pipeline import acquisition, get_schema_name from aeon.io import api as io_api +from aeon.schema import schemas as aeon_schemas logger = dj.logger @@ -108,6 +109,10 @@ def get_device_stream_template(device_type: str, stream_type: str, streams_modul for i, n in enumerate(stream_detail["stream_reader"].split(".")): reader = aeon if i == 0 else getattr(reader, n) + if reader is aeon.io.reader.Pose: + logger.warning("Automatic generation of stream table for Pose reader is not supported. 
Skipping...") + return None, None + stream = reader(**stream_detail["stream_reader_kwargs"]) table_definition = f""" # Raw per-chunk {stream_type} data stream from {device_type} (auto-generated with aeon_mecha-{aeon.__version__}) @@ -121,12 +126,11 @@ def get_device_stream_template(device_type: str, stream_type: str, streams_modul for col in stream.columns: if col.startswith("_"): continue + col = re.sub(r"\([^)]*\)", "", col) table_definition += f"{col}: longblob\n " class DeviceDataStream(dj.Imported): definition = table_definition - _stream_reader = reader - _stream_detail = stream_detail @property def key_source(self): @@ -149,16 +153,17 @@ def make(self, key): device_name = (ExperimentDevice & key).fetch1(f"{dj.utils.from_camel_case(device_type)}_name") - stream = self._stream_reader( - **{ - k: v.format(**{k: device_name}) if k == "pattern" else v - for k, v in self._stream_detail["stream_reader_kwargs"].items() - } + devices_schema = getattr( + aeon_schemas, + (acquisition.Experiment.DevicesSchema & {"experiment_name": key["experiment_name"]}).fetch1( + "devices_schema_name" + ), ) + stream_reader = getattr(getattr(devices_schema, device_name), "{stream_type}") stream_data = io_api.load( root=raw_data_dir.as_posix(), - reader=stream, + reader=stream_reader, start=pd.Timestamp(chunk_start), end=pd.Timestamp(chunk_end), ) @@ -168,7 +173,11 @@ def make(self, key): **key, "sample_count": len(stream_data), "timestamps": stream_data.index.values, - **{c: stream_data[c].values for c in stream.columns if not c.startswith("_")}, + **{ + re.sub(r"\([^)]*\)", "", c): stream_data[c].values + for c in stream_reader.columns + if not c.startswith("_") + }, }, ignore_extra_fields=True, ) @@ -187,12 +196,14 @@ def main(create_tables=True): imports_str = ( "#---- DO NOT MODIFY ----\n" "#---- THIS FILE IS AUTO-GENERATED BY `streams_maker.py` ----\n\n" + "import re\n" "import datajoint as dj\n" "import pandas as pd\n" "from uuid import UUID\n\n" "import aeon\n" "from aeon.dj_pipeline import acquisition, get_schema_name\n" - "from aeon.io import api as io_api\n\n" + "from aeon.io import api as io_api\n" + "from aeon.schema import schemas as aeon_schemas\n\n" 'schema = dj.Schema(get_schema_name("streams"))\n\n\n' ) f.write(imports_str) @@ -244,9 +255,8 @@ def main(create_tables=True): device_type, stream_type, streams_module=streams ) - stream_obj = table_class.__dict__["_stream_reader"] - reader = stream_obj.__module__ + "." 
+ stream_obj.__name__ - stream_detail = table_class.__dict__["_stream_detail"] + if table_class is None: + continue device_stream_table_def = inspect.getsource(table_class).lstrip() @@ -267,17 +277,6 @@ def main(create_tables=True): for old, new in replacements.items(): device_stream_table_def = device_stream_table_def.replace(old, new) - device_stream_table_def = re.sub( - r"_stream_reader\s*=\s*reader", - f"_stream_reader = {reader}", - device_stream_table_def, - ) # insert reader - device_stream_table_def = re.sub( - r"_stream_detail\s*=\s*stream_detail", - f"_stream_detail = {stream_detail}", - device_stream_table_def, - ) # insert stream details - full_def = "@schema \n" + device_stream_table_def + "\n\n" with open(_STREAMS_MODULE_FILE) as f: diff --git a/aeon/dj_pipeline/webapps/sciviz/docker-compose-remote.yaml b/aeon/dj_pipeline/webapps/sciviz/docker-compose-remote.yaml index a8509e13..35742345 100644 --- a/aeon/dj_pipeline/webapps/sciviz/docker-compose-remote.yaml +++ b/aeon/dj_pipeline/webapps/sciviz/docker-compose-remote.yaml @@ -7,7 +7,7 @@ services: pharus: # cpus: 2.0 mem_limit: 16g - image: jverswijver/pharus:0.8.5-PY_VER-3.9 + image: datajoint/pharus:0.8.10-py3.9 environment: # - FLASK_ENV=development # enables logging to console from Flask - PHARUS_SPEC_PATH=/main/specsheet.yaml # for dynamic utils spec @@ -33,7 +33,7 @@ services: sci-viz: cpus: 2.0 mem_limit: 4g - image: jverswijver/sci-viz:2.3.3-hotfix3 + image: jverswijver/sci-viz:2.3.4 environment: - CHOKIDAR_USEPOLLING=true - REACT_APP_DJSCIVIZ_BACKEND_PREFIX=/api diff --git a/aeon/io/api.py b/aeon/io/api.py index 2b9bc745..5c16159f 100644 --- a/aeon/io/api.py +++ b/aeon/io/api.py @@ -115,7 +115,8 @@ def load(root, reader, start=None, end=None, time=None, tolerance=None, epoch=No # to fill missing values previous = reader.read(files[i - 1]) data = pd.concat([previous, frame]) - data = data.reindex(values, method="pad", tolerance=tolerance) + data = data.reindex(values, tolerance=tolerance) + data.dropna(inplace=True) else: data.drop(columns="time", inplace=True) dataframes.append(data) diff --git a/aeon/io/device.py b/aeon/io/device.py index 1a4916e6..e8e5cf0f 100644 --- a/aeon/io/device.py +++ b/aeon/io/device.py @@ -1,41 +1,40 @@ import inspect -def compositeStream(pattern, *args): - """Merges multiple data streams into a single composite stream.""" - composite = {} +def register(pattern, *args): + """Merges multiple Readers into a single registry.""" + registry = {} if args: - for stream in args: - if inspect.isclass(stream): - for method in vars(stream).values(): + for binder_fn in args: + if inspect.isclass(binder_fn): + for method in vars(binder_fn).values(): if isinstance(method, staticmethod): - composite.update(method.__func__(pattern)) + registry.update(method.__func__(pattern)) else: - composite.update(stream(pattern)) - return composite + registry.update(binder_fn(pattern)) + return registry class Device: - """Groups multiple data streams into a logical device. + """Groups multiple Readers into a logical device. - If a device contains a single stream with the same pattern as the device - `name`, it will be considered a singleton, and the stream reader will be - paired directly with the device without nesting. + If a device contains a single stream reader with the same pattern as the device `name`, it will be + considered a singleton, and the stream reader will be paired directly with the device without nesting. Attributes: name (str): Name of the device. 
- args (Any): Data streams collected from the device. + args (any): A binder function or class that returns a dictionary of Readers. pattern (str, optional): Pattern used to find raw chunk files, usually in the format `_`. """ def __init__(self, name, *args, pattern=None): self.name = name - self.stream = compositeStream(name if pattern is None else pattern, *args) + self.registry = register(name if pattern is None else pattern, *args) def __iter__(self): - if len(self.stream) == 1: - singleton = self.stream.get(self.name, None) + if len(self.registry) == 1: + singleton = self.registry.get(self.name, None) if singleton: return iter((self.name, singleton)) - return iter((self.name, self.stream)) + return iter((self.name, self.registry)) diff --git a/aeon/io/reader.py b/aeon/io/reader.py index 67570608..44aece5c 100644 --- a/aeon/io/reader.py +++ b/aeon/io/reader.py @@ -6,8 +6,10 @@ import numpy as np import pandas as pd from dotmap import DotMap +from pathlib import Path from aeon.io.api import chunk_key +from aeon import util _SECONDS_PER_TICK = 32e-6 _payloadtypes = { @@ -212,7 +214,7 @@ def read(self, file): specified unique identifier. """ data = super().read(file) - data = data[data.event & self.value > 0] + data = data[(data.event & self.value) == self.value] data["event"] = self.tag return data @@ -259,6 +261,129 @@ def read(self, file): return data +class Pose(Harp): + """Reader for Harp-binarized tracking data given a model that outputs id, parts, and likelihoods. + + Columns: + class (int): Int ID of a subject in the environment. + class_likelihood (float): Likelihood of the subject's identity. + part (str): Bodypart on the subject. + part_likelihood (float): Likelihood of the specified bodypart. + x (float): X-coordinate of the bodypart. + y (float): Y-coordinate of the bodypart. + """ + + def __init__(self, pattern: str, model_root: str = "/ceph/aeon/aeon/data/processed"): + """Pose reader constructor.""" + # `pattern` for this reader should typically be '_*' + super().__init__(pattern, columns=None) + self._model_root = Path(model_root) + + def read(self, file: Path) -> pd.DataFrame: + """Reads data from the Harp-binarized tracking file.""" + # Get config file from `file`, then bodyparts from config file. + model_dir = Path(*Path(file.stem.replace("_", "/")).parent.parts[1:]) + config_file_dir = self._model_root / model_dir + if not config_file_dir.exists(): + raise FileNotFoundError(f"Cannot find model dir {config_file_dir}") + config_file = self.get_config_file(config_file_dir) + parts = self.get_bodyparts(config_file) + + # Using bodyparts, assign column names to Harp register values, and read data in default format. + columns = ["class", "class_likelihood"] + for part in parts: + columns.extend([f"{part}_x", f"{part}_y", f"{part}_likelihood"]) + self.columns = columns + data = super().read(file) + + # Drop any repeat parts. + unique_parts, unique_idxs = np.unique(parts, return_index=True) + repeat_idxs = np.setdiff1d(np.arange(len(parts)), unique_idxs) + if repeat_idxs: # drop x, y, and likelihood cols for repeat parts (skip first 5 cols) + init_rep_part_col_idx = (repeat_idxs - 1) * 3 + 5 + rep_part_col_idxs = np.concatenate([np.arange(i, i + 3) for i in init_rep_part_col_idx]) + keep_part_col_idxs = np.setdiff1d(np.arange(len(data.columns)), rep_part_col_idxs) + data = data.iloc[:, keep_part_col_idxs] + parts = unique_parts + + # Set new columns, and reformat `data`. 
+ n_parts = len(parts) + part_data_list = [pd.DataFrame()] * n_parts + new_columns = ["class", "class_likelihood", "part", "x", "y", "part_likelihood"] + new_data = pd.DataFrame(columns=new_columns) + for i, part in enumerate(parts): + part_columns = ["class", "class_likelihood", f"{part}_x", f"{part}_y", f"{part}_likelihood"] + part_data = pd.DataFrame(data[part_columns]) + part_data.insert(2, "part", part) + part_data.columns = new_columns + part_data_list[i] = part_data + new_data = pd.concat(part_data_list) + return new_data.sort_index() + + def get_class_names(self, file: Path) -> list[str]: + """Returns a list of classes from a model's config file.""" + classes = None + with open(file) as f: + config = json.load(f) + if file.stem == "confmap_config": # SLEAP + try: + heads = config["model"]["heads"] + classes = util.find_nested_key(heads, "class_vectors")["classes"] + except KeyError as err: + if not classes: + raise KeyError(f"Cannot find class_vectors in {file}.") from err + return classes + + def get_bodyparts(self, file: Path) -> list[str]: + """Returns a list of bodyparts from a model's config file.""" + parts = [] + with open(file) as f: + config = json.load(f) + if file.stem == "confmap_config": # SLEAP + try: + heads = config["model"]["heads"] + parts = [util.find_nested_key(heads, "anchor_part")] + parts += util.find_nested_key(heads, "part_names") + except KeyError as err: + if not parts: + raise KeyError(f"Cannot find bodyparts in {file}.") from err + return parts + + @classmethod + def get_config_file( + cls, + config_file_dir: Path, + config_file_names: None | list[str] = None, + ) -> Path: + """Returns the config file from a model's config directory.""" + if config_file_names is None: + config_file_names = ["confmap_config.json"] # SLEAP (add for other trackers to this list) + config_file = None + for f in config_file_names: + if (config_file_dir / f).exists(): + config_file = config_file_dir / f + break + if config_file is None: + raise FileNotFoundError(f"Cannot find config file in {config_file_dir}") + return config_file + + @classmethod + def class_int2str(cls, data: pd.DataFrame, config_file_dir: Path) -> pd.DataFrame: + """Converts a class integer in a tracking data dataframe to its associated string (subject id).""" + config_file = cls.get_config_file(config_file_dir) + if config_file.stem == "confmap_config": # SLEAP + with open(config_file) as f: + config = json.load(f) + try: + heads = config["model"]["heads"] + classes = util.find_nested_key(heads, "classes") + except KeyError as err: + raise KeyError(f"Cannot find classes in {config_file}.") from err + for i, subj in enumerate(classes): + data.loc[data["class"] == i, "class"] = subj + return data + + def from_dict(data, pattern=None): reader_type = data.get("type", None) if reader_type is not None: diff --git a/aeon/schema/core.py b/aeon/schema/core.py index 8181c710..cf60dc46 100644 --- a/aeon/schema/core.py +++ b/aeon/schema/core.py @@ -24,7 +24,7 @@ def encoder(pattern): def environment(pattern): """Metadata for environment mode and subjects.""" - return _device.compositeStream(pattern, environment_state, subject_state) + return _device.register(pattern, environment_state, subject_state) def environment_state(pattern): @@ -37,7 +37,7 @@ def subject_state(pattern): return {"SubjectState": _reader.Subject(f"{pattern}_SubjectState_*")} -def messageLog(pattern): +def message_log(pattern): """Message log data.""" return {"MessageLog": _reader.Log(f"{pattern}_MessageLog_*")} diff --git a/aeon/schema/dataset.py 
b/aeon/schema/dataset.py deleted file mode 100644 index 17ccdb59..00000000 --- a/aeon/schema/dataset.py +++ /dev/null @@ -1,74 +0,0 @@ -from dotmap import DotMap - -import aeon.schema.core as stream -from aeon.io import reader -from aeon.io.device import Device -from aeon.schema import foraging, octagon - -exp02 = DotMap( - [ - Device("Metadata", stream.metadata), - Device("ExperimentalMetadata", stream.environment, stream.messageLog), - Device("CameraTop", stream.video, stream.position, foraging.region), - Device("CameraEast", stream.video), - Device("CameraNest", stream.video), - Device("CameraNorth", stream.video), - Device("CameraPatch1", stream.video), - Device("CameraPatch2", stream.video), - Device("CameraSouth", stream.video), - Device("CameraWest", stream.video), - Device("Nest", foraging.weight), - Device("Patch1", foraging.patch), - Device("Patch2", foraging.patch), - ] -) - -exp01 = DotMap( - [ - Device("SessionData", foraging.session), - Device("FrameTop", stream.video, stream.position), - Device("FrameEast", stream.video), - Device("FrameGate", stream.video), - Device("FrameNorth", stream.video), - Device("FramePatch1", stream.video), - Device("FramePatch2", stream.video), - Device("FrameSouth", stream.video), - Device("FrameWest", stream.video), - Device("Patch1", foraging.depletionFunction, stream.encoder, foraging.feeder), - Device("Patch2", foraging.depletionFunction, stream.encoder, foraging.feeder), - ] -) - -octagon01 = DotMap( - [ - Device("Metadata", stream.metadata), - Device("CameraTop", stream.video, stream.position), - Device("CameraColorTop", stream.video), - Device("ExperimentalMetadata", stream.subject_state), - Device("Photodiode", octagon.photodiode), - Device("OSC", octagon.OSC), - Device("TaskLogic", octagon.TaskLogic), - Device("Wall1", octagon.Wall), - Device("Wall2", octagon.Wall), - Device("Wall3", octagon.Wall), - Device("Wall4", octagon.Wall), - Device("Wall5", octagon.Wall), - Device("Wall6", octagon.Wall), - Device("Wall7", octagon.Wall), - Device("Wall8", octagon.Wall), - ] -) - -social01 = exp02 -social01.Patch1.BeamBreak = reader.BitmaskEvent( - pattern="Patch1_32", value=0x22, tag="BeamBroken" -) -social01.Patch2.BeamBreak = reader.BitmaskEvent( - pattern="Patch2_32", value=0x22, tag="BeamBroken" -) -social01.Patch1.DeliverPellet = reader.BitmaskEvent( - pattern="Patch1_35", value=0x1, tag="TriggeredPellet" -) -social01.Patch2.DeliverPellet = reader.BitmaskEvent( - pattern="Patch2_35", value=0x1, tag="TriggeredPellet" -) diff --git a/aeon/schema/foraging.py b/aeon/schema/foraging.py index ffd8fdd9..14d76b9f 100644 --- a/aeon/schema/foraging.py +++ b/aeon/schema/foraging.py @@ -58,14 +58,14 @@ def region(pattern): return {"Region": _RegionReader(f"{pattern}_201_*")} -def depletionFunction(pattern): +def depletion_function(pattern): """State of the linear depletion function for foraging patches.""" return {"DepletionState": _PatchState(f"{pattern}_State_*")} def feeder(pattern): """Feeder commands and events.""" - return _device.compositeStream(pattern, beam_break, deliver_pellet) + return _device.register(pattern, beam_break, deliver_pellet) def beam_break(pattern): @@ -78,14 +78,34 @@ def deliver_pellet(pattern): return {"DeliverPellet": _reader.BitmaskEvent(f"{pattern}_35_*", 0x80, "TriggerPellet")} +def pellet_manual_delivery(pattern): + """Manual pellet delivery.""" + return {"ManualDelivery": _reader.Harp(f"{pattern}_*", ["manual_delivery"])} + + +def missed_pellet(pattern): + """Missed pellet delivery.""" + return {"MissedPellet": 
_reader.Harp(f"{pattern}_*", ["missed_pellet"])} + + +def pellet_retried_delivery(pattern): + """Retry pellet delivery.""" + return {"RetriedDelivery": _reader.Harp(f"{pattern}_*", ["retried_delivery"])} + + +def pellet_depletion_state(pattern): + """Pellet delivery state.""" + return {"DepletionState": _reader.Csv(f"{pattern}_*", ["threshold", "offset", "rate"])} + + def patch(pattern): """Data streams for a patch.""" - return _device.compositeStream(pattern, depletionFunction, _stream.encoder, feeder) + return _device.register(pattern, depletion_function, _stream.encoder, feeder) def weight(pattern): """Weight measurement data streams for a specific nest.""" - return _device.compositeStream(pattern, weight_raw, weight_filtered, weight_subject) + return _device.register(pattern, weight_raw, weight_filtered, weight_subject) def weight_raw(pattern): diff --git a/aeon/schema/schemas.py b/aeon/schema/schemas.py new file mode 100644 index 00000000..0afd2472 --- /dev/null +++ b/aeon/schema/schemas.py @@ -0,0 +1,86 @@ +from dotmap import DotMap +from aeon.io.device import Device +from aeon.schema import core, foraging, octagon, social + +exp02 = DotMap( + [ + Device("Metadata", core.metadata), + Device("ExperimentalMetadata", core.environment, core.message_log), + Device("CameraTop", core.video, core.position, foraging.region), + Device("CameraEast", core.video), + Device("CameraNest", core.video), + Device("CameraNorth", core.video), + Device("CameraPatch1", core.video), + Device("CameraPatch2", core.video), + Device("CameraSouth", core.video), + Device("CameraWest", core.video), + Device("Nest", foraging.weight), + Device("Patch1", foraging.patch), + Device("Patch2", foraging.patch), + ] +) + +exp01 = DotMap( + [ + Device("SessionData", foraging.session), + Device("FrameTop", core.video, core.position), + Device("FrameEast", core.video), + Device("FrameGate", core.video), + Device("FrameNorth", core.video), + Device("FramePatch1", core.video), + Device("FramePatch2", core.video), + Device("FrameSouth", core.video), + Device("FrameWest", core.video), + Device("Patch1", foraging.depletion_function, core.encoder, foraging.feeder), + Device("Patch2", foraging.depletion_function, core.encoder, foraging.feeder), + ] +) + +octagon01 = DotMap( + [ + Device("Metadata", core.metadata), + Device("CameraTop", core.video, core.position), + Device("CameraColorTop", core.video), + Device("ExperimentalMetadata", core.subject_state), + Device("Photodiode", octagon.photodiode), + Device("OSC", octagon.OSC), + Device("TaskLogic", octagon.TaskLogic), + Device("Wall1", octagon.Wall), + Device("Wall2", octagon.Wall), + Device("Wall3", octagon.Wall), + Device("Wall4", octagon.Wall), + Device("Wall5", octagon.Wall), + Device("Wall6", octagon.Wall), + Device("Wall7", octagon.Wall), + Device("Wall8", octagon.Wall), + ] +) + +social01 = DotMap( + [ + Device("Metadata", core.metadata), + Device("Environment", social.environment_b, social.subject_b), + Device("CameraTop", core.video, social.camera_top_pos_b), + Device("CameraNorth", core.video), + Device("CameraSouth", core.video), + Device("CameraEast", core.video), + Device("CameraWest", core.video), + Device("CameraPatch1", core.video), + Device("CameraPatch2", core.video), + Device("CameraPatch3", core.video), + Device("CameraNest", core.video), + Device("Nest", social.weight_raw_b, social.weight_filtered_b), + Device("Patch1", social.patch_streams_b), + Device("Patch2", social.patch_streams_b), + Device("Patch3", social.patch_streams_b), + Device("RfidGate", 
social.rfid_events_b), + Device("RfidNest1", social.rfid_events_b), + Device("RfidNest2", social.rfid_events_b), + Device("RfidPatch1", social.rfid_events_b), + Device("RfidPatch2", social.rfid_events_b), + Device("RfidPatch3", social.rfid_events_b), + ] +) + + +__all__ = ["exp01", "exp02", "octagon01", "social01"] diff --git a/aeon/schema/social.py b/aeon/schema/social.py index 97453af3..f9e7691d 100644 --- a/aeon/schema/social.py +++ b/aeon/schema/social.py @@ -1,119 +1,108 @@ -"""Readers for data relevant to Social experiments.""" - -import json -from pathlib import Path - -import numpy as np -import pandas as pd - -import aeon.io.reader as _reader -from aeon import util - - -class Pose(_reader.Harp): - """Reader for Harp-binarized tracking data given a model that outputs id, parts, and likelihoods. - - Columns: - class (int): Int ID of a subject in the environment. - class_likelihood (float): Likelihood of the subject's identity. - part (str): Bodypart on the subject. - part_likelihood (float): Likelihood of the specified bodypart. - x (float): X-coordinate of the bodypart. - y (float): Y-coordinate of the bodypart. - """ - - def __init__(self, pattern: str, extension: str = "bin"): - """Pose reader constructor.""" - # `pattern` for this reader should typically be '_*' - super().__init__(pattern, columns=None, extension=extension) - - def read( - self, file: Path, ceph_proc_dir: str | Path = "/ceph/aeon/aeon/data/processed" - ) -> pd.DataFrame: - """Reads data from the Harp-binarized tracking file.""" - # Get config file from `file`, then bodyparts from config file. - model_dir = Path(file.stem.replace("_", "/")).parent - config_file_dir = ceph_proc_dir / model_dir - if not config_file_dir.exists(): - raise FileNotFoundError(f"Cannot find model dir {config_file_dir}") - config_file = get_config_file(config_file_dir) - parts = self.get_bodyparts(config_file) - - # Using bodyparts, assign column names to Harp register values, and read data in default format. - columns = ["class", "class_likelihood"] - for part in parts: - columns.extend([f"{part}_x", f"{part}_y", f"{part}_likelihood"]) - self.columns = columns - data = super().read(file) - - # Drop any repeat parts. - unique_parts, unique_idxs = np.unique(parts, return_index=True) - repeat_idxs = np.setdiff1d(np.arange(len(parts)), unique_idxs) - if repeat_idxs: # drop x, y, and likelihood cols for repeat parts (skip first 5 cols) - init_rep_part_col_idx = (repeat_idxs - 1) * 3 + 5 - rep_part_col_idxs = np.concatenate([np.arange(i, i + 3) for i in init_rep_part_col_idx]) - keep_part_col_idxs = np.setdiff1d(np.arange(len(data.columns)), rep_part_col_idxs) - data = data.iloc[:, keep_part_col_idxs] - parts = unique_parts - - # Set new columns, and reformat `data`. 
- n_parts = len(parts) - part_data_list = [pd.DataFrame()] * n_parts - new_columns = ["class", "class_likelihood", "part", "x", "y", "part_likelihood"] - new_data = pd.DataFrame(columns=new_columns) - for i, part in enumerate(parts): - part_columns = ["class", "class_likelihood", f"{part}_x", f"{part}_y", f"{part}_likelihood"] - part_data = pd.DataFrame(data[part_columns]) - part_data.insert(2, "part", part) - part_data.columns = new_columns - part_data_list[i] = part_data - new_data = pd.concat(part_data_list) - return new_data.sort_index() - - def get_bodyparts(self, file: Path) -> list[str]: - """Returns a list of bodyparts from a model's config file.""" - parts = [] - with open(file) as f: - config = json.load(f) - if file.stem == "confmap_config": # SLEAP - try: - heads = config["model"]["heads"] - parts = [util.find_nested_key(heads, "anchor_part")] - parts += util.find_nested_key(heads, "part_names") - except KeyError as err: - if not parts: - raise KeyError(f"Cannot find bodyparts in {file}.") from err - return parts - - -def get_config_file( - config_file_dir: Path, - config_file_names: None | list[str] = None, -) -> Path: - """Returns the config file from a model's config directory.""" - if config_file_names is None: - config_file_names = ["confmap_config.json"] # SLEAP (add for other trackers to this list) - config_file = None - for f in config_file_names: - if (config_file_dir / f).exists(): - config_file = config_file_dir / f - break - if config_file is None: - raise FileNotFoundError(f"Cannot find config file in {config_file_dir}") - return config_file - - -def class_int2str(data: pd.DataFrame, config_file_dir: Path) -> pd.DataFrame: - """Converts a class integer in a tracking data dataframe to its associated string (subject id).""" - config_file = get_config_file(config_file_dir) - if config_file.stem == "confmap_config": # SLEAP - with open(config_file) as f: - config = json.load(f) - try: - heads = config["model"]["heads"] - classes = util.find_nested_key(heads, "classes") - except KeyError as err: - raise KeyError(f"Cannot find classes in {config_file}.") from err - for i, subj in enumerate(classes): - data.loc[data["class"] == i, "class"] = subj - return data +from aeon.io import reader +from aeon.io.device import Device, register +from aeon.schema import core, foraging + + +"""Creating the Social 0.1 schema""" + +# Above we've listed out all the streams we recorded from during Social0.1, but we won't care to analyze all +# of them. Instead, we'll create a DotMap schema from Device objects that only contains Readers for the +# streams we want to analyze. + +# We'll see both examples of binder functions we saw previously: 1. "empty pattern", and +# 2. "device-name passed". + +# And we'll see both examples of instantiating Device objects we saw previously: 1. from singleton binder +# functions; 2. from multiple and/or nested binder functions. + +# (Note, in the simplest case, a schema can always be created from / reduced to "empty pattern" binder +# functions as singletons in Device objects.) 
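A minimal sketch, for orientation, of how binder functions, `Device`, and `DotMap` compose into a schema; the stream names and patterns here are illustrative only, modelled on the Social 0.1 streams defined below, and are not part of the diff itself:

from dotmap import DotMap

from aeon.io import reader
from aeon.io.device import Device
from aeon.schema import core

# Binder functions: each takes a pattern and returns a dict of {stream_name: Reader}.
weight_raw = lambda pattern: {"WeightRaw": reader.Harp(f"{pattern}_200_*", ["weight(g)", "stability"])}
subject_weight = lambda pattern: {
    "SubjectWeight": reader.Csv(f"{pattern}_SubjectWeight_*", ["weight", "confidence", "subject_id", "int_id"])
}

# A Device built from a single binder function, and one built from several
# (Device merges them into its `registry` via `register`).
nest = Device("Nest", weight_raw)
environment = Device("Environment", core.environment_state, subject_weight)

# Devices iterate as (name, registry) pairs, so a schema is just a DotMap over a list of them;
# readers are then reachable as attributes, e.g. sketch_schema.Nest.WeightRaw.
sketch_schema = DotMap([nest, environment])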
+ +# Metadata.yml (will be a singleton binder function Device object) +# --- + +metadata = Device("Metadata", core.metadata) + +# --- + +# Environment (will be a nested, multiple binder function Device object) +# --- + +# BlockState +block_state_b = lambda pattern: { + "BlockState": reader.Csv(f"{pattern}_BlockState_*", ["pellet_ct", "pellet_ct_thresh", "due_time"]) +} + +# LightEvents +light_events_b = lambda pattern: { + "LightEvents": reader.Csv(f"{pattern}_LightEvents_*", ["channel", "value"]) +} + +# Combine EnvironmentState, BlockState, LightEvents +environment_b = lambda pattern: register( + pattern, core.environment_state, block_state_b, light_events_b, core.message_log +) + +# SubjectState +subject_state_b = lambda pattern: { + "SubjectState": reader.Csv(f"{pattern}_SubjectState_*", ["id", "weight", "type"]) +} + +# SubjectVisits +subject_visits_b = lambda pattern: { + "SubjectVisits": reader.Csv(f"{pattern}_SubjectVisit_*", ["id", "type", "region"]) +} + +# SubjectWeight +subject_weight_b = lambda pattern: { + "SubjectWeight": reader.Csv( + f"{pattern}_SubjectWeight_*", ["weight", "confidence", "subject_id", "int_id"] + ) +} + +# Separate Device object for subject-specific streams. +subject_b = lambda pattern: register(pattern, subject_state_b, subject_visits_b, subject_weight_b) +# --- + +# Camera +# --- + +camera_top_pos_b = lambda pattern: {"Pose": reader.Pose(f"{pattern}_test-node1*")} + +# --- + +# Nest +# --- + +weight_raw_b = lambda pattern: {"WeightRaw": reader.Harp(f"{pattern}_200_*", ["weight(g)", "stability"])} +weight_filtered_b = lambda pattern: { + "WeightFiltered": reader.Harp(f"{pattern}_202_*", ["weight(g)", "stability"]) +} + +# --- + +# Patch +# --- + +# Combine streams for Patch device +patch_streams_b = lambda pattern: register( + pattern, + foraging.pellet_depletion_state, + core.encoder, + foraging.feeder, + foraging.pellet_manual_delivery, + foraging.missed_pellet, + foraging.pellet_retried_delivery, +) +# --- + +# Rfid +# --- + + +def rfid_events_b(pattern): + """RFID events reader""" + pattern = pattern.replace("Rfid", "") + if pattern.startswith("Events"): + pattern = pattern.replace("Events", "") + return {"RfidEvents": reader.Harp(f"RfidEvents{pattern}_*", ["rfid"])} diff --git a/docs/examples/get_harp_stream_event_bitmask.ipynb b/docs/examples/get_harp_stream_event_bitmask.ipynb new file mode 100644 index 00000000..3c01212b --- /dev/null +++ b/docs/examples/get_harp_stream_event_bitmask.ipynb @@ -0,0 +1,124 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"Jupyter settings and Imports\"\"\"\n", + "\n", + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "from pathlib import Path\n", + "import pandas as pd\n", + "\n", + "import aeon.io.api as api\n", + "from aeon.io import reader" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "data:\n", + " beambreak\n", + "time \n", + "2023-06-21 10:01:16.633728027 34\n", + "2023-06-21 10:01:16.649184227 32\n", + "2023-06-21 10:01:28.314400196 34\n", + "2023-06-21 10:01:28.331103802 32\n", + "2023-06-21 10:01:38.428864002 34\n", + "... 
...\n", + "2023-06-21 11:16:43.647552013 32\n", + "2023-06-21 11:16:43.655648232 34\n", + "2023-06-21 11:16:43.674079895 32\n", + "2023-06-21 11:21:40.381728172 34\n", + "2023-06-21 11:21:40.397024155 32\n", + "\n", + "[196 rows x 1 columns]\n", + "\n", + "\n", + "bitmask:\n", + " 34\n", + "\n", + "\n", + "stream_data:\n", + " event\n", + "time \n", + "2023-06-21 10:01:16.633728027 beambreak\n", + "2023-06-21 10:01:28.314400196 beambreak\n", + "2023-06-21 10:01:38.428864002 beambreak\n", + "2023-06-21 10:01:53.453343868 beambreak\n", + "2023-06-21 10:04:14.685791969 beambreak\n", + "... ...\n", + "2023-06-21 11:15:20.406752110 beambreak\n", + "2023-06-21 11:16:24.036767960 beambreak\n", + "2023-06-21 11:16:43.625472069 beambreak\n", + "2023-06-21 11:16:43.655648232 beambreak\n", + "2023-06-21 11:21:40.381728172 beambreak\n", + "\n", + "[98 rows x 1 columns]\n" + ] + } + ], + "source": [ + "\"\"\"How to find the bitmask associated with any harp stream event and create a new reader: \n", + "example with patch beambreak\"\"\"\n", + "\n", + "# Ensure you have the pattern of the stream (look at the filename), and the expected event name\n", + "pattern = \"Patch1_32*\"\n", + "event_name = \"beambreak\"\n", + "# Set the reader for the stream\n", + "harp_reader = reader.Harp(pattern=pattern, columns=[event_name])\n", + "# Set the root dir and a time range in which you know the stream acquired data\n", + "root = Path(\"/ceph/aeon/aeon/data/raw/AEON3/presocial0.1\")\n", + "start = pd.Timestamp(\"2023-06-21 10:00:00\")\n", + "end = pd.Timestamp(\"2023-06-21 12:00:10\")\n", + "# Get the bitmask as the first value of the loaded stream\n", + "data = api.load(root, harp_reader, start=start, end=end)\n", + "bitmask = data.iloc[0, 0]\n", + "new_reader = reader.BitmaskEvent(pattern, bitmask, event_name)\n", + "stream_data = api.load(root, new_reader, start=start, end=end)\n", + "\n", + "print(f\"data:\\n {data}\\n\\n\")\n", + "print(f\"bitmask:\\n {bitmask}\\n\\n\")\n", + "print(f\"stream_data:\\n {stream_data}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "aeon", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/examples/understanding_aeon_data_architecture.ipynb b/docs/examples/understanding_aeon_data_architecture.ipynb new file mode 100644 index 00000000..e3df6981 --- /dev/null +++ b/docs/examples/understanding_aeon_data_architecture.ipynb @@ -0,0 +1,4177 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Aeon data file structure on Ceph" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Raw data**: `/ceph/aeon/aeon/data/raw/////`\n", + "\n", + "e.g. `/ceph/aeon/aeon/data/raw/AEON3/social0.1/2023-12-01T14-30-34/Patch1/Patch1_90_2023-12-02T12-00-00.bin`\n", + "\n", + "**Processed data (e.g. trained and exported SLEAP model)**: `/ceph/aeon/aeon/data/processed/////frozen_graph.pb`\n", + "\n", + "e.g. 
`/ceph/aeon/aeon/data/processed/test-node1/0000005/2023-11-30T01-29-00/topdown_multianimal_id/frozen_graph.pb`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading Aeon data in Python" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Terminology\n", + "\n", + "**_Chunk Duration_**: The time duration over which experiment data files are written out. Currently, all Aeon experiments write out acquired data to files every hour (1-hour chunks).\n", + "\n", + "**_Acquisition Epoch_**: One run of an experiment workflow. When an experiment workflow restarts, a new epoch starts.\n", + "\n", + "E.g. `ceph/aeon/aeon/data/raw/AEON3/social0.1/2023-12-03T13-05-15` is an acquisition epoch in the Social0.1 experiment. Because the next epoch directory is `ceph/aeon/aeon/data/raw/AEON3/social0.1/2023-12-03T13-30-30`, we know this first epoch lasted only ~25 minutes.\n", + "\n", + "**_Stream_**: Data that comes from a single source.\n", + "\n", + "A single data file is associated with each stream, so often 'stream' and 'file' can be interchanged. If the stream comes from a harp device, the stream-file contains information about the register of the harp device which generated the stream, as well as the associated chunk datetime.\n", + "\n", + "For a harp stream, the filename format is as follows:
 \n", + "`<device_name>_<register>_<chunk_datetime>` e.g. `Patch1_90_2023-12-02T12-00-00.bin`
\n", + "By convention, harp streams which are acquired in software start with register number '200'; e.g. the largest-blob-centroid-tracking stream filename is: `CameraTop_200*.bin`\n", + "\n", + "Each stream can contain single or multi-dimensional data (e.g. a patch wheel magnetic encoder stream contains information about both the magnetic field strength and angle: however, each dimension is associated with a unique bitmask, and thus can be isolated by applying this bitmask to the stream).\n", + "\n", + "**_Reader_**: A Python class whose instantiated objects each read one particular stream. Simple working principle: each `Reader` has a `read` method which takes in a single stream-file and reads the data in that file into a pandas `DataFrame` (see `aeon/io/reader.py` and `aeon/schema/*.py`).\n", + "\n", + "e.g. `Encoder` readers read values from `Patch__` files (these contain a patch wheel's magnetic encoder readings, to determine how much the wheel has been spun).\n", + "\n", + "Whenever a new device is implemented in an Aeon experiment, a new `Reader` should be created for the acquired data, such that the data can be read and returned in the form of a pandas `DataFrame`.\n", + "\n", + "**_Device_**: A collection of streams grouped together for convenience, often for related streams.\n", + "\n", + "On ceph, we organize streams into device folders:
e.g. `ceph/aeon/aeon/data/raw/AEON3/social0.1/2023-12-01T14-30-34/Patch1` contains the patch-heartbeat stream (`Patch1_8`), the patch-beambreak stream (`Patch1_32`), the patch-pellet delivery-pin-set stream (`Patch1_35`), the patch-pellet-delivery-pin-cleared stream (`Patch1_36`), the patch-wheel-magnetic-encoder stream (`Patch1_90`), the patch-wheel-magnetic-encoder-mode stream (`Patch1_91`), the patch-feeder-dispenser-state stream (`Patch1_200`), the patch-pellet-manual-delivery stream (`Patch1_201`), the patch-missed-pellet-stream (`Patch1_202`), the patch-pellet-delivery-retry stream (`Patch1_203`), and the patch-state stream (`Patch1_State`).\n", + "\n", + "In code, we create logical devices via the `Device` class (see `aeon/io/device.py`)
\n", + "e.g. We often define 'Patch' devices that contain `Reader` objects (in the _`register`_ attribute) that are associated with specific streams (as experimenters may not care about analyzing all streams in a `Patch` device folder on ceph), e.g. wheel-magnetic-encoder, state, pellet-delivery-pin-set, and beambreak.\n", + "\n", + "One last important aspect of `Device` objects are _binder functions_: on instantiation, `Device` requires at least one argument that is a function that returns a dict of `Reader` objects (these get set into the `Device` object's `registry`). We'll explain this more in detail and show examples below.\n", + "\n", + "**_Schema_**: A list of devices grouped within a `DotMap` object (see `aeon/docs/examples/schemas.py`). Each experiment is associated with a schema. If a schema changes, then the experiment neccesarily must be different (either in name or version number), as the acquired data is now different.\n", + "\n", + "**_Dataset_**: All data belonging to a particular experiment. \n", + "\n", + "e.g. All data in `ceph/aeon/aeon/data/raw/AEON3/social0.1`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Code\n", + "\n", + "With this terminology in mind, let's get to the code!" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The autoreload extension is already loaded. To reload it, use:\n", + " %reload_ext autoreload\n" + ] + } + ], + "source": [ + "\"\"\"Notebook settings and imports.\"\"\"\n", + "\n", + "%load_ext autoreload\n", + "%autoreload 2\n", + "# %flow mode reactive\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "\n", + "from pathlib import Path\n", + "\n", + "from dotmap import DotMap\n", + "\n", + "import aeon\n", + "from aeon.io import reader\n", + "from aeon.io.device import Device, register\n", + "from aeon.schema import core, foraging, social\n", + "from aeon.schema.schemas import exp02" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"Set experiment root path and time range / set to load data\"\"\"\n", + "\n", + "root = Path(\"/ceph/aeon/aeon/data/raw/AEON3/social0.1\")\n", + "start_time = pd.Timestamp(\"2023-12-02 10:30:00\")\n", + "end_time = pd.Timestamp(\"2023-12-02 12:30:00\")\n", + "time_set = pd.concat(\n", + " [\n", + " pd.Series(pd.date_range(start_time, start_time + pd.Timedelta(hours=1), freq=\"1s\")),\n", + " pd.Series(pd.date_range(end_time, end_time + pd.Timedelta(hours=1), freq=\"1s\"))\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"Creating a new `Reader` class\"\"\"\n", + "\n", + "# All readers are subclassed from the base `Reader` class. 
They thus all contain a `read` method,\n", + "# for returning data from a file in the form of a pandas DataFrame, and the following attributes, \n", + "# which must be specified on object construction:\n", + "# `pattern`: a prefix in the filename used by `aeon.io.api.load` to find matching files to load\n", + "# `columns`: a list of column names for the returned DataFrame\n", + "# `extension`: the file extension of the files to be read\n", + "\n", + "# Using these principles, we can recreate a simple reader for reading subject weight data from the \n", + "# social0.1 experiments, which are saved in .csv format.\n", + "\n", + "# First, we'll create a general Csv reader, subclassed from `Reader`.\n", + "class Csv(reader.Reader):\n", + " \"\"\"Reads data from csv text files, where the first column stores the Aeon timestamp, in seconds.\"\"\"\n", + "\n", + " def __init__(self, pattern, columns, extension=\"csv\"):\n", + " super().__init__(pattern, columns, extension)\n", + "\n", + " def read(self, file):\n", + " return pd.read_csv(file, header=0, names=self.columns, index_col=0)\n", + " \n", + "# Next, we'll create a reader for the subject weight data, subclassed from `Csv`.\n", + "\n", + "# We know from our data that the files of interest start with 'Environment_SubjectWeight' and columns are: \n", + "# 1) Aeon timestamp in seconds from 1904/01/01 (1904 date system)\n", + "# 2) Weight in grams\n", + "# 3) Weight stability confidence (0-1)\n", + "# 4) Subject ID (string)\n", + "# 5) Subject ID (integer)\n", + "# Since the first column (Aeon timestamp) will be set as the index, we'll use the rest as DataFrame columns.\n", + "# And we don't need to define `read`, as it will use the `Csv` class's `read` method.\n", + "\n", + "class Subject_Weight(Csv):\n", + " \"\"\"Reads subject weight data from csv text files.\"\"\"\n", + " \n", + " def __init__(\n", + " self, \n", + " pattern=\"Environment_SubjectWeight*\",\n", + " columns=[\"weight\", \"confidence\", \"subject_id\", \"int_id\"], \n", + " extension=\"csv\"\n", + " ):\n", + " super().__init__(pattern, columns, extension)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Read from a single file:\n" + ] + }, + { + "data": { + "text/html": [ + "
[DataFrame HTML preview omitted: 4382 rows × 4 columns; same data shown in the text/plain output below]
" + ], + "text/plain": [ + " weight confidence subject_id int_id\n", + "3.784363e+09 29.799999 1 CAA-1120746 0\n", + "3.784363e+09 29.799999 1 CAA-1120746 0\n", + "3.784363e+09 29.799999 1 CAA-1120746 0\n", + "3.784363e+09 29.799999 1 CAA-1120746 0\n", + "3.784363e+09 29.799999 1 CAA-1120746 0\n", + "... ... ... ... ...\n", + "3.784367e+09 31.200001 1 CAA-1120747 0\n", + "3.784367e+09 31.200001 1 CAA-1120747 0\n", + "3.784367e+09 31.200001 1 CAA-1120747 0\n", + "3.784367e+09 31.200001 1 CAA-1120747 0\n", + "3.784367e+09 31.200001 1 CAA-1120747 0\n", + "\n", + "[4382 rows x 4 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Read from a contiguous time range:\n" + ] + }, + { + "data": { + "text/html": [ + "
[DataFrame HTML preview omitted: 10525 rows × 4 columns; same data shown in the text/plain output below]
" + ], + "text/plain": [ + " weight confidence subject_id int_id\n", + "time \n", + "2023-12-02 10:31:12.840000153 29.5 1 CAA-1120746 0\n", + "2023-12-02 10:31:12.940000057 29.5 1 CAA-1120746 0\n", + "2023-12-02 10:31:13.039999962 29.5 1 CAA-1120746 0\n", + "2023-12-02 10:31:13.099999905 29.5 1 CAA-1120746 0\n", + "2023-12-02 10:31:13.199999809 29.5 1 CAA-1120746 0\n", + "... ... ... ... ...\n", + "2023-12-02 12:27:29.460000038 31.1 1 CAA-1120747 0\n", + "2023-12-02 12:27:29.559999943 31.1 1 CAA-1120747 0\n", + "2023-12-02 12:27:29.619999886 31.1 1 CAA-1120747 0\n", + "2023-12-02 12:27:29.719999790 31.1 1 CAA-1120747 1\n", + "2023-12-02 12:27:29.820000172 31.1 1 CAA-1120747 0\n", + "\n", + "[10525 rows x 4 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Read from a set of times:\n" + ] + }, + { + "data": { + "text/html": [ + "
[DataFrame HTML preview omitted: 3691 rows × 4 columns; same data shown in the text/plain output below]
" + ], + "text/plain": [ + " weight confidence subject_id int_id\n", + "2023-12-02 10:30:00 30.000000 1.0 CAA-1120747 0.0\n", + "2023-12-02 10:30:01 30.000000 1.0 CAA-1120747 0.0\n", + "2023-12-02 10:30:02 30.000000 1.0 CAA-1120747 0.0\n", + "2023-12-02 10:30:03 30.000000 1.0 CAA-1120747 0.0\n", + "2023-12-02 10:30:04 30.000000 1.0 CAA-1120747 0.0\n", + "... ... ... ... ...\n", + "2023-12-02 13:18:25 29.799999 1.0 CAA-1120746 0.0\n", + "2023-12-02 13:18:26 29.799999 1.0 CAA-1120746 0.0\n", + "2023-12-02 13:22:05 29.900000 1.0 CAA-1120746 0.0\n", + "2023-12-02 13:22:14 29.799999 1.0 CAA-1120746 0.0\n", + "2023-12-02 13:22:18 29.799999 1.0 CAA-1120746 0.0\n", + "\n", + "[3691 rows x 4 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\"\"\"Loading data via a `Reader` object\"\"\"\n", + "\n", + "# We can now load data by specifying a file \n", + "subject_weight_reader = Subject_Weight()\n", + "acq_epoch = \"2023-12-01T14-30-34\"\n", + "weight_file = root / acq_epoch / \"Environment/Environment_SubjectWeight_2023-12-02T12-00-00.csv\"\n", + "print(\"Read from a single file:\")\n", + "display(subject_weight_reader.read(weight_file))\n", + "\n", + "# And we can use `load` to load data across many same-stream files given a time range or time set.\n", + "print(\"Read from a contiguous time range:\")\n", + "display(aeon.load(root, subject_weight_reader, start=start_time, end=end_time))\n", + "print(\"Read from a set of times:\")\n", + "display(aeon.load(root, subject_weight_reader, time=time_set.values))" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
[DataFrame HTML preview omitted: 4382 rows × 2 columns; same data shown in the text/plain output below]
" + ], + "text/plain": [ + " weight id\n", + "3.784363e+09 29.799999 CAA-1120746\n", + "3.784363e+09 29.799999 CAA-1120746\n", + "3.784363e+09 29.799999 CAA-1120746\n", + "3.784363e+09 29.799999 CAA-1120746\n", + "3.784363e+09 29.799999 CAA-1120746\n", + "... ... ...\n", + "3.784367e+09 31.200001 CAA-1120747\n", + "3.784367e+09 31.200001 CAA-1120747\n", + "3.784367e+09 31.200001 CAA-1120747\n", + "3.784367e+09 31.200001 CAA-1120747\n", + "3.784367e+09 31.200001 CAA-1120747\n", + "\n", + "[4382 rows x 2 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
[DataFrame HTML preview omitted: empty DataFrame with columns weight, confidence, subject_id, int_id; same as the text/plain output below]
" + ], + "text/plain": [ + "Empty DataFrame\n", + "Columns: [weight, confidence, subject_id, int_id]\n", + "Index: []" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " weight confidence subject_id int_id\n", + "time \n", + "2023-12-02 10:31:12.840000153 29.5 1 CAA-1120746 0\n", + "2023-12-02 10:31:12.940000057 29.5 1 CAA-1120746 0\n", + "2023-12-02 10:31:13.039999962 29.5 1 CAA-1120746 0\n", + "2023-12-02 10:31:13.099999905 29.5 1 CAA-1120746 0\n", + "2023-12-02 10:31:13.199999809 29.5 1 CAA-1120746 0\n", + "... ... ... ... ...\n", + "2023-12-02 12:27:29.460000038 31.1 1 CAA-1120747 0\n", + "2023-12-02 12:27:29.559999943 31.1 1 CAA-1120747 0\n", + "2023-12-02 12:27:29.619999886 31.1 1 CAA-1120747 0\n", + "2023-12-02 12:27:29.719999790 31.1 1 CAA-1120747 1\n", + "2023-12-02 12:27:29.820000172 31.1 1 CAA-1120747 0\n", + "\n", + "[10525 rows x 4 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "bitmasks: [32 34]\n", + "raw data:\n", + " beambreak\n", + "time \n", + "2023-12-02 11:33:03.942463875 34\n", + "2023-12-02 11:33:03.951744080 32\n", + "2023-12-02 11:33:18.500351906 34\n", + "2023-12-02 11:33:18.503456115 32\n", + "2023-12-02 11:33:18.509632111 34\n", + "\n", + "\n", + "bitmasked data:\n", + " event\n", + "time \n", + "2023-12-02 11:33:03.942463875 beambreak\n", + "2023-12-02 11:33:18.500351906 beambreak\n", + "2023-12-02 11:33:18.509632111 beambreak\n", + "2023-12-02 11:33:18.515808104 beambreak\n", + "2023-12-02 11:33:43.750751972 beambreak\n" + ] + } + ], + "source": [ + "\"\"\"Updating a `Reader` object\"\"\"\n", + "\n", + "# Occasionally, we may want to tweak the output from a `Reader` object's `read` method, or some tweaks to \n", + "# streams on the acquisition side may require us to make corresponding tweaks to a `Reader` object to\n", + "# ensure it works properly. We'll cover some of these cases here.\n", + "\n", + "# 1. Column changes\n", + "\n", + "# First, if we want to simply change the output from `read`, we can change the columns of an instantiated\n", + "# `Reader` object. Let's change `subject_id` to `id`, and after reading, drop the `confidence` and `int_id`\n", + "# columns.\n", + "subject_weight_reader.columns = [\"weight\", \"confidence\", \"id\", \"int_id\"]\n", + "data = subject_weight_reader.read(weight_file)\n", + "data.drop([\"confidence\", \"int_id\"], axis=1, inplace=True)\n", + "display(data)\n", + "\n", + "\n", + "# 2. Pattern changes\n", + "\n", + "# Next, occasionally a stream's filename may change, in which case we'll need to update the `Reader` \n", + "# object's `pattern` to find the new files using `load`: \n", + "\n", + "# Let's simulate a case where the old SubjectWeight stream was called Weight, and create a `Reader` class.\n", + "class Subject_Weight(Csv):\n", + " \"\"\"Reads subject weight data from csv text files.\"\"\"\n", + " \n", + " def __init__(\n", + " self, \n", + " pattern=\"Environment_Weight*\",\n", + " columns=[\"weight\", \"confidence\", \"subject_id\", \"int_id\"], \n", + " extension=\"csv\"\n", + " ):\n", + " super().__init__(pattern, columns, extension)\n", + "\n", + "# We'll see that we can't find any files with this pattern.\n", + "subject_weight_reader = Subject_Weight()\n", + "data = aeon.load(root, subject_weight_reader, start=start_time, end=end_time)\n", + "display(data) # empty dataframe\n", + "\n", + "# But if we just update the pattern, `load` will find the files.\n", + "subject_weight_reader.pattern = \"Environment_SubjectWeight*\"\n", + "data = aeon.load(root, subject_weight_reader, start=start_time, end=end_time)\n", + "display(data) \n", + "\n", + "\n", + "# 3. 
Bitmask changes for Harp streams\n", + "\n", + "# Lastly, some Harp streams use bitmasks to distinguish writing out different events to the same file.\n", + "# e.g. The beambreak stream `Patch_32*` writes out events both for when the beam is broken and when\n", + "# it gets reset. Given a Harp stream, we can find all bitmasks associated with it, and choose which one\n", + "# to use to filter the data:\n", + "\n", + "# Given a stream, we can create a `Harp` reader object to find all bitmasks associated with it.\n", + "pattern = \"Patch1_32*\"\n", + "event_name = \"beambreak\"\n", + "harp_reader = reader.Harp(pattern=pattern, columns=[event_name])\n", + "data = aeon.load(root, harp_reader, start=start_time, end=end_time)\n", + "bitmasks = np.unique(data[event_name].values)\n", + "print(f\"bitmasks: {bitmasks}\")\n", + "\n", + "# Let's set the bitmask to '34', and create a new `Reader` object to use this.\n", + "bitmask = 34\n", + "beambreak_reader = reader.BitmaskEvent(pattern, bitmask, event_name)\n", + "bitmasked_data = aeon.load(root, beambreak_reader, start=start_time, end=end_time)\n", + "\n", + "print(f\"raw data:\\n {data.head()}\\n\\n\")\n", + "print(f\"bitmasked data:\\n {bitmasked_data.head()}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "d.registry={'subject_weight': <__main__.Subject_Weight object at 0x7fa4433d5f50>, 'subject_state': }\n", + "schema.SubjectMetadata=DotMap(subject_weight=<__main__.Subject_Weight object at 0x7fa4433d5f50>, subject_state=)\n", + "schema.SubjectMetadata.subject_weight=<__main__.Subject_Weight object at 0x7fa4433d5f50>\n", + "schema.Metadata=\n" + ] + } + ], + "source": [ + "\"\"\"Instantiating a `Device` object\"\"\"\n", + "\n", + "# A `Device` object is instantiated from a name, followed by one or more 'binder functions', which \n", + "# return a dictionary of a name paired with a `Reader` object. We call such a dictionary of `:Reader`\n", + "# key-value pairs a 'registry'. Each binder function requires a `pattern` argument, which can be used to\n", + "# set the pattern of the `Reader` object it returns. 
This requirement for binder functions is for allowing\n", + "# the `Device` to optionally pass its name to appropriately set the pattern of `Reader` objects it contains.\n", + "\n", + "# Below are examples of \"empty pattern\" binder functions, where the pattern doesn't get used.\n", + "def subject_weight_binder(pattern): # an example subject weight binder function\n", + " return {\"subject_weight\": subject_weight_reader}\n", + "\n", + "def subject_state_binder(pattern): # an example subject state binder function\n", + " return {\"subject_state\": reader.Subject(pattern=\"Environment_SubjectState*\")}\n", + "\n", + "d = Device(\"SubjectMetadata\", subject_weight_binder, subject_state_binder)\n", + "\n", + "# On creation, the `Device` object puts all registries into a single registry, which is accessible via the\n", + "# `registry` attribute.\n", + "print(f\"{d.registry=}\")\n", + "\n", + "# This is done so that we can create a 'schema' (a DotMap of a list of `Device` objects), where a `Device`\n", + "# object name is a key for the schema, and the corresponding values of the `registry` names (which are keys\n", + "# for the `Device` object) are the `Reader` objects associated with that `Device` object.\n", + "\n", + "# This works because, when a list of `Device` objects are passed into the `DotMap` constructor, the\n", + "# `__iter__` method of the `Device` object returns a tuple of the object's name with its `stream` \n", + "# attribute, which is passed in directly to the DotMap constructor to create a nested DotMap:\n", + "# device_name -> stream_name -> stream `Reader` object. This is shown below:\n", + "\n", + "d2 = Device(\"Metadata\", core.metadata) # instantiate Device from a defined binder function\n", + "schema = DotMap([d, d2]) # create schema as DotMap of list of Device objects\n", + "print(f\"{schema.SubjectMetadata=}\") # Device object name as key to schema\n", + "print(f\"{schema.SubjectMetadata.subject_weight=}\") # binder function name yields the Reader object\n", + "print(f\"{schema.Metadata=}\") # for a singleton Device object, Device name alone yields the Reader object" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "feeder_device.registry={'pellet_trigger': , 'pellet_beambreak': }\n", + "feeder_device_nested.registry={'pellet_trigger': , 'pellet_beambreak': }\n", + "patch_device.registry={'pellet_trigger': , 'pellet_beambreak': , 'Encoder': }\n" + ] + } + ], + "source": [ + "\"\"\"Nested binder functions\"\"\"\n", + "\n", + "# Binder functions can return a dict whose value is actually composed of multiple, rather than a single,\n", + "# `Reader` objects. This is done by creating nested binder functions, via `register`.\n", + "\n", + "# First let's define two standard binder functions, for pellet delivery trigger and beambreak events. 
\n", + "# In all examples below we'll define \"device-name passed\" binder functions, since the `Device` object which\n", + "# will be instantiated from these functions will pass its name to set the pattern of the corresponding\n", + "# Reader objects.\n", + "def pellet_trigger(pattern):\n", + " \"\"\"Pellet delivery trigger events.\"\"\"\n", + " return {\"pellet_trigger\": reader.BitmaskEvent(f\"{pattern}_35_*\", 0x80, \"PelletTriggered\")}\n", + "\n", + "\n", + "def pellet_beambreak(pattern):\n", + " \"\"\"Pellet beambreak events.\"\"\"\n", + " return {\"pellet_beambreak\": reader.BitmaskEvent(f\"{pattern}_32_*\", 0x22, \"PelletDetected\")}\n", + "\n", + "# Next, we'll define a nested binder function for a \"feeder\", which returns the two binder functions above.\n", + "def feeder(pattern):\n", + " \"\"\"Feeder commands and events.\"\"\"\n", + " return register(pattern, pellet_trigger, pellet_beambreak)\n", + "\n", + "# And further, we can define a higher-level nested binder function for a \"patch\", which includes the\n", + "# magnetic encoder values for a patch's wheel in addition to `feeder`.\n", + "def patch(pattern):\n", + " \"\"\"Data streams for a patch.\"\"\"\n", + " return register(pattern, feeder, core.encoder)\n", + "\n", + "\n", + "# We can now instantiate a `Device` object as done previously, from combinations of binder functions, but \n", + "# also from nested binder functions.\n", + "feeder_device = Device(\"Patch1\", pellet_trigger, pellet_beambreak)\n", + "feeder_device_nested = Device(\"Patch1\", feeder)\n", + "patch_device = Device(\"Patch1\", patch)\n", + "\n", + "# And we can see that `feeder_device` and `feeder_device_nested` are equivalent.\n", + "print(f\"{feeder_device.registry=}\")\n", + "print(f\"{feeder_device_nested.registry=}\")\n", + "\n", + "# And `patch_device` contains the same Reader objects as these plus an `Encoder` Reader.\n", + "print(f\"{patch_device.registry=}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Social0.1 Data Streams\n", + "\n", + "Now that we've covered streams, readers, binder functions, devices, and schemas, let's build a schema for the Social0.1 Experiment!\n", + "\n", + "First we'll need to know all the streams we recorded during the Social0.1 experiment: these can be found via\n", + "looking through all devices in an acqusition epoch \n", + "(e.g. 
`ceph/aeon/aeon/data/raw/AEON3/social0.1/2023-12-01T14-30-34`)\n", + "\n", + "And here they are: (*note: register 8 is always the harp heartbeat for any device that has this stream.*)\n", + "\n", + "- Metadata.yml\n", + "- AudioAmbient (.wav)\n", + " - .wav: raw audio\n", + "- Environment\n", + " - BlockState\n", + " - EnvironmentState\n", + " - LightEvents\n", + " - MessageLog\n", + " - SubjectState\n", + " - SubjectVisits\n", + " - SubjectWeight\n", + "- CameraTop (200, 201, .avi, .csv, )\n", + " - 200: position\n", + " - 201: region\n", + "- CameraNorth (avi, csv)\n", + "- CameraEast (avi, csv)\n", + "- CameraSouth (avi, csv)\n", + "- CameraWest (avi, csv)\n", + "- CameraPatch1 (avi, csv)\n", + "- CameraPatch2 (avi, csv)\n", + "- CameraPatch3 (avi, csv)\n", + "- CameraNest (avi, csv)\n", + "- ClockSynchronizer (8, 36)\n", + " - 36: hearbeat out\n", + "- Nest (200, 201, 202, 203)\n", + " - 200: raw weight\n", + " - 201: tare weight\n", + " - 202: filtered weight\n", + " - 203: baseline weight\n", + " - 204: subject weight\n", + "- Patch1 (8, 32, 35, 36, 87, 90, 91, 200, 201, 202, 203, State)\n", + " - 32: beambreak\n", + " - 35: set delivery\n", + " - 36: clear delivery\n", + " - 87: expansion board state\n", + " - 90: encoder read\n", + " - 91: encoder mode\n", + " - 200: dispenser state\n", + " - 201: manual delivery\n", + " - 202: missed pellet\n", + " - 203: retry delivery\n", + "- Patch2 (8, 32, 35, 36, 87, 90, 91, State)\n", + "- Patch3 (8, 32, 35, 36, 87, 90, 91, 200, 203, State)\n", + "- RfidEventsGate (8, 32, 35)\n", + " - 32: entry id\n", + " - 35: hardware notifications\n", + "- RfidEventsNest1 (8, 32, 35)\n", + "- RfidEventsNest2 (8, 32, 35)\n", + "- RfidEventsPatch1 (8, 32, 35)\n", + "- RfidEventsPatch2 (8, 32, 35)\n", + "- RfidEventsPatch3 (8, 32, 35)\n", + "- System\n", + " - AvailableMemory\n", + "- VideoController (8, 32, 33, 34, 35, 36, 45, 52)" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"Creating the Social 0.1 schema\"\"\"\n", + "\n", + "# Above we've listed out all the streams we recorded from during Social0.1, but we won't care to analyze all\n", + "# of them. Instead, we'll create a DotMap schema from Device objects that only contains Readers for the\n", + "# streams we want to analyze.\n", + "\n", + "# We'll see both examples of binder functions we saw previously: 1. \"empty pattern\", and\n", + "# 2. \"device-name passed\".\n", + "\n", + "# And we'll see both examples of instantiating Device objects we saw previously: 1. from singleton binder\n", + "# functions; 2. 
from multiple and/or nested binder functions.\n", + "\n", + "# (Note, in the simplest case, a schema can always be created from / reduced to \"empty pattern\" binder\n", + "# functions as singletons in Device objects.)\n", + "\n", + "# Metadata.yml (will be a singleton binder function Device object)\n", + "# ---\n", + "\n", + "core.metadata # binder function: \"device-name passed\": returns a `reader.Metadata` Reader object\n", + "metadata = Device(\"Metadata\", core.metadata)\n", + "\n", + "# ---\n", + "\n", + "# Environment (will be a nested, multiple binder function Device object)\n", + "# ---\n", + "\n", + "# BlockState\n", + "# binder function: \"device-name passed\"; `pattern` will be set by `Device` object name: \"Environment\"\n", + "block_state_b = lambda pattern: {\n", + " \"BlockState\": reader.Csv(f\"{pattern}_BlockState*\", [\"pellet_ct\", \"pellet_ct_thresh\", \"due_time\"])\n", + "}\n", + "\n", + "# EnvironmentState\n", + "core.environment_state # binder function: \"device-name passed\"\n", + "\n", + "# Combine EnvironmentState and BlockState\n", + "env_block_state_b = lambda pattern: register(pattern, core.environment_state, block_state_b)\n", + "\n", + "# LightEvents\n", + "cols = [\"channel\", \"value\"]\n", + "light_events_r = reader.Csv(\"Environment_LightEvents*\", cols)\n", + "light_events_b = lambda pattern: {\"LightEvents\": light_events_r} # binder function: \"empty pattern\"\n", + "\n", + "# MessageLog\n", + "core.message_log # binder function: \"device-name passed\"\n", + "\n", + "# SubjectState\n", + "cols = [\"id\", \"weight\", \"type\"]\n", + "subject_state_r = reader.Csv(\"Environment_SubjectState*\", cols)\n", + "subject_state_b = lambda pattern: {\"SubjectState\": subject_state_r} # binder function: \"empty pattern\"\n", + "\n", + "# SubjectVisits\n", + "cols = [\"id\", \"type\", \"region\"]\n", + "subject_visits_r = reader.Csv(\"Environment_SubjectVisits*\", cols)\n", + "subject_visits_b = lambda pattern: {\"SubjectVisits\": subject_visits_r} # binder function: \"empty pattern\"\n", + "\n", + "# SubjectWeight\n", + "cols = [\"weight\", \"confidence\", \"subject_id\", \"int_id\"]\n", + "subject_weight_r = reader.Csv(\"Environment_SubjectWeight*\", cols)\n", + "subject_weight_b = lambda pattern: {\"SubjectWeight\": subject_weight_r} # binder function: \"empty pattern\"\n", + "\n", + "# Nested binder fn Device object.\n", + "environment = Device(\n", + " \"Environment\", # device name\n", + " env_block_state_b,\n", + " light_events_b,\n", + " core.message_log\n", + ")\n", + "\n", + "# Separate Device object for subject-specific streams.\n", + "subject = Device(\n", + " \"Subject\",\n", + " subject_state_b,\n", + " subject_visits_b,\n", + " subject_weight_b\n", + ")\n", + "\n", + "# ---\n", + "\n", + "# Camera\n", + "# ---\n", + "\n", + "camera_top_b = lambda pattern: {\"CameraTop\": reader.Video(\"CameraTop*\")}\n", + "camera_top_pos_b = lambda pattern: {\"CameraTopPos\": social.Pose(\"CameraTop_test-node1*\")}\n", + "\n", + "cam_names = [\"North\", \"South\", \"East\", \"West\", \"Patch1\", \"Patch2\", \"Patch3\", \"Nest\"]\n", + "cam_names = [\"Camera\" + name for name in cam_names]\n", + "camera_b = [lambda pattern, name=name: {name: reader.Video(name + \"*\")} for name in cam_names]\n", + "\n", + "camera = Device(\n", + " \"Camera\", \n", + " camera_top_b, \n", + " camera_top_pos_b, \n", + " *camera_b\n", + ")\n", + "\n", + "# ---\n", + "\n", + "# Nest\n", + "# ---\n", + "\n", + "weight_raw_b = lambda pattern: {\"WeightRaw\": reader.Harp(\"Nest_200*\", 
[\"weight(g)\", \"stability\"])}\n", + "weight_filtered_b = lambda pattern: {\"WeightFiltered\": reader.Harp(\"Nest_202*\", [\"weight(g)\", \"stability\"])}\n", + "\n", + "nest = Device(\n", + " \"Nest\", \n", + " weight_raw_b, \n", + " weight_filtered_b, \n", + ")\n", + "\n", + "# ---\n", + "\n", + "# Patch\n", + "# ---\n", + "\n", + "patches = [\"1\", \"2\", \"3\"]\n", + "patch_streams = [\"32\", \"35\", \"90\", \"201\", \"202\", \"203\", \"State\"]\n", + "patch_names = [\"Patch\" + name + \"_\" + stream for name in patches for stream in patch_streams]\n", + "patch_b = []\n", + "for stream in patch_names:\n", + " if \"32\" in stream:\n", + " fn = lambda pattern, stream=stream: {\n", + " stream: reader.BitmaskEvent(stream + \"*\", value=34, tag=\"beambreak\")\n", + " }\n", + " elif \"35\" in stream:\n", + " fn = lambda pattern, stream=stream: {\n", + " stream: reader.BitmaskEvent(stream + \"*\", value=1, tag=\"delivery\")\n", + " }\n", + " elif \"90\" in stream:\n", + " fn = lambda pattern, stream=stream: {stream: reader.Encoder(stream + \"*\")}\n", + " elif \"201\" in stream:\n", + " fn = lambda pattern, stream=stream: {stream: reader.Harp(stream + \"*\", [\"manual_delivery\"])}\n", + " elif \"202\" in stream:\n", + " fn = lambda pattern, stream=stream: {stream: reader.Harp(stream + \"*\", [\"missed_pellet\"])}\n", + " elif \"203\" in stream:\n", + " fn = lambda pattern, stream=stream: {stream: reader.Harp(stream + \"*\", [\"retried_delivery\"])}\n", + " elif \"State\" in stream:\n", + " fn = lambda pattern, stream=stream: {\n", + " stream: reader.Csv(stream + \"*\", [\"threshold\", \"offset\", \"rate\"])\n", + " }\n", + " patch_b.append(fn)\n", + "\n", + "patch = Device(\n", + " \"Patch\", \n", + " *patch_b\n", + ")\n", + "# ---\n", + "\n", + "# Rfid\n", + "# ---\n", + "\n", + "rfid_names = [\"EventsGate\", \"EventsNest1\", \"EventsNest2\", \"EventsPatch1\", \"EventsPatch2\", \"EventsPatch3\"]\n", + "rfid_names = [\"Rfid\" + name for name in rfid_names]\n", + "rfid_b = [lambda pattern, name=name: {name: reader.Harp(name + \"*\", [\"rfid\"])} for name in rfid_names]\n", + "\n", + "rfid = Device(\n", + " \"Rfid\", \n", + " *rfid_b\n", + ")\n", + "\n", + "# ---" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [], + "source": [ + "social01 = DotMap(\n", + " [\n", + " metadata,\n", + " environment,\n", + " subject,\n", + " camera,\n", + " nest,\n", + " patch,\n", + " rfid\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": {}, + "outputs": [], + "source": [ + "# cols = [\"1\", \"2\", \"3\", \"4\", \"5\"]\n", + "# r = reader.Harp(\"RfidEventsGate_32*\", cols)\n", + "# start_time = pd.Timestamp(\"2023-12-02 10:30:00\")\n", + "# end_time = pd.Timestamp(\"2023-12-02 12:30:00\")\n", + "# aeon.load(root, r, start=start_time, end=end_time)" + ] + }, + { + "cell_type": "code", + "execution_count": 56, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Metadata:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "Empty DataFrame\n", + "Columns: [workflow, commit, metadata]\n", + "Index: []" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Environment_EnvironmentState_*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " state\n", + "time \n", + "2023-12-05 15:28:04.552000046 Maintenance\n", + "2023-12-05 15:30:23.199999809 Experiment" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Environment_BlockState*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " pellet_ct pellet_ct_thresh \\\n", + "time \n", + "2023-12-05 15:02:21.032000065 33 39 \n", + "2023-12-05 15:02:45.599999905 34 39 \n", + "2023-12-05 15:02:56.763999939 35 39 \n", + "2023-12-05 15:09:38.004000186 36 39 \n", + "2023-12-05 15:09:59.627999783 37 39 \n", + "\n", + " due_time \n", + "time \n", + "2023-12-05 15:02:21.032000065 0001-01-01T00:00:00.0000000 \n", + "2023-12-05 15:02:45.599999905 0001-01-01T00:00:00.0000000 \n", + "2023-12-05 15:02:56.763999939 0001-01-01T00:00:00.0000000 \n", + "2023-12-05 15:09:38.004000186 0001-01-01T00:00:00.0000000 \n", + "2023-12-05 15:09:59.627999783 0001-01-01T00:00:00.0000000 " + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Environment_LightEvents*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " channel value\n", + "time \n", + "2023-12-05 15:00:00 3 78\n", + "2023-12-05 15:00:00 7 78\n", + "2023-12-05 15:00:00 5 0\n", + "2023-12-05 15:00:00 18 0\n", + "2023-12-05 15:00:00 35 0" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Environment_MessageLog_*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " priority type message\n", + "time \n", + "2023-12-05 15:03:02.760000229 Alert TrackingFailure CAA-1120747\n", + "2023-12-05 15:06:32.019999981 Alert TrackingFailure CAA-1120747\n", + "2023-12-05 15:11:06.400000095 Alert TrackingFailure CAA-1120747\n", + "2023-12-05 15:14:37.320000172 Alert TrackingFailure CAA-1120747\n", + "2023-12-05 15:19:46.980000019 Alert TrackingFailure CAA-1120747" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Environment_SubjectState*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "Empty DataFrame\n", + "Columns: [id, weight, type]\n", + "Index: []" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Environment_SubjectVisits*:\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/nfs/nhome/live/jbhagat/ProjectAeon/aeon_mecha/aeon/io/api.py:149: UserWarning: data index for Environment_SubjectVisits* contains duplicate keys!\n", + " warnings.warn(f\"data index for {reader.pattern} contains duplicate keys!\")\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " id type region\n", + "time \n", + "2023-12-05 15:02:09.440000057 CAA-1120747 Enter Patch2\n", + "2023-12-05 15:02:09.519999981 CAA-1120747 Exit Patch2\n", + "2023-12-05 15:02:14.900000095 CAA-1120747 Enter Patch3\n", + "2023-12-05 15:02:15.000000000 CAA-1120747 Exit Patch3\n", + "2023-12-05 15:02:15.380000114 CAA-1120747 Enter Patch3" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Environment_SubjectWeight*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " weight confidence subject_id int_id\n", + "time \n", + "2023-12-05 15:06:48.539999962 29.0 1 CAA-1120747 1\n", + "2023-12-05 15:06:48.639999866 29.0 1 CAA-1120747 1\n", + "2023-12-05 15:06:48.699999809 29.0 1 CAA-1120747 1\n", + "2023-12-05 15:06:48.800000191 29.0 1 CAA-1120747 1\n", + "2023-12-05 15:06:48.900000095 29.0 1 CAA-1120747 1" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Nest_200*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " weight(g) stability\n", + "time \n", + "2023-12-05 15:00:00.000000000 -1.2 1.0\n", + "2023-12-05 15:00:00.159999847 -1.2 1.0\n", + "2023-12-05 15:00:00.260000229 -1.2 1.0\n", + "2023-12-05 15:00:00.340000153 -1.2 1.0\n", + "2023-12-05 15:00:00.420000076 -1.2 1.0" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Nest_202*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " weight(g) stability\n", + "time \n", + "2023-12-05 15:00:00.000000000 -1.2 1.0\n", + "2023-12-05 15:00:00.159999847 -1.2 1.0\n", + "2023-12-05 15:00:00.260000229 -1.2 1.0\n", + "2023-12-05 15:00:00.340000153 -1.2 1.0\n", + "2023-12-05 15:00:00.420000076 -1.2 1.0" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch1_32*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " event\n", + "time \n", + "2023-12-05 15:02:21.213376045 beambreak\n", + "2023-12-05 15:02:45.747712135 beambreak\n", + "2023-12-05 15:02:56.878367901 beambreak\n", + "2023-12-05 15:09:38.138751984 beambreak\n", + "2023-12-05 15:09:59.770847797 beambreak" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch1_35*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " event\n", + "time \n", + "2023-12-05 15:02:21.035488129 delivery\n", + "2023-12-05 15:02:45.601503849 delivery\n", + "2023-12-05 15:02:56.767488003 delivery\n", + "2023-12-05 15:09:38.005504131 delivery\n", + "2023-12-05 15:09:59.629504204 delivery" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch1_90*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " angle intensity\n", + "time \n", + "2023-12-05 15:00:00.000000000 14056 2894\n", + "2023-12-05 15:00:00.001984119 14055 2902\n", + "2023-12-05 15:00:00.004000186 14057 2896\n", + "2023-12-05 15:00:00.005983829 14053 2898\n", + "2023-12-05 15:00:00.007999897 14057 2897" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch1_201*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "Empty DataFrame\n", + "Columns: [manual_delivery]\n", + "Index: []" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch1_202*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " missed_pellet\n", + "time \n", + "2023-12-06 13:06:33.941696167 1\n", + "2023-12-06 21:19:29.878848076 1\n", + "2023-12-07 09:53:59.871263981 1\n", + "2023-12-07 10:08:04.876768112 1\n", + "2023-12-07 10:16:46.124447823 1" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch1_203*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " retried_delivery\n", + "time \n", + "2023-12-05 16:04:15.703487873 1\n", + "2023-12-05 16:17:31.972479820 1\n", + "2023-12-05 17:30:23.181503773 1\n", + "2023-12-05 17:30:24.197504044 1\n", + "2023-12-05 17:30:41.369503975 1" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch1_State*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " threshold offset rate\n", + "time \n", + "2023-12-05 15:02:21.032000065 255.501030 75 0.01\n", + "2023-12-05 15:02:45.599999905 100.117430 75 0.01\n", + "2023-12-05 15:02:56.763999939 355.328025 75 0.01\n", + "2023-12-05 15:09:38.004000186 307.886556 75 0.01\n", + "2023-12-05 15:09:59.627999783 86.638658 75 0.01" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch2_32*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " event\n", + "time \n", + "2023-12-05 21:57:40.172095776 beambreak\n", + "2023-12-05 21:58:17.694560051 beambreak\n", + "2023-12-05 21:58:17.703807831 beambreak\n", + "2023-12-05 21:58:39.021152020 beambreak\n", + "2023-12-05 21:59:02.698304176 beambreak" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch2_35*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " event\n", + "time \n", + "2023-12-05 21:57:40.035488129 delivery\n", + "2023-12-05 21:58:17.564479828 delivery\n", + "2023-12-05 21:58:38.883488178 delivery\n", + "2023-12-05 21:59:00.546495914 delivery\n", + "2023-12-05 21:59:01.559487820 delivery" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch2_90*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " angle intensity\n", + "time \n", + "2023-12-05 15:00:00.000000000 3908 3164\n", + "2023-12-05 15:00:00.001984119 3903 3172\n", + "2023-12-05 15:00:00.004000186 3900 3167\n", + "2023-12-05 15:00:00.005983829 3899 3166\n", + "2023-12-05 15:00:00.007999897 3902 3170" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch2_201*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "Empty DataFrame\n", + "Columns: [manual_delivery]\n", + "Index: []" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch2_202*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "Empty DataFrame\n", + "Columns: [missed_pellet]\n", + "Index: []" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch2_203*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " retried_delivery\n", + "time \n", + "2023-12-05 21:59:01.560480118 1\n", + "2023-12-05 21:59:02.569503784 1\n", + "2023-12-06 03:47:32.918496132 1\n", + "2023-12-06 05:24:27.801504135 1\n", + "2023-12-06 05:31:37.337503910 1" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch2_State*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " threshold offset rate\n", + "time \n", + "2023-12-05 15:10:27.000000000 NaN 75 0.0033\n", + "2023-12-05 15:10:27.001984119 316.702028 75 0.0033\n", + "2023-12-05 15:10:27.007999897 316.702028 75 0.0033\n", + "2023-12-05 16:28:21.000000000 NaN 75 0.0020\n", + "2023-12-05 16:28:21.001984119 219.666377 75 0.0020" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch3_32*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " event\n", + "time \n", + "2023-12-05 15:44:45.612192154 beambreak\n", + "2023-12-06 06:07:05.146624088 beambreak\n", + "2023-12-06 07:04:29.012159824 beambreak\n", + "2023-12-06 08:34:13.545279980 beambreak\n", + "2023-12-06 08:34:35.653376102 beambreak" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch3_35*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " event\n", + "time \n", + "2023-12-05 15:44:45.477503777 delivery\n", + "2023-12-06 06:07:04.042496204 delivery\n", + "2023-12-06 06:07:05.049503803 delivery\n", + "2023-12-06 07:04:28.900479794 delivery\n", + "2023-12-06 08:34:13.445504189 delivery" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch3_90*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " angle intensity\n", + "time \n", + "2023-12-05 15:00:00.000000000 10639 4119\n", + "2023-12-05 15:00:00.001984119 10639 4120\n", + "2023-12-05 15:00:00.004000186 10641 4121\n", + "2023-12-05 15:00:00.005983829 10640 4118\n", + "2023-12-05 15:00:00.007999897 10638 4118" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch3_201*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "Empty DataFrame\n", + "Columns: [manual_delivery]\n", + "Index: []" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch3_202*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "Empty DataFrame\n", + "Columns: [missed_pellet]\n", + "Index: []" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch3_203*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " retried_delivery\n", + "time \n", + "2023-12-06 06:07:05.050496101 1\n", + "2023-12-06 08:51:15.842495918 1" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Patch3_State*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " threshold offset rate\n", + "time \n", + "2023-12-05 15:10:27.001984119 NaN 75 0.0020\n", + "2023-12-05 15:10:27.004000186 545.555314 75 0.0020\n", + "2023-12-05 15:10:27.009984016 545.555314 75 0.0020\n", + "2023-12-05 15:44:45.475999832 1024.856116 75 0.0020\n", + "2023-12-05 16:28:21.000000000 NaN 75 0.0033" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "RfidEventsGate*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " rfid\n", + "time \n", + "2023-12-05 15:03:03.993120193 977200010163729\n", + "2023-12-05 15:03:30.682623863 977200010164323\n", + "2023-12-05 15:03:31.019872189 977200010164323\n", + "2023-12-05 15:03:31.395616055 977200010164323\n", + "2023-12-05 15:06:38.510911942 977200010164323" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "RfidEventsNest1*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " rfid\n", + "time \n", + "2023-12-05 15:00:00.000000000 3784633200\n", + "2023-12-05 15:00:00.001503944 3784633200\n", + "2023-12-05 15:00:01.000000000 3784633201\n", + "2023-12-05 15:00:01.001503944 3784633201\n", + "2023-12-05 15:00:02.000000000 3784633202" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "RfidEventsNest2*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " rfid\n", + "time \n", + "2023-12-05 15:03:33.940767765 977200010163729\n", + "2023-12-05 15:08:28.597375870 977200010164323\n", + "2023-12-05 15:08:34.070496082 977200010164323\n", + "2023-12-05 15:08:50.152063847 977200010164323\n", + "2023-12-05 15:08:50.489439964 977200010164323" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "RfidEventsPatch1*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " rfid\n", + "time \n", + "2023-12-05 15:00:00.000000000 3784633200\n", + "2023-12-05 15:00:00.001503944 3784633200\n", + "2023-12-05 15:00:01.000000000 3784633201\n", + "2023-12-05 15:00:01.001503944 3784633201\n", + "2023-12-05 15:00:02.000000000 3784633202" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "RfidEventsPatch2*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " rfid\n", + "time \n", + "2023-12-05 15:02:33.719103813 977200010163729\n", + "2023-12-05 15:02:34.209599972 977200010163729\n", + "2023-12-05 15:02:34.608064175 977200010163729\n", + "2023-12-05 15:02:35.006527901 977200010163729\n", + "2023-12-05 15:02:35.251743793 977200010163729" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "RfidEventsPatch3*:\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + " rfid\n", + "time \n", + "2023-12-05 15:02:19.841599941 977200010164323\n", + "2023-12-05 15:02:20.271039963 977200010164323\n", + "2023-12-05 15:02:20.731232166 977200010164323\n", + "2023-12-05 15:02:21.130015849 977200010164323\n", + "2023-12-05 15:02:21.896927834 977200010164323" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\"\"\"Test all readers in schema.\"\"\"\n", + "\n", + "def find_obj(dotmap, obj):\n", + " \"\"\"Returns a list of objects of type `obj` found in a DotMap.\"\"\"\n", + " objs = []\n", + " for value in dotmap.values():\n", + " if isinstance(value, obj):\n", + " objs.append(value)\n", + " elif isinstance(value, DotMap):\n", + " objs.extend(find_obj(value, obj))\n", + " return objs\n", + "\n", + "readers = find_obj(social01, reader.Reader)\n", + "start_time = pd.Timestamp(\"2023-12-05 15:00:00\")\n", + "end_time = pd.Timestamp(\"2023-12-07 11:00:00\")\n", + "for r in readers:\n", + " data = aeon.load(root, r, start=start_time, end=end_time)\n", + " #assert not data.empty, f\"No data found with {r}.\"\n", + " print(f\"\\n{r.pattern}:\")\n", + " display(data.head())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "aeon", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tests/io/test_api.py b/tests/io/test_api.py index 48986830..8f9d8c0b 100644 --- a/tests/io/test_api.py +++ b/tests/io/test_api.py @@ -5,7 +5,7 @@ from pytest import mark import aeon -from aeon.schema.dataset import exp02 +from aeon.schema.schemas import exp02 nonmonotonic_path = Path(__file__).parent.parent / "data" / "nonmonotonic" monotonic_path = Path(__file__).parent.parent / "data" / "monotonic"