From a0f32bb32c1f04c00dfa3b33de80322d40b5152d Mon Sep 17 00:00:00 2001
From: Liezl Maree <38435167+roomrys@users.noreply.github.com>
Date: Thu, 23 Mar 2023 15:05:08 -0700
Subject: [PATCH] SLEAP v1.3.0 (#1231)
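
Summary of changes in this patch:

- Add multi-animal bottom-up-id and top-down-id pipelines to the
  training/inference dialogs and the training editor form.
- Add MoveNet Lightning and MoveNet Thunder inference pipelines
  (pretrained single-instance human pose models).
- Add Copy/Paste Instance, Copy/Paste Instance Track, and a Delete
  Multiple Tracks (Unused/All) menu to the GUI.
- Support single-image "videos" in the import dialog and video backend.
- Build LabeledFrame objects on a separate thread during inference.
- Reorganize DLC test data, update build/dependency pins, and bump the
  version to 1.3.0.

The documented conda install command for this release (from
docs/installation.md below):

    conda create -y -n sleap -c sleap -c nvidia -c conda-forge sleap=1.3.0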
---
.conda/bld.bat | 2 +
.conda/build.sh | 2 +
.conda/meta.yaml | 2 +-
.github/ISSUE_TEMPLATE/bug_report.md | 2 +-
.github/workflows/website.yml | 2 +-
README.rst | 2 +-
docs/conf.py | 4 +-
docs/installation.md | 4 +-
environment.yml | 4 +-
environment_build.yml | 1 +
environment_no_cuda.yml | 4 +-
sleap/config/pipeline_form.yaml | 113 +++-
sleap/config/training_editor_form.yaml | 111 +++-
sleap/gui/app.py | 55 +-
sleap/gui/commands.py | 158 ++++-
sleap/gui/dataviews.py | 2 +
sleap/gui/dialogs/importvideos.py | 40 +-
sleap/gui/learning/dialog.py | 79 ++-
sleap/gui/learning/receptivefield.py | 17 +-
sleap/gui/learning/runners.py | 30 +-
sleap/gui/learning/scopedkeydict.py | 4 +
sleap/gui/no-preview.png | Bin 5223 -> 0 bytes
sleap/io/dataset.py | 16 +
sleap/io/format/deeplabcut.py | 5 +-
sleap/io/video.py | 2 +
sleap/nn/inference.py | 559 +++++++++++-------
sleap/nn/training.py | 11 +-
sleap/skeleton.py | 78 ++-
sleap/version.py | 2 +-
.../labeled-data/video/CollectedData_LM.csv | 8 +
.../{ => labeled-data/video}/dlc_testdata.csv | 0
.../video}/dlc_testdata_v2.csv | 0
.../dlc/{ => labeled-data/video}/img000.png | Bin
.../dlc/{ => labeled-data/video}/img001.png | Bin
.../dlc/{ => labeled-data/video}/img002.png | Bin
.../dlc/{ => labeled-data/video}/img003.png | Bin
.../video}/madlc_testdata.csv | 0
.../video}/madlc_testdata_v2.csv | 0
tests/data/dlc/madlc_230_config.yaml | 67 +++
tests/data/tracks/clip.2node.slp | Bin 339888 -> 352040 bytes
tests/gui/learning/test_dialog.py | 29 +
tests/gui/test_commands.py | 216 +++++++
tests/gui/test_import.py | 13 +-
tests/gui/test_inference_gui.py | 24 +
tests/io/test_dataset.py | 11 +
tests/io/test_formats.py | 10 +-
tests/io/test_video.py | 4 +
tests/nn/test_training.py | 2 +-
tests/test_skeleton.py | 34 +-
49 files changed, 1386 insertions(+), 343 deletions(-)
delete mode 100644 sleap/gui/no-preview.png
create mode 100644 tests/data/dlc/labeled-data/video/CollectedData_LM.csv
rename tests/data/dlc/{ => labeled-data/video}/dlc_testdata.csv (100%)
rename tests/data/dlc/{ => labeled-data/video}/dlc_testdata_v2.csv (100%)
rename tests/data/dlc/{ => labeled-data/video}/img000.png (100%)
rename tests/data/dlc/{ => labeled-data/video}/img001.png (100%)
rename tests/data/dlc/{ => labeled-data/video}/img002.png (100%)
rename tests/data/dlc/{ => labeled-data/video}/img003.png (100%)
rename tests/data/dlc/{ => labeled-data/video}/madlc_testdata.csv (100%)
rename tests/data/dlc/{ => labeled-data/video}/madlc_testdata_v2.csv (100%)
create mode 100644 tests/data/dlc/madlc_230_config.yaml
diff --git a/.conda/bld.bat b/.conda/bld.bat
index 1fb7dc081..4cf76b3c5 100644
--- a/.conda/bld.bat
+++ b/.conda/bld.bat
@@ -21,6 +21,7 @@ pip install attrs==21.2.0
pip install cattrs==1.1.1
pip install jsonpickle==1.2
pip install networkx
+pip install nixio>=1.5.3
@REM pip install tensorflow>=2.6.3,<=2.7.1
@REM pip install h5py>=3.1.0,<=3.6.0
pip install python-rapidjson
@@ -36,6 +37,7 @@ pip install imgaug==0.4.0
pip install scikit-image
pip install scikit-learn==1.0.*
pip install scikit-video
+pip install tensorflow-hub
pip install imgstore==0.2.9
pip install qimage2ndarray==1.9.0
pip install jsmin
diff --git a/.conda/build.sh b/.conda/build.sh
index 3e1e1a705..b2569b8f9 100644
--- a/.conda/build.sh
+++ b/.conda/build.sh
@@ -18,6 +18,7 @@ pip install attrs==21.2.0
pip install cattrs==1.1.1
pip install jsonpickle==1.2
pip install networkx
+pip install nixio>=1.5.3
# pip install tensorflow>=2.6.3,<=2.7.1
# pip install h5py>=3.1.0,<=3.6.0
pip install python-rapidjson
@@ -33,6 +34,7 @@ pip install imgaug==0.4.0
pip install scikit-image
pip install scikit-learn==1.0.*
pip install scikit-video
+pip install tensorflow-hub
pip install imgstore==0.2.9
pip install qimage2ndarray==1.9.0
pip install jsmin
diff --git a/.conda/meta.yaml b/.conda/meta.yaml
index fbe01340a..9356c113c 100644
--- a/.conda/meta.yaml
+++ b/.conda/meta.yaml
@@ -16,7 +16,7 @@ about:
summary: {{ data.get('description') }}
build:
- number: 1
+ number: 4
source:
path: ../
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 399c86654..8e2261c6d 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -28,7 +28,7 @@ Please include information about how you installed.
- OS:
- Version(s):
-
+
- SLEAP installation method (listed [here](https://sleap.ai/installation.html#)):
- [ ] [Conda from package](https://sleap.ai/installation.html#conda-package)
- [ ] [Conda from source](https://sleap.ai/installation.html#conda-from-source)
diff --git a/.github/workflows/website.yml b/.github/workflows/website.yml
index fd8244101..ebba8e53d 100644
--- a/.github/workflows/website.yml
+++ b/.github/workflows/website.yml
@@ -8,7 +8,7 @@ on:
# 'main' triggers updates to 'sleap.ai', all others to 'sleap.ai/develop'
- main
- develop
- - liezl/docs-apple-silicon
+ - liezl/1.3.0-build-cleanup
paths:
- "docs/**"
- "README.rst"
diff --git a/README.rst b/README.rst
index 5380032cb..4821b4a4d 100644
--- a/README.rst
+++ b/README.rst
@@ -11,7 +11,7 @@
:alt: Coverage
.. |Documentation| image::
- https://img.shields.io/github/workflow/status/talmolab/sleap/Build%20website?label=Documentation
+ https://img.shields.io/badge/Documentation-sleap.ai-lightgrey
:target: https://sleap.ai
:alt: Documentation
diff --git a/docs/conf.py b/docs/conf.py
index cc259933f..3dc40ff71 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -28,7 +28,7 @@
copyright = f"2019–{date.today().year}, Talmo Lab"
# The short X.Y version
-version = "1.3.0a0"
+version = "1.3.0"
# Get the sleap version
# with open("../sleap/version.py") as f:
@@ -36,7 +36,7 @@
# version = re.search("\d.+(?=['\"])", version_file).group(0)
# Release should be the full branch name
-release = "v1.3.0a0"
+release = "v1.3.0"
html_title = f"SLEAP ({release})"
html_short_title = "SLEAP"
diff --git a/docs/installation.md b/docs/installation.md
index 296c2c1e5..06eed1a1e 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -64,7 +64,7 @@ On Windows, our personal preference is to use alternative terminal apps like [Cm
### `conda` package
```bash
-conda create -y -n sleap -c sleap -c nvidia -c conda-forge sleap=1.2.9
+conda create -y -n sleap -c sleap -c nvidia -c conda-forge sleap=1.3.0
```
**This is the recommended installation method**. Works on **Windows** and **Linux**.
@@ -113,7 +113,7 @@ conda create -y -n sleap -c sleap -c nvidia -c conda-forge sleap=1.2.9
### `pip` package
```bash
-pip install sleap==1.2.9
+pip install sleap==1.3.0
```
This works on **any OS except Apple silicon** and on **Google Colab**.
diff --git a/environment.yml b/environment.yml
index 433092fd3..0ab370245 100644
--- a/environment.yml
+++ b/environment.yml
@@ -7,7 +7,7 @@ dependencies:
- conda-forge::pyside2>=5.13.2,<=5.14.1
- conda-forge::h5py=3.1.0
- conda-forge::scipy>=1.4.1,<=1.7.3
- - pillow=8.4.0
+ - conda-forge::pillow=8.4.0
- shapely=1.7.1
- conda-forge::pandas
- ffmpeg
@@ -17,7 +17,7 @@ dependencies:
# - sleap::tensorflow=2.7.0
# - sleap::pyside2=5.14.1
- qtpy>=2.0.1
- - conda-forge::pip!=22.0.4
+ - conda-forge::pip
- pip:
- "--editable=."
- "--requirement=./dev_requirements.txt"
diff --git a/environment_build.yml b/environment_build.yml
index 3f00fecba..c2d12d846 100644
--- a/environment_build.yml
+++ b/environment_build.yml
@@ -14,6 +14,7 @@ dependencies:
# - cudatoolkit=11.3.1
# - cudnn=8.2.1
# - nvidia::cuda-nvcc=11.3
+ - protobuf=3.20.* # https://developers.google.com/protocol-buffers/docs/news/2022-05-06#python-updates
- qtpy>=2.0.1
- conda-build=3.21.7
- anaconda-client
diff --git a/environment_no_cuda.yml b/environment_no_cuda.yml
index 136121475..dcded8de9 100644
--- a/environment_no_cuda.yml
+++ b/environment_no_cuda.yml
@@ -7,7 +7,7 @@ dependencies:
- conda-forge::pyside2>=5.13.2,<=5.14.1
- conda-forge::h5py=3.1.0
- conda-forge::scipy>=1.4.1,<=1.7.3
- - pillow=8.4.0
+ - conda-forge::pillow=8.4.0
- shapely=1.7.1
- conda-forge::pandas
- ffmpeg
@@ -17,7 +17,7 @@ dependencies:
# - sleap::tensorflow=2.7.0
# - sleap::pyside2=5.14.1
- qtpy>=2.0.1
- - conda-forge::pip!=22.0.4
+ - conda-forge::pip # !=22.0.4
- pip:
- "--editable=."
- "--requirement=./dev_requirements.txt"
diff --git a/sleap/config/pipeline_form.yaml b/sleap/config/pipeline_form.yaml
index 83aa1be1b..fe5eb4263 100644
--- a/sleap/config/pipeline_form.yaml
+++ b/sleap/config/pipeline_form.yaml
@@ -4,7 +4,7 @@ training:
label: Training/Inference Pipeline Type
type: stacked
default: "multi-animal bottom-up "
- options: "multi-animal bottom-up,multi-animal top-down,single animal"
+ options: "multi-animal bottom-up,multi-animal top-down,multi-animal bottom-up-id,multi-animal top-down-id,single animal"
multi-animal bottom-up:
- type: text
@@ -79,6 +79,80 @@ training:
name: model.heads.centered_instance.sigma
type: double
+ multi-animal bottom-up-id:
+ - type: text
+ text: 'Multi-Animal Bottom-Up-Id Pipeline:
+ This pipeline uses a single model with two output heads: a "confidence
+ map" head to predict the nodes for an entire image and a "part
+ affinity field" head to group the nodes into distinct animal
+ instances. It also handles classification and tracking.'
+
+ - name: model.heads.multi_class_bottomup.confmaps.sigma
+ label: Sigma for Nodes
+ type: double
+ default: 5.0
+ help: Spread of the Gaussian distribution of the confidence maps as a scalar float.
+ Smaller values are more precise but may be difficult to learn as they have a
+ lower density within the image space. Larger values are easier to learn but
+ are less precise with respect to the peak coordinate. This spread is in units
+ of pixels of the model input image, i.e., the image resolution after any input
+ scaling is applied.
+
+ - name: model.heads.multi_class_bottomup.class_maps.sigma
+ label: Sigma for Edges
+ type: double
+ default: 15.0
+ help: Spread of the Gaussian distribution that weighs the part affinity fields
+ as a function of their distance from the edge they represent. Smaller values
+ are more precise but may be difficult to learn as they have a lower density
+ within the image space. Larger values are easier to learn but are less precise
+ with respect to the edge distance, so can be less useful in disambiguating between
+ edges that are nearby and parallel in direction. This spread is in units of
+ pixels of the model input image, i.e., the image resolution after any input
+ scaling is applied.
+
+ multi-animal top-down-id:
+ - type: text
+ text: 'Multi-Animal Top-Down-Id Pipeline:
+ This pipeline uses two models: a "centroid" model to locate and crop
+ around each animal in the frame, and a "centered-instance confidence
+ map" model for predicted node locations for each individual animal
+ predicted by the centroid model. It also handles classification and
+ tracking.'
+
+ - default: 5.0
+ help: Spread of the Gaussian distribution of the confidence maps as a scalar float.
+ Smaller values are more precise but may be difficult to learn as they have a
+ lower density within the image space. Larger values are easier to learn but
+ are less precise with respect to the peak coordinate. This spread is in units
+ of pixels of the model input image, i.e., the image resolution after any input
+ scaling is applied.
+ label: Sigma for Centroids
+ name: model.heads.centroid.sigma
+ type: double
+
+ - default: null
+ help: Text name of a body part (node) to use as the anchor point. If None, the
+ midpoint of the bounding box of all visible instance points will be used as
+ the anchor. The bounding box midpoint will also be used if the anchor part is
+ specified but not visible in the instance. Setting a reliable anchor point can
+ significantly improve topdown model accuracy as they benefit from a consistent
+ geometry of the body parts relative to the center of the image.
+ label: Anchor Part
+ name: model.heads.multi_class_topdown.confmaps.anchor_part
+ type: optional_list
+
+ - default: 5.0
+ help: Spread of the Gaussian distribution of the confidence maps as a scalar float.
+ Smaller values are more precise but may be difficult to learn as they have a
+ lower density within the image space. Larger values are easier to learn but
+ are less precise with respect to the peak coordinate. This spread is in units
+ of pixels of the model input image, i.e., the image resolution after any input
+ scaling is applied.
+ label: Sigma for Nodes
+ name: model.heads.multi_class_topdown.confmaps.sigma
+ type: double
+
single animal:
- type: text
text: 'Single Animal Pipeline:
@@ -197,7 +271,7 @@ inference:
label: Training/Inference Pipeline Type
type: stacked
default: "multi-animal bottom-up "
- options: "multi-animal bottom-up,multi-animal top-down,single animal,none"
+ options: "multi-animal bottom-up,multi-animal top-down,multi-animal bottom-up-id,multi-animal top-down-id,single animal,movenet-lightning,movenet-thunder,none"
multi-animal bottom-up:
- type: text
@@ -215,6 +289,23 @@ inference:
"centered-instance confidence map" model for predicted node locations
for each individual animal predicted by the centroid model.'
+ multi-animal bottom-up-id:
+ - type: text
+ text: 'Multi-Animal Bottom-Up-Id Pipeline:
+ This pipeline uses a single model with two output heads: a "confidence
+ map" head to predict the nodes for an entire image and a "part
+ affinity field" head to group the nodes into distinct animal
+ instances. It also handles classification and tracking.'
+
+ multi-animal top-down-id:
+ - type: text
+ text: 'Multi-Animal Top-Down-Id Pipeline:
+ This pipeline uses two models: a "centroid" model to locate and crop
+ around each animal in the frame, and a "centered-instance confidence
+ map" model for predicted node locations for each individual animal
+ predicted by the centroid model. It also handles classification and
+ tracking.'
+
single animal:
- type: text
text: 'Single Animal Pipeline:
@@ -224,6 +315,24 @@ inference:
For predicting on videos with more than one animal per frame, use a
multi-animal pipeline (even if your training data has one instance per frame).'
+ movenet-lightning:
+ - type: text
+ text: 'MoveNet Lightning Pipeline:
+ This pipeline uses a pretrained MoveNet Lightning model to predict the
+ nodes for an entire image and then groups all of these nodes into a single
+ instance. Lightning is intended for latency-critical applications. Note
+ that this model is intended for human pose estimation. There is no support
+ for videos containing more than one instance.'
+
+ movenet-thunder:
+ - type: text
+ text: 'MoveNet Thunder Pipeline:
+ This pipeline uses a pretrained MoveNet Thunder model to predict the nodes
+ for an entire image and then groups all of these nodes into a single
+ instance. Thunder is intended for applications that require high accuracy.
+ Note that this model is intended for human pose estimation. There is no
+ support for videos containing more than one instance.'
+
none:
- name: tracking.tracker
diff --git a/sleap/config/training_editor_form.yaml b/sleap/config/training_editor_form.yaml
index d362e8cee..bb4bc6e6c 100644
--- a/sleap/config/training_editor_form.yaml
+++ b/sleap/config/training_editor_form.yaml
@@ -408,8 +408,117 @@ model:
label: Loss Weight
name: model.heads.multi_instance.pafs.loss_weight
type: double
+ multi_class_topdown:
+ - default: 1.5
+ help: Spread of the Gaussian distribution of the confidence maps as a scalar float.
+ Smaller values are more precise but may be difficult to learn as they have a
+ lower density within the image space. Larger values are easier to learn but
+ are less precise with respect to the peak coordinate. This spread is in units
+ of pixels of the model input image, i.e., the image resolution after any input
+ scaling is applied.
+ label: Sigma
+ name: model.heads.multi_class_topdown.confmaps.sigma
+ type: double
+ - default: 2
+ help: The stride of the output confidence maps relative to the input image. This
+ is the reciprocal of the resolution, e.g., an output stride of 2 results in
+ confidence maps that are 0.5x the size of the input. Increasing this value can
+ considerably speed up model performance and decrease memory requirements, at
+ the cost of decreased spatial resolution.
+ label: Output Stride
+ name: model.heads.multi_class_topdown.confmaps.output_stride
+ type: list
+ options: 1,2,4,8,16,32,64
+ - default: 1.0
+ help: Scalar float used to weigh the loss term for this head during training.
+ Increase this to encourage the optimization to focus on improving this specific
+ output in multi-head models.
+ label: Loss Weight
+ name: model.heads.multi_class_topdown.confmaps.loss_weight
+ type: double
+ - default: 3
+ help: Number of fully-connected layers before the classification output
+ layer. These can help in transforming general image features into
+ classification-specific features.
+ label: Fully Connected Layers
+ name: model.heads.multi_class_topdown.class_vectors.num_fc_layers
+ type: integer
+ - default: 64
+ help: Number of units (dimensions) in the fully-connected layers before
+ classification. Increasing this can improve the representational capacity in
+ the pre-classification layers.
+ label: Fully Connected Units
+ name: model.heads.multi_class_topdown.class_vectors.num_fc_units
+ type: integer
+ - default: true
+ help: Whether to use global max pooling prior to flattening.
+ label: Global Pool
+ name: model.heads.multi_class_topdown.class_vectors.global_pool
+ type: bool
+ - default: 1.0
+ help: Scalar float used to weigh the loss term for this head during
+ training. Increase this to encourage the optimization to focus on improving
+ this specific output in multi-head models.
+ label: Loss Weight
+ name: model.heads.multi_class_topdown.class_vectors.loss_weight
+ type: double
+ multi_class_bottomup:
+ - default: 5.0
+ help: Spread of the Gaussian distribution of the confidence maps as a scalar float.
+ Smaller values are more precise but may be difficult to learn as they have a
+ lower density within the image space. Larger values are easier to learn but
+ are less precise with respect to the peak coordinate. This spread is in units
+ of pixels of the model input image, i.e., the image resolution after any input
+ scaling is applied.
+ label: Sigma
+ name: model.heads.multi_class_bottomup.confmaps.sigma
+ type: double
+ - default: 2
+ help: The stride of the output confidence maps relative to the input image. This
+ is the reciprocal of the resolution, e.g., an output stride of 2 results in
+ confidence maps that are 0.5x the size of the input. Increasing this value can
+ considerably speed up model performance and decrease memory requirements, at
+ the cost of decreased spatial resolution.
+ label: Output Stride
+ name: model.heads.multi_class_bottomup.confmaps.output_stride
+ type: list
+ options: 1,2,4,8,16,32,64
+ - default: 1.0
+ help: Scalar float used to weigh the loss term for this head during training.
+ Increase this to encourage the optimization to focus on improving this specific
+ output in multi-head models.
+ label: Loss Weight
+ name: model.heads.multi_class_bottomup.confmaps.loss_weight
+ type: double
+ - default: 5.0
+ help: Spread of the Gaussian distribution of the confidence maps as a scalar float.
+ Smaller values are more precise but may be difficult to learn as they have a
+ lower density within the image space. Larger values are easier to learn but
+ are less precise with respect to the peak coordinate. This spread is in units
+ of pixels of the model input image, i.e., the image resolution after any input
+ scaling is applied.
+ label: Sigma
+ name: model.heads.multi_class_bottomup.class_maps.sigma
+ type: double
+ - default: 16
+ help: The stride of the output class maps relative to the input image. This
+ is the reciprocal of the resolution, e.g., an output stride of 2 results in
+ confidence maps that are 0.5x the size of the input. Increasing this value can
+ considerably speed up model performance and decrease memory requirements, at
+ the cost of decreased spatial resolution.
+ label: Output Stride
+ name: model.heads.multi_class_bottomup.class_maps.output_stride
+ type: list
+ options: 1,2,4,8,16,32,64
+ - default: 2.0
+ help: Scalar float used to weigh the loss term for this head during training.
+ Increase this to encourage the optimization to focus on improving this specific
+ output in multi-head models.
+ label: Loss Weight
+ name: model.heads.multi_class_bottomup.class_maps.loss_weight
+ type: double
name: _heads_name
- options: single_instance,centroid,centered_instance,multi_instance
+ options: single_instance,centroid,centered_instance,multi_instance,multi_class_topdown,multi_class_bottomup
single_instance:
- default: 5.0
help: Spread of the Gaussian distribution of the confidence maps as a scalar float.
diff --git a/sleap/gui/app.py b/sleap/gui/app.py
index f568e89bd..a14fc007a 100644
--- a/sleap/gui/app.py
+++ b/sleap/gui/app.py
@@ -169,6 +169,8 @@ def __init__(
self.state["skeleton_description"] = "No skeleton loaded yet"
if no_usage_data:
self.state["share usage data"] = False
+ self.state["clipboard_track"] = None
+ self.state["clipboard_instance"] = None
self.state.connect("marker size", self.plotFrame)
self.state.connect("node label size", self.plotFrame)
self.state.connect("show non-visible nodes", self.plotFrame)
@@ -630,7 +632,7 @@ def prev_vid():
add_submenu_choices(
menu=viewMenu,
title="Node Marker Size",
- options=(1, 4, 6, 8, 12),
+ options=(1, 2, 4, 6, 8, 12),
key="marker size",
)
@@ -722,6 +724,19 @@ def new_instance_menu_action():
labelMenu.addSeparator()
+ labelMenu.addAction(
+ "Copy Instance",
+ self.commands.copyInstance,
+ Qt.CTRL + Qt.Key_C,
+ )
+ labelMenu.addAction(
+ "Paste Instance",
+ self.commands.pasteInstance,
+ Qt.CTRL + Qt.Key_V,
+ )
+
+ labelMenu.addSeparator()
+
add_menu_item(
labelMenu,
"delete frame predictions",
@@ -786,13 +801,37 @@ def new_instance_menu_action():
)
self.delete_tracks_menu = tracksMenu.addMenu("Delete Track")
self.delete_tracks_menu.setEnabled(False)
+
+ self.delete_multiple_tracks_menu = tracksMenu.addMenu("Delete Multiple Tracks")
+ self.delete_multiple_tracks_menu.setToolTip(
+ "Delete either only 'Unused' tracks or 'All' tracks, and update instances. Instances are not removed."
+ )
+
add_menu_item(
- tracksMenu,
+ self.delete_multiple_tracks_menu,
+ "delete unused tracks",
+ "Unused",
+ lambda: self.commands.deleteMultipleTracks(delete_all=False),
+ )
+
+ add_menu_item(
+ self.delete_multiple_tracks_menu,
"delete all tracks",
- "Delete All Tracks",
- self.commands.deleteAllTracks,
- ).setToolTip(
- "Delete all tracks and update instances. Instances are not removed."
+ "All",
+ lambda: self.commands.deleteMultipleTracks(delete_all=True),
+ )
+
+ tracksMenu.addSeparator()
+
+ tracksMenu.addAction(
+ "Copy Instance Track",
+ self.commands.copyInstanceTrack,
+ Qt.CTRL + Qt.SHIFT + Qt.Key_C,
+ )
+ tracksMenu.addAction(
+ "Paste Instance Track",
+ self.commands.pasteInstanceTrack,
+ Qt.CTRL + Qt.SHIFT + Qt.Key_V,
)
tracksMenu.addSeparator()
@@ -1917,7 +1956,3 @@ def main(args: Optional[list] = None):
app.exec_()
pass
-
-
-if __name__ == "__main__":
- main()
diff --git a/sleap/gui/commands.py b/sleap/gui/commands.py
index d53585159..59c377d39 100644
--- a/sleap/gui/commands.py
+++ b/sleap/gui/commands.py
@@ -537,8 +537,17 @@ def setInstancePointVisibility(self, instance: Instance, node: Node, visible: bo
)
def addUserInstancesFromPredictions(self):
+ """Create user instance from a predicted instance."""
self.execute(AddUserInstancesFromPredictions)
+ def copyInstance(self):
+ """Copy the selected instance to the instance clipboard."""
+ self.execute(CopyInstance)
+
+ def pasteInstance(self):
+ """Paste the instance from the clipboard as a new copy."""
+ self.execute(PasteInstance)
+
def deleteSelectedInstance(self):
"""Deletes currently selected instance."""
self.execute(DeleteSelectedInstance)
@@ -563,9 +572,17 @@ def deleteTrack(self, track: "Track"):
"""Delete a track and remove from all instances."""
self.execute(DeleteTrack, track=track)
- def deleteAllTracks(self):
+ def deleteMultipleTracks(self, delete_all: bool = False):
"""Delete all tracks."""
- self.execute(DeleteAllTracks)
+ self.execute(DeleteMultipleTracks, delete_all=delete_all)
+
+ def copyInstanceTrack(self):
+ """Copies the selected instance's track to the track clipboard."""
+ self.execute(CopyInstanceTrack)
+
+ def pasteInstanceTrack(self):
+ """Pastes the track in the clipboard to the selected instance."""
+ self.execute(PasteInstanceTrack)
def setTrackName(self, track: "Track", name: str):
"""Sets name for track."""
@@ -1638,22 +1655,36 @@ class ToggleGrayscale(EditCommand):
@staticmethod
def do_action(context: CommandContext, params: dict):
"""Reset the video backend."""
- video: Video = context.state["video"]
- try:
- grayscale = video.backend.grayscale
- video.backend.reset(grayscale=(not grayscale))
- except:
- print(
- f"This video type {type(video.backend)} does not support grayscale yet."
- )
- @staticmethod
- def ask(context: CommandContext, params: dict) -> bool:
- """Check that video can be reset."""
+ def try_to_read_grayscale(video: Video):
+ try:
+ return video.backend.grayscale
+ except:
+ return None
+
# Check that current video is set
- if context.state["video"] is None:
- return False
- return True
+ if len(context.labels.videos) == 0:
+ raise ValueError("No videos detected in `Labels`.")
+
+ # Intuitively find the "first" video that supports grayscale
+ grayscale = try_to_read_grayscale(context.state["video"])
+ if grayscale is None:
+ for video in context.labels.videos:
+ grayscale = try_to_read_grayscale(video)
+ if grayscale is not None:
+ break
+
+ if grayscale is None:
+ raise ValueError("No videos support grayscale.")
+
+ for idx, video in enumerate(context.labels.videos):
+ try:
+ video.backend.reset(grayscale=(not grayscale))
+ except:
+ print(
+ f"This video type {type(video.backend)} for video at index {idx} "
+ f"does not support grayscale yet."
+ )
class AddVideo(EditCommand):
@@ -1992,8 +2023,10 @@ def try_and_skip_if_error(func, *args, **kwargs):
# Load new skeleton
filename = params["filename"]
new_skeleton = OpenSkeleton.load_skeleton(filename)
- if new_skeleton.description == None:
- new_skeleton.description = f"Custom Skeleton loaded from {filename}"
+
+ # Description and preview image only used for template skeletons
+ new_skeleton.description = None
+ new_skeleton.preview_image = None
context.state["skeleton_description"] = new_skeleton.description
context.state["skeleton_preview_image"] = new_skeleton.preview_image
@@ -2561,12 +2594,55 @@ def do_action(context: CommandContext, params: dict):
context.labels.remove_track(track)
-class DeleteAllTracks(EditCommand):
+class DeleteMultipleTracks(EditCommand):
+ topics = [UpdateTopic.tracks]
+
+ @staticmethod
+ def do_action(context: CommandContext, params: dict):
+ """Delete either all tracks or just unused tracks.
+
+ Args:
+ context: The command context.
+ params: The command parameters.
+ delete_all: If True, delete all tracks. If False, delete only
+ unused tracks.
+ """
+ delete_all: bool = params["delete_all"]
+ if delete_all:
+ context.labels.remove_all_tracks()
+ else:
+ context.labels.remove_unused_tracks()
+
+
+class CopyInstanceTrack(EditCommand):
+ @staticmethod
+ def do_action(context: CommandContext, params: dict):
+ selected_instance: Instance = context.state["instance"]
+ if selected_instance is None:
+ return
+ context.state["clipboard_track"] = selected_instance.track
+
+
+class PasteInstanceTrack(EditCommand):
topics = [UpdateTopic.tracks]
@staticmethod
def do_action(context: CommandContext, params: dict):
- context.labels.remove_all_tracks()
+ selected_instance: Instance = context.state["instance"]
+ track_to_paste = context.state["clipboard_track"]
+ if selected_instance is None or track_to_paste is None:
+ return
+
+ # Ensure mutual exclusivity of tracks within a frame.
+ for inst in selected_instance.frame.instances_to_show:
+ if inst == selected_instance:
+ continue
+ if inst.track is not None and inst.track == track_to_paste:
+ # Unset track for other instances that have the same track.
+ inst.track = None
+
+ # Set the track on the selected instance.
+ selected_instance.track = context.state["clipboard_track"]
class SetTrackName(EditCommand):
@@ -3054,6 +3130,48 @@ def do_action(cls, context: CommandContext, params: dict):
context.labels.add_instance(context.state["labeled_frame"], new_instance)
+class CopyInstance(EditCommand):
+ @classmethod
+ def do_action(cls, context: CommandContext, params: dict):
+ current_instance: Instance = context.state["instance"]
+ if current_instance is None:
+ return
+ context.state["clipboard_instance"] = current_instance
+
+
+class PasteInstance(EditCommand):
+ topics = [UpdateTopic.frame, UpdateTopic.project_instances]
+
+ @classmethod
+ def do_action(cls, context: CommandContext, params: dict):
+ base_instance: Instance = context.state["clipboard_instance"]
+ current_frame: LabeledFrame = context.state["labeled_frame"]
+ if base_instance is None or current_frame is None:
+ return
+
+ # Create a new instance copy.
+ new_instance = Instance.from_numpy(
+ base_instance.numpy(), skeleton=base_instance.skeleton
+ )
+
+ if base_instance.frame != current_frame:
+ # Only copy the track if we're not on the same frame and the track doesn't
+ # exist on the current frame.
+ current_frame_tracks = [
+ inst.track for inst in current_frame if inst.track is not None
+ ]
+ if base_instance.track not in current_frame_tracks:
+ new_instance.track = base_instance.track
+
+ # Add to the current frame.
+ context.labels.add_instance(current_frame, new_instance)
+
+ if current_frame not in context.labels.labels:
+ # Add current frame to labels if it wasn't already there. This happens when
+ # adding an instance to an empty labeled frame that isn't in the labels.
+ context.labels.append(current_frame)
+
+
def open_website(url: str):
"""Open website in default browser.
diff --git a/sleap/gui/dataviews.py b/sleap/gui/dataviews.py
index 00665c87b..a8c7f42b6 100644
--- a/sleap/gui/dataviews.py
+++ b/sleap/gui/dataviews.py
@@ -88,7 +88,9 @@ def items(self):
@items.setter
def items(self, obj):
if not obj:
+ self.beginResetModel()
self._data = []
+ self.endResetModel()
return
self.obj = obj
diff --git a/sleap/gui/dialogs/importvideos.py b/sleap/gui/dialogs/importvideos.py
index b9e3227bb..4f396995b 100644
--- a/sleap/gui/dialogs/importvideos.py
+++ b/sleap/gui/dialogs/importvideos.py
@@ -37,6 +37,7 @@
import h5py
import qimage2ndarray
+import cv2
from typing import Any, Dict, List, Optional
@@ -141,6 +142,12 @@ def __init__(
"video_class": Video.from_filename,
"params": [],
},
+ {
+ "video_type": "single_image",
+ "match": "jpg,png,tif,jpeg,tiff",
+ "video_class": Video.from_filename,
+ "params": [{"name": "grayscale", "type": "check"}],
+ },
]
outer_layout = QVBoxLayout()
@@ -200,6 +207,18 @@ def __init__(
button_layout.addWidget(all_channels_first_button)
all_channels_last_button.clicked.connect(self.set_all_channels_last)
all_channels_first_button.clicked.connect(self.set_all_channels_first)
+ if any(
+ [
+ widget.import_type["video_type"] == "single_image"
+ for widget in self.import_widgets
+ ]
+ ):
+ all_grayscale_button = QPushButton("All grayscale")
+ all_rgb_button = QPushButton("All RGB")
+ button_layout.addWidget(all_grayscale_button)
+ button_layout.addWidget(all_rgb_button)
+ all_grayscale_button.clicked.connect(self.set_all_grayscale)
+ all_rgb_button.clicked.connect(self.set_all_rgb)
cancel_button = QPushButton("Cancel")
import_button = QPushButton("Import")
@@ -549,18 +568,19 @@ def __init__(self, message: str = str(), *args, **kwargs):
class VideoPreviewWidget(QWidget):
"""Widget to show video preview. Based on :class:`Video` class.
- Args:
+ Attributes:
video: the video to show
-
- Returns:
- None.
+ max_preview_size: Maximum size of the preview images.
Note:
This widget is used by ImportItemWidget.
"""
- def __init__(self, video: Video = None, *args, **kwargs):
+ def __init__(
+ self, video: Video = None, max_preview_size: int = 256, *args, **kwargs
+ ):
super(VideoPreviewWidget, self).__init__(*args, **kwargs)
+ self.max_preview_size = max_preview_size
# widgets to include
self.view = GraphicsView()
self.video_label = QLabel()
@@ -599,10 +619,20 @@ def plot(self, idx=0):
# Get image data
frame = self.video.get_frame(idx)
+
+ # Re-size the preview image
+ height, width = frame.shape[:2]
+ img_length = max(height, width)
+ if img_length > self.max_preview_size:
+ ratio = self.max_preview_size / img_length
+ frame = cv2.resize(frame, None, fx=ratio, fy=ratio)
+
# Clear existing objects
self.view.clear()
+
# Convert ndarray to QImage
image = qimage2ndarray.array2qimage(frame)
+
# Display image
self.view.setImage(image)
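
The preview plot above now downscales large frames before display; a
self-contained sketch of that scaling rule (illustrative frame size, assuming
OpenCV is installed as imported above):

import cv2
import numpy as np

max_preview_size = 256
frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # stand-in for a decoded frame

height, width = frame.shape[:2]
img_length = max(height, width)
if img_length > max_preview_size:
    ratio = max_preview_size / img_length
    frame = cv2.resize(frame, None, fx=ratio, fy=ratio)

print(frame.shape)  # longest side is now at most 256 pixels
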
diff --git a/sleap/gui/learning/dialog.py b/sleap/gui/learning/dialog.py
index a8ea86015..5fdbf43fa 100644
--- a/sleap/gui/learning/dialog.py
+++ b/sleap/gui/learning/dialog.py
@@ -24,6 +24,7 @@
"data.instance_cropping.center_on_part",
"model.heads.centered_instance.anchor_part",
"model.heads.centroid.anchor_part",
+ "model.heads.multi_class_topdown.confmaps.anchor_part",
]
@@ -279,7 +280,14 @@ def disconnect_signals(self):
tab.valueChanged.disconnect()
def make_tabs(self):
- heads = ("single_instance", "centroid", "centered_instance", "multi_instance")
+ heads = (
+ "single_instance",
+ "centroid",
+ "centered_instance",
+ "multi_instance",
+ "multi_class_topdown",
+ "multi_class_bottomup",
+ )
video = self.labels.videos[0] if self.labels else None
@@ -305,6 +313,11 @@ def adjust_data_to_update_other_tabs(self, source_data, updated_data=None):
elif "model.heads.centered_instance.anchor_part" in source_data:
anchor_part = source_data["model.heads.centered_instance.anchor_part"]
set_anchor = True
+ elif "model.heads.multi_class_topdown.confmaps.anchor_part" in source_data:
+ anchor_part = source_data[
+ "model.heads.multi_class_topdown.confmaps.anchor_part"
+ ]
+ set_anchor = True
# Use None instead of empty string/list
anchor_part = anchor_part or None
@@ -312,6 +325,9 @@ def adjust_data_to_update_other_tabs(self, source_data, updated_data=None):
if set_anchor:
updated_data["model.heads.centroid.anchor_part"] = anchor_part
updated_data["model.heads.centered_instance.anchor_part"] = anchor_part
+ updated_data[
+ "model.heads.multi_class_topdown.confmaps.anchor_part"
+ ] = anchor_part
updated_data["data.instance_cropping.center_on_part"] = anchor_part
def update_tabs_from_pipeline(self, source_data):
@@ -350,13 +366,18 @@ def on_tab_data_change(self, tab_name=None):
def get_most_recent_pipeline_trained(self) -> Text:
recent_cfg_info = self._cfg_getter.get_first()
+
if recent_cfg_info and recent_cfg_info.head_name:
+ if recent_cfg_info.head_name in ("multi_class_topdown",):
+ return "top-down-id"
if recent_cfg_info.head_name in ("centroid", "centered_instance"):
return "top-down"
- if recent_cfg_info.head_name in ("multi_instance"):
+ if recent_cfg_info.head_name in ("multi_instance",):
return "bottom-up"
- if recent_cfg_info.head_name in ("single_instance"):
+ if recent_cfg_info.head_name in ("single_instance",):
return "single"
+ if recent_cfg_info.head_name in ("multi_class_bottomup",):
+ return "bottom-up-id"
return ""
def set_default_pipeline_tab(self):
@@ -376,6 +397,8 @@ def add_tab(self, tab_name):
"centroid": "Centroid Model Configuration",
"centered_instance": "Centered Instance Model Configuration",
"multi_instance": "Bottom-Up Model Configuration",
+ "multi_class_topdown": "Top-Down-Id Model Configuration",
+ "multi_class_bottomup": "Bottom-Up-Id Model Configuration",
}
self.tab_widget.addTab(self.tabs[tab_name], tab_labels[tab_name])
self.shown_tab_names.append(tab_name)
@@ -393,6 +416,11 @@ def set_pipeline(self, pipeline: str):
self.add_tab("centered_instance")
elif pipeline == "bottom-up":
self.add_tab("multi_instance")
+ elif pipeline == "top-down-id":
+ self.add_tab("centroid")
+ self.add_tab("multi_class_topdown")
+ elif pipeline == "bottom-up-id":
+ self.add_tab("multi_class_bottomup")
elif pipeline == "single":
self.add_tab("single_instance")
self.current_pipeline = pipeline
@@ -485,6 +513,32 @@ def get_every_head_config_data(
cfg = scopedkeydict.make_training_config_from_key_val_dict(
loaded_cfg_scoped
)
+
+ if len(self.labels.tracks) > 0:
+
+ # For multiclass topdown, the class vectors output stride
+ # should be the max stride.
+ backbone_name = scopedkeydict.find_backbone_name_from_key_val_dict(
+ tab_cfg_key_val_dict
+ )
+ max_stride = tab_cfg_key_val_dict[
+ f"model.backbone.{backbone_name}.max_stride"
+ ]
+
+ # Classes should be added here to prevent value error in
+ # model since we don't add them in the training config yaml.
+ if cfg.model.heads.multi_class_bottomup is not None:
+ cfg.model.heads.multi_class_bottomup.class_maps.classes = [
+ t.name for t in self.labels.tracks
+ ]
+ elif cfg.model.heads.multi_class_topdown is not None:
+ cfg.model.heads.multi_class_topdown.class_vectors.classes = [
+ t.name for t in self.labels.tracks
+ ]
+ cfg.model.heads.multi_class_topdown.class_vectors.output_stride = (
+ max_stride
+ )
+
cfg_info = configs.ConfigFileInfo(config=cfg, head_name=tab_name)
cfg_info_list.append(cfg_info)
@@ -843,16 +897,28 @@ def emitPipeline(self):
def current_pipeline(self):
pipeline_selected_label = self.pipeline_field.value()
if "top-down" in pipeline_selected_label:
- return "top-down"
+ if "id" not in pipeline_selected_label:
+ return "top-down"
+ else:
+ return "top-down-id"
if "bottom-up" in pipeline_selected_label:
- return "bottom-up"
+ if "id" not in pipeline_selected_label:
+ return "bottom-up"
+ else:
+ return "bottom-up-id"
if "single" in pipeline_selected_label:
return "single"
return ""
@current_pipeline.setter
def current_pipeline(self, val):
- if val not in ("top-down", "bottom-up", "single"):
+ if val not in (
+ "top-down",
+ "bottom-up",
+ "single",
+ "top-down-id",
+ "bottom-up-id",
+ ):
raise ValueError(f"Cannot set pipeline to {val}")
# Match short name to full pipeline name shown in menu
@@ -1034,6 +1100,7 @@ def acceptSelectedConfigInfo(self, cfg_info: configs.ConfigFileInfo):
def update_receptive_field(self):
data_form_data = self.form_widgets["data"].get_form_data()
+
model_cfg = scopedkeydict.make_model_config_from_key_val_dict(
key_val_dict=self.form_widgets["model"].get_form_data()
)
diff --git a/sleap/gui/learning/receptivefield.py b/sleap/gui/learning/receptivefield.py
index 1d3a95e64..9b9770092 100644
--- a/sleap/gui/learning/receptivefield.py
+++ b/sleap/gui/learning/receptivefield.py
@@ -1,19 +1,16 @@
"""
Widget for previewing receptive field on sample image using model hyperparams.
"""
-from sleap import Video
-from sleap.nn.config import ModelConfig
-from sleap.gui.widgets.video import GraphicsView
+from typing import Optional, Text
import numpy as np
+from qtpy import QtWidgets, QtGui, QtCore
-from sleap import Skeleton
+from sleap import Video, Track, Skeleton
+from sleap.nn.config import ModelConfig
+from sleap.gui.widgets.video import GraphicsView
from sleap.nn.model import Model
-from typing import Optional, Text
-
-from qtpy import QtWidgets, QtGui, QtCore
-
def compute_rf(down_blocks: int, convs_per_block: int = 2, kernel_size: int = 3) -> int:
"""
@@ -54,7 +51,9 @@ def receptive_field_info_from_model_cfg(model_cfg: ModelConfig) -> dict:
)
try:
- model = Model.from_config(model_cfg, Skeleton())
+ # `Skeleton` and `Tracks` not important for receptive field computation, but
+ # required as check in `Model.from_config`
+ model = Model.from_config(model_cfg, skeleton=Skeleton(), tracks=[Track()])
except ZeroDivisionError:
# Unable to create model from these config parameters
return rf_info
diff --git a/sleap/gui/learning/runners.py b/sleap/gui/learning/runners.py
index 628e1ef93..83e56d995 100644
--- a/sleap/gui/learning/runners.py
+++ b/sleap/gui/learning/runners.py
@@ -543,23 +543,29 @@ def run_learning_pipeline(
save_viz = inference_params.get("_save_viz", False)
- # Train the TrainingJobs
- trained_job_paths = run_gui_training(
- labels_filename=labels_filename,
- labels=labels,
- config_info_list=config_info_list,
- gui=True,
- save_viz=save_viz,
- )
+ if "movenet" in inference_params["_pipeline"]:
+ trained_job_paths = [inference_params["_pipeline"]]
+
+ else:
+ # Train the TrainingJobs
+ trained_job_paths = run_gui_training(
+ labels_filename=labels_filename,
+ labels=labels,
+ config_info_list=config_info_list,
+ gui=True,
+ save_viz=save_viz,
+ )
+
+ # Check that all the models were trained
+ if None in trained_job_paths.values():
+ return -1
- # Check that all the models were trained
- if None in trained_job_paths.values():
- return -1
+ trained_job_paths = list(trained_job_paths.values())
inference_task = InferenceTask(
labels=labels,
labels_filename=labels_filename,
- trained_job_paths=list(trained_job_paths.values()),
+ trained_job_paths=trained_job_paths,
inference_params=inference_params,
)
diff --git a/sleap/gui/learning/scopedkeydict.py b/sleap/gui/learning/scopedkeydict.py
index 294673570..aeeb790cf 100644
--- a/sleap/gui/learning/scopedkeydict.py
+++ b/sleap/gui/learning/scopedkeydict.py
@@ -157,8 +157,12 @@ def resolve_strides_from_key_val_dict(
"model.heads.centroid.output_stride",
"model.heads.multi_instance.confmaps.output_stride",
"model.heads.multi_instance.pafs.output_stride",
+ "model.heads.multi_class_topdown.confmaps.output_stride",
+ "model.heads.multi_class_bottomup.confmaps.output_stride",
+ "model.heads.multi_class_bottomup.class_maps.output_stride",
]:
stride = key_val_dict.get(key, None)
+
if stride is not None:
stride = int(stride)
max_stride = (
diff --git a/sleap/gui/no-preview.png b/sleap/gui/no-preview.png
deleted file mode 100644
index 1aec2a76b5cb8467d94c192bdae9c5359a8212cf..0000000000000000000000000000000000000000
Binary files a/sleap/gui/no-preview.png and /dev/null differ
diff --git a/sleap/io/video.py b/sleap/io/video.py
--- a/sleap/io/video.py
+++ b/sleap/io/video.py
@@ ... @@ -> "Video":
kwargs["dataset"] = "" # prevent serialization from breaking
elif os.path.isdir(filename) or "metadata.yaml" in filename:
backend_class = ImgStoreVideo
+ elif filename.lower().endswith(("jpg", "jpeg", "png", "tif", "tiff")):
+ backend_class = SingleImageVideo
else:
raise ValueError("Could not detect backend for specified filename.")
diff --git a/sleap/nn/inference.py b/sleap/nn/inference.py
index fdc725cc4..7a186c26b 100644
--- a/sleap/nn/inference.py
+++ b/sleap/nn/inference.py
@@ -42,6 +42,8 @@
import tensorflow_hub as hub
from abc import ABC, abstractmethod
from typing import Text, Optional, List, Dict, Union, Iterator, Tuple
+from threading import Thread
+from queue import Queue
import tensorflow as tf
import numpy as np
@@ -67,6 +69,7 @@
from sleap.nn.utils import reset_input_layer
from sleap.io.dataset import Labels
from sleap.util import frame_list
+from sleap.instance import PredictedInstance, LabeledFrame
from tensorflow.python.framework.convert_to_constants import (
convert_variables_to_constants_v2,
@@ -1528,38 +1531,54 @@ def _make_labeled_frames_from_generator(
arrays returned from the inference result generator.
"""
skeleton = self.confmap_config.data.labels.skeletons[0]
-
- # Loop over batches.
predicted_frames = []
- for ex in generator:
-
- # Loop over frames.
- for video_ind, frame_ind, points, confidences in zip(
- ex["video_ind"],
- ex["frame_ind"],
- ex["instance_peaks"],
- ex["instance_peak_vals"],
- ):
- # Loop over instances.
- if np.isnan(points[0]).all():
- predicted_instances = []
- else:
- predicted_instances = [
- sleap.instance.PredictedInstance.from_arrays(
- points=points[0],
- point_confidences=confidences[0],
- instance_score=np.nansum(confidences[0]),
- skeleton=skeleton,
- )
- ]
- predicted_frames.append(
- sleap.LabeledFrame(
- video=data_provider.videos[video_ind],
- frame_idx=frame_ind,
- instances=predicted_instances,
+ def _object_builder():
+ while True:
+ ex = prediction_queue.get()
+ if ex is None:
+ break
+
+ # Loop over frames.
+ for video_ind, frame_ind, points, confidences in zip(
+ ex["video_ind"],
+ ex["frame_ind"],
+ ex["instance_peaks"],
+ ex["instance_peak_vals"],
+ ):
+ # Loop over instances.
+ if np.isnan(points[0]).all():
+ predicted_instances = []
+ else:
+ predicted_instances = [
+ PredictedInstance.from_numpy(
+ points=points[0],
+ point_confidences=confidences[0],
+ instance_score=np.nansum(confidences[0]),
+ skeleton=skeleton,
+ )
+ ]
+
+ predicted_frames.append(
+ LabeledFrame(
+ video=data_provider.videos[video_ind],
+ frame_idx=frame_ind,
+ instances=predicted_instances,
+ )
)
- )
+
+ # Set up threaded object builder.
+ prediction_queue = Queue()
+ object_builder = Thread(target=_object_builder)
+ object_builder.start()
+
+ # Loop over batches.
+ try:
+ for ex in generator:
+ prediction_queue.put(ex)
+ finally:
+ prediction_queue.put(None)
+ object_builder.join()
return predicted_frames
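
Each _make_labeled_frames_from_generator variant below is refactored into the
same producer/consumer shape; a self-contained sketch of the pattern in plain
Python (stand-in values, not SLEAP API), showing why the sentinel is put in a
finally block:

from queue import Queue
from threading import Thread

results = []
prediction_queue = Queue()

def _object_builder():
    # Consumer: build output objects off the main thread.
    while True:
        ex = prediction_queue.get()
        if ex is None:  # sentinel: producer finished (or raised)
            break
        results.append(ex * 2)  # stand-in for constructing LabeledFrame objects

object_builder = Thread(target=_object_builder)
object_builder.start()

try:
    for ex in range(5):  # stand-in for the inference generator
        prediction_queue.put(ex)
finally:
    # Always unblock and join the consumer, even if the generator raises.
    prediction_queue.put(None)
    object_builder.join()

print(results)  # [0, 2, 4, 6, 8]
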
@@ -2514,62 +2533,81 @@ def _make_labeled_frames_from_generator(
else:
skeleton = self.centroid_config.data.labels.skeletons[0]
- # Loop over batches.
predicted_frames = []
- for ex in generator:
- if "n_valid" in ex:
- ex["instance_peaks"] = [
- x[:n] for x, n in zip(ex["instance_peaks"], ex["n_valid"])
- ]
- ex["instance_peak_vals"] = [
- x[:n] for x, n in zip(ex["instance_peak_vals"], ex["n_valid"])
- ]
- ex["centroids"] = [
- x[:n] for x, n in zip(ex["centroids"], ex["n_valid"])
- ]
- ex["centroid_vals"] = [
- x[:n] for x, n in zip(ex["centroid_vals"], ex["n_valid"])
- ]
+ def _object_builder():
+ while True:
+ ex = prediction_queue.get()
+ if ex is None:
+ break
- # Loop over frames.
- for image, video_ind, frame_ind, points, confidences, scores in zip(
- ex["image"],
- ex["video_ind"],
- ex["frame_ind"],
- ex["instance_peaks"],
- ex["instance_peak_vals"],
- ex["centroid_vals"],
- ):
+ if "n_valid" in ex:
+ ex["instance_peaks"] = [
+ x[:n] for x, n in zip(ex["instance_peaks"], ex["n_valid"])
+ ]
+ ex["instance_peak_vals"] = [
+ x[:n] for x, n in zip(ex["instance_peak_vals"], ex["n_valid"])
+ ]
+ ex["centroids"] = [
+ x[:n] for x, n in zip(ex["centroids"], ex["n_valid"])
+ ]
+ ex["centroid_vals"] = [
+ x[:n] for x, n in zip(ex["centroid_vals"], ex["n_valid"])
+ ]
+
+ # Loop over frames.
+ for image, video_ind, frame_ind, points, confidences, scores in zip(
+ ex["image"],
+ ex["video_ind"],
+ ex["frame_ind"],
+ ex["instance_peaks"],
+ ex["instance_peak_vals"],
+ ex["centroid_vals"],
+ ):
- # Loop over instances.
- predicted_instances = []
- for pts, confs, score in zip(points, confidences, scores):
- if np.isnan(pts).all():
- continue
-
- predicted_instances.append(
- sleap.instance.PredictedInstance.from_arrays(
- points=pts,
- point_confidences=confs,
- instance_score=score,
- skeleton=skeleton,
+ # Loop over instances.
+ predicted_instances = []
+ for pts, confs, score in zip(points, confidences, scores):
+ if np.isnan(pts).all():
+ continue
+
+ predicted_instances.append(
+ PredictedInstance.from_numpy(
+ points=pts,
+ point_confidences=confs,
+ instance_score=score,
+ skeleton=skeleton,
+ )
)
- )
- if self.tracker:
- # Set tracks for predicted instances in this frame.
- predicted_instances = self.tracker.track(
- untracked_instances=predicted_instances, img=image, t=frame_ind
- )
+ if self.tracker:
+ # Set tracks for predicted instances in this frame.
+ predicted_instances = self.tracker.track(
+ untracked_instances=predicted_instances,
+ img=image,
+ t=frame_ind,
+ )
- predicted_frames.append(
- sleap.LabeledFrame(
- video=data_provider.videos[video_ind],
- frame_idx=frame_ind,
- instances=predicted_instances,
+ predicted_frames.append(
+ LabeledFrame(
+ video=data_provider.videos[video_ind],
+ frame_idx=frame_ind,
+ instances=predicted_instances,
+ )
)
- )
+
+ # Set up threaded object builder.
+ prediction_queue = Queue()
+ object_builder = Thread(target=_object_builder)
+ object_builder.start()
+
+ # Loop over batches.
+ try:
+ for ex in generator:
+ prediction_queue.put(ex)
+ finally:
+ prediction_queue.put(None)
+ object_builder.join()
if self.tracker:
self.tracker.final_pass(predicted_frames)
@@ -3092,61 +3130,79 @@ def _make_labeled_frames_from_generator(
arrays returned from the inference result generator.
"""
skeleton = self.bottomup_config.data.labels.skeletons[0]
-
- # Loop over batches.
predicted_frames = []
- for ex in generator:
- if "n_valid" in ex:
- # Crop possibly variable length results.
- ex["instance_peaks"] = [
- x[:n] for x, n in zip(ex["instance_peaks"], ex["n_valid"])
- ]
- ex["instance_peak_vals"] = [
- x[:n] for x, n in zip(ex["instance_peak_vals"], ex["n_valid"])
- ]
- ex["instance_scores"] = [
- x[:n] for x, n in zip(ex["instance_scores"], ex["n_valid"])
- ]
+ def _object_builder():
+ while True:
+ ex = prediction_queue.get()
+ if ex is None:
+ break
- # Loop over frames.
- for image, video_ind, frame_ind, points, confidences, scores in zip(
- ex["image"],
- ex["video_ind"],
- ex["frame_ind"],
- ex["instance_peaks"],
- ex["instance_peak_vals"],
- ex["instance_scores"],
- ):
+ if "n_valid" in ex:
+ # Crop possibly variable length results.
+ ex["instance_peaks"] = [
+ x[:n] for x, n in zip(ex["instance_peaks"], ex["n_valid"])
+ ]
+ ex["instance_peak_vals"] = [
+ x[:n] for x, n in zip(ex["instance_peak_vals"], ex["n_valid"])
+ ]
+ ex["instance_scores"] = [
+ x[:n] for x, n in zip(ex["instance_scores"], ex["n_valid"])
+ ]
- # Loop over instances.
- predicted_instances = []
- for pts, confs, score in zip(points, confidences, scores):
- if np.isnan(pts).all():
- continue
-
- predicted_instances.append(
- sleap.instance.PredictedInstance.from_arrays(
- points=pts,
- point_confidences=confs,
- instance_score=score,
- skeleton=skeleton,
+ # Loop over frames.
+ for image, video_ind, frame_ind, points, confidences, scores in zip(
+ ex["image"],
+ ex["video_ind"],
+ ex["frame_ind"],
+ ex["instance_peaks"],
+ ex["instance_peak_vals"],
+ ex["instance_scores"],
+ ):
+
+ # Loop over instances.
+ predicted_instances = []
+ for pts, confs, score in zip(points, confidences, scores):
+ if np.isnan(pts).all():
+ continue
+
+ predicted_instances.append(
+ PredictedInstance.from_numpy(
+ points=pts,
+ point_confidences=confs,
+ instance_score=score,
+ skeleton=skeleton,
+ )
)
- )
- if self.tracker:
- # Set tracks for predicted instances in this frame.
- predicted_instances = self.tracker.track(
- untracked_instances=predicted_instances, img=image, t=frame_ind
- )
+ if self.tracker:
+ # Set tracks for predicted instances in this frame.
+ predicted_instances = self.tracker.track(
+ untracked_instances=predicted_instances,
+ img=image,
+ t=frame_ind,
+ )
- predicted_frames.append(
- sleap.LabeledFrame(
- video=data_provider.videos[video_ind],
- frame_idx=frame_ind,
- instances=predicted_instances,
+ predicted_frames.append(
+ LabeledFrame(
+ video=data_provider.videos[video_ind],
+ frame_idx=frame_ind,
+ instances=predicted_instances,
+ )
)
- )
+
+ # Set up threaded object builder.
+ prediction_queue = Queue()
+ object_builder = Thread(target=_object_builder)
+ object_builder.start()
+
+ # Loop over batches.
+ try:
+ for ex in generator:
+ prediction_queue.put(ex)
+ finally:
+ prediction_queue.put(None)
+ object_builder.join()
if self.tracker:
self.tracker.final_pass(predicted_frames)
@@ -3595,47 +3651,64 @@ def _make_labeled_frames_from_generator(
names = self.config.model.heads.multi_class_bottomup.class_maps.classes
tracks = [sleap.Track(name=n, spawned_on=0) for n in names]
- # Loop over batches.
predicted_frames = []
- for ex in generator:
-
- # Loop over frames.
- for image, video_ind, frame_ind, points, confidences, scores in zip(
- ex["image"],
- ex["video_ind"],
- ex["frame_ind"],
- ex["instance_peaks"],
- ex["instance_peak_vals"],
- ex["instance_scores"],
- ):
- # Loop over instances.
- predicted_instances = []
- for i, (pts, confs, score) in enumerate(
- zip(points, confidences, scores)
+ def _object_builder():
+ while True:
+ ex = prediction_queue.get()
+ if ex is None:
+ return
+
+ # Loop over frames.
+ for image, video_ind, frame_ind, points, confidences, scores in zip(
+ ex["image"],
+ ex["video_ind"],
+ ex["frame_ind"],
+ ex["instance_peaks"],
+ ex["instance_peak_vals"],
+ ex["instance_scores"],
):
- if np.isnan(pts).all():
- continue
- track = None
- if tracks is not None and len(tracks) >= (i - 1):
- track = tracks[i]
- predicted_instances.append(
- sleap.instance.PredictedInstance.from_arrays(
- points=pts,
- point_confidences=confs,
- instance_score=np.nanmean(score),
- skeleton=skeleton,
- track=track,
+
+ # Loop over instances.
+ predicted_instances = []
+ for i, (pts, confs, score) in enumerate(
+ zip(points, confidences, scores)
+ ):
+ if np.isnan(pts).all():
+ continue
+ track = None
+ if tracks is not None and len(tracks) >= (i - 1):
+ track = tracks[i]
+ predicted_instances.append(
+ PredictedInstance.from_numpy(
+ points=pts,
+ point_confidences=confs,
+ instance_score=np.nanmean(score),
+ skeleton=skeleton,
+ track=track,
+ )
)
- )
- predicted_frames.append(
- sleap.LabeledFrame(
- video=data_provider.videos[video_ind],
- frame_idx=frame_ind,
- instances=predicted_instances,
+ predicted_frames.append(
+ LabeledFrame(
+ video=data_provider.videos[video_ind],
+ frame_idx=frame_ind,
+ instances=predicted_instances,
+ )
)
- )
+
+ # Set up threaded object builder.
+ prediction_queue = Queue()
+ object_builder = Thread(target=_object_builder)
+ object_builder.start()
+
+ # Loop over batches.
+ try:
+ for ex in generator:
+ prediction_queue.put(ex)
+ finally:
+ prediction_queue.put(None)
+ object_builder.join()
return predicted_frames
@@ -4267,47 +4340,64 @@ def _make_labeled_frames_from_generator(
)
tracks = [sleap.Track(name=n, spawned_on=0) for n in names]
- # Loop over batches.
predicted_frames = []
- for ex in generator:
-
- # Loop over frames.
- for image, video_ind, frame_ind, points, confidences, scores in zip(
- ex["image"],
- ex["video_ind"],
- ex["frame_ind"],
- ex["instance_peaks"],
- ex["instance_peak_vals"],
- ex["instance_scores"],
- ):
- # Loop over instances.
- predicted_instances = []
- for i, (pts, confs, score) in enumerate(
- zip(points, confidences, scores)
+ def _object_builder():
+ while True:
+ ex = prediction_queue.get()
+ if ex is None:
+ break
+
+ # Loop over frames.
+ for image, video_ind, frame_ind, points, confidences, scores in zip(
+ ex["image"],
+ ex["video_ind"],
+ ex["frame_ind"],
+ ex["instance_peaks"],
+ ex["instance_peak_vals"],
+ ex["instance_scores"],
):
- if np.isnan(pts).all():
- continue
- track = None
- if tracks is not None and len(tracks) >= (i - 1):
- track = tracks[i]
- predicted_instances.append(
- sleap.instance.PredictedInstance.from_arrays(
- points=pts,
- point_confidences=confs,
- instance_score=np.nanmean(score),
- skeleton=skeleton,
- track=track,
+
+ # Loop over instances.
+ predicted_instances = []
+ for i, (pts, confs, score) in enumerate(
+ zip(points, confidences, scores)
+ ):
+ if np.isnan(pts).all():
+ continue
+ track = None
+ if tracks is not None and len(tracks) >= (i - 1):
+ track = tracks[i]
+ predicted_instances.append(
+ PredictedInstance.from_numpy(
+ points=pts,
+ point_confidences=confs,
+ instance_score=np.nanmean(score),
+ skeleton=skeleton,
+ track=track,
+ )
)
- )
- predicted_frames.append(
- sleap.LabeledFrame(
- video=data_provider.videos[video_ind],
- frame_idx=frame_ind,
- instances=predicted_instances,
+ predicted_frames.append(
+ LabeledFrame(
+ video=data_provider.videos[video_ind],
+ frame_idx=frame_ind,
+ instances=predicted_instances,
+ )
)
- )
+
+ # Set up threaded object builder.
+ prediction_queue = Queue()
+ object_builder = Thread(target=_object_builder)
+ object_builder.start()
+
+ # Loop over batches.
+ try:
+ for ex in generator:
+ prediction_queue.put(ex)
+ finally:
+ prediction_queue.put(None)
+ object_builder.join()
return predicted_frames
@@ -4537,41 +4627,57 @@ def _make_labeled_frames_from_generator(
) -> List[sleap.LabeledFrame]:
skeleton = MOVENET_SKELETON
-
- # Loop over batches.
predicted_frames = []
- for ex in generator:
-
- # Loop over frames.
- for video_ind, frame_ind, points, confidences in zip(
- ex["video_ind"],
- ex["frame_ind"],
- ex["instance_peaks"],
- ex["confidences"],
- ):
- # Filter out points with low confidences
- points[confidences < self.peak_threshold] = np.nan
- # Create predicted instances from MoveNet predictions
- if np.isnan(points).all():
- predicted_instances = []
- else:
- predicted_instances = [
- sleap.instance.PredictedInstance.from_arrays(
- points=points[0], # (nodes, 2)
- point_confidences=confidences[0], # (nodes,)
- instance_score=np.nansum(confidences[0]), # ()
- skeleton=skeleton,
- )
- ]
+ def _object_builder():
+ while True:
+ ex = prediction_queue.get()
+ if ex is None:
+ break
- predicted_frames.append(
- sleap.LabeledFrame(
- video=data_provider.videos[video_ind],
- frame_idx=frame_ind,
- instances=predicted_instances,
+ # Loop over frames.
+ for video_ind, frame_ind, points, confidences in zip(
+ ex["video_ind"],
+ ex["frame_ind"],
+ ex["instance_peaks"],
+ ex["confidences"],
+ ):
+ # Filter out points with low confidences
+ points[confidences < self.peak_threshold] = np.nan
+
+ # Create predicted instances from MoveNet predictions
+ if np.isnan(points).all():
+ predicted_instances = []
+ else:
+ predicted_instances = [
+ PredictedInstance.from_numpy(
+ points=points[0], # (nodes, 2)
+ point_confidences=confidences[0], # (nodes,)
+ instance_score=np.nansum(confidences[0]), # ()
+ skeleton=skeleton,
+ )
+ ]
+
+ predicted_frames.append(
+ LabeledFrame(
+ video=data_provider.videos[video_ind],
+ frame_idx=frame_ind,
+ instances=predicted_instances,
+ )
)
- )
+
+ # Set up threaded object builder.
+ prediction_queue = Queue()
+ object_builder = Thread(target=_object_builder)
+ object_builder.start()
+
+ # Loop over batches.
+ try:
+ for ex in generator:
+ prediction_queue.put(ex)
+ finally:
+ prediction_queue.put(None)
+ object_builder.join()
return predicted_frames
@@ -5218,6 +5324,9 @@ def main(args: Optional[list] = None):
# Setup tracker.
tracker = _make_tracker_from_cli(args)
+ if args.models is not None and "movenet" in args.models[0]:
+ args.models = args.models[0]
+
# Either run inference (and tracking) or just run tracking
if args.models is not None:
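
The `main()` change above lets `sleap-track` accept a MoveNet model name in place of a trained model directory by collapsing the single-element model list to a string. A hedged sketch of what an invocation could look like, calling the CLI entry point from Python (the flag names are assumed from typical `sleap-track` usage and the tests further down; the video path is a placeholder):

```python
from sleap.nn.inference import main as sleap_track

# Equivalent to running `sleap-track` from the shell; "movenet-thunder"
# also works as a model name.
sleap_track([
    "path/to/video.mp4",
    "--model", "movenet-lightning",
    "--frames", "1-100",
])
```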
diff --git a/sleap/nn/training.py b/sleap/nn/training.py
index 43be5cf31..7d0d25a56 100644
--- a/sleap/nn/training.py
+++ b/sleap/nn/training.py
@@ -1799,8 +1799,8 @@ def visualize_example(example):
)
-def main(args: Optional[List] = None):
- """Create CLI for training and run."""
+def create_trainer_using_cli(args: Optional[List] = None):
+ """Create CLI for training."""
import argparse
parser = argparse.ArgumentParser()
@@ -2004,10 +2004,15 @@ def main(args: Optional[List] = None):
test_labels=args.test_labels,
video_search_paths=args.video_paths,
)
- trainer.train()
return trainer
+def main(args: Optional[List] = None):
+ """Create CLI for training and run."""
+ trainer = create_trainer_using_cli(args=args)
+ trainer.train()
+
+
if __name__ == "__main__":
main()
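
Splitting `main()` into `create_trainer_using_cli()` plus an explicit `trainer.train()` call makes it possible to build a trainer from CLI-style arguments without immediately starting training (the updated test import below relies on this). A rough sketch, with placeholder paths:

```python
from sleap.nn.training import create_trainer_using_cli

# Build the trainer from CLI-style arguments; nothing is trained yet.
trainer = create_trainer_using_cli([
    "path/to/training_config.json",
    "path/to/labels.train.slp",
])

# Training is now a separate, explicit step.
trainer.train()
```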
diff --git a/sleap/skeleton.py b/sleap/skeleton.py
index e159aeb05..eca393b8e 100644
--- a/sleap/skeleton.py
+++ b/sleap/skeleton.py
@@ -95,13 +95,15 @@ class Skeleton:
_skeleton_idx: An index variable used to give skeletons a default name that
should be unique across all skeletons.
preview_image: A byte string containing an encoded preview image for the
- skeleton.
- description: A text description of the skeleton. Used mostly for presets.
+ skeleton. Used only for templates.
+ description: A text description of the skeleton. Used only for templates.
+ _is_template: Whether this skeleton is a template. Used only for templates.
"""
_skeleton_idx = count(0)
preview_image: Optional[bytes] = None
description: Optional[str] = None
+ _is_template: bool = False
def __init__(self, name: str = None):
"""Initialize an empty skeleton object.
@@ -125,12 +127,12 @@ def __init__(self, name: str = None):
def __repr__(self) -> str:
"""Return full description of the skeleton."""
return (
- f"Skeleton(name='{self.name}', ",
- f"description='{self.description}', ",
+ f"Skeleton(name='{self.name}', "
+ f"description='{self.description}', "
f"nodes={self.node_names}, "
f"edges={self.edge_names}, "
f"symmetries={self.symmetry_names}"
- ")",
+ ")"
)
def __str__(self) -> str:
@@ -176,6 +178,32 @@ def dict_match(dict1, dict2):
return True
+ @property
+ def is_template(self) -> bool:
+ """Return whether this skeleton is a template.
+
+        If True, then the preview image and description are saved with the skeleton.
+        If False, then the preview image and description are not saved.
+
+ Only provided template skeletons are considered templates. To save a new
+ template skeleton, change this to True before saving.
+ """
+ return self._is_template
+
+ @is_template.setter
+ def is_template(self, value: bool):
+ """Set whether this skeleton is a template."""
+
+ self._is_template = False
+ if value and ((self.preview_image is None) or (self.description is None)):
+ raise ValueError(
+ "For a skeleton to be a template, it must have both a preview image "
+                "and description. Check out `generate_skeleton_preview_image` to "
+ "generate a preview image."
+ )
+
+ self._is_template = value
+
@property
def is_arborescence(self) -> bool:
"""Return whether this skeleton graph forms an arborescence."""
@@ -956,8 +984,7 @@ def from_names_and_edge_inds(
return skeleton
def to_json(self, node_to_idx: Optional[Dict[Node, int]] = None) -> str:
- """
- Convert the :class:`Skeleton` to a JSON representation.
+ """Convert the :class:`Skeleton` to a JSON representation.
Args:
node_to_idx: optional dict which maps :class:`Node`sto index
@@ -981,12 +1008,22 @@ def to_json(self, node_to_idx: Optional[Dict[Node, int]] = None) -> str:
indexed_node_graph = self._graph
# Encode to JSON
- dicts = {
- "nx_graph": json_graph.node_link_data(indexed_node_graph),
- "description": self.description,
- "preview_image": self.preview_image,
- }
- json_str = jsonpickle.encode(dicts)
+ graph = json_graph.node_link_data(indexed_node_graph)
+
+ # SLEAP v1.3.0 added `description` and `preview_image` to `Skeleton`, but saving
+ # these fields breaks data format compatibility. Currently, these are only
+ # added in our custom template skeletons. To ensure backwards data format
+    # compatibility of user data, we only save these fields for template skeletons.
+ if self.is_template:
+ data = {
+ "nx_graph": graph,
+ "description": self.description,
+ "preview_image": self.preview_image,
+ }
+ else:
+ data = graph
+
+ json_str = jsonpickle.encode(data)
return json_str
@@ -1020,8 +1057,7 @@ def save_json(self, filename: str, node_to_idx: Optional[Dict[Node, int]] = None
def from_json(
cls, json_str: str, idx_to_node: Dict[int, Node] = None
) -> "Skeleton":
- """
- Instantiate :class:`Skeleton` from JSON string.
+ """Instantiate :class:`Skeleton` from JSON string.
Args:
json_str: The JSON encoded Skeleton.
@@ -1036,9 +1072,8 @@ def from_json(
An instance of the `Skeleton` object decoded from the JSON.
"""
dicts = jsonpickle.decode(json_str)
- if "nx_graph" not in dicts:
- dicts = {"nx_graph": dicts, "description": None, "preview_image": None}
- graph = json_graph.node_link_graph(dicts["nx_graph"])
+ nx_graph = dicts.get("nx_graph", dicts)
+ graph = json_graph.node_link_graph(nx_graph)
# Replace graph node indices with corresponding nodes from node_map
if idx_to_node is not None:
@@ -1046,8 +1081,8 @@ def from_json(
skeleton = Skeleton()
skeleton._graph = graph
- skeleton.description = dicts["description"]
- skeleton.preview_image = dicts["preview_image"]
+ skeleton.description = dicts.get("description", None)
+ skeleton.preview_image = dicts.get("preview_image", None)
return skeleton
@@ -1055,8 +1090,7 @@ def from_json(
def load_json(
cls, filename: str, idx_to_node: Dict[int, Node] = None
) -> "Skeleton":
- """
- Load a skeleton from a JSON file.
+ """Load a skeleton from a JSON file.
This method will load the Skeleton from JSON file saved
with; :meth:`~Skeleton.save_json`
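
The `Skeleton` changes above only serialize `description` and `preview_image` for template skeletons, so ordinary user skeletons keep the pre-1.3.0 on-disk format, and `from_json()` accepts both payload shapes. A minimal sketch of the intended usage (the preview bytes are a placeholder):

```python
from sleap.skeleton import Skeleton

skeleton = Skeleton("example")
skeleton.add_node("head")
skeleton.add_node("thorax")

# Non-template skeletons serialize to just the graph dict, which older
# SLEAP versions can still read.
plain_json = skeleton.to_json()

# Marking a skeleton as a template requires both fields to be set first;
# otherwise the setter raises ValueError.
skeleton.description = "An example template skeleton."
skeleton.preview_image = b"<encoded preview image bytes>"
skeleton.is_template = True
template_json = skeleton.to_json()  # now includes description and preview_image

# from_json() handles either payload shape.
restored = Skeleton.from_json(plain_json)
```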
diff --git a/sleap/version.py b/sleap/version.py
index bfde9e82e..557ec831a 100644
--- a/sleap/version.py
+++ b/sleap/version.py
@@ -12,7 +12,7 @@
"""
-__version__ = "1.3.0a0"
+__version__ = "1.3.0"
def versions():
diff --git a/tests/data/dlc/labeled-data/video/CollectedData_LM.csv b/tests/data/dlc/labeled-data/video/CollectedData_LM.csv
new file mode 100644
index 000000000..f57b667f4
--- /dev/null
+++ b/tests/data/dlc/labeled-data/video/CollectedData_LM.csv
@@ -0,0 +1,8 @@
+scorer,,,LM,LM,LM,LM,LM,LM,LM,LM,LM,LM,LM,LM
+individuals,,,individual1,individual1,individual1,individual1,individual1,individual1,individual2,individual2,individual2,individual2,individual2,individual2
+bodyparts,,,A,A,B,B,C,C,A,A,B,B,C,C
+coords,,,x,y,x,y,x,y,x,y,x,y,x,y
+labeled-data,video,img000.png,0,1,2,3,4,5,6,7,8,9,10,11
+labeled-data,video,img001.png,12,13,,,15,16,17,18,,,20,21
+labeled-data,video,img002.png,,,,,,,,,,,,
+labeled-data,video,img003.png,22,23,24,25,26,27,,,,,,
diff --git a/tests/data/dlc/dlc_testdata.csv b/tests/data/dlc/labeled-data/video/dlc_testdata.csv
similarity index 100%
rename from tests/data/dlc/dlc_testdata.csv
rename to tests/data/dlc/labeled-data/video/dlc_testdata.csv
diff --git a/tests/data/dlc/dlc_testdata_v2.csv b/tests/data/dlc/labeled-data/video/dlc_testdata_v2.csv
similarity index 100%
rename from tests/data/dlc/dlc_testdata_v2.csv
rename to tests/data/dlc/labeled-data/video/dlc_testdata_v2.csv
diff --git a/tests/data/dlc/img000.png b/tests/data/dlc/labeled-data/video/img000.png
similarity index 100%
rename from tests/data/dlc/img000.png
rename to tests/data/dlc/labeled-data/video/img000.png
diff --git a/tests/data/dlc/img001.png b/tests/data/dlc/labeled-data/video/img001.png
similarity index 100%
rename from tests/data/dlc/img001.png
rename to tests/data/dlc/labeled-data/video/img001.png
diff --git a/tests/data/dlc/img002.png b/tests/data/dlc/labeled-data/video/img002.png
similarity index 100%
rename from tests/data/dlc/img002.png
rename to tests/data/dlc/labeled-data/video/img002.png
diff --git a/tests/data/dlc/img003.png b/tests/data/dlc/labeled-data/video/img003.png
similarity index 100%
rename from tests/data/dlc/img003.png
rename to tests/data/dlc/labeled-data/video/img003.png
diff --git a/tests/data/dlc/madlc_testdata.csv b/tests/data/dlc/labeled-data/video/madlc_testdata.csv
similarity index 100%
rename from tests/data/dlc/madlc_testdata.csv
rename to tests/data/dlc/labeled-data/video/madlc_testdata.csv
diff --git a/tests/data/dlc/madlc_testdata_v2.csv b/tests/data/dlc/labeled-data/video/madlc_testdata_v2.csv
similarity index 100%
rename from tests/data/dlc/madlc_testdata_v2.csv
rename to tests/data/dlc/labeled-data/video/madlc_testdata_v2.csv
diff --git a/tests/data/dlc/madlc_230_config.yaml b/tests/data/dlc/madlc_230_config.yaml
new file mode 100644
index 000000000..ae2cbb44b
--- /dev/null
+++ b/tests/data/dlc/madlc_230_config.yaml
@@ -0,0 +1,67 @@
+ # Project definitions (do not edit)
+Task: madlc_2.3.0
+scorer: LM
+date: Mar1
+multianimalproject: true
+identity: false
+
+ # Project path (change when moving around)
+project_path: D:\social-leap-estimates-animal-poses\pull-requests\sleap\tests\data\dlc\madlc_testdata_v3
+
+ # Annotation data set configuration (and individual video cropping parameters)
+video_sets:
+ D:\social-leap-estimates-animal-poses\pull-requests\sleap\tests\data\videos\centered_pair_small.mp4:
+ crop: 0, 384, 0, 384
+individuals:
+- individual1
+- individual2
+- individual3
+uniquebodyparts: []
+multianimalbodyparts:
+- A
+- B
+- C
+bodyparts: MULTI!
+
+ # Fraction of video to start/stop when extracting frames for labeling/refinement
+start: 0
+stop: 1
+numframes2pick: 20
+
+ # Plotting configuration
+skeleton:
+- - A
+ - B
+- - B
+ - C
+- - A
+ - C
+skeleton_color: black
+pcutoff: 0.6
+dotsize: 12
+alphavalue: 0.7
+colormap: rainbow
+
+ # Training,Evaluation and Analysis configuration
+TrainingFraction:
+- 0.95
+iteration: 0
+default_net_type: dlcrnet_ms5
+default_augmenter: multi-animal-imgaug
+default_track_method: ellipse
+snapshotindex: -1
+batch_size: 8
+
+ # Cropping Parameters (for analysis and outlier frame detection)
+cropping: false
+ #if cropping is true for analysis, then set the values here:
+x1: 0
+x2: 640
+y1: 277
+y2: 624
+
+ # Refinement configuration (parameters from annotation dataset configuration also relevant in this stage)
+corner2move2:
+- 50
+- 50
+move2corner: true
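
The new `madlc_230_config.yaml` fixture exercises importing a multi-animal DeepLabCut project from its project config rather than from a single CSV, so the adaptor can locate the `labeled-data/<video>` folders alongside it. A hedged example of the call, mirroring the parametrized test below (the `read()` keyword arguments are assumed from that test):

```python
from sleap.io.format import read

# Point the reader at the DLC project config instead of an individual CSV.
labels = read(
    "tests/data/dlc/madlc_230_config.yaml",
    for_object="labels",
    as_format="deeplabcut",
)
print(len(labels))  # number of imported labeled frames
```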
diff --git a/tests/data/tracks/clip.2node.slp b/tests/data/tracks/clip.2node.slp
index ccfaabacae207c30d61775014a4d29aa4fd84168..2d0344f4cd14e133880c9514e4bd2948c3964f75 100644
GIT binary patch
delta 46596 (base85-encoded binary delta payload omitted)
delta 33600 (base85-encoded binary delta payload omitted)
diff --git a/tests/gui/learning/test_dialog.py b/tests/gui/learning/test_dialog.py
index 2bf9e63b3..34a4e1c83 100644
--- a/tests/gui/learning/test_dialog.py
+++ b/tests/gui/learning/test_dialog.py
@@ -349,3 +349,32 @@ def assert_form_state(
assert ted._use_trained_model is None
assert ted.use_trained
assert not ted.resume_training
+
+
+def test_movenet_selection(qtbot, min_dance_labels):
+
+ app = MainWindow(no_usage_data=True)
+
+ # learning dialog expects path to video
+ video_path = Path(min_dance_labels.video.backend.filename)
+
+ ld = LearningDialog(
+ mode="inference", labels_filename=video_path, labels=min_dance_labels
+ )
+
+ # to make sure combo box has movenet options
+ combo_box = ld.pipeline_form_widget.pipeline_field.combo_box
+ model_options = [combo_box.itemText(i) for i in range(combo_box.count())]
+
+ models = ["movenet-lightning", "movenet-thunder"]
+
+ for model in models:
+ assert model in model_options
+
+ # change model type
+ combo_box.setCurrentText(model)
+
+ pipeline_form_data = ld.pipeline_form_widget.get_form_data()
+
+ # ensure pipeline version matches model type
+ assert pipeline_form_data["_pipeline"] == model
diff --git a/tests/gui/test_commands.py b/tests/gui/test_commands.py
index 6423b5099..bfa92ea1a 100644
--- a/tests/gui/test_commands.py
+++ b/tests/gui/test_commands.py
@@ -526,6 +526,222 @@ def test_SetSelectedInstanceTrack(centered_pair_predictions: Labels):
assert pred_inst.track == new_instance.track
+def test_DeleteMultipleTracks(min_tracks_2node_labels: Labels):
+ """Test that deleting multiple tracks works as expected."""
+ labels = min_tracks_2node_labels
+ tracks = labels.tracks
+ tracks.append(Track(name="unused", spawned_on=0))
+ assert len(tracks) == 3
+
+ # Set-up command context
+ context: CommandContext = CommandContext.from_labels(labels)
+ context.state["labels"] = labels
+
+ # Delete unused tracks
+ context.deleteMultipleTracks(delete_all=False)
+ assert len(labels.tracks) == 2
+
+ # Add back an unused track and delete all tracks
+ tracks.append(Track(name="unused", spawned_on=0))
+ assert len(tracks) == 3
+ context.deleteMultipleTracks(delete_all=True)
+ assert len(labels.tracks) == 0
+
+
+def test_CopyInstance(min_tracks_2node_labels: Labels):
+ """Test that copying an instance works as expected."""
+ labels = min_tracks_2node_labels
+ instance = labels[0].instances[0]
+
+ # Set-up command context
+ context: CommandContext = CommandContext.from_labels(labels)
+
+ # Copy instance
+ assert context.state["instance"] is None
+ context.copyInstance()
+ assert context.state["clipboard_instance"] is None
+
+ # Copy instance
+ context.state["instance"] = instance
+ context.copyInstance()
+ assert context.state["clipboard_instance"] == instance
+
+
+def test_PasteInstance(min_tracks_2node_labels: Labels):
+ """Test that pasting an instance works as expected."""
+ labels = min_tracks_2node_labels
+ lf_to_copy: LabeledFrame = labels.labeled_frames[0]
+ instance: Instance = lf_to_copy.instances[0]
+
+ # Set-up command context
+ context: CommandContext = CommandContext.from_labels(labels)
+
+ def paste_instance(
+ lf_to_paste: LabeledFrame, assertions_pre_paste, assertions_post_paste
+ ):
+ """Helper function to test pasting an instance."""
+ instances_checkpoint = list(lf_to_paste.instances)
+ assertions_pre_paste(instance, lf_to_copy)
+
+ context.pasteInstance()
+ assertions_post_paste(instances_checkpoint, lf_to_copy, lf_to_paste)
+
+ # Case 1: No instance copied, but frame selected
+
+ def assertions_prior(*args):
+ assert context.state["clipboard_instance"] is None
+
+ def assertions_post(instances_checkpoint, lf_to_copy, *args):
+ assert instances_checkpoint == lf_to_copy.instances
+
+ context.state["labeled_frame"] = lf_to_copy
+ paste_instance(lf_to_copy, assertions_prior, assertions_post)
+
+ # Case 2: No frame selected, but instance copied
+
+ def assertions_prior(*args):
+ assert context.state["labeled_frame"] is None
+
+ context.state["labeled_frame"] = None
+ context.state["clipboard_instance"] = instance
+ paste_instance(lf_to_copy, assertions_prior, assertions_post)
+
+ # Case 3: Instance copied and current frame selected
+
+ def assertions_prior(instance, lf_to_copy, *args):
+ assert context.state["clipboard_instance"] == instance
+ assert context.state["labeled_frame"] == lf_to_copy
+
+ def assertions_post(instances_checkpoint, lf_to_copy, lf_to_paste, *args):
+ lf_checkpoint_tracks = [
+ inst.track for inst in instances_checkpoint if inst.track is not None
+ ]
+ lf_to_copy_tracks = [
+ inst.track for inst in lf_to_copy.instances if inst.track is not None
+ ]
+ assert len(lf_checkpoint_tracks) == len(lf_to_copy_tracks)
+ assert len(lf_to_paste.instances) == len(instances_checkpoint) + 1
+ assert lf_to_paste.instances[-1].points == instance.points
+
+ context.state["labeled_frame"] = lf_to_copy
+ context.state["clipboard_instance"] = instance
+ paste_instance(lf_to_copy, assertions_prior, assertions_post)
+
+ # Case 4: Instance copied and different frame selected, but new frame has same track
+
+ def assertions_prior(instance, lf_to_copy, *args):
+ assert context.state["clipboard_instance"] == instance
+ assert context.state["labeled_frame"] != lf_to_copy
+ lf_to_paste = context.state["labeled_frame"]
+ tracks_in_lf_to_paste = [
+ inst.track for inst in lf_to_paste.instances if inst.track is not None
+ ]
+ assert instance.track in tracks_in_lf_to_paste
+
+ lf_to_paste = labels.labeled_frames[1]
+ context.state["labeled_frame"] = lf_to_paste
+ paste_instance(lf_to_paste, assertions_prior, assertions_post)
+
+ # Case 5: Instance copied and different frame selected, and track not in new frame
+
+ def assertions_prior(instance, lf_to_copy, *args):
+ assert context.state["clipboard_instance"] == instance
+ assert context.state["labeled_frame"] != lf_to_copy
+ lf_to_paste = context.state["labeled_frame"]
+ tracks_in_lf_to_paste = [
+ inst.track for inst in lf_to_paste.instances if inst.track is not None
+ ]
+ assert instance.track not in tracks_in_lf_to_paste
+
+ def assertions_post(instances_checkpoint, lf_to_copy, lf_to_paste, *args):
+ assert len(lf_to_paste.instances) == len(instances_checkpoint) + 1
+ assert lf_to_paste.instances[-1].points == instance.points
+ assert lf_to_paste.instances[-1].track == instance.track
+
+ lf_to_paste = labels.labeled_frames[2]
+ context.state["labeled_frame"] = lf_to_paste
+ for inst in lf_to_paste.instances:
+ inst.track = None
+ paste_instance(lf_to_paste, assertions_prior, assertions_post)
+
+ # Case 6: Instance copied, different frame selected, and frame not in Labels
+
+ def assertions_prior(instance, lf_to_copy, *args):
+ assert context.state["clipboard_instance"] == instance
+ assert context.state["labeled_frame"] != lf_to_copy
+ assert context.state["labeled_frame"] not in labels.labeled_frames
+
+ def assertions_post(instances_checkpoint, lf_to_copy, lf_to_paste, *args):
+ assert len(lf_to_paste.instances) == len(instances_checkpoint) + 1
+ assert lf_to_paste.instances[-1].points == instance.points
+ assert lf_to_paste.instances[-1].track == instance.track
+ assert lf_to_paste in labels.labeled_frames
+
+ lf_to_paste = labels.get((labels.video, 3))
+ labels.labeled_frames.remove(lf_to_paste)
+ lf_to_paste.instances = []
+ context.state["labeled_frame"] = lf_to_paste
+ paste_instance(lf_to_paste, assertions_prior, assertions_post)
+
+
+def test_CopyInstanceTrack(min_tracks_2node_labels: Labels):
+ """Test that copying a track from one instance to another works."""
+ labels = min_tracks_2node_labels
+ instance = labels.labeled_frames[0].instances[0]
+
+ # Set-up CommandContext
+ context: CommandContext = CommandContext.from_labels(labels)
+
+ # Case 1: No instance selected
+ context.copyInstanceTrack()
+ assert context.state["clipboard_track"] is None
+
+ # Case 2: Instance selected and track
+ context.state["instance"] = instance
+ context.copyInstanceTrack()
+ assert context.state["clipboard_track"] == instance.track
+
+ # Case 3: Instance selected and no track
+ instance.track = None
+ context.copyInstanceTrack()
+ assert context.state["clipboard_track"] is None
+
+
+def test_PasteInstanceTrack(min_tracks_2node_labels: Labels):
+ """Test that pasting a track from one instance to another works."""
+ labels = min_tracks_2node_labels
+ instance = labels.labeled_frames[0].instances[0]
+
+ # Set-up CommandContext
+ context: CommandContext = CommandContext.from_labels(labels)
+
+ # Case 1: No instance selected
+ context.state["clipboard_track"] = instance.track
+
+ context.pasteInstanceTrack()
+ assert context.state["instance"] is None
+
+ # Case 2: Instance selected and track
+ lf_to_paste = labels.labeled_frames[1]
+ instance_with_same_track = lf_to_paste.instances[0]
+ instance_to_paste = lf_to_paste.instances[1]
+ context.state["instance"] = instance_to_paste
+ assert instance_to_paste.track != instance.track
+ assert instance_with_same_track.track == instance.track
+
+ context.pasteInstanceTrack()
+ assert instance_to_paste.track == instance.track
+ assert instance_with_same_track.track != instance.track
+
+ # Case 3: Instance selected and no track
+ lf_to_paste = labels.labeled_frames[2]
+ instance_to_paste = lf_to_paste.instances[0]
+ instance.track = None
+
+ context.pasteInstanceTrack()
+ assert isinstance(instance_to_paste.track, Track)
+
+
@pytest.mark.skipif(
sys.platform.startswith("win"),
reason="Files being using in parallel by linux CI tests via Github Actions "
diff --git a/tests/gui/test_import.py b/tests/gui/test_import.py
index 85b2c0bf3..f9f10589a 100644
--- a/tests/gui/test_import.py
+++ b/tests/gui/test_import.py
@@ -7,6 +7,7 @@ def test_gui_import(qtbot):
file_names = [
"tests/data/hdf5_format_v1/training.scale=0.50,sigma=10.h5",
"tests/data/videos/small_robot.mp4",
+ "tests/data/videos/robot0.jpg",
]
importer = ImportParamDialog(file_names)
@@ -15,24 +16,24 @@ def test_gui_import(qtbot):
qtbot.addWidget(importer)
data = importer.get_data()
- assert len(data) == 2
+ assert len(data) == len(file_names)
assert len(data[0]["params"]) > 1
- for import_item in importer.import_widgets:
+ for import_item in importer.import_widgets[:2]:
btn = import_item.enabled_checkbox_widget
- with qtbot.waitSignal(btn.stateChanged, timeout=10):
+ with qtbot.waitSignal(btn.stateChanged, timeout=100):
qtbot.mouseClick(btn, QtCore.Qt.LeftButton)
assert not import_item.is_enabled()
- assert len(importer.get_data()) == 0
+ assert len(importer.get_data()) == 1
- for import_item in importer.import_widgets:
+ for import_item in importer.import_widgets[:2]:
btn = import_item.enabled_checkbox_widget
with qtbot.waitSignal(btn.stateChanged, timeout=10):
qtbot.mouseClick(btn, QtCore.Qt.LeftButton)
assert import_item.is_enabled()
- assert len(importer.get_data()) == 2
+ assert len(importer.get_data()) == len(file_names)
def test_video_import_detect_grayscale():
diff --git a/tests/gui/test_inference_gui.py b/tests/gui/test_inference_gui.py
index 16c989b71..a533a0001 100644
--- a/tests/gui/test_inference_gui.py
+++ b/tests/gui/test_inference_gui.py
@@ -153,3 +153,27 @@ def test_inference_merging():
assert len(labels[2].user_instances) == 1
# Only predicted instances with graphable points should be merged
assert len(labels[2].predicted_instances) == 2
+
+
+def test_inference_movenet_cli(movenet_video):
+
+ models = ["movenet-lightning", "movenet-thunder"]
+
+ for model in models:
+
+ inference_task = runners.InferenceTask(
+ trained_job_paths=[model],
+ inference_params={"tracking.tracker": None},
+ )
+
+ item_for_inference = runners.VideoItemForInference(
+ video=movenet_video, frames=[1, 2, 3]
+ )
+
+ cli_args, output_path = inference_task.make_predict_cli_call(item_for_inference)
+
+ # make sure cli call contains model
+ assert cli_args[0] == "sleap-track"
+ assert model in cli_args
+ assert "--frames" in cli_args
+ assert "--tracking.tracker" in cli_args
diff --git a/tests/io/test_dataset.py b/tests/io/test_dataset.py
index d1f349bff..a6fa7fdd7 100644
--- a/tests/io/test_dataset.py
+++ b/tests/io/test_dataset.py
@@ -1400,6 +1400,17 @@ def test_remove_all_tracks(centered_pair_predictions):
assert all(inst.track is None for inst in labels.instances())
+def test_remove_unused_tracks(min_tracks_2node_labels: Labels):
+ labels = min_tracks_2node_labels
+ assert len(labels.tracks) == 2
+
+ labels.tracks.append(Track(name="unused", spawned_on=0))
+ assert len(labels.tracks) == 3
+
+ labels.remove_unused_tracks()
+ assert len(labels.tracks) == 2
+
+
def test_remove_empty_frames(min_labels):
min_labels.append(sleap.LabeledFrame(video=min_labels.video, frame_idx=2))
assert len(min_labels) == 2
diff --git a/tests/io/test_formats.py b/tests/io/test_formats.py
index 5001febb5..b28de176e 100644
--- a/tests/io/test_formats.py
+++ b/tests/io/test_formats.py
@@ -176,8 +176,9 @@ def test_matching_adaptor(centered_pair_predictions_hdf5_path):
@pytest.mark.parametrize(
"test_data",
[
- "tests/data/dlc/madlc_testdata.csv",
- "tests/data/dlc/madlc_testdata_v2.csv",
+ "tests/data/dlc/labeled-data/video/madlc_testdata.csv",
+ "tests/data/dlc/labeled-data/video/madlc_testdata_v2.csv",
+ "tests/data/dlc/madlc_230_config.yaml",
],
)
def test_madlc(test_data):
@@ -213,7 +214,10 @@ def test_madlc(test_data):
@pytest.mark.parametrize(
"test_data",
- ["tests/data/dlc/dlc_testdata.csv", "tests/data/dlc/dlc_testdata_v2.csv"],
+ [
+ "tests/data/dlc/labeled-data/video/dlc_testdata.csv",
+ "tests/data/dlc/labeled-data/video/dlc_testdata_v2.csv",
+ ],
)
def test_sadlc(test_data):
labels = read(
diff --git a/tests/io/test_video.py b/tests/io/test_video.py
index 44b08b361..82ddfe7a4 100644
--- a/tests/io/test_video.py
+++ b/tests/io/test_video.py
@@ -32,6 +32,10 @@
def test_from_filename(hdf5_file_path, small_robot_mp4_path):
assert type(Video.from_filename(hdf5_file_path).backend) == HDF5Video
assert type(Video.from_filename(small_robot_mp4_path).backend) == MediaVideo
+ assert (
+ type(Video.from_filename(TEST_SMALL_ROBOT_SIV_FILE0).backend)
+ == SingleImageVideo
+ )
def test_backend_extra_kwargs(hdf5_file_path, small_robot_mp4_path):
diff --git a/tests/nn/test_training.py b/tests/nn/test_training.py
index b3eda8676..b95d177ff 100644
--- a/tests/nn/test_training.py
+++ b/tests/nn/test_training.py
@@ -22,7 +22,7 @@
TopdownConfmapsModelTrainer,
TopDownMultiClassModelTrainer,
Trainer,
- main as sleap_train,
+ create_trainer_using_cli as sleap_train,
)
sleap.use_cpu_only()
diff --git a/tests/test_skeleton.py b/tests/test_skeleton.py
index e409f3bbe..1f7c3a853 100644
--- a/tests/test_skeleton.py
+++ b/tests/test_skeleton.py
@@ -1,6 +1,7 @@
import os
import copy
+import jsonpickle
import pytest
from sleap.skeleton import Skeleton
@@ -182,12 +183,37 @@ def test_symmetry():
]
-def test_json(skeleton, tmpdir):
- """
- Test saving and loading a Skeleton object in JSON.
- """
+def test_json(skeleton: Skeleton, tmpdir):
+ """Test saving and loading a Skeleton object in JSON."""
JSON_TEST_FILENAME = os.path.join(tmpdir, "skeleton.json")
+ # Test that `to_json` does not save unused `None` fields (to ensure backwards data
+ # format compatibility)
+ skeleton.description = (
+ "Test that description is not saved when given (if is_template is False)."
+ )
+    assert not skeleton.is_template
+ json_str = skeleton.to_json()
+ json_dict = jsonpickle.decode(json_str)
+ json_dict_keys = list(json_dict.keys())
+ assert "nx_graph" not in json_dict_keys
+ assert "preview_image" not in json_dict_keys
+ assert "description" not in json_dict_keys
+
+ # Test that `is_template` can only be set to True
+ # when has both `description` and `preview_image`
+ with pytest.raises(ValueError):
+ skeleton.is_template = True
+    assert not skeleton.is_template
+
+ skeleton._is_template = True
+ json_str = skeleton.to_json()
+ json_dict = jsonpickle.decode(json_str)
+ json_dict_keys = list(json_dict.keys())
+ assert "nx_graph" in json_dict_keys
+ assert "preview_image" in json_dict_keys
+ assert "description" in json_dict_keys
+
# Save it to a JSON filename
skeleton.save_json(JSON_TEST_FILENAME)