From e0b739fe71c22068878c317e5943c9f11c570638 Mon Sep 17 00:00:00 2001 From: Mario Lucic Date: Tue, 26 Feb 2019 18:16:22 +0100 Subject: [PATCH] Project import generated by Copybara. PiperOrigin-RevId: 235733444 --- README.md | 186 ++-- .../{src => architectures}/__init__.py | 0 compare_gan/architectures/abstract_arch.py | 127 +++ compare_gan/architectures/arch_ops.py | 758 ++++++++++++++ compare_gan/architectures/arch_ops_test.py | 136 +++ .../architectures/arch_ops_tpu_test.py | 137 +++ .../architectures/architectures_test.py | 159 +++ compare_gan/architectures/dcgan.py | 129 +++ compare_gan/architectures/infogan.py | 100 ++ compare_gan/architectures/resnet30.py | 143 +++ compare_gan/architectures/resnet5.py | 145 +++ compare_gan/architectures/resnet5_biggan.py | 428 ++++++++ .../architectures/resnet5_biggan_test.py | 190 ++++ compare_gan/architectures/resnet_cifar.py | 167 ++++ compare_gan/architectures/resnet_init_test.py | 116 +++ compare_gan/architectures/resnet_norm_test.py | 373 +++++++ compare_gan/architectures/resnet_ops.py | 219 +++++ compare_gan/architectures/resnet_stl.py | 108 ++ compare_gan/architectures/sndcgan.py | 127 +++ .../bin/compare_gan_prepare_datasets.sh | 37 - compare_gan/bin/compare_gan_run_one_task | 71 -- compare_gan/datasets.py | 641 ++++++++++++ compare_gan/datasets_test.py | 114 +++ compare_gan/eval_gan_lib.py | 212 ++++ compare_gan/eval_gan_lib_test.py | 125 +++ compare_gan/eval_utils.py | 206 ++++ compare_gan/{src => }/gans/__init__.py | 0 compare_gan/gans/abstract_gan.py | 92 ++ compare_gan/{src => }/gans/consts.py | 30 +- compare_gan/gans/gpu_modular_gan.py | 181 ++++ compare_gan/gans/loss_lib.py | 154 +++ compare_gan/gans/modular_gan.py | 658 +++++++++++++ .../gans/modular_gan_conditional_test.py | 109 ++ compare_gan/gans/modular_gan_test.py | 230 +++++ compare_gan/gans/modular_gan_tpu_test.py | 98 ++ .../{src/params_test.py => gans/ops.py} | 21 +- compare_gan/gans/penalty_lib.py | 113 +++ compare_gan/gans/s3gan.py | 339 +++++++ compare_gan/gans/s3gan_test.py | 86 ++ compare_gan/gans/ssgan.py | 261 +++++ compare_gan/gans/ssgan_test.py | 88 ++ compare_gan/gans/utils.py | 54 + compare_gan/hooks.py | 148 +++ compare_gan/main.py | 134 +++ .../{src/multi_gan => metrics}/__init__.py | 5 +- compare_gan/metrics/accuracy.py | 145 +++ compare_gan/metrics/eval_task.py | 76 ++ compare_gan/metrics/fid_score.py | 75 ++ .../fid_score_test.py} | 37 +- compare_gan/metrics/fractal_dimension.py | 97 ++ compare_gan/metrics/fractal_dimension_test.py | 40 + compare_gan/{src => metrics}/gilbo.py | 439 ++++----- .../{src => metrics}/image_similarity.py | 50 +- compare_gan/metrics/inception_score.py | 48 + .../{src => metrics}/jacobian_conditioning.py | 66 +- .../jacobian_conditioning_test.py | 24 +- compare_gan/metrics/kid_score.py | 149 +++ compare_gan/{src => metrics}/ms_ssim_score.py | 58 +- .../{src => metrics}/ms_ssim_score_test.py | 7 +- compare_gan/{src => metrics}/prd_score.py | 6 +- .../{src => metrics}/prd_score_test.py | 2 +- compare_gan/runner_lib.py | 354 +++++++ compare_gan/runner_lib_test.py | 290 ++++++ compare_gan/src/datasets.py | 253 ----- compare_gan/src/datasets_test.py | 88 -- compare_gan/src/eval_gan_lib.py | 929 ------------------ compare_gan/src/eval_gan_lib_test.py | 435 -------- compare_gan/src/fid_score.py | 179 ---- compare_gan/src/fid_score_test.py | 61 -- compare_gan/src/gan_lib.py | 256 ----- compare_gan/src/gan_lib_test.py | 135 --- compare_gan/src/gans/BEGAN.py | 171 ---- compare_gan/src/gans/DRAGAN.py | 122 --- compare_gan/src/gans/GAN.py | 
121 --- compare_gan/src/gans/LSGAN.py | 89 -- compare_gan/src/gans/VAE.py | 154 --- compare_gan/src/gans/WGAN.py | 106 -- compare_gan/src/gans/WGAN_GP.py | 105 -- .../src/gans/ablation_resnet_architecture.py | 318 ------ compare_gan/src/gans/abstract_gan.py | 453 --------- .../src/gans/abstract_gan_estimator_test.py | 135 --- compare_gan/src/gans/dcgan_architecture.py | 241 ----- compare_gan/src/gans/gan_test.py | 91 -- compare_gan/src/gans/gans_with_penalty.py | 489 --------- .../gans/gans_with_penalty_estimator_test.py | 196 ---- .../src/gans/gans_with_penalty_test.py | 214 ---- compare_gan/src/gans/ops.py | 313 ------ compare_gan/src/gans/resnet_architecture.py | 570 ----------- .../src/gans/resnet_architecture_test.py | 90 -- compare_gan/src/generate_tasks_lib.py | 731 -------------- compare_gan/src/kid_score.py | 216 ---- compare_gan/src/kid_score_test.py | 61 -- compare_gan/src/multi_gan/README.md | 60 -- compare_gan/src/multi_gan/dataset.py | 139 --- .../illustrations/clevr_generated.png | Bin 334677 -> 0 bytes compare_gan/src/multi_gan/multi_gan.py | 353 ------- .../src/multi_gan/multi_gan_background.py | 244 ----- compare_gan/src/multi_gan/run_one_task.py | 345 ------- compare_gan/src/multi_gan/visualize_data.py | 73 -- compare_gan/src/multi_gan/visualize_gan.py | 439 --------- compare_gan/src/params.py | 413 -------- compare_gan/src/simple_task.proto | 19 - compare_gan/src/simple_task_pb2.py | 159 --- compare_gan/src/task_utils.py | 147 --- .../test_data/image_celeba-dev-00000-of-00010 | Bin 7171 -> 0 bytes .../image_celebahq-128-00090-of-00100 | Bin 34906 -> 0 bytes .../image_cifar10-dev-00000-of-00001 | Bin 2478 -> 0 bytes .../image_fashion_mnist-dev-00000-of-00001 | Bin 871 -> 0 bytes .../image_lsun_bedrooms-train-00099-of-00100 | Bin 170141 -> 0 bytes .../test_data/image_mnist-dev-00000-of-00001 | Bin 400 -> 0 bytes compare_gan/src/test_utils.py | 42 - compare_gan/src/tfhub_models.ipynb | 342 ------- .../__init__.py} | 11 +- compare_gan/tpu/tpu_ops.py | 125 +++ compare_gan/tpu/tpu_ops_test.py | 132 +++ compare_gan/tpu/tpu_random.py | 154 +++ compare_gan/tpu/tpu_random_test.py | 172 ++++ compare_gan/tpu/tpu_summaries.py | 113 +++ compare_gan/utils.py | 158 +++ example_configs/README.md | 9 + example_configs/biggan_imagenet128.gin | 51 + example_configs/dcgan_celeba64.gin | 25 + example_configs/resnet_cifar10.gin | 25 + example_configs/resnet_lsun-bedroom128.gin | 25 + example_configs/sndcgan_celebahq128.gin | 25 + setup.py | 36 +- 126 files changed, 10399 insertions(+), 10677 deletions(-) rename compare_gan/{src => architectures}/__init__.py (100%) create mode 100644 compare_gan/architectures/abstract_arch.py create mode 100644 compare_gan/architectures/arch_ops.py create mode 100644 compare_gan/architectures/arch_ops_test.py create mode 100644 compare_gan/architectures/arch_ops_tpu_test.py create mode 100644 compare_gan/architectures/architectures_test.py create mode 100644 compare_gan/architectures/dcgan.py create mode 100644 compare_gan/architectures/infogan.py create mode 100644 compare_gan/architectures/resnet30.py create mode 100644 compare_gan/architectures/resnet5.py create mode 100644 compare_gan/architectures/resnet5_biggan.py create mode 100644 compare_gan/architectures/resnet5_biggan_test.py create mode 100644 compare_gan/architectures/resnet_cifar.py create mode 100644 compare_gan/architectures/resnet_init_test.py create mode 100644 compare_gan/architectures/resnet_norm_test.py create mode 100644 compare_gan/architectures/resnet_ops.py create mode 100644 
compare_gan/architectures/resnet_stl.py create mode 100644 compare_gan/architectures/sndcgan.py delete mode 100755 compare_gan/bin/compare_gan_prepare_datasets.sh delete mode 100755 compare_gan/bin/compare_gan_run_one_task create mode 100644 compare_gan/datasets.py create mode 100644 compare_gan/datasets_test.py create mode 100644 compare_gan/eval_gan_lib.py create mode 100644 compare_gan/eval_gan_lib_test.py create mode 100644 compare_gan/eval_utils.py rename compare_gan/{src => }/gans/__init__.py (100%) create mode 100644 compare_gan/gans/abstract_gan.py rename compare_gan/{src => }/gans/consts.py (74%) create mode 100644 compare_gan/gans/gpu_modular_gan.py create mode 100644 compare_gan/gans/loss_lib.py create mode 100644 compare_gan/gans/modular_gan.py create mode 100644 compare_gan/gans/modular_gan_conditional_test.py create mode 100644 compare_gan/gans/modular_gan_test.py create mode 100644 compare_gan/gans/modular_gan_tpu_test.py rename compare_gan/{src/params_test.py => gans/ops.py} (60%) create mode 100644 compare_gan/gans/penalty_lib.py create mode 100644 compare_gan/gans/s3gan.py create mode 100644 compare_gan/gans/s3gan_test.py create mode 100644 compare_gan/gans/ssgan.py create mode 100644 compare_gan/gans/ssgan_test.py create mode 100644 compare_gan/gans/utils.py create mode 100644 compare_gan/hooks.py create mode 100644 compare_gan/main.py rename compare_gan/{src/multi_gan => metrics}/__init__.py (82%) create mode 100644 compare_gan/metrics/accuracy.py create mode 100644 compare_gan/metrics/eval_task.py create mode 100644 compare_gan/metrics/fid_score.py rename compare_gan/{bin/compare_gan_generate_tasks => metrics/fid_score_test.py} (54%) mode change 100755 => 100644 create mode 100644 compare_gan/metrics/fractal_dimension.py create mode 100644 compare_gan/metrics/fractal_dimension_test.py rename compare_gan/{src => metrics}/gilbo.py (60%) rename compare_gan/{src => metrics}/image_similarity.py (89%) create mode 100644 compare_gan/metrics/inception_score.py rename compare_gan/{src => metrics}/jacobian_conditioning.py (60%) rename compare_gan/{src => metrics}/jacobian_conditioning_test.py (90%) create mode 100644 compare_gan/metrics/kid_score.py rename compare_gan/{src => metrics}/ms_ssim_score.py (56%) rename compare_gan/{src => metrics}/ms_ssim_score_test.py (89%) rename compare_gan/{src => metrics}/prd_score.py (98%) rename compare_gan/{src => metrics}/prd_score_test.py (99%) create mode 100644 compare_gan/runner_lib.py create mode 100644 compare_gan/runner_lib_test.py delete mode 100644 compare_gan/src/datasets.py delete mode 100644 compare_gan/src/datasets_test.py delete mode 100644 compare_gan/src/eval_gan_lib.py delete mode 100644 compare_gan/src/eval_gan_lib_test.py delete mode 100644 compare_gan/src/fid_score.py delete mode 100644 compare_gan/src/fid_score_test.py delete mode 100644 compare_gan/src/gan_lib.py delete mode 100644 compare_gan/src/gan_lib_test.py delete mode 100644 compare_gan/src/gans/BEGAN.py delete mode 100644 compare_gan/src/gans/DRAGAN.py delete mode 100644 compare_gan/src/gans/GAN.py delete mode 100644 compare_gan/src/gans/LSGAN.py delete mode 100644 compare_gan/src/gans/VAE.py delete mode 100644 compare_gan/src/gans/WGAN.py delete mode 100644 compare_gan/src/gans/WGAN_GP.py delete mode 100644 compare_gan/src/gans/ablation_resnet_architecture.py delete mode 100644 compare_gan/src/gans/abstract_gan.py delete mode 100644 compare_gan/src/gans/abstract_gan_estimator_test.py delete mode 100644 compare_gan/src/gans/dcgan_architecture.py delete mode 
100644 compare_gan/src/gans/gan_test.py delete mode 100644 compare_gan/src/gans/gans_with_penalty.py delete mode 100644 compare_gan/src/gans/gans_with_penalty_estimator_test.py delete mode 100644 compare_gan/src/gans/gans_with_penalty_test.py delete mode 100644 compare_gan/src/gans/ops.py delete mode 100644 compare_gan/src/gans/resnet_architecture.py delete mode 100644 compare_gan/src/gans/resnet_architecture_test.py delete mode 100644 compare_gan/src/generate_tasks_lib.py delete mode 100644 compare_gan/src/kid_score.py delete mode 100644 compare_gan/src/kid_score_test.py delete mode 100644 compare_gan/src/multi_gan/README.md delete mode 100644 compare_gan/src/multi_gan/dataset.py delete mode 100644 compare_gan/src/multi_gan/illustrations/clevr_generated.png delete mode 100644 compare_gan/src/multi_gan/multi_gan.py delete mode 100644 compare_gan/src/multi_gan/multi_gan_background.py delete mode 100644 compare_gan/src/multi_gan/run_one_task.py delete mode 100644 compare_gan/src/multi_gan/visualize_data.py delete mode 100644 compare_gan/src/multi_gan/visualize_gan.py delete mode 100644 compare_gan/src/params.py delete mode 100644 compare_gan/src/simple_task.proto delete mode 100644 compare_gan/src/simple_task_pb2.py delete mode 100644 compare_gan/src/task_utils.py delete mode 100644 compare_gan/src/test_data/image_celeba-dev-00000-of-00010 delete mode 100644 compare_gan/src/test_data/image_celebahq-128-00090-of-00100 delete mode 100644 compare_gan/src/test_data/image_cifar10-dev-00000-of-00001 delete mode 100644 compare_gan/src/test_data/image_fashion_mnist-dev-00000-of-00001 delete mode 100644 compare_gan/src/test_data/image_lsun_bedrooms-train-00099-of-00100 delete mode 100644 compare_gan/src/test_data/image_mnist-dev-00000-of-00001 delete mode 100644 compare_gan/src/test_utils.py delete mode 100644 compare_gan/src/tfhub_models.ipynb rename compare_gan/{bin/compare_gan_run_test.sh => tpu/__init__.py} (57%) mode change 100755 => 100644 create mode 100644 compare_gan/tpu/tpu_ops.py create mode 100644 compare_gan/tpu/tpu_ops_test.py create mode 100644 compare_gan/tpu/tpu_random.py create mode 100644 compare_gan/tpu/tpu_random_test.py create mode 100644 compare_gan/tpu/tpu_summaries.py create mode 100644 compare_gan/utils.py create mode 100644 example_configs/README.md create mode 100644 example_configs/biggan_imagenet128.gin create mode 100644 example_configs/dcgan_celeba64.gin create mode 100644 example_configs/resnet_cifar10.gin create mode 100644 example_configs/resnet_lsun-bedroom128.gin create mode 100644 example_configs/sndcgan_celebahq128.gin diff --git a/README.md b/README.md index b230bd5..525b321 100644 --- a/README.md +++ b/README.md @@ -1,73 +1,113 @@ -## Compare GAN code. - -This is the code that was used in "Are GANs Created Equal? A Large-Scale Study" -paper (https://arxiv.org/abs/1711.10337) and in "The GAN Landscape: Losses, -Architectures, Regularization, and Normalization" -(https://arxiv.org/abs/1807.04720). - -If you want to see the version used only in the first paper - please see the -*v1* branch of this repository. - -## Pre-trained models - -The pre-trained models are available on TensorFlow Hub. Please see -[this colab](https://colab.research.google.com/github/google/compare_gan/blob/master/compare_gan/src/tfhub_models.ipynb) -for an example how to use them. - -### Best hyperparameters - -This repository also contains the values for the best hyperparameters for -different combinations of models, regularizations and penalties. 
You can see
-them in `generate_tasks_lib.py` file and train using
-`--experiment=best_models_sndcgan`
-
-### Installation:
-
-To install, run:
-
-```shell
-python -m pip install -e . --user
-```
-
-After installing, make sure to run
-
-```shell
-compare_gan_prepare_datasets.sh
-```
-
-It will download all the necessary datasets and frozen TF graphs. By default it
-will store them in `/tmp/datasets`.
-
-WARNING: by default this script only downloads and installs small datasets - it
-doesn't download celebaHQ or lsun bedrooms.
-
-* **Lsun bedrooms dataset**: If you want to install lsun-bedrooms you need to
-  run t2t-datagen yourself (this dataset will take couple hours to download
-  and unpack).
-
-* **CelebaHQ dataset**: currently it is not available in tensor2tensor. Please
-  use the
-  [ProgressiveGAN github](https://github.com/tkarras/progressive_growing_of_gans)
-  for instructions on how to prepare it.
-
-### Running
-
-compare_gan has two binaries:
-
-* `generate_tasks` - that creates a list of files with parameters to execute
-* `run_one_task` - that executes a given task, both training and evaluation,
-  and stores results in the CSV file.
-
-```shell
-# Create tasks for experiment "test" in directory /tmp/results. See "src/generate_tasks_lib.py" to see other possible experiments.
-compare_gan_generate_tasks --workdir=/tmp/results --experiment=test
-
-# Run task 0 (training and eval)
-compare_gan_run_one_task --workdir=/tmp/results --task_num=0 --dataset_root=/tmp/datasets
-
-# Run task 1 (training and eval)
-compare_gan_run_one_task --workdir=/tmp/results --task_num=1 --dataset_root=/tmp/datasets
-```
-
-Results (all computed metrics) will be stored in
-`/tmp/results/TASK_NUM/scores.csv`.
+# Compare GAN
+
+This repository offers TensorFlow implementations for many components related to
+**Generative Adversarial Networks**:
+
+* losses (such as non-saturating GAN, least-squares GAN, and WGAN),
+* penalties (such as the gradient penalty),
+* normalization techniques (such as spectral normalization, batch
+  normalization, and layer normalization),
+* neural architectures (BigGAN, ResNet, DCGAN), and
+* evaluation metrics (FID score, Inception Score, precision-recall, and KID
+  score).
+
+The code is **configurable via [Gin](https://github.com/google/gin-config)** and
+runs on **GPU/TPU/CPUs**. Several research papers make use of this repository,
+including:
+
+1. [Are GANs Created Equal? A Large-Scale Study](https://arxiv.org/abs/1711.10337)
+   [[Code]](https://github.com/google/compare_gan/tree/v1)
+   \
+   Mario Lucic*, Karol Kurach*, Marcin Michalski, Sylvain Gelly, Olivier
+   Bousquet **[NeurIPS 2018]**
+
+2. [The GAN Landscape: Losses, Architectures, Regularization, and Normalization](https://arxiv.org/abs/1807.04720)
+   [[Code]](https://github.com/google/compare_gan/tree/v2)
+   \
+   Karol Kurach*, Mario Lucic*, Xiaohua Zhai, Marcin Michalski, Sylvain Gelly
+   **[2018]**
+
+3. [Assessing Generative Models via Precision and Recall](https://arxiv.org/abs/1806.00035)
+   [[Code]](https://github.com/google/compare_gan/blob/560697ee213f91048c6b4231ab79fcdd9bf20381/compare_gan/src/prd_score.py)
+   \
+   Mehdi S. M. Sajjadi, Olivier Bachem, Mario Lucic, Olivier Bousquet, Sylvain
+   Gelly **[NeurIPS 2018]**
+
+4. [GILBO: One Metric to Measure Them All](https://arxiv.org/abs/1802.04874)
+   [[Code]](https://github.com/google/compare_gan/blob/560697ee213f91048c6b4231ab79fcdd9bf20381/compare_gan/src/gilbo.py)
+   \
+   Alexander A. Alemi, Ian Fischer **[NeurIPS 2018]**
+
+5. [A Case for Object Compositionality in Deep Generative Models of Images](https://arxiv.org/abs/1810.10340)
+   [[Code]](https://github.com/google/compare_gan/tree/v2_multigan)
+   \
+   Sjoerd van Steenkiste, Karol Kurach, Sylvain Gelly **[2018]**
+
+6. [On Self Modulation for Generative Adversarial Networks](https://arxiv.org/abs/1810.01365)
+   [[Code]](https://github.com/google/compare_gan) \
+   Ting Chen, Mario Lucic, Neil Houlsby, Sylvain Gelly **[ICLR 2019]**
+
+7. [Self-Supervised Generative Adversarial Networks](https://arxiv.org/abs/1811.11212)
+   [[Code]](https://github.com/google/compare_gan) \
+   Ting Chen, Xiaohua Zhai, Marvin Ritter, Mario Lucic, Neil Houlsby **[CVPR
+   2019]**
+
+
+## Installation
+
+You can easily install the library and all necessary dependencies by running
+`pip install -e .` from the `compare_gan/` folder.
+
+## Running experiments
+
+Simply run `main.py`, passing a `--model_dir` (this is where checkpoints are
+stored) and a `--gin_config` (defines which model to train, on which data set,
+and other options). We provide several example configurations in the
+`example_configs/` folder, namely:
+
+* **dcgan_celeba64**: DCGAN architecture with non-saturating loss on CelebA
+  64x64px
+* **resnet_cifar10**: ResNet architecture with non-saturating loss and
+  spectral normalization on CIFAR-10
+* **resnet_lsun-bedroom128**: ResNet architecture with WGAN loss and gradient
+  penalty on LSUN-bedrooms 128x128px
+* **sndcgan_celebahq128**: SN-DCGAN architecture with non-saturating loss and
+  spectral normalization on CelebA-HQ 128x128px
+* **biggan_imagenet128**: BigGAN architecture with hinge loss and spectral
+  normalization on ImageNet 128x128px
+
+### Training and evaluation
+
+To see all available options, run `python main.py --help`. Main options (a
+complete example invocation is given at the end of this README):
+
+* To **train** the model use `--schedule=train` (default). Training is resumed
+  from the last saved checkpoint.
+* To **evaluate** all checkpoints use `--schedule=continuous_eval
+  --eval_every_steps=0`. To evaluate only checkpoints where the step number is
+  divisible by 5000, use `--schedule=continuous_eval --eval_every_steps=5000`.
+  By default, 3 averaging runs are used to estimate the Inception Score and
+  the FID score. Keep in mind that when running locally on a single GPU it may
+  not be possible to run training and evaluation simultaneously due to memory
+  constraints.
+* To **train and evaluate** the model use `--schedule=eval_after_train
+  --eval_every_steps=0`.
+
+### Training on Cloud TPUs
+
+We recommend using the
+[ctpu tool](https://github.com/tensorflow/tpu/tree/master/tools/ctpu) to create
+a Cloud TPU and corresponding Compute Engine VM. We use a v3-128 Cloud TPU v3
+Pod for training models on ImageNet at 128x128 resolution. You can use smaller
+slices if you reduce the batch size (`options.batch_size` in the Gin config) or
+the number of model parameters. Keep in mind that the model quality might
+change. Before training, make sure that the environment variable `TPU_NAME` is
+set. Running evaluation on TPUs is currently not supported. Use a VM with a
+single GPU instead.
+
+### Datasets
+
+Compare GAN uses [TensorFlow Datasets](https://www.tensorflow.org/datasets),
+which will automatically download and prepare the data. For ImageNet you will
+need to download the archive yourself. For CelebA-HQ you need to download and
+prepare the images on your own. If you are using TPUs, make sure to point the
+training script to your Google Cloud Storage bucket (`--tfds_data_dir`).
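+
+For example, a complete local run that trains the CIFAR-10 ResNet model and
+then evaluates it could look as follows (the model directory is illustrative;
+any writable path works):
+
+```shell
+python compare_gan/main.py \
+  --model_dir=/tmp/resnet_cifar10 \
+  --gin_config=example_configs/resnet_cifar10.gin \
+  --schedule=eval_after_train --eval_every_steps=0
+```
+
+To train on a smaller TPU slice you would, for instance, lower the batch size
+by overriding the corresponding binding in your copy of the Gin config (the
+value below is illustrative):
+
+```
+options.batch_size = 64
+```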
diff --git a/compare_gan/src/__init__.py b/compare_gan/architectures/__init__.py
similarity index 100%
rename from compare_gan/src/__init__.py
rename to compare_gan/architectures/__init__.py
diff --git a/compare_gan/architectures/abstract_arch.py b/compare_gan/architectures/abstract_arch.py
new file mode 100644
index 0000000..a6688c3
--- /dev/null
+++ b/compare_gan/architectures/abstract_arch.py
@@ -0,0 +1,127 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Defines interfaces for generator and discriminator networks."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import abc
+from compare_gan import utils
+import gin
+import six
+import tensorflow as tf
+
+
+@gin.configurable("G", blacklist=["name", "image_shape"])
+@six.add_metaclass(abc.ABCMeta)
+class AbstractGenerator(object):
+  """Interface for generator architectures."""
+
+  def __init__(self,
+               name="generator",
+               image_shape=None,
+               batch_norm_fn=None,
+               spectral_norm=False):
+    """Constructor for all generator architectures.
+
+    Args:
+      name: Scope name of the generator.
+      image_shape: Image shape to be generated, [height, width, colors].
+      batch_norm_fn: Function for batch normalization or None.
+      spectral_norm: If True, use spectral normalization for all weights.
+    """
+    self._name = name
+    self._image_shape = image_shape
+    self._batch_norm_fn = batch_norm_fn
+    self._spectral_norm = spectral_norm
+
+  def __call__(self, z, y, is_training, reuse=tf.AUTO_REUSE):
+    with tf.variable_scope(self._name, values=[z, y], reuse=reuse):
+      outputs = self.apply(z=z, y=y, is_training=is_training)
+    return outputs
+
+  def batch_norm(self, inputs, **kwargs):
+    if self._batch_norm_fn is None:
+      return inputs
+    args = kwargs.copy()
+    args["inputs"] = inputs
+    if "use_sn" not in args:
+      args["use_sn"] = self._spectral_norm
+    return utils.call_with_accepted_args(self._batch_norm_fn, **args)
+
+  @abc.abstractmethod
+  def apply(self, z, y, is_training):
+    """Apply the generator to an input.
+
+    Args:
+      z: `Tensor` of shape [batch_size, z_dim] with latent code.
+      y: `Tensor` of shape [batch_size, num_classes] with one-hot encoded
+        labels.
+      is_training: Boolean, whether the architecture should be constructed for
+        training or inference.
+
+    Returns:
+      Generated images of shape [batch_size] + self.image_shape.
+    """
+
+
+@gin.configurable("D", blacklist=["name"])
+@six.add_metaclass(abc.ABCMeta)
+class AbstractDiscriminator(object):
+  """Interface for discriminator architectures."""
+
+  def __init__(self,
+               name="discriminator",
+               batch_norm_fn=None,
+               layer_norm=False,
+               spectral_norm=False):
+    self._name = name
+    self._batch_norm_fn = batch_norm_fn
+    self._layer_norm = layer_norm
+    self._spectral_norm = spectral_norm
+
+  def __call__(self, x, y, is_training, reuse=tf.AUTO_REUSE):
+    with tf.variable_scope(self._name, values=[x, y], reuse=reuse):
+      outputs = self.apply(x=x, y=y, is_training=is_training)
+    return outputs
+
+  def batch_norm(self, inputs, **kwargs):
+    if self._batch_norm_fn is None:
+      return inputs
+    args = kwargs.copy()
+    args["inputs"] = inputs
+    if "use_sn" not in args:
+      args["use_sn"] = self._spectral_norm
+    return utils.call_with_accepted_args(self._batch_norm_fn, **args)
+
+  @abc.abstractmethod
+  def apply(self, x, y, is_training):
+    """Apply the discriminator to an input.
+
+    Args:
+      x: `Tensor` of shape [batch_size, ?, ?, ?] with real or fake images.
+      y: `Tensor` of shape [batch_size, num_classes] with one-hot encoded
+        labels.
+      is_training: Boolean, whether the architecture should be constructed for
+        training or inference.
+
+    Returns:
+      Tuple of 3 Tensors: the final prediction of the discriminator, the logits
+      before the final output activation function, and the logits from the
+      second-to-last layer.
+    """
diff --git a/compare_gan/architectures/arch_ops.py b/compare_gan/architectures/arch_ops.py
new file mode 100644
index 0000000..0024be4
--- /dev/null
+++ b/compare_gan/architectures/arch_ops.py
@@ -0,0 +1,758 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provides a library of custom architecture-related operations.
+
+It currently provides the following operations:
+- linear, conv2d, deconv2d, lrelu
+- batch norm, conditional batch norm, self-modulation
+- spectral norm, weight norm, layer norm
+- self-attention block
+- various weight initialization schemes
+
+These operations are supported on both GPUs and TPUs.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import functools
+
+from absl import logging
+
+from compare_gan.gans import consts
+from compare_gan.tpu import tpu_ops
+import gin
+from six.moves import range
+import tensorflow as tf
+
+from tensorflow.contrib.tpu.python.tpu import tpu_function
+from tensorflow.python.training import moving_averages  # pylint: disable=g-direct-tensorflow-import
+
+
+@gin.configurable("weights")
+def weight_initializer(initializer=consts.NORMAL_INIT, stddev=0.02):
+  """Returns the initializer for the given name.
+
+  Args:
+    initializer: Name of the initializer. Use one in consts.INITIALIZERS.
+    stddev: Standard deviation passed to the initializer.
+
+  Returns:
+    Initializer from `tf.initializers`.
+  """
+  if initializer == consts.NORMAL_INIT:
+    return tf.initializers.random_normal(stddev=stddev)
+  if initializer == consts.TRUNCATED_INIT:
+    return tf.initializers.truncated_normal(stddev=stddev)
+  if initializer == consts.ORTHOGONAL_INIT:
+    return tf.initializers.orthogonal()
+  raise ValueError("Unknown weight initializer {}.".format(initializer))
+
+
+def _moving_moments_for_inference(mean, variance, is_training, decay):
+  """Use moving averages of moments during inference.
+
+  Args:
+    mean: Tensor of shape [num_channels] with the mean of the current batch.
+    variance: Tensor of shape [num_channels] with the variance of the current
+      batch.
+    is_training: Boolean, whether to construct ops for the training or
+      inference graph.
+    decay: Decay rate to use for moving averages.
+
+  Returns:
+    Tuple of (mean, variance) to use. This can be the same as the inputs.
+  """
+  # Create the moving average variables and add them to the appropriate
+  # collections.
+  variable_collections = [
+      tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
+      tf.GraphKeys.MODEL_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES,
+  ]
+  # Disable partition setting for moving_mean and moving_variance
+  # as the assign_moving_average op below doesn't support partitioned variables.
+  moving_mean = tf.get_variable(
+      "moving_mean",
+      shape=mean.shape,
+      initializer=tf.zeros_initializer(),
+      trainable=False,
+      partitioner=None,
+      collections=variable_collections)
+  moving_variance = tf.get_variable(
+      "moving_variance",
+      shape=variance.shape,
+      initializer=tf.ones_initializer(),
+      trainable=False,
+      partitioner=None,
+      collections=variable_collections)
+  if is_training:
+    logging.debug("Adding update ops for moving averages of mean and variance.")
+    # Update variables for mean and variance during training.
+    update_moving_mean = moving_averages.assign_moving_average(
+        moving_mean,
+        tf.cast(mean, moving_mean.dtype),
+        decay,
+        zero_debias=False)
+    update_moving_variance = moving_averages.assign_moving_average(
+        moving_variance,
+        tf.cast(variance, moving_variance.dtype),
+        decay,
+        zero_debias=False)
+    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_moving_mean)
+    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_moving_variance)
+    return mean, variance
+  logging.debug("Using moving mean and variance.")
+  return moving_mean, moving_variance
+
+
+def _accumulated_moments_for_inference(mean, variance, is_training):
+  """Use accumulated statistics for moments during inference.
+
+  After training the user is responsible for filling the accumulators with the
+  actual values. See _UpdateBnAccumulators() in eval_gan_lib.py for an example.
+
+  Args:
+    mean: Tensor of shape [num_channels] with the mean of the current batch.
+    variance: Tensor of shape [num_channels] with the variance of the current
+      batch.
+    is_training: Boolean, whether to construct ops for the training or
+      inference graph.
+
+  Returns:
+    Tuple of (mean, variance) to use. This can be the same as the inputs.
+  """
+  variable_collections = [
+      tf.GraphKeys.MODEL_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES,
+  ]
+  with tf.variable_scope("accu", values=[mean, variance]):
+    # Create variables for accumulating batch statistics and use them during
+    # inference. The ops for filling the accumulators must be created and run
+    # before eval. See docstring above.
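+    # At inference time the moments are read back as accu_mean / accu_counter
+    # and accu_variance / accu_counter; accu_counter starts at a tiny value
+    # (1e-12) so that reading the accumulators before they are filled does not
+    # divide by zero.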
+    accu_mean = tf.get_variable(
+        "accu_mean",
+        shape=mean.shape,
+        initializer=tf.zeros_initializer(),
+        trainable=False,
+        collections=variable_collections)
+    accu_variance = tf.get_variable(
+        "accu_variance",
+        shape=variance.shape,
+        initializer=tf.zeros_initializer(),
+        trainable=False,
+        collections=variable_collections)
+    accu_counter = tf.get_variable(
+        "accu_counter",
+        shape=[],
+        initializer=tf.initializers.constant(1e-12),
+        trainable=False,
+        collections=variable_collections)
+    update_accus = tf.get_variable(
+        "update_accus",
+        shape=[],
+        dtype=tf.int32,
+        initializer=tf.zeros_initializer(),
+        trainable=False,
+        collections=variable_collections)
+
+    mean = tf.identity(mean, "mean")
+    variance = tf.identity(variance, "variance")
+
+    if is_training:
+      return mean, variance
+
+    logging.debug("Using accumulated moments.")
+    # Return the accumulated batch statistics and add current batch statistics
+    # to the accumulators if the update_accus variable equals 1.
+    def update_accus_fn():
+      return tf.group([
+          tf.assign_add(accu_mean, mean),
+          tf.assign_add(accu_variance, variance),
+          tf.assign_add(accu_counter, 1),
+      ])
+    dep = tf.cond(
+        tf.equal(update_accus, 1),
+        update_accus_fn,
+        tf.no_op)
+    with tf.control_dependencies([dep]):
+      return accu_mean / accu_counter, accu_variance / accu_counter
+
+
+@gin.configurable(whitelist=["decay", "epsilon", "use_cross_replica_mean",
+                             "use_moving_averages"])
+def standardize_batch(inputs,
+                      is_training,
+                      decay=0.999,
+                      epsilon=1e-3,
+                      data_format="NHWC",
+                      use_moving_averages=True,
+                      use_cross_replica_mean=None):
+  """Adds a TPU-enabled batch normalization layer.
+
+  This version does not apply trainable scale or offset!
+  It normalizes a tensor by mean and variance.
+
+  Details on Batch Normalization can be found in "Batch Normalization:
+  Accelerating Deep Network Training by Reducing Internal Covariate Shift",
+  Ioffe S. and Szegedy C. 2015 [http://arxiv.org/abs/1502.03167].
+
+  Note #1: This method computes the batch statistic across all TPU replicas,
+  thus simulating the true batch norm in the distributed setting. If one wants
+  to avoid the cross-replica communication, set use_cross_replica_mean=False.
+
+  Note #2: When is_training is True the moving_mean and moving_variance need
+  to be updated in each training step. By default, the update_ops are placed
+  in `tf.GraphKeys.UPDATE_OPS` and they need to be added as a dependency to
+  the `train_op`. For example:
+
+    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
+    if update_ops:
+      updates = tf.group(*update_ops)
+      total_loss = control_flow_ops.with_dependencies([updates], total_loss)
+
+  Note #3: Reasonable values for `decay` are close to 1.0, typically in the
+  multiple-nines range: 0.999, 0.99, 0.9, etc. Lower the `decay` value (try
+  `decay`=0.9) if the model experiences reasonably good training performance
+  but poor validation and/or test performance.
+
+  Args:
+    inputs: A tensor with 2 or 4 dimensions, where the first dimension is
+      `batch_size`. The normalization is over all but the last dimension if
+      `data_format` is `NHWC`, and the second dimension if `data_format` is
+      `NCHW`.
+    is_training: Whether or not the layer is in training mode. In training
+      mode it accumulates the statistics of the moments into the
+      `moving_mean` and `moving_variance` using an exponential moving average
+      with the given `decay`. When is_training=False, these variables are not
+      updated, and the precomputed values are used verbatim.
+    decay: Decay for the moving averages. See notes above for reasonable
+      values.
+    epsilon: Small float added to variance to avoid dividing by zero.
+    data_format: Input data format. NHWC or NCHW.
+    use_moving_averages: If True, keep moving averages of mean and variance
+      that are used during inference. Otherwise use accumulators.
+    use_cross_replica_mean: If True, add operations to compute batch norm
+      statistics across all TPU cores. These ops are not compatible with other
+      platforms. The default (None) will only add the operations if running
+      on TPU.
+
+  Returns:
+    The normalized tensor with the same type and shape as `inputs`.
+  """
+  if data_format not in {"NCHW", "NHWC"}:
+    raise ValueError(
+        "Invalid data_format {}. Allowed: NCHW, NHWC.".format(data_format))
+  if use_cross_replica_mean is None:
+    # Default to global batch norm only on TPUs.
+    use_cross_replica_mean = (
+        tpu_function.get_tpu_context().number_of_shards is not None)
+    logging.debug("Automatically determined use_cross_replica_mean=%s.",
+                  use_cross_replica_mean)
+
+  inputs = tf.convert_to_tensor(inputs)
+  inputs_dtype = inputs.dtype
+  inputs_shape = inputs.get_shape()
+
+  num_channels = inputs.shape[-1].value
+  if num_channels is None:
+    raise ValueError("`C` dimension must be known but is None")
+
+  inputs_rank = inputs_shape.ndims
+  if inputs_rank is None:
+    raise ValueError("Inputs %s has undefined rank" % inputs.name)
+  elif inputs_rank not in [2, 4]:
+    raise ValueError(
+        "Inputs %s has unsupported rank."
+        " Expected 2 or 4 but got %d" % (inputs.name, inputs_rank))
+  # Bring 2-D inputs into 4-D format.
+  if inputs_rank == 2:
+    new_shape = [-1, 1, 1, num_channels]
+    if data_format == "NCHW":
+      new_shape = [-1, num_channels, 1, 1]
+    inputs = tf.reshape(inputs, new_shape)
+
+  # Execute a distributed batch normalization.
+  axis = 1 if data_format == "NCHW" else 3
+  inputs = tf.cast(inputs, tf.float32)
+  reduction_axes = [i for i in range(4) if i != axis]
+  if use_cross_replica_mean:
+    mean, variance = tpu_ops.cross_replica_moments(inputs, reduction_axes)
+  else:
+    counts, mean_ss, variance_ss, _ = tf.nn.sufficient_statistics(
+        inputs, reduction_axes, keep_dims=False)
+    mean, variance = tf.nn.normalize_moments(
+        counts, mean_ss, variance_ss, shift=None)
+
+  if use_moving_averages:
+    mean, variance = _moving_moments_for_inference(
+        mean=mean, variance=variance, is_training=is_training, decay=decay)
+  else:
+    mean, variance = _accumulated_moments_for_inference(
+        mean=mean, variance=variance, is_training=is_training)
+
+  outputs = tf.nn.batch_normalization(
+      inputs,
+      mean=mean,
+      variance=variance,
+      offset=None,
+      scale=None,
+      variance_epsilon=epsilon)
+  outputs = tf.cast(outputs, inputs_dtype)
+
+  # Bring 2-D inputs back into 2-D format.
+  if inputs_rank == 2:
+    outputs = tf.reshape(outputs, [-1] + inputs_shape[1:].as_list())
+  outputs.set_shape(inputs_shape)
+  return outputs
+
+
+@gin.configurable(blacklist=["inputs"])
+def no_batch_norm(inputs):
+  return inputs
+
+
+@gin.configurable(
+    blacklist=["inputs", "is_training", "center", "scale", "name"])
+def batch_norm(inputs, is_training, center=True, scale=True, name="batch_norm"):
+  """Performs the vanilla batch normalization with trainable scaling and offset.
+
+  Args:
+    inputs: A tensor with 2 or 4 dimensions, where the first dimension is
+      `batch_size`. The normalization is over all but the last dimension if
+      `data_format` is `NHWC`, and the second dimension if `data_format` is
+      `NCHW`.
+    is_training: Whether or not the layer is in training mode.
+    center: If True, add offset of beta to normalized tensor.
+    scale: If True, multiply by gamma. When the next layer is linear this can
+      be disabled since the scaling will be done by the next layer.
+    name: Name of the variable scope.
+
+  Returns:
+    The normalized tensor with the same type and shape as `inputs`.
+  """
+  with tf.variable_scope(name, values=[inputs]):
+    outputs = standardize_batch(inputs, is_training=is_training)
+    num_channels = inputs.shape[-1].value
+
+    # Allocate parameters for the trainable variables.
+    collections = [tf.GraphKeys.MODEL_VARIABLES,
+                   tf.GraphKeys.GLOBAL_VARIABLES]
+    if scale:
+      gamma = tf.get_variable(
+          "gamma",
+          [num_channels],
+          collections=collections,
+          initializer=tf.ones_initializer())
+      outputs *= gamma
+    if center:
+      beta = tf.get_variable(
+          "beta",
+          [num_channels],
+          collections=collections,
+          initializer=tf.zeros_initializer())
+      outputs += beta
+    return outputs
+
+
+@gin.configurable(whitelist=["num_hidden"])
+def self_modulated_batch_norm(inputs, z, is_training, use_sn,
+                              center=True, scale=True,
+                              name="batch_norm", num_hidden=32):
+  """Performs a self-modulated batch normalization.
+
+  Details can be found in "On Self Modulation for Generative Adversarial
+  Networks", Chen T. et al., 2018. [https://arxiv.org/abs/1810.01365]
+
+  Like a normal batch normalization but the scale and offset are trainable
+  transformations of `z`.
+
+  Args:
+    inputs: A tensor with 2 or 4 dimensions, where the first dimension is
+      `batch_size`. The normalization is over all but the last dimension if
+      `data_format` is `NHWC`, and the second dimension if `data_format` is
+      `NCHW`.
+    z: 2-D tensor with shape [batch_size, ?] with the latent code.
+    is_training: Whether or not the layer is in training mode.
+    use_sn: Whether to apply spectral normalization to the weights of the
+      hidden layer and the linear transformations.
+    center: If True, add offset of beta to normalized tensor.
+    scale: If True, multiply by gamma. When the next layer is linear this can
+      be disabled since the scaling will be done by the next layer.
+    name: Name of the variable scope.
+    num_hidden: Number of hidden units in the hidden layer. If 0, the scale and
+      offset are simple linear transformations of `z`.
+
+  Returns:
+    The normalized tensor with the same type and shape as `inputs`.
+  """
+  if z is None:
+    raise ValueError("You must provide z for self modulation.")
+  with tf.variable_scope(name, values=[inputs]):
+    outputs = standardize_batch(inputs, is_training=is_training)
+    num_channels = inputs.shape[-1].value
+
+    with tf.variable_scope("sbn", values=[inputs, z]):
+      h = z
+      if num_hidden > 0:
+        h = linear(h, num_hidden, scope="hidden", use_sn=use_sn)
+        h = tf.nn.relu(h)
+      if scale:
+        gamma = linear(h, num_channels, scope="gamma", bias_start=1.0,
+                       use_sn=use_sn)
+        gamma = tf.reshape(gamma, [-1, 1, 1, num_channels])
+        outputs *= gamma
+      if center:
+        beta = linear(h, num_channels, scope="beta", use_sn=use_sn)
+        beta = tf.reshape(beta, [-1, 1, 1, num_channels])
+        outputs += beta
+      return outputs
+
+
+@gin.configurable(whitelist=["use_bias"])
+def conditional_batch_norm(inputs, y, is_training, use_sn, center=True,
+                           scale=True, name="batch_norm", use_bias=False):
+  """Conditional batch normalization."""
+  if y is None:
+    raise ValueError("You must provide y for conditional batch normalization.")
+  if y.shape.ndims != 2:
+    raise ValueError("Conditioning must have rank 2.")
+  with tf.variable_scope(name, values=[inputs]):
+    outputs = standardize_batch(inputs, is_training=is_training)
+    num_channels = inputs.shape[-1].value
+    with tf.variable_scope("condition", values=[inputs, y]):
+      if scale:
+        gamma = linear(y, num_channels, scope="gamma", use_sn=use_sn,
+                       use_bias=use_bias)
+        gamma = tf.reshape(gamma, [-1, 1, 1, num_channels])
+        outputs *= gamma
+      if center:
+        beta = linear(y, num_channels, scope="beta", use_sn=use_sn,
+                      use_bias=use_bias)
+        beta = tf.reshape(beta, [-1, 1, 1, num_channels])
+        outputs += beta
+      return outputs
+
+
+def layer_norm(input_, is_training, scope):
+  return tf.contrib.layers.layer_norm(
+      input_, trainable=is_training, scope=scope)
+
+
+@gin.configurable(blacklist=["inputs"])
+def spectral_norm(inputs, epsilon=1e-12, singular_value="left"):
+  """Performs Spectral Normalization on a weight tensor.
+
+  Details of why this is helpful for GANs can be found in "Spectral
+  Normalization for Generative Adversarial Networks", Miyato T. et al., 2018.
+  [https://arxiv.org/abs/1802.05957].
+
+  Args:
+    inputs: The weight tensor to normalize.
+    epsilon: Epsilon for L2 normalization.
+    singular_value: Which first singular value to store (left or right). Use
+      "auto" to automatically choose the one that has fewer dimensions.
+
+  Returns:
+    The normalized weight tensor.
+  """
+  if len(inputs.shape) < 2:
+    raise ValueError(
+        "Spectral norm can only be applied to multi-dimensional tensors")
+
+  # The paper says to flatten convnet kernel weights from (C_out, C_in, KH, KW)
+  # to (C_out, C_in * KH * KW). Our Conv2D kernel shape is (KH, KW, C_in, C_out)
+  # so it should be reshaped to (KH * KW * C_in, C_out), and similarly for other
+  # layers that put output channels as last dimension. This implies that w
+  # here is equivalent to w.T in the paper.
+  w = tf.reshape(inputs, (-1, inputs.shape[-1]))
+
+  # Choose whether to persist the first left or first right singular vector.
+  # As the underlying matrix is PSD, this should be equivalent, but in practice
+  # the shape of the persisted vector is different. Here one can choose whether
+  # to maintain the left or right one, or pick the one which has the smaller
+  # dimension. We use the same variable for the singular vector if we switch
+  # from normal weights to EMA weights.
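+  # The u_var name below strips the "/ExponentialMovingAverage" suffix, so a
+  # weight and its EMA copy share a single persisted singular vector.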
+ var_name = inputs.name.replace("/ExponentialMovingAverage", "").split("/")[-1] + var_name = var_name.split(":")[0] + "/u_var" + if singular_value == "auto": + singular_value = "left" if w.shape[0] <= w.shape[1] else "right" + u_shape = (w.shape[0], 1) if singular_value == "left" else (1, w.shape[-1]) + u_var = tf.get_variable( + var_name, + shape=u_shape, + dtype=w.dtype, + initializer=tf.random_normal_initializer(), + trainable=False) + u = u_var + + # Use power iteration method to approximate the spectral norm. + # The authors suggest that one round of power iteration was sufficient in the + # actual experiment to achieve satisfactory performance. + power_iteration_rounds = 1 + for _ in range(power_iteration_rounds): + if singular_value == "left": + # `v` approximates the first right singular vector of matrix `w`. + v = tf.math.l2_normalize( + tf.matmul(tf.transpose(w), u), axis=None, epsilon=epsilon) + u = tf.math.l2_normalize(tf.matmul(w, v), axis=None, epsilon=epsilon) + else: + v = tf.math.l2_normalize(tf.matmul(u, w, transpose_b=True), + epsilon=epsilon) + u = tf.math.l2_normalize(tf.matmul(v, w), epsilon=epsilon) + + # Update the approximation. + with tf.control_dependencies([tf.assign(u_var, u, name="update_u")]): + u = tf.identity(u) + + # The authors of SN-GAN chose to stop gradient propagating through u and v + # and we maintain that option. + u = tf.stop_gradient(u) + v = tf.stop_gradient(v) + + if singular_value == "left": + norm_value = tf.matmul(tf.matmul(tf.transpose(u), w), v) + else: + norm_value = tf.matmul(tf.matmul(v, w), u, transpose_b=True) + norm_value.shape.assert_is_fully_defined() + norm_value.shape.assert_is_compatible_with([1, 1]) + + w_normalized = w / norm_value + + # Deflate normalized weights to match the unnormalized tensor. 
+ w_tensor_normalized = tf.reshape(w_normalized, inputs.shape) + return w_tensor_normalized + + +def linear(inputs, output_size, scope=None, stddev=0.02, bias_start=0.0, + use_sn=False, use_bias=True): + """Linear layer without the non-linear activation applied.""" + shape = inputs.get_shape().as_list() + with tf.variable_scope(scope or "linear"): + kernel = tf.get_variable( + "kernel", + [shape[1], output_size], + initializer=weight_initializer(stddev=stddev)) + if use_sn: + kernel = spectral_norm(kernel) + outputs = tf.matmul(inputs, kernel) + if use_bias: + bias = tf.get_variable( + "bias", + [output_size], + initializer=tf.constant_initializer(bias_start)) + outputs += bias + return outputs + + +def conv2d(inputs, output_dim, k_h, k_w, d_h, d_w, stddev=0.02, name="conv2d", + use_sn=False, use_bias=True): + """Performs 2D convolution of the input.""" + with tf.variable_scope(name): + w = tf.get_variable( + "kernel", [k_h, k_w, inputs.shape[-1].value, output_dim], + initializer=weight_initializer(stddev=stddev)) + if use_sn: + w = spectral_norm(w) + outputs = tf.nn.conv2d(inputs, w, strides=[1, d_h, d_w, 1], padding="SAME") + if use_bias: + bias = tf.get_variable( + "bias", [output_dim], initializer=tf.constant_initializer(0.0)) + outputs += bias + return outputs + + +conv1x1 = functools.partial(conv2d, k_h=1, k_w=1, d_h=1, d_w=1) + + +def deconv2d(inputs, output_shape, k_h, k_w, d_h, d_w, + stddev=0.02, name="deconv2d", use_sn=False): + """Performs transposed 2D convolution of the input.""" + with tf.variable_scope(name): + w = tf.get_variable( + "kernel", [k_h, k_w, output_shape[-1], inputs.get_shape()[-1]], + initializer=weight_initializer(stddev=stddev)) + if use_sn: + w = spectral_norm(w) + deconv = tf.nn.conv2d_transpose( + inputs, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) + bias = tf.get_variable( + "bias", [output_shape[-1]], initializer=tf.constant_initializer(0.0)) + return tf.reshape(tf.nn.bias_add(deconv, bias), tf.shape(deconv)) + + +def lrelu(inputs, leak=0.2, name="lrelu"): + """Performs leaky-ReLU on the input.""" + return tf.maximum(inputs, leak * inputs, name=name) + + +def weight_norm_linear(input_, output_size, + init=False, init_scale=1.0, + name="wn_linear", + initializer=tf.truncated_normal_initializer, + stddev=0.02): + """Linear layer with Weight Normalization (Salimans, Kingma '16).""" + with tf.variable_scope(name): + if init: + v = tf.get_variable("V", [int(input_.get_shape()[1]), output_size], + tf.float32, initializer(0, stddev), trainable=True) + v_norm = tf.nn.l2_normalize(v.initialized_value(), [0]) + x_init = tf.matmul(input_, v_norm) + m_init, v_init = tf.nn.moments(x_init, [0]) + scale_init = init_scale / tf.sqrt(v_init + 1e-10) + g = tf.get_variable("g", dtype=tf.float32, + initializer=scale_init, trainable=True) + b = tf.get_variable("b", dtype=tf.float32, initializer= + -m_init*scale_init, trainable=True) + x_init = tf.reshape(scale_init, [1, output_size]) * ( + x_init - tf.reshape(m_init, [1, output_size])) + return x_init + else: + # Note that the original implementation uses Polyak averaging. 
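+      # V, g and b are created by a call with init=True (data-dependent
+      # initialization) and are fetched here by name from a reusing variable
+      # scope.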
+      v = tf.get_variable("V")
+      g = tf.get_variable("g")
+      b = tf.get_variable("b")
+      tf.assert_variables_initialized([v, g, b])
+      x = tf.matmul(input_, v)
+      scaler = g / tf.sqrt(tf.reduce_sum(tf.square(v), [0]))
+      x = tf.reshape(scaler, [1, output_size]) * x + tf.reshape(
+          b, [1, output_size])
+      return x
+
+
+def weight_norm_conv2d(input_, output_dim,
+                       k_h, k_w, d_h, d_w,
+                       init, init_scale,
+                       stddev=0.02,
+                       name="wn_conv2d",
+                       initializer=tf.truncated_normal_initializer):
+  """Performs convolution with Weight Normalization."""
+  with tf.variable_scope(name):
+    if init:
+      v = tf.get_variable(
+          "V", [k_h, k_w] + [int(input_.get_shape()[-1]), output_dim],
+          tf.float32, initializer(0, stddev), trainable=True)
+      v_norm = tf.nn.l2_normalize(v.initialized_value(), [0, 1, 2])
+      x_init = tf.nn.conv2d(input_, v_norm, strides=[1, d_h, d_w, 1],
+                            padding="SAME")
+      m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
+      scale_init = init_scale / tf.sqrt(v_init + 1e-8)
+      g = tf.get_variable(
+          "g", dtype=tf.float32, initializer=scale_init, trainable=True)
+      b = tf.get_variable(
+          "b", dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
+      x_init = tf.reshape(scale_init, [1, 1, 1, output_dim]) * (
+          x_init - tf.reshape(m_init, [1, 1, 1, output_dim]))
+      return x_init
+    else:
+      v = tf.get_variable("V")
+      g = tf.get_variable("g")
+      b = tf.get_variable("b")
+      tf.assert_variables_initialized([v, g, b])
+      w = tf.reshape(g, [1, 1, 1, output_dim]) * tf.nn.l2_normalize(
+          v, [0, 1, 2])
+      x = tf.nn.bias_add(
+          tf.nn.conv2d(input_, w, [1, d_h, d_w, 1], padding="SAME"), b)
+      return x
+
+
+def weight_norm_deconv2d(x, output_dim,
+                         k_h, k_w, d_h, d_w,
+                         init=False, init_scale=1.0,
+                         stddev=0.02,
+                         name="wn_deconv2d",
+                         initializer=tf.truncated_normal_initializer):
+  """Performs Transposed Convolution with Weight Normalization."""
+  xs = x.get_shape().as_list()
+  target_shape = [xs[0], xs[1] * d_h, xs[2] * d_w, output_dim]
+  with tf.variable_scope(name):
+    if init:
+      v = tf.get_variable(
+          "V", [k_h, k_w] + [output_dim, int(x.get_shape()[-1])],
+          tf.float32, initializer(0, stddev), trainable=True)
+      v_norm = tf.nn.l2_normalize(v.initialized_value(), [0, 1, 3])
+      x_init = tf.nn.conv2d_transpose(x, v_norm, target_shape,
+                                      [1, d_h, d_w, 1], padding="SAME")
+      m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
+      scale_init = init_scale/tf.sqrt(v_init + 1e-8)
+      g = tf.get_variable("g", dtype=tf.float32,
+                          initializer=scale_init, trainable=True)
+      b = tf.get_variable("b", dtype=tf.float32,
+                          initializer=-m_init*scale_init, trainable=True)
+      x_init = tf.reshape(scale_init, [1, 1, 1, output_dim]) * (
+          x_init - tf.reshape(m_init, [1, 1, 1, output_dim]))
+      return x_init
+    else:
+      v = tf.get_variable("V")
+      g = tf.get_variable("g")
+      b = tf.get_variable("b")
+      tf.assert_variables_initialized([v, g, b])
+      w = tf.reshape(g, [1, 1, output_dim, 1]) * tf.nn.l2_normalize(
+          v, [0, 1, 3])
+      x = tf.nn.conv2d_transpose(x, w, target_shape, strides=[1, d_h, d_w, 1],
+                                 padding="SAME")
+      x = tf.nn.bias_add(x, b)
+      return x
+
+
+def non_local_block(x, name, use_sn):
+  """Self-attention (non-local) block.
+
+  This method is used to exactly reproduce SAGAN and ignores Gin settings on
+  weight initialization and spectral normalization.
+
+  Args:
+    x: Input tensor of shape [batch, h, w, c].
+    name: Name of the variable scope.
+    use_sn: Apply spectral norm to the weights.
+
+  Returns:
+    A tensor of the same shape after self-attention was applied.
+ """ + def _spatial_flatten(inputs): + shape = inputs.shape + return tf.reshape(inputs, (-1, shape[1] * shape[2], shape[3])) + + with tf.variable_scope(name): + h, w, num_channels = x.get_shape().as_list()[1:] + num_channels_attn = num_channels // 8 + num_channels_g = num_channels // 2 + + # Theta path + theta = conv1x1(x, num_channels_attn, name="conv2d_theta", use_sn=use_sn, + use_bias=False) + theta = _spatial_flatten(theta) + + # Phi path + phi = conv1x1(x, num_channels_attn, name="conv2d_phi", use_sn=use_sn, + use_bias=False) + phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2) + phi = _spatial_flatten(phi) + + attn = tf.matmul(theta, phi, transpose_b=True) + attn = tf.nn.softmax(attn) + + # G path + g = conv1x1(x, num_channels_g, name="conv2d_g", use_sn=use_sn, + use_bias=False) + g = tf.layers.max_pooling2d(inputs=g, pool_size=[2, 2], strides=2) + g = _spatial_flatten(g) + + attn_g = tf.matmul(attn, g) + attn_g = tf.reshape(attn_g, [-1, h, w, num_channels_g]) + sigma = tf.get_variable("sigma", [], initializer=tf.zeros_initializer()) + attn_g = conv1x1(attn_g, num_channels, name="conv2d_attn_g", use_sn=use_sn, + use_bias=False) + return x + sigma * attn_g diff --git a/compare_gan/architectures/arch_ops_test.py b/compare_gan/architectures/arch_ops_test.py new file mode 100644 index 0000000..b5bb26a --- /dev/null +++ b/compare_gan/architectures/arch_ops_test.py @@ -0,0 +1,136 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for custom architecture operations.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from compare_gan.architectures import arch_ops +import numpy as np +import tensorflow as tf + + +class ArchOpsTest(tf.test.TestCase): + + def testBatchNorm(self): + with tf.Graph().as_default(): + # 4 images with resolution 2x1 and 3 channels. 
+      x1 = tf.constant([[[5, 7, 2]], [[5, 8, 8]]], dtype=tf.float32)
+      x2 = tf.constant([[[1, 2, 0]], [[4, 0, 4]]], dtype=tf.float32)
+      x3 = tf.constant([[[6, 2, 6]], [[5, 0, 5]]], dtype=tf.float32)
+      x4 = tf.constant([[[2, 4, 2]], [[6, 4, 1]]], dtype=tf.float32)
+      x = tf.stack([x1, x2, x3, x4])
+      self.assertAllEqual(x.shape.as_list(), [4, 2, 1, 3])
+
+      core_bn = tf.layers.batch_normalization(x, training=True)
+      contrib_bn = tf.contrib.layers.batch_norm(x, is_training=True)
+      custom_bn = arch_ops.batch_norm(x, is_training=True)
+      with self.session() as sess:
+        sess.run(tf.global_variables_initializer())
+        core_bn, contrib_bn, custom_bn = sess.run(
+            [core_bn, contrib_bn, custom_bn])
+        tf.logging.info("core_bn: %s", core_bn[0])
+        tf.logging.info("contrib_bn: %s", contrib_bn[0])
+        tf.logging.info("custom_bn: %s", custom_bn[0])
+        self.assertAllClose(core_bn, contrib_bn)
+        self.assertAllClose(custom_bn, contrib_bn)
+        expected_values = np.asarray(
+            [[[[0.4375205, 1.30336881, -0.58830315]],
+              [[0.4375205, 1.66291881, 1.76490951]]],
+             [[[-1.89592218, -0.49438119, -1.37270737]],
+              [[-0.14584017, -1.21348119, 0.19610107]]],
+             [[[1.02088118, -0.49438119, 0.98050523]],
+              [[0.4375205, -1.21348119, 0.58830321]]],
+             [[[-1.31256151, 0.22471881, -0.58830315]],
+              [[1.02088118, 0.22471881, -0.98050523]]]],
+            dtype=np.float32)
+        self.assertAllClose(custom_bn, expected_values)
+
+  def testAccumulatedMomentsDuringTraining(self):
+    with tf.Graph().as_default():
+      mean_in = tf.placeholder(tf.float32, shape=[2])
+      variance_in = tf.placeholder(tf.float32, shape=[2])
+      mean, variance = arch_ops._accumulated_moments_for_inference(
+          mean=mean_in, variance=variance_in, is_training=True)
+      variables_by_name = {v.op.name: v for v in tf.global_variables()}
+      tf.logging.error(variables_by_name)
+      accu_mean = variables_by_name["accu/accu_mean"]
+      accu_variance = variables_by_name["accu/accu_variance"]
+      accu_counter = variables_by_name["accu/accu_counter"]
+      with self.session() as sess:
+        sess.run(tf.global_variables_initializer())
+        m1, v1 = sess.run(
+            [mean, variance],
+            feed_dict={mean_in: [1.0, 2.0], variance_in: [3.0, 4.0]})
+        self.assertAllClose(m1, [1.0, 2.0])
+        self.assertAllClose(v1, [3.0, 4.0])
+        m2, v2 = sess.run(
+            [mean, variance],
+            feed_dict={mean_in: [5.0, 6.0], variance_in: [7.0, 8.0]})
+        self.assertAllClose(m2, [5.0, 6.0])
+        self.assertAllClose(v2, [7.0, 8.0])
+        am, av, ac = sess.run([accu_mean, accu_variance, accu_counter])
+        self.assertAllClose(am, [0.0, 0.0])
+        self.assertAllClose(av, [0.0, 0.0])
+        self.assertAllClose([ac], [0.0])
+
+  def testAccumulatedMomentsDuringEval(self):
+    with tf.Graph().as_default():
+      mean_in = tf.placeholder(tf.float32, shape=[2])
+      variance_in = tf.placeholder(tf.float32, shape=[2])
+      mean, variance = arch_ops._accumulated_moments_for_inference(
+          mean=mean_in, variance=variance_in, is_training=False)
+      variables_by_name = {v.op.name: v for v in tf.global_variables()}
+      tf.logging.error(variables_by_name)
+      accu_mean = variables_by_name["accu/accu_mean"]
+      accu_variance = variables_by_name["accu/accu_variance"]
+      accu_counter = variables_by_name["accu/accu_counter"]
+      update_accus = variables_by_name["accu/update_accus"]
+      with self.session() as sess:
+        sess.run(tf.global_variables_initializer())
+        # Fill accumulators.
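+        # Setting update_accus to 1 makes each sess.run below add the current
+        # batch statistics to the accumulators; the op still returns the
+        # running averages accu_mean / accu_counter and
+        # accu_variance / accu_counter.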
+ sess.run(tf.assign(update_accus, 1)) + m1, v1 = sess.run( + [mean, variance], + feed_dict={mean_in: [1.0, 2.0], variance_in: [3.0, 4.0]}) + self.assertAllClose(m1, [1.0, 2.0]) + self.assertAllClose(v1, [3.0, 4.0]) + m2, v2 = sess.run( + [mean, variance], + feed_dict={mean_in: [5.0, 6.0], variance_in: [7.0, 8.0]}) + self.assertAllClose(m2, [3.0, 4.0]) + self.assertAllClose(v2, [5.0, 6.0]) + # Check accumulators. + am, av, ac = sess.run([accu_mean, accu_variance, accu_counter]) + self.assertAllClose(am, [6.0, 8.0]) + self.assertAllClose(av, [10.0, 12.0]) + self.assertAllClose([ac], [2.0]) + # Use accumulators. + sess.run(tf.assign(update_accus, 0)) + m3, v3 = sess.run( + [mean, variance], + feed_dict={mean_in: [2.0, 2.0], variance_in: [3.0, 3.0]}) + self.assertAllClose(m3, [3.0, 4.0]) + self.assertAllClose(v3, [5.0, 6.0]) + am, av, ac = sess.run([accu_mean, accu_variance, accu_counter]) + self.assertAllClose(am, [6.0, 8.0]) + self.assertAllClose(av, [10.0, 12.0]) + self.assertAllClose([ac], [2.0]) + + +if __name__ == "__main__": + tf.test.main() diff --git a/compare_gan/architectures/arch_ops_tpu_test.py b/compare_gan/architectures/arch_ops_tpu_test.py new file mode 100644 index 0000000..30a9e79 --- /dev/null +++ b/compare_gan/architectures/arch_ops_tpu_test.py @@ -0,0 +1,137 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for custom architecture operations on TPUs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +from compare_gan.architectures import arch_ops +import gin +import numpy as np +import tensorflow as tf + + +class ArchOpsTpuTest(tf.test.TestCase): + + def setUp(self): + # Construct input for batch norm tests: + # 4 images with resolution 2x1 and 3 channels. + x1 = np.asarray([[[5, 7, 2]], [[5, 8, 8]]], dtype=np.float32) + x2 = np.asarray([[[1, 2, 0]], [[4, 0, 4]]], dtype=np.float32) + x3 = np.asarray([[[6, 2, 6]], [[5, 0, 5]]], dtype=np.float32) + x4 = np.asarray([[[2, 4, 2]], [[6, 4, 1]]], dtype=np.float32) + self._inputs = np.stack([x1, x2, x3, x4]) + self.assertAllEqual(self._inputs.shape, [4, 2, 1, 3]) + # And the expected output for applying batch norm (without additional + # scaling/shifting). 
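+ # I.e. (x - batch_mean) / sqrt(batch_var + epsilon), computed per channel + # over the whole batch.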
+ self._expected_outputs = np.asarray( + [[[[0.4375205, 1.30336881, -0.58830315]], + [[0.4375205, 1.66291881, 1.76490951]]], + [[[-1.89592218, -0.49438119, -1.37270737]], + [[-0.14584017, -1.21348119, 0.19610107]]], + [[[1.02088118, -0.49438119, 0.98050523]], + [[0.4375205, -1.21348119, 0.58830321]]], + [[[-1.31256151, 0.22471881, -0.58830315]], + [[1.02088118, 0.22471881, -0.98050523]]]], + dtype=np.float32) + self.assertAllEqual(self._expected_outputs.shape, [4, 2, 1, 3]) + + def testRunsOnTpu(self): + """Verify that the test case runs on a TPU chip that has 2 cores.""" + expected_device_names = [ + "/job:localhost/replica:0/task:0/device:CPU:0", + "/job:localhost/replica:0/task:0/device:TPU:0", + "/job:localhost/replica:0/task:0/device:TPU:1", + "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0", + ] + with self.session() as sess: + devices = sess.list_devices() + tf.logging.info("devices:\n%s", "\n".join([str(d) for d in devices])) + self.assertAllEqual([d.name for d in devices], expected_device_names) + + def testBatchNormOneCore(self): + def computation(x): + core_bn = tf.layers.batch_normalization(x, training=True) + contrib_bn = tf.contrib.layers.batch_norm(x, is_training=True) + custom_bn = arch_ops.batch_norm(x, is_training=True) + tf.logging.info("custom_bn tensor: %s", custom_bn) + return core_bn, contrib_bn, custom_bn + + with tf.Graph().as_default(): + x = tf.constant(self._inputs) + core_bn, contrib_bn, custom_bn = tf.contrib.tpu.batch_parallel( + computation, [x], num_shards=1) + + with self.session() as sess: + sess.run(tf.contrib.tpu.initialize_system()) + sess.run(tf.global_variables_initializer()) + core_bn, contrib_bn, custom_bn = sess.run( + [core_bn, contrib_bn, custom_bn]) + logging.info("core_bn: %s", core_bn) + logging.info("contrib_bn: %s", contrib_bn) + logging.info("custom_bn: %s", custom_bn) + self.assertAllClose(core_bn, self._expected_outputs) + self.assertAllClose(contrib_bn, self._expected_outputs) + self.assertAllClose(custom_bn, self._expected_outputs) + + def testBatchNormTwoCoresCoreAndContrib(self): + def computation(x): + core_bn = tf.layers.batch_normalization(x, training=True) + contrib_bn = tf.contrib.layers.batch_norm(x, is_training=True) + return core_bn, contrib_bn + + with tf.Graph().as_default(): + x = tf.constant(self._inputs) + core_bn, contrib_bn = tf.contrib.tpu.batch_parallel( + computation, [x], num_shards=2) + + with self.session() as sess: + sess.run(tf.contrib.tpu.initialize_system()) + sess.run(tf.global_variables_initializer()) + core_bn, contrib_bn = sess.run([core_bn, contrib_bn]) + logging.info("core_bn: %s", core_bn) + logging.info("contrib_bn: %s", contrib_bn) + self.assertNotAllClose(core_bn, self._expected_outputs) + self.assertNotAllClose(contrib_bn, self._expected_outputs) + + def testBatchNormTwoCoresCustom(self): + def computation(x): + custom_bn = arch_ops.batch_norm(x, is_training=True, name="custom_bn") + gin.bind_parameter("cross_replica_moments.parallel", False) + custom_bn_seq = arch_ops.batch_norm(x, is_training=True, + name="custom_bn_seq") + return custom_bn, custom_bn_seq + + with tf.Graph().as_default(): + x = tf.constant(self._inputs) + custom_bn, custom_bn_seq = tf.contrib.tpu.batch_parallel( + computation, [x], num_shards=2) + + with self.session() as sess: + sess.run(tf.contrib.tpu.initialize_system()) + sess.run(tf.global_variables_initializer()) + custom_bn, custom_bn_seq = sess.run( + [custom_bn, custom_bn_seq]) + logging.info("custom_bn: %s", custom_bn) + logging.info("custom_bn_seq: %s", 
custom_bn_seq) + self.assertAllClose(custom_bn, self._expected_outputs) + self.assertAllClose(custom_bn_seq, self._expected_outputs) + + +if __name__ == "__main__": + tf.test.main() diff --git a/compare_gan/architectures/architectures_test.py b/compare_gan/architectures/architectures_test.py new file mode 100644 index 0000000..de417ee --- /dev/null +++ b/compare_gan/architectures/architectures_test.py @@ -0,0 +1,159 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for neural architectures.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +from compare_gan.architectures import dcgan +from compare_gan.architectures import infogan +from compare_gan.architectures import resnet30 +from compare_gan.architectures import resnet5 +from compare_gan.architectures import resnet5_biggan +from compare_gan.architectures import resnet_cifar +from compare_gan.architectures import resnet_stl +from compare_gan.architectures import sndcgan +import tensorflow as tf + + +class ArchitectureTest(parameterized.TestCase, tf.test.TestCase): + + def assertArchitectureBuilds(self, gen, disc, image_shape, z_dim=120): + with tf.Graph().as_default(): + batch_size = 2 + num_classes = 10 + # Prepare inputs + z = tf.random.normal((batch_size, z_dim), name="z") + y = tf.one_hot(tf.range(batch_size), num_classes) + # Run G and D and check their output shapes. + x = gen(z=z, y=y, is_training=True, reuse=False) + self.assertAllEqual(x.shape.as_list()[1:], image_shape) + out, _, _ = disc( + x, y=y, is_training=True, reuse=False) + self.assertAllEqual(out.shape.as_list(), (batch_size, 1)) + # Check that G outputs valid pixel values (we use [0, 1] everywhere) and + # D outputs a probability. 
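+ # (All generators in this package end in sigmoid or rescaled tanh and all + # discriminators end in sigmoid, so both range checks apply uniformly.)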
+ with self.session() as sess: + sess.run(tf.global_variables_initializer()) + image, pred = sess.run([x, out]) + self.assertAllGreaterEqual(image, 0) + self.assertAllLessEqual(image, 1) + self.assertAllGreaterEqual(pred, 0) + self.assertAllLessEqual(pred, 1) + + @parameterized.parameters( + {"image_shape": (28, 28, 1)}, + {"image_shape": (32, 32, 1)}, + {"image_shape": (32, 32, 3)}, + {"image_shape": (64, 64, 3)}, + {"image_shape": (128, 128, 3)}, + ) + def testDcGan(self, image_shape): + self.assertArchitectureBuilds( + gen=dcgan.Generator(image_shape=image_shape), + disc=dcgan.Discriminator(), + image_shape=image_shape) + + @parameterized.parameters( + {"image_shape": (28, 28, 1)}, + {"image_shape": (32, 32, 1)}, + {"image_shape": (32, 32, 3)}, + {"image_shape": (64, 64, 3)}, + {"image_shape": (128, 128, 3)}, + ) + def testInfoGan(self, image_shape): + self.assertArchitectureBuilds( + gen=infogan.Generator(image_shape=image_shape), + disc=infogan.Discriminator(), + image_shape=image_shape) + + def testResNet30(self, image_shape=(128, 128, 3)): + self.assertArchitectureBuilds( + gen=resnet30.Generator(image_shape=image_shape), + disc=resnet30.Discriminator(), + image_shape=image_shape) + + @parameterized.parameters( + {"image_shape": (32, 32, 1)}, + {"image_shape": (32, 32, 3)}, + {"image_shape": (64, 64, 3)}, + {"image_shape": (128, 128, 3)}, + ) + def testResNet5(self, image_shape): + self.assertArchitectureBuilds( + gen=resnet5.Generator(image_shape=image_shape), + disc=resnet5.Discriminator(), + image_shape=image_shape) + + @parameterized.parameters( + {"image_shape": (32, 32, 3)}, + {"image_shape": (64, 64, 3)}, + {"image_shape": (128, 128, 3)}, + {"image_shape": (256, 256, 3)}, + {"image_shape": (512, 512, 3)}, + ) + def testResNet5BigGan(self, image_shape): + if image_shape[0] == 512: + z_dim = 160 + elif image_shape[0] == 256: + z_dim = 140 + else: + z_dim = 120 + # Use a reduced channel multiplier (ch=16 instead of the default 96) to + # avoid OOM errors. + self.assertArchitectureBuilds( + gen=resnet5_biggan.Generator(image_shape=image_shape, ch=16), + disc=resnet5_biggan.Discriminator(ch=16), + image_shape=image_shape, + z_dim=z_dim) + + @parameterized.parameters( + {"image_shape": (32, 32, 1)}, + {"image_shape": (32, 32, 3)}, + ) + def testResNetCifar(self, image_shape): + self.assertArchitectureBuilds( + gen=resnet_cifar.Generator(image_shape=image_shape), + disc=resnet_cifar.Discriminator(), + image_shape=image_shape) + + @parameterized.parameters( + {"image_shape": (48, 48, 1)}, + {"image_shape": (48, 48, 3)}, + ) + def testResNetStl(self, image_shape): + self.assertArchitectureBuilds( + gen=resnet_stl.Generator(image_shape=image_shape), + disc=resnet_stl.Discriminator(), + image_shape=image_shape) + + @parameterized.parameters( + {"image_shape": (28, 28, 1)}, + {"image_shape": (32, 32, 1)}, + {"image_shape": (32, 32, 3)}, + {"image_shape": (64, 64, 3)}, + {"image_shape": (128, 128, 3)}, + ) + def testSnDcGan(self, image_shape): + self.assertArchitectureBuilds( + gen=sndcgan.Generator(image_shape=image_shape), + disc=sndcgan.Discriminator(), + image_shape=image_shape) + + +if __name__ == "__main__": + tf.test.main() diff --git a/compare_gan/architectures/dcgan.py b/compare_gan/architectures/dcgan.py new file mode 100644 index 0000000..a471b2f --- /dev/null +++ b/compare_gan/architectures/dcgan.py @@ -0,0 +1,129 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Implementation of DCGAN generator and discriminator architectures. + +Details are available in https://arxiv.org/abs/1511.06434. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from compare_gan.architectures import abstract_arch +from compare_gan.architectures.arch_ops import conv2d +from compare_gan.architectures.arch_ops import deconv2d +from compare_gan.architectures.arch_ops import linear +from compare_gan.architectures.arch_ops import lrelu + +import numpy as np +import tensorflow as tf + + +def conv_out_size_same(size, stride): + return int(np.ceil(float(size) / float(stride))) + + +class Generator(abstract_arch.AbstractGenerator): + """DCGAN generator. + + Details are available at https://arxiv.org/abs/1511.06434. Notable choices + include BatchNorm in the generator and ReLU activations (instead of + LeakyReLU) throughout the generator, except for the output layer, which + uses tanh. + """ + + def apply(self, z, y, is_training): + """Build the generator network for the given inputs. + + Args: + z: `Tensor` of shape [batch_size, z_dim] with latent code. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: boolean, whether we are in train or eval mode. + + Returns: + A tensor of size [batch_size] + self._image_shape with values in [0, 1]. + """ + gf_dim = 64 # Dimension of filters in first convolutional layer. + bs = z.shape[0].value + s_h, s_w, colors = self._image_shape + s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2) + s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2) + s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2) + s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2) + + net = linear(z, gf_dim * 8 * s_h16 * s_w16, scope="g_fc1") + net = tf.reshape(net, [-1, s_h16, s_w16, gf_dim * 8]) + net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn1") + net = tf.nn.relu(net) + net = deconv2d(net, [bs, s_h8, s_w8, gf_dim*4], 5, 5, 2, 2, name="g_dc1") + net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn2") + net = tf.nn.relu(net) + net = deconv2d(net, [bs, s_h4, s_w4, gf_dim*2], 5, 5, 2, 2, name="g_dc2") + net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn3") + net = tf.nn.relu(net) + net = deconv2d(net, [bs, s_h2, s_w2, gf_dim*1], 5, 5, 2, 2, name="g_dc3") + net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn4") + net = tf.nn.relu(net) + net = deconv2d(net, [bs, s_h, s_w, colors], 5, 5, 2, 2, name="g_dc4") + net = 0.5 * tf.nn.tanh(net) + 0.5 + return net + + +class Discriminator(abstract_arch.AbstractDiscriminator): + """DCGAN discriminator. + + Details are available at https://arxiv.org/abs/1511.06434. Notable changes + include BatchNorm in the discriminator and LeakyReLU for all layers. + """ + + def apply(self, x, y, is_training): + """Apply the discriminator to an input. + + Args: + x: `Tensor` of shape [batch_size, ?, ?, ?] with real or fake images. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. 
+ is_training: Boolean, whether the architecture should be constructed for + training or inference. + + Returns: + Tuple of 3 Tensors, the final prediction of the discriminator, the logits + before the final output activation function and the logits from the + second-to-last layer. + """ + bs = x.shape[0].value + df_dim = 64 # Dimension of filters in the first convolutional layer. + net = lrelu(conv2d(x, df_dim, 5, 5, 2, 2, name="d_conv1", + use_sn=self._spectral_norm)) + net = conv2d(net, df_dim * 2, 5, 5, 2, 2, name="d_conv2", + use_sn=self._spectral_norm) + + net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn1") + net = lrelu(net) + net = conv2d(net, df_dim * 4, 5, 5, 2, 2, name="d_conv3", + use_sn=self._spectral_norm) + + net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn2") + net = lrelu(net) + net = conv2d(net, df_dim * 8, 5, 5, 2, 2, name="d_conv4", + use_sn=self._spectral_norm) + + net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn3") + net = lrelu(net) + out_logit = linear( + tf.reshape(net, [bs, -1]), 1, scope="d_fc4", use_sn=self._spectral_norm) + out = tf.nn.sigmoid(out_logit) + return out, out_logit, net diff --git a/compare_gan/architectures/infogan.py b/compare_gan/architectures/infogan.py new file mode 100644 index 0000000..e4121ec --- /dev/null +++ b/compare_gan/architectures/infogan.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Implementation of InfoGAN generator and discriminator architectures. + +Details are available in https://arxiv.org/pdf/1606.03657.pdf. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from compare_gan.architectures import abstract_arch +from compare_gan.architectures.arch_ops import batch_norm +from compare_gan.architectures.arch_ops import conv2d +from compare_gan.architectures.arch_ops import deconv2d +from compare_gan.architectures.arch_ops import linear +from compare_gan.architectures.arch_ops import lrelu + +import tensorflow as tf + + +class Generator(abstract_arch.AbstractGenerator): + """Generator architecture based on InfoGAN.""" + + def apply(self, z, y, is_training): + """Build the generator network for the given inputs. + + Args: + z: `Tensor` of shape [batch_size, z_dim] with latent code. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: boolean, whether we are in train or eval mode. + + Returns: + A tensor of size [batch_size] + self._image_shape with values in [0, 1]. 
+ """ + del y + h, w, c = self._image_shape + bs = z.shape.as_list()[0] + net = linear(z, 1024, scope="g_fc1") + net = lrelu(batch_norm(net, is_training=is_training, name="g_bn1")) + net = linear(net, 128 * (h // 4) * (w // 4), scope="g_fc2") + net = lrelu(batch_norm(net, is_training=is_training, name="g_bn2")) + net = tf.reshape(net, [bs, h // 4, w // 4, 128]) + net = deconv2d(net, [bs, h // 2, w // 2, 64], 4, 4, 2, 2, name="g_dc3") + net = lrelu(batch_norm(net, is_training=is_training, name="g_bn3")) + net = deconv2d(net, [bs, h, w, c], 4, 4, 2, 2, name="g_dc4") + out = tf.nn.sigmoid(net) + return out + + +class Discriminator(abstract_arch.AbstractDiscriminator): + """Discriminator architecture based on InfoGAN.""" + + def apply(self, x, y, is_training): + """Apply the discriminator on a input. + + Args: + x: `Tensor` of shape [batch_size, ?, ?, ?] with real or fake images. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: Boolean, whether the architecture should be constructed for + training or inference. + + Returns: + Tuple of 3 Tensors, the final prediction of the discriminator, the logits + before the final output activation function and logits form the second + last layer. + """ + use_sn = self._spectral_norm + batch_size = x.shape.as_list()[0] + # Resulting shape: [bs, h/2, w/2, 64]. + net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name="d_conv1", use_sn=use_sn)) + # Resulting shape: [bs, h/4, w/4, 128]. + net = conv2d(net, 128, 4, 4, 2, 2, name="d_conv2", use_sn=use_sn) + net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn2") + net = lrelu(net) + # Resulting shape: [bs, h * w * 8]. + net = tf.reshape(net, [batch_size, -1]) + # Resulting shape: [bs, 1024]. + net = linear(net, 1024, scope="d_fc3", use_sn=use_sn) + net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn3") + net = lrelu(net) + # Resulting shape: [bs, 1]. + out_logit = linear(net, 1, scope="d_fc4", use_sn=use_sn) + out = tf.nn.sigmoid(out_logit) + return out, out_logit, net diff --git a/compare_gan/architectures/resnet30.py b/compare_gan/architectures/resnet30.py new file mode 100644 index 0000000..9485da1 --- /dev/null +++ b/compare_gan/architectures/resnet30.py @@ -0,0 +1,143 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A 30-block resnet. + +It contains 6 "super-blocks" and each such block contains 5 residual blocks in +both the generator and discriminator. It supports the 128x128 resolution. +Details can be found in "Improved Training of Wasserstein GANs", Gulrajani I. +et al. 2017. The related code is available at +https://github.com/igul222/improved_wgan_training/blob/master/gan_64x64.py. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from compare_gan.architectures import arch_ops as ops +from compare_gan.architectures import resnet_ops + +from six.moves import range +import tensorflow as tf + + +class Generator(resnet_ops.ResNetGenerator): + """ResNet30 generator, 30 blocks, generates images of resolution 128x128. + + Trying to match the architecture defined in [1]. Difference is that there + the final resolution is 64x64, while here we have 128x128. + """ + + def apply(self, z, y, is_training): + """Build the generator network for the given inputs. + + Args: + z: `Tensor` of shape [batch_size, z_dim] with latent code. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: boolean, are we in train or eval model. + + Returns: + A tensor of size [batch_size] + self._image_shape with values in [0, 1]. + """ + z_shape = z.get_shape().as_list() + if len(z_shape) != 2: + raise ValueError("Expected shape [batch_size, z_dim], got %s." % z_shape) + ch = 64 + colors = self._image_shape[2] + # Map noise to the actual seed. + output = ops.linear(z, 4 * 4 * 8 * ch, scope="fc_noise") + # Reshape the seed to be a rank-4 Tensor. + output = tf.reshape(output, [-1, 4, 4, 8 * ch], name="fc_reshaped") + in_channels = 8 * ch + out_channels = 4 * ch + for superblock in range(6): + for i in range(5): + block = self._resnet_block( + name="B_{}_{}".format(superblock, i), + in_channels=in_channels, + out_channels=in_channels, + scale="none") + output = block(output, z=z, y=y, is_training=is_training) + # We want to upscale 5 times. + if superblock < 5: + block = self._resnet_block( + name="B_{}_up".format(superblock), + in_channels=in_channels, + out_channels=out_channels, + scale="up") + output = block(output, z=z, y=y, is_training=is_training) + in_channels /= 2 + out_channels /= 2 + + output = ops.conv2d( + output, output_dim=colors, k_h=3, k_w=3, d_h=1, d_w=1, + name="final_conv") + output = tf.nn.sigmoid(output) + return output + + +class Discriminator(resnet_ops.ResNetDiscriminator): + """ResNet discriminator, 30 blocks, 128x128x3 and 128x128x1 resolution.""" + + def apply(self, x, y, is_training): + """Apply the discriminator on a input. + + Args: + x: `Tensor` of shape [batch_size, ?, ?, ?] with real or fake images. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: Boolean, whether the architecture should be constructed for + training or inference. + + Returns: + Tuple of 3 Tensors, the final prediction of the discriminator, the logits + before the final output activation function and logits form the second + last layer. + """ + resnet_ops.validate_image_inputs(x) + colors = x.get_shape().as_list()[-1] + assert colors in [1, 3] + ch = 64 + output = ops.conv2d( + x, output_dim=ch // 4, k_h=3, k_w=3, d_h=1, d_w=1, + name="color_conv") + in_channels = ch // 4 + out_channels = ch // 2 + for superblock in range(6): + for i in range(5): + block = self._resnet_block( + name="B_{}_{}".format(superblock, i), + in_channels=in_channels, + out_channels=in_channels, + scale="none") + output = block(output, z=None, y=y, is_training=is_training) + # We want to downscale 5 times. 
+ if superblock < 5: + block = self._resnet_block( + name="B_{}_down".format(superblock), + in_channels=in_channels, + out_channels=out_channels, + scale="down") + output = block(output, z=None, y=y, is_training=is_training) + in_channels *= 2 + out_channels *= 2 + + # Final part + output = tf.reshape(output, [-1, 4 * 4 * 8 * ch]) + out_logit = ops.linear(output, 1, scope="disc_final_fc", + use_sn=self._spectral_norm) + out = tf.nn.sigmoid(out_logit) + return out, out_logit, output diff --git a/compare_gan/architectures/resnet5.py b/compare_gan/architectures/resnet5.py new file mode 100644 index 0000000..cef4249 --- /dev/null +++ b/compare_gan/architectures/resnet5.py @@ -0,0 +1,145 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A deep neural architecture with residual blocks and skip connections. + +It contains 5 residual blocks in both the generator and discriminator and +supports 128x128 resolution. Details can be found in "Improved Training +of Wasserstein GANs", Gulrajani I. et al. 2017. The related code is available at +https://github.com/igul222/improved_wgan_training/blob/master/gan_64x64.py. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from compare_gan.architectures import arch_ops as ops +from compare_gan.architectures import resnet_ops + +import numpy as np +from six.moves import range +import tensorflow as tf + + +class Generator(resnet_ops.ResNetGenerator): + """ResNet generator consisting of 5 blocks, outputs 128x128x3 resolution.""" + + def __init__(self, ch=64, channels=(8, 8, 4, 4, 2, 1), **kwargs): + super(Generator, self).__init__(**kwargs) + self._ch = ch + self._channels = channels + + def apply(self, z, y, is_training): + """Build the generator network for the given inputs. + + Args: + z: `Tensor` of shape [batch_size, z_dim] with latent code. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: boolean, whether we are in train or eval mode. + + Returns: + A tensor of size [batch_size] + self._image_shape with values in [0, 1]. + """ + # Each block upscales by a factor of 2. + seed_size = 4 + image_size = self._image_shape[0] + + # Map noise to the actual seed. + net = ops.linear( + z, + self._ch * self._channels[0] * seed_size * seed_size, + scope="fc_noise") + # Reshape the seed to be a rank-4 Tensor. 
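+ # Shape: [batch_size, 4, 4, ch * channels[0]]; the residual blocks below + # upscale it to the final image resolution.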
+ net = tf.reshape( + net, + [-1, seed_size, seed_size, self._ch * self._channels[0]], + name="fc_reshaped") + + up_layers = np.log2(float(image_size) / seed_size) + if not up_layers.is_integer(): + raise ValueError("log2({}/{}) must be an integer.".format( + image_size, seed_size)) + if up_layers < 0 or up_layers > 5: + raise ValueError("Invalid image_size {}.".format(image_size)) + up_layers = int(up_layers) + + for block_idx in range(5): + block = self._resnet_block( + name="B{}".format(block_idx + 1), + in_channels=self._ch * self._channels[block_idx], + out_channels=self._ch * self._channels[block_idx + 1], + scale="up" if block_idx < up_layers else "none") + net = block(net, z=z, y=y, is_training=is_training) + + net = self.batch_norm( + net, z=z, y=y, is_training=is_training, name="final_norm") + net = tf.nn.relu(net) + net = ops.conv2d(net, output_dim=self._image_shape[2], + k_h=3, k_w=3, d_h=1, d_w=1, name="final_conv") + net = tf.nn.sigmoid(net) + return net + + +class Discriminator(resnet_ops.ResNetDiscriminator): + """ResNet5 discriminator, 5 blocks, supporting 128x128x3 and 128x128x1.""" + + def __init__(self, ch=64, channels=(1, 2, 4, 4, 8, 8), **kwargs): + super(Discriminator, self).__init__(**kwargs) + self._ch = ch + self._channels = channels + + def apply(self, x, y, is_training): + """Apply the discriminator to an input. + + Args: + x: `Tensor` of shape [batch_size, ?, ?, ?] with real or fake images. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: Boolean, whether the architecture should be constructed for + training or inference. + + Returns: + Tuple of 3 Tensors, the final prediction of the discriminator, the logits + before the final output activation function and the logits from the + second-to-last layer. + """ + resnet_ops.validate_image_inputs(x) + colors = x.shape[3].value + if colors not in [1, 3]: + raise ValueError("Number of color channels not supported: {}".format( + colors)) + + block = self._resnet_block( + name="B0", + in_channels=colors, + out_channels=self._ch, + scale="down") + output = block(x, z=None, y=y, is_training=is_training) + + for block_idx in range(5): + block = self._resnet_block( + name="B{}".format(block_idx + 1), + in_channels=self._ch * self._channels[block_idx], + out_channels=self._ch * self._channels[block_idx + 1], + scale="down") + output = block(output, z=None, y=y, is_training=is_training) + + output = tf.nn.relu(output) + pre_logits = tf.reduce_mean(output, axis=[1, 2]) + out_logit = ops.linear(pre_logits, 1, scope="disc_final_fc", + use_sn=self._spectral_norm) + out = tf.nn.sigmoid(out_logit) + return out, out_logit, pre_logits diff --git a/compare_gan/architectures/resnet5_biggan.py b/compare_gan/architectures/resnet5_biggan.py new file mode 100644 index 0000000..e15b49a --- /dev/null +++ b/compare_gan/architectures/resnet5_biggan.py @@ -0,0 +1,428 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Re-implementation of BigGAN architecture. + +Disclaimer: We note that this is our best-effort re-implementation and stress +that even minor implementation differences may lead to large differences in +trained models due to sensitivity of GANs to optimization hyperparameters and +details of neural architectures. That being said, this code suffices to +reproduce the reported FID on ImageNet 128x128. + +Based on "Large Scale GAN Training for High Fidelity Natural Image Synthesys", +Brock A. et al., 2018 [https://arxiv.org/abs/1809.11096]. + +Supported resolutions: 32, 64, 128, 256, 512. The location of the self-attention +block must be set in the Gin config. See below. + +Notable differences to resnet5.py: +- Much wider layers by default. +- 1x1 convs for shortcuts in D and G blocks. +- Last BN in G is unconditional. +- tanh activation in G. +- No shortcut in D block if in_channels == out_channels. +- sum pooling instead of mean pooling in D. +- Last block in D does not downsample. + +Information related to parameter counts and Gin configuration: +128x128 +------- +Number of parameters: (D) 87,982,370 (G) 70,433,988 +Required Gin settings: +options.z_dim = 120 +resnet5_biggan.Generator.blocks_with_attention = "B4" +resnet5_biggan.Discriminator.blocks_with_attention = "B1" + +256x256 +------- +Number of parameters: (D) 98,635,298 (G) 82,097,604 +Required Gin settings: +options.z_dim = 140 +resnet5_biggan.Generator.blocks_with_attention = "B5" +resnet5_biggan.Discriminator.blocks_with_attention = "B2" + +512x512 +------- +Number of parameters: (D) 98,801,378 (G) 82,468,068 +Required Gin settings: +options.z_dim = 160 +resnet5_biggan.Generator.blocks_with_attention = "B4" +resnet5_biggan.Discriminator.blocks_with_attention = "B3" +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging + +from compare_gan.architectures import abstract_arch +from compare_gan.architectures import arch_ops as ops +from compare_gan.architectures import resnet_ops + +import gin +from six.moves import range +import tensorflow as tf + + +@gin.configurable(whitelist=["sc_kernel_size"]) +class BigGanResNetBlock(resnet_ops.ResNetBlock): + """ResNet block with options for various normalizations. + + This block uses a 1x1 convolution for the (optional) shortcut connection. + """ + + def __init__(self, + add_shortcut=True, + sc_kernel_size=1, + **kwargs): + """Constructs a new ResNet block for BigGAN. + + Args: + add_shortcut: Whether to add a shortcut connection. + sc_kernel_size: Integer, kernel size to use for shortcut connections. + **kwargs: Additional arguments for ResNetBlock. + """ + super(BigGanResNetBlock, self).__init__(**kwargs) + self._add_shortcut = add_shortcut + self._sc_kernel_size = (sc_kernel_size, sc_kernel_size) + + def apply(self, inputs, z, y, is_training): + """"ResNet block containing possible down/up sampling, shared for G / D. + + Args: + inputs: a 3d input tensor of feature map. + z: the latent vector for potential self-modulation. Can be None if use_sbn + is set to False. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: boolean, whether or notthis is called during the training. + + Returns: + output: a 3d output tensor of feature map. 
+ """ + if inputs.shape[-1].value != self._in_channels: + raise ValueError( + "Unexpected number of input channels (expected {}, got {}).".format( + self._in_channels, inputs.shape[-1].value)) + + with tf.variable_scope(self._name, values=[inputs]): + outputs = inputs + + outputs = self.batch_norm( + outputs, z=z, y=y, is_training=is_training, name="bn1") + if self._layer_norm: + outputs = ops.layer_norm(outputs, is_training=is_training, scope="ln1") + + outputs = tf.nn.relu(outputs) + outputs = self._get_conv( + outputs, self._in_channels, self._out_channels, self._scale1, + suffix="conv1") + + outputs = self.batch_norm( + outputs, z=z, y=y, is_training=is_training, name="bn2") + if self._layer_norm: + outputs = ops.layer_norm(outputs, is_training=is_training, scope="ln2") + + outputs = tf.nn.relu(outputs) + outputs = self._get_conv( + outputs, self._out_channels, self._out_channels, self._scale2, + suffix="conv2") + + # Combine skip-connection with the convolved part. + if self._add_shortcut: + shortcut = self._get_conv( + inputs, self._in_channels, self._out_channels, self._scale, + kernel_size=self._sc_kernel_size, + suffix="conv_shortcut") + outputs += shortcut + logging.info("[Block] %s (z=%s, y=%s) -> %s", inputs.shape, + None if z is None else z.shape, + None if y is None else y.shape, outputs.shape) + return outputs + + +@gin.configurable +class Generator(abstract_arch.AbstractGenerator): + """ResNet-based generator supporting resolutions 32, 64, 128, 256, 512.""" + + def __init__(self, + ch=96, + blocks_with_attention="B4", + hierarchical_z=True, + embed_z=False, + embed_y=True, + embed_y_dim=128, + embed_bias=False, + **kwargs): + """Constructor for BigGAN generator. + + Args: + ch: Channel multiplier. + blocks_with_attention: Comma-separated list of blocks that are followed by + a non-local block. + hierarchical_z: Split z into chunks and only give one chunk to each. + Each chunk will also be concatenated to y, the one hot encoded labels. + embed_z: If True use a learnable embedding of z that is used instead. + The embedding will have the length of z. + embed_y: If True use a learnable embedding of y that is used instead. + embed_y_dim: Size of the embedding of y. + embed_bias: Use bias with for the embedding of z and y. + **kwargs: additional arguments past on to ResNetGenerator. 
+ """ + super(Generator, self).__init__(**kwargs) + self._ch = ch + self._blocks_with_attention = set(blocks_with_attention.split(",")) + self._hierarchical_z = hierarchical_z + self._embed_z = embed_z + self._embed_y = embed_y + self._embed_y_dim = embed_y_dim + self._embed_bias = embed_bias + + def _resnet_block(self, name, in_channels, out_channels, scale): + """ResNet block for the generator.""" + if scale not in ["up", "none"]: + raise ValueError( + "Unknown generator ResNet block scaling: {}.".format(scale)) + return BigGanResNetBlock( + name=name, + in_channels=in_channels, + out_channels=out_channels, + scale=scale, + is_gen_block=True, + spectral_norm=self._spectral_norm, + batch_norm=self.batch_norm) + + def _get_in_out_channels(self): + resolution = self._image_shape[0] + if resolution == 512: + channel_multipliers = [16, 16, 8, 8, 4, 2, 1, 1] + elif resolution == 256: + channel_multipliers = [16, 16, 8, 8, 4, 2, 1] + elif resolution == 128: + channel_multipliers = [16, 16, 8, 4, 2, 1] + elif resolution == 64: + channel_multipliers = [16, 16, 8, 4, 2] + elif resolution == 32: + channel_multipliers = [4, 4, 4, 4] + else: + raise ValueError("Unsupported resolution: {}".format(resolution)) + in_channels = [self._ch * c for c in channel_multipliers[:-1]] + out_channels = [self._ch * c for c in channel_multipliers[1:]] + return in_channels, out_channels + + def apply(self, z, y, is_training): + """Build the generator network for the given inputs. + + Args: + z: `Tensor` of shape [batch_size, z_dim] with latent code. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: boolean, are we in train or eval model. + + Returns: + A tensor of size [batch_size] + self._image_shape with values in [0, 1]. + """ + shape_or_none = lambda t: None if t is None else t.shape + logging.info("[Generator] inputs are z=%s, y=%s", z.shape, shape_or_none(y)) + # Each block upscales by a factor of 2. + seed_size = 4 + z_dim = z.shape[1].value + + in_channels, out_channels = self._get_in_out_channels() + num_blocks = len(in_channels) + + if self._embed_z: + z = ops.linear(z, z_dim, scope="embed_z", use_sn=False, + use_bias=self._embed_bias) + if self._embed_y: + y = ops.linear(y, self._embed_y_dim, scope="embed_y", use_sn=False, + use_bias=self._embed_bias) + y_per_block = num_blocks * [y] + if self._hierarchical_z: + z_per_block = tf.split(z, num_blocks + 1, axis=1) + z0, z_per_block = z_per_block[0], z_per_block[1:] + if y is not None: + y_per_block = [tf.concat([zi, y], 1) for zi in z_per_block] + else: + z0 = z + z_per_block = num_blocks * [z] + + logging.info("[Generator] z0=%s, z_per_block=%s, y_per_block=%s", + z0.shape, [str(shape_or_none(t)) for t in z_per_block], + [str(shape_or_none(t)) for t in y_per_block]) + + # Map noise to the actual seed. + net = ops.linear( + z0, + in_channels[0] * seed_size * seed_size, + scope="fc_noise", + use_sn=self._spectral_norm) + # Reshape the seed to be a rank-4 Tensor. 
+ net = tf.reshape( + net, + [-1, seed_size, seed_size, in_channels[0]], + name="fc_reshaped") + + for block_idx in range(num_blocks): + name = "B{}".format(block_idx + 1) + block = self._resnet_block( + name=name, + in_channels=in_channels[block_idx], + out_channels=out_channels[block_idx], + scale="up") + net = block( + net, + z=z_per_block[block_idx], + y=y_per_block[block_idx], + is_training=is_training) + if name in self._blocks_with_attention: + logging.info("[Generator] Applying non-local block to %s", net.shape) + net = ops.non_local_block(net, "non_local_block", + use_sn=self._spectral_norm) + # Final processing of the net. + # Use unconditional batch norm. + logging.info("[Generator] before final processing: %s", net.shape) + net = ops.batch_norm(net, is_training=is_training, name="final_norm") + net = tf.nn.relu(net) + net = ops.conv2d(net, output_dim=self._image_shape[2], k_h=3, k_w=3, + d_h=1, d_w=1, name="final_conv", + use_sn=self._spectral_norm) + logging.info("[Generator] after final processing: %s", net.shape) + net = (tf.nn.tanh(net) + 1.0) / 2.0 + return net + + +@gin.configurable +class Discriminator(abstract_arch.AbstractDiscriminator): + """ResNet-based discriminator supporting resolutions 32, 64, 128, 256, 512.""" + + def __init__(self, + ch=96, + blocks_with_attention="B1", + project_y=True, + **kwargs): + """Constructor for BigGAN discriminator. + + Args: + ch: Channel multiplier. + blocks_with_attention: Comma-separated list of blocks that are followed by + a non-local block. + project_y: Add an embedding of y in the output layer. + **kwargs: Additional arguments passed on to AbstractDiscriminator. + """ + super(Discriminator, self).__init__(**kwargs) + self._ch = ch + self._blocks_with_attention = set(blocks_with_attention.split(",")) + self._project_y = project_y + + def _resnet_block(self, name, in_channels, out_channels, scale): + """ResNet block for the discriminator.""" + if scale not in ["down", "none"]: + raise ValueError( + "Unknown discriminator ResNet block scaling: {}.".format(scale)) + return BigGanResNetBlock( + name=name, + in_channels=in_channels, + out_channels=out_channels, + scale=scale, + is_gen_block=False, + add_shortcut=in_channels != out_channels, + layer_norm=self._layer_norm, + spectral_norm=self._spectral_norm, + batch_norm=self.batch_norm) + + def _get_in_out_channels(self, colors, resolution): + if colors not in [1, 3]: + raise ValueError("Unsupported color channels: {}".format(colors)) + if resolution == 512: + channel_multipliers = [1, 1, 2, 4, 8, 8, 16, 16] + elif resolution == 256: + channel_multipliers = [1, 2, 4, 8, 8, 16, 16] + elif resolution == 128: + channel_multipliers = [1, 2, 4, 8, 16, 16] + elif resolution == 64: + channel_multipliers = [2, 4, 8, 16, 16] + elif resolution == 32: + channel_multipliers = [2, 2, 2, 2] + else: + raise ValueError("Unsupported resolution: {}".format(resolution)) + out_channels = [self._ch * c for c in channel_multipliers] + in_channels = [colors] + out_channels[:-1] + return in_channels, out_channels + + def apply(self, x, y, is_training): + """Apply the discriminator to an input. + + Args: + x: `Tensor` of shape [batch_size, ?, ?, ?] with real or fake images. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: Boolean, whether the architecture should be constructed for + training or inference. 
+ + Returns: + Tuple of 3 Tensors, the final prediction of the discriminator, the logits + before the final output activation function and the logits from the + second-to-last layer. + """ + logging.info("[Discriminator] inputs are x=%s, y=%s", x.shape, + None if y is None else y.shape) + resnet_ops.validate_image_inputs(x) + + in_channels, out_channels = self._get_in_out_channels( + colors=x.shape[-1].value, resolution=x.shape[1].value) + num_blocks = len(in_channels) + + net = x + for block_idx in range(num_blocks): + name = "B{}".format(block_idx + 1) + is_last_block = block_idx == num_blocks - 1 + block = self._resnet_block( + name=name, + in_channels=in_channels[block_idx], + out_channels=out_channels[block_idx], + scale="none" if is_last_block else "down") + net = block(net, z=None, y=y, is_training=is_training) + if name in self._blocks_with_attention: + logging.info("[Discriminator] Applying non-local block to %s", + net.shape) + net = ops.non_local_block(net, "non_local_block", + use_sn=self._spectral_norm) + + # Final part + logging.info("[Discriminator] before final processing: %s", net.shape) + net = tf.nn.relu(net) + h = tf.math.reduce_sum(net, axis=[1, 2]) + out_logit = ops.linear(h, 1, scope="final_fc", use_sn=self._spectral_norm) + logging.info("[Discriminator] after final processing: %s", net.shape) + if self._project_y: + if y is None: + raise ValueError("You must provide class information y to project.") + with tf.variable_scope("embedding_fc"): + y_embedding_dim = out_channels[-1] + # We do not use ops.linear() below since it does not have an option to + # override the initializer. + kernel = tf.get_variable( + "kernel", [y.shape[1], y_embedding_dim], tf.float32, + initializer=tf.initializers.glorot_normal()) + if self._spectral_norm: + kernel = ops.spectral_norm(kernel) + embedded_y = tf.matmul(y, kernel) + logging.info("[Discriminator] embedded_y for projection: %s", + embedded_y.shape) + out_logit += tf.reduce_sum(embedded_y * h, axis=1, keepdims=True) + out = tf.nn.sigmoid(out_logit) + return out, out_logit, h diff --git a/compare_gan/architectures/resnet5_biggan_test.py b/compare_gan/architectures/resnet5_biggan_test.py new file mode 100644 index 0000000..18f2b2b --- /dev/null +++ b/compare_gan/architectures/resnet5_biggan_test.py @@ -0,0 +1,190 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test whether resnet5_biggan matches the original BigGAN architecture.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +from compare_gan import utils +from compare_gan.architectures import arch_ops +from compare_gan.architectures import resnet5_biggan +import gin +import numpy as np +import tensorflow as tf + + +def guess_initializer(var, graph=None): + """Helper function to guess the initializer of a variable. + + The function looks at the operations in the initializer name scope for the + variable (e.g. 
my_scope/my_var_name/Initializer/*). The TF core initializers + have characteristic sets of operations that can be used to determine the + initializer. + + Args: + var: `tf.Variable`. The function will use the name to look for initializer + operations in the same scope. + graph: Optional `tf.Graph` that contains the variable. If None the default + graph is used. + + Returns: + Name of the guessed initializer as a string. + """ + if graph is None: + graph = tf.get_default_graph() + prefix = var.op.name + "/Initializer" + ops = [op for op in graph.get_operations() + if op.name.startswith(prefix)] + assert ops, "No operations found for prefix {}".format(prefix) + op_names = [op.name[len(prefix) + 1:] for op in ops] + if len(op_names) == 1: + if op_names[0] == "Const": + value = ops[0].get_attr("value").float_val[0] + if value == 0.0: + return "zeros" + if np.isclose(value, 1.0): + return "ones" + return "constant" + return op_names[0] # ones or zeros + if "Qr" in op_names and "DiagPart" in op_names: + return "orthogonal" + if "random_uniform" in op_names: + return "glorot_uniform" + stddev_ops = [op for op in ops if op.name.endswith("stddev")] + if stddev_ops: + assert len(stddev_ops) == 1 + stddev = stddev_ops[0].get_attr("value").float_val[0] + else: + stddev = None + if "random_normal" in op_names: + return "random_normal" + if "truncated_normal" in op_names: + if len(str(stddev)) > 5: + return "glorot_normal" + return "truncated_normal" + + +class ResNet5BigGanTest(tf.test.TestCase): + + def setUp(self): + super(ResNet5BigGanTest, self).setUp() + gin.clear_config() + + def testNumberOfParameters(self): + with tf.Graph().as_default(): + batch_size = 16 + z = tf.zeros((batch_size, 120)) + y = tf.one_hot(tf.ones((batch_size,), dtype=tf.int32), 1000) + generator = resnet5_biggan.Generator( + image_shape=(128, 128, 3), + batch_norm_fn=arch_ops.conditional_batch_norm) + fake_images = generator(z, y=y, is_training=True, reuse=False) + self.assertEqual(fake_images.shape.as_list(), [batch_size, 128, 128, 3]) + discriminator = resnet5_biggan.Discriminator() + predictions = discriminator(fake_images, y, is_training=True) + self.assertLen(predictions, 3) + + t_vars = tf.trainable_variables() + g_vars = [var for var in t_vars if "generator" in var.name] + d_vars = [var for var in t_vars if "discriminator" in var.name] + g_param_overview = utils.get_parameter_overview(g_vars, limit=None) + d_param_overview = utils.get_parameter_overview(d_vars, limit=None) + logging.info("Generator variables:\n%s", g_param_overview) + logging.info("Discriminator variables:\n%s", d_param_overview) + + for v in g_vars: + parts = v.op.name.split("/") + layer, var_name = parts[-2], parts[-1] + layers_with_bias = {"fc_noise", "up_conv_shortcut", "up_conv1", + "same_conv2", "final_conv"} + # No biases in conditional BN or self-attention. + if layer not in layers_with_bias: + self.assertNotEqual(var_name, "bias", msg=str(v)) + + # Batch norm variables. + if parts[-3] == "condition": + if parts[-4] == "final_bn": + self.assertEqual(var_name, "kernel", msg=str(v)) + self.assertEqual(v.shape.as_list(), [1, 1, 1, 96], msg=str(v)) + else: + self.assertEqual(var_name, "kernel", msg=str(v)) + self.assertEqual(v.shape[0].value, 148, msg=str(v)) + + # Embedding layer. + if layer == "embed_y": + self.assertEqual(var_name, "kernel", msg=str(v)) + self.assertAllEqual(v.shape.as_list(), [1000, 128], msg=str(v)) + + # Shortcut connections use 1x1 convolution. 
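+ # (sc_kernel_size defaults to 1 in BigGanResNetBlock, hence the [1, 1] + # kernel shape checked here.)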
+ if layer == "up_conv_shortcut" and var_name == "kernel": + self.assertEqual(v.shape.as_list()[:2], [1, 1], msg=str(v)) + g_num_weights = sum([v.get_shape().num_elements() for v in g_vars]) + self.assertEqual(g_num_weights, 70433988) + + for v in d_vars: + parts = v.op.name.split("/") + layer, var_name = parts[-2], parts[-1] + layers_with_bias = {"down_conv_shortcut", "same_conv1", "down_conv2", + "same_conv_shortcut", "same_conv2", "final_fc"} + # No biases in conditional BN or self-attention. + if layer not in layers_with_bias: + self.assertNotEqual(var_name, "bias", msg=str(v)) + + # no Shortcut in last block. + if parts[-3] == "B6": + self.assertNotEqual(layer, "same_shortcut", msg=str(v)) + d_num_weights = sum([v.get_shape().num_elements() for v in d_vars]) + self.assertEqual(d_num_weights, 87982370) + + def testInitializers(self): + gin.bind_parameter("weights.initializer", "orthogonal") + with tf.Graph().as_default(): + z = tf.zeros((8, 120)) + y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 1000) + generator = resnet5_biggan.Generator( + image_shape=(128, 128, 3), + batch_norm_fn=arch_ops.conditional_batch_norm) + fake_images = generator(z, y=y, is_training=True, reuse=False) + discriminator = resnet5_biggan.Discriminator() + discriminator(fake_images, y, is_training=True) + + for v in tf.trainable_variables(): + parts = v.op.name.split("/") + layer, var_name = parts[-2], parts[-1] + initializer_name = guess_initializer(v) + logging.info("%s => %s", v.op.name, initializer_name) + if layer == "embedding_fc" and var_name == "kernel": + self.assertEqual(initializer_name, "glorot_normal") + elif layer == "non_local_block" and var_name == "sigma": + self.assertEqual(initializer_name, "zeros") + elif layer == "final_norm" and var_name == "gamma": + self.assertEqual(initializer_name, "ones") + elif layer == "final_norm" and var_name == "beta": + self.assertEqual(initializer_name, "zeros") + elif var_name == "kernel": + self.assertEqual(initializer_name, "orthogonal") + elif var_name == "bias": + self.assertEqual(initializer_name, "zeros") + else: + self.fail("Unknown variables {}".format(v)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/compare_gan/architectures/resnet_cifar.py b/compare_gan/architectures/resnet_cifar.py new file mode 100644 index 0000000..df64a00 --- /dev/null +++ b/compare_gan/architectures/resnet_cifar.py @@ -0,0 +1,167 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Resnet generator and discriminator for CIFAR. + +Based on Table 4 from "Spectral Normalization for Generative Adversarial +Networks", Miyato T. et al., 2018. [https://arxiv.org/pdf/1802.05957.pdf]. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from compare_gan.architectures import arch_ops as ops +from compare_gan.architectures import resnet_ops + +import gin +from six.moves import range +import tensorflow as tf + + +@gin.configurable +class Generator(resnet_ops.ResNetGenerator): + """ResNet generator, 4 blocks, supporting 32x32 resolution.""" + + def __init__(self, + hierarchical_z=False, + embed_z=False, + embed_y=False, + **kwargs): + """Constructor for the ResNet Cifar generator. + + Args: + hierarchical_z: Split z into chunks and only give one chunk to each. + Each chunk will also be concatenated to y, the one hot encoded labels. + embed_z: If True use a learnable embedding of z that is used instead. + The embedding will have the length of z. + embed_y: If True use a learnable embedding of y that is used instead. + The embedding will have the length of z (not y!). + **kwargs: additional arguments past on to ResNetGenerator. + """ + super(Generator, self).__init__(**kwargs) + self._hierarchical_z = hierarchical_z + self._embed_z = embed_z + self._embed_y = embed_y + + def apply(self, z, y, is_training): + """Build the generator network for the given inputs. + + Args: + z: `Tensor` of shape [batch_size, z_dim] with latent code. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: boolean, are we in train or eval model. + + Returns: + A tensor of size [batch_size, 32, 32, colors] with values in [0, 1]. + """ + assert self._image_shape[0] == 32 + assert self._image_shape[1] == 32 + num_blocks = 3 + z_dim = z.shape[1].value + + if self._embed_z: + z = ops.linear(z, z_dim, scope="embed_z", use_sn=self._spectral_norm) + if self._embed_y: + y = ops.linear(y, z_dim, scope="embed_y", use_sn=self._spectral_norm) + y_per_block = num_blocks * [y] + if self._hierarchical_z: + z_per_block = tf.split(z, num_blocks + 1, axis=1) + z0, z_per_block = z_per_block[0], z_per_block[1:] + if y is not None: + y_per_block = [tf.concat([zi, y], 1) for zi in z_per_block] + else: + z0 = z + z_per_block = num_blocks * [z] + + output = ops.linear(z0, 4 * 4 * 256, scope="fc_noise", + use_sn=self._spectral_norm) + output = tf.reshape(output, [-1, 4, 4, 256], name="fc_reshaped") + for block_idx in range(3): + block = self._resnet_block( + name="B{}".format(block_idx + 1), + in_channels=256, + out_channels=256, + scale="up") + output = block( + output, + z=z_per_block[block_idx], + y=y_per_block[block_idx], + is_training=is_training) + + # Final processing of the output. + output = self.batch_norm( + output, z=z, y=y, is_training=is_training, name="final_norm") + output = tf.nn.relu(output) + output = ops.conv2d(output, output_dim=self._image_shape[2], k_h=3, k_w=3, + d_h=1, d_w=1, name="final_conv", + use_sn=self._spectral_norm,) + return tf.nn.sigmoid(output) + + +@gin.configurable +class Discriminator(resnet_ops.ResNetDiscriminator): + """ResNet discriminator, 4 blocks, supporting 32x32 with 1 or 3 colors.""" + + def __init__(self, project_y=False, **kwargs): + super(Discriminator, self).__init__(**kwargs) + self._project_y = project_y + + def apply(self, x, y, is_training): + """Apply the discriminator on a input. + + Args: + x: `Tensor` of shape [batch_size, 32, 32, ?] with real or fake images. + y: `Tensor` of shape [batch_size, num_classes] with one hot encoded + labels. + is_training: Boolean, whether the architecture should be constructed for + training or inference. 
+
+    Returns:
+      Tuple of 3 Tensors, the final prediction of the discriminator, the
+      logits before the final output activation function and the logits from
+      the second-to-last layer.
+    """
+    resnet_ops.validate_image_inputs(x)
+    colors = x.shape[3].value
+    if colors not in [1, 3]:
+      raise ValueError("Number of color channels not supported: {}".format(
+          colors))
+
+    output = x
+    for block_idx in range(4):
+      block = self._resnet_block(
+          name="B{}".format(block_idx + 1),
+          in_channels=colors if block_idx == 0 else 128,
+          out_channels=128,
+          scale="down" if block_idx <= 1 else "none")
+      output = block(output, z=None, y=y, is_training=is_training)
+
+    # Final part: ReLU, then global average pooling.
+    output = tf.nn.relu(output)
+
+    h = tf.reduce_mean(output, axis=[1, 2])
+
+    out_logit = ops.linear(h, 1, scope="disc_final_fc",
+                           use_sn=self._spectral_norm)
+    if self._project_y:
+      if y is None:
+        raise ValueError("You must provide class information y to project.")
+      embedded_y = ops.linear(y, 128, use_bias=False,
+                              scope="embedding_fc", use_sn=self._spectral_norm)
+      out_logit += tf.reduce_sum(embedded_y * h, axis=1, keepdims=True)
+    out = tf.nn.sigmoid(out_logit)
+    return out, out_logit, h
diff --git a/compare_gan/architectures/resnet_init_test.py b/compare_gan/architectures/resnet_init_test.py
new file mode 100644
index 0000000..89e9275
--- /dev/null
+++ b/compare_gan/architectures/resnet_init_test.py
@@ -0,0 +1,116 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests weight initialization ops using the ResNet5 architecture."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from compare_gan.architectures import resnet5
+from compare_gan.gans import consts
+import gin
+import tensorflow as tf
+
+
+class ResNetInitTest(tf.test.TestCase):
+
+  def setUp(self):
+    super(ResNetInitTest, self).setUp()
+    gin.clear_config()
+
+  def testInitializersOldDefault(self):
+    valid_initalizer = [
+        "kernel/Initializer/random_normal",
+        "bias/Initializer/Const",
+        # truncated_normal is the old default for conv2d.
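+        # random_normal above is the old default for linear (dense) layers.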
+ "kernel/Initializer/truncated_normal", + "bias/Initializer/Const", + "beta/Initializer/zeros", + "gamma/Initializer/ones", + ] + valid_op_names = "/({}):0$".format("|".join(valid_initalizer)) + with tf.Graph().as_default(): + z = tf.zeros((2, 128)) + fake_image = resnet5.Generator(image_shape=(128, 128, 3))( + z, y=None, is_training=True) + resnet5.Discriminator()(fake_image, y=None, is_training=True) + for var in tf.trainable_variables(): + op_name = var.initializer.inputs[1].name + self.assertRegex(op_name, valid_op_names) + + def testInitializersRandomNormal(self): + gin.bind_parameter("weights.initializer", consts.NORMAL_INIT) + valid_initalizer = [ + "kernel/Initializer/random_normal", + "bias/Initializer/Const", + "kernel/Initializer/random_normal", + "bias/Initializer/Const", + "beta/Initializer/zeros", + "gamma/Initializer/ones", + ] + valid_op_names = "/({}):0$".format("|".join(valid_initalizer)) + with tf.Graph().as_default(): + z = tf.zeros((2, 128)) + fake_image = resnet5.Generator(image_shape=(128, 128, 3))( + z, y=None, is_training=True) + resnet5.Discriminator()(fake_image, y=None, is_training=True) + for var in tf.trainable_variables(): + op_name = var.initializer.inputs[1].name + self.assertRegex(op_name, valid_op_names) + + def testInitializersTruncatedNormal(self): + gin.bind_parameter("weights.initializer", consts.TRUNCATED_INIT) + valid_initalizer = [ + "kernel/Initializer/truncated_normal", + "bias/Initializer/Const", + "kernel/Initializer/truncated_normal", + "bias/Initializer/Const", + "beta/Initializer/zeros", + "gamma/Initializer/ones", + ] + valid_op_names = "/({}):0$".format("|".join(valid_initalizer)) + with tf.Graph().as_default(): + z = tf.zeros((2, 128)) + fake_image = resnet5.Generator(image_shape=(128, 128, 3))( + z, y=None, is_training=True) + resnet5.Discriminator()(fake_image, y=None, is_training=True) + for var in tf.trainable_variables(): + op_name = var.initializer.inputs[1].name + self.assertRegex(op_name, valid_op_names) + + def testGeneratorInitializersOrthogonal(self): + gin.bind_parameter("weights.initializer", consts.ORTHOGONAL_INIT) + valid_initalizer = [ + "kernel/Initializer/mul_1", + "bias/Initializer/Const", + "kernel/Initializer/mul_1", + "bias/Initializer/Const", + "beta/Initializer/zeros", + "gamma/Initializer/ones", + ] + valid_op_names = "/({}):0$".format("|".join(valid_initalizer)) + with tf.Graph().as_default(): + z = tf.zeros((2, 128)) + fake_image = resnet5.Generator(image_shape=(128, 128, 3))( + z, y=None, is_training=True) + resnet5.Discriminator()(fake_image, y=None, is_training=True) + for var in tf.trainable_variables(): + op_name = var.initializer.inputs[1].name + self.assertRegex(op_name, valid_op_names) + + +if __name__ == "__main__": + tf.test.main() diff --git a/compare_gan/architectures/resnet_norm_test.py b/compare_gan/architectures/resnet_norm_test.py new file mode 100644 index 0000000..8b94987 --- /dev/null +++ b/compare_gan/architectures/resnet_norm_test.py @@ -0,0 +1,373 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests batch normalizations using ResNet5 CIFAR.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +from compare_gan.architectures import arch_ops +from compare_gan.architectures import resnet_cifar +from six.moves import zip +import tensorflow as tf + + +class ResNetNormTest(tf.test.TestCase): + + def testDefaultGenerator(self): + with tf.Graph().as_default(): + # batch size 8, 32x32x3 images, 10 classes. + z = tf.zeros((8, 128)) + y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10) + generator = resnet_cifar.Generator(image_shape=(32, 32, 3)) + fake_images = generator(z, y=y, is_training=True, reuse=False) + self.assertEqual(fake_images.shape.as_list(), [8, 32, 32, 3]) + expected_variables = [ + # Name and shape. + ("generator/fc_noise/kernel:0", [128, 4096]), + ("generator/fc_noise/bias:0", [4096]), + ("generator/B1/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B1/up_conv_shortcut/bias:0", [256]), + ("generator/B1/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B1/up_conv1/bias:0", [256]), + ("generator/B1/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B1/same_conv2/bias:0", [256]), + ("generator/B2/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B2/up_conv_shortcut/bias:0", [256]), + ("generator/B2/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B2/up_conv1/bias:0", [256]), + ("generator/B2/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B2/same_conv2/bias:0", [256]), + ("generator/B3/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B3/up_conv_shortcut/bias:0", [256]), + ("generator/B3/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B3/up_conv1/bias:0", [256]), + ("generator/B3/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B3/same_conv2/bias:0", [256]), + ("generator/final_conv/kernel:0", [3, 3, 256, 3]), + ("generator/final_conv/bias:0", [3]), + ] + actual_variables = [(v.name, v.shape.as_list()) + for v in tf.trainable_variables()] + for a, e in zip(actual_variables, expected_variables): + logging.info("actual: %s, expected: %s", a, e) + self.assertEqual(a, e) + self.assertEqual(len(actual_variables), len(expected_variables)) + + def testDefaultDiscriminator(self): + with tf.Graph().as_default(): + # batch size 8, 32x32x3 images, 10 classes. + x = tf.zeros((8, 32, 32, 3)) + y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10) + discriminator = resnet_cifar.Discriminator() + _ = discriminator(x, y=y, is_training=True, reuse=False) + expected_variables = [ + # Name and shape. 
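+        # Plain conv blocks: B1/B2 downsample ("down_conv2"), B3/B4 keep the
+        # resolution ("same_conv2"); every conv keeps its bias here.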
+ ("discriminator/B1/down_conv_shortcut/kernel:0", [3, 3, 3, 128]), + ("discriminator/B1/down_conv_shortcut/bias:0", [128]), + ("discriminator/B1/same_conv1/kernel:0", [3, 3, 3, 128]), + ("discriminator/B1/same_conv1/bias:0", [128]), + ("discriminator/B1/down_conv2/kernel:0", [3, 3, 128, 128]), + ("discriminator/B1/down_conv2/bias:0", [128]), + ("discriminator/B2/down_conv_shortcut/kernel:0", [3, 3, 128, 128]), + ("discriminator/B2/down_conv_shortcut/bias:0", [128]), + ("discriminator/B2/same_conv1/kernel:0", [3, 3, 128, 128]), + ("discriminator/B2/same_conv1/bias:0", [128]), + ("discriminator/B2/down_conv2/kernel:0", [3, 3, 128, 128]), + ("discriminator/B2/down_conv2/bias:0", [128]), + ("discriminator/B3/same_conv_shortcut/kernel:0", [3, 3, 128, 128]), + ("discriminator/B3/same_conv_shortcut/bias:0", [128]), + ("discriminator/B3/same_conv1/kernel:0", [3, 3, 128, 128]), + ("discriminator/B3/same_conv1/bias:0", [128]), + ("discriminator/B3/same_conv2/kernel:0", [3, 3, 128, 128]), + ("discriminator/B3/same_conv2/bias:0", [128]), + ("discriminator/B4/same_conv_shortcut/kernel:0", [3, 3, 128, 128]), + ("discriminator/B4/same_conv_shortcut/bias:0", [128]), + ("discriminator/B4/same_conv1/kernel:0", [3, 3, 128, 128]), + ("discriminator/B4/same_conv1/bias:0", [128]), + ("discriminator/B4/same_conv2/kernel:0", [3, 3, 128, 128]), + ("discriminator/B4/same_conv2/bias:0", [128]), + ("discriminator/disc_final_fc/kernel:0", [128, 1]), + ("discriminator/disc_final_fc/bias:0", [1]), + ] + actual_variables = [(v.name, v.shape.as_list()) + for v in tf.trainable_variables()] + for a, e in zip(actual_variables, expected_variables): + logging.info("actual: %s, expected: %s", a, e) + self.assertEqual(a, e) + self.assertEqual(len(actual_variables), len(expected_variables)) + + def testDefaultGeneratorWithBatchNorm(self): + with tf.Graph().as_default(): + # batch size 8, 32x32x3 images, 10 classes. + z = tf.zeros((8, 128)) + y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10) + generator = resnet_cifar.Generator( + image_shape=(32, 32, 3), + batch_norm_fn=arch_ops.batch_norm) + fake_images = generator(z, y=y, is_training=True, reuse=False) + self.assertEqual(fake_images.shape.as_list(), [8, 32, 32, 3]) + expected_variables = [ + # Name and shape. 
+ ("generator/fc_noise/kernel:0", [128, 4096]), + ("generator/fc_noise/bias:0", [4096]), + ("generator/B1/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B1/up_conv_shortcut/bias:0", [256]), + ("generator/B1/bn1/gamma:0", [256]), + ("generator/B1/bn1/beta:0", [256]), + ("generator/B1/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B1/up_conv1/bias:0", [256]), + ("generator/B1/bn2/gamma:0", [256]), + ("generator/B1/bn2/beta:0", [256]), + ("generator/B1/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B1/same_conv2/bias:0", [256]), + ("generator/B2/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B2/up_conv_shortcut/bias:0", [256]), + ("generator/B2/bn1/gamma:0", [256]), + ("generator/B2/bn1/beta:0", [256]), + ("generator/B2/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B2/up_conv1/bias:0", [256]), + ("generator/B2/bn2/gamma:0", [256]), + ("generator/B2/bn2/beta:0", [256]), + ("generator/B2/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B2/same_conv2/bias:0", [256]), + ("generator/B3/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B3/up_conv_shortcut/bias:0", [256]), + ("generator/B3/bn1/gamma:0", [256]), + ("generator/B3/bn1/beta:0", [256]), + ("generator/B3/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B3/up_conv1/bias:0", [256]), + ("generator/B3/bn2/gamma:0", [256]), + ("generator/B3/bn2/beta:0", [256]), + ("generator/B3/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B3/same_conv2/bias:0", [256]), + ("generator/final_norm/gamma:0", [256]), + ("generator/final_norm/beta:0", [256]), + ("generator/final_conv/kernel:0", [3, 3, 256, 3]), + ("generator/final_conv/bias:0", [3]), + ] + actual_variables = [(v.name, v.shape.as_list()) + for v in tf.trainable_variables()] + for a in actual_variables: + logging.info(a) + for a, e in zip(actual_variables, expected_variables): + logging.info("actual: %s, expected: %s", a, e) + self.assertEqual(a, e) + self.assertEqual(len(actual_variables), len(expected_variables)) + + def testDefaultGeneratorWithConditionalBatchNorm(self): + with tf.Graph().as_default(): + # Batch size 8, 32x32x3 images, 10 classes. + z = tf.zeros((8, 128)) + y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10) + generator = resnet_cifar.Generator( + image_shape=(32, 32, 3), + batch_norm_fn=arch_ops.conditional_batch_norm) + fake_images = generator(z, y=y, is_training=True, reuse=False) + self.assertEqual(fake_images.shape.as_list(), [8, 32, 32, 3]) + expected_variables = [ + # Name and shape. 
+ ("generator/fc_noise/kernel:0", [128, 4096]), + ("generator/fc_noise/bias:0", [4096]), + ("generator/B1/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B1/up_conv_shortcut/bias:0", [256]), + ("generator/B1/bn1/condition/gamma/kernel:0", [10, 256]), + ("generator/B1/bn1/condition/beta/kernel:0", [10, 256]), + ("generator/B1/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B1/up_conv1/bias:0", [256]), + ("generator/B1/bn2/condition/gamma/kernel:0", [10, 256]), + ("generator/B1/bn2/condition/beta/kernel:0", [10, 256]), + ("generator/B1/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B1/same_conv2/bias:0", [256]), + ("generator/B2/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B2/up_conv_shortcut/bias:0", [256]), + ("generator/B2/bn1/condition/gamma/kernel:0", [10, 256]), + ("generator/B2/bn1/condition/beta/kernel:0", [10, 256]), + ("generator/B2/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B2/up_conv1/bias:0", [256]), + ("generator/B2/bn2/condition/gamma/kernel:0", [10, 256]), + ("generator/B2/bn2/condition/beta/kernel:0", [10, 256]), + ("generator/B2/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B2/same_conv2/bias:0", [256]), + ("generator/B3/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B3/up_conv_shortcut/bias:0", [256]), + ("generator/B3/bn1/condition/gamma/kernel:0", [10, 256]), + ("generator/B3/bn1/condition/beta/kernel:0", [10, 256]), + ("generator/B3/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B3/up_conv1/bias:0", [256]), + ("generator/B3/bn2/condition/gamma/kernel:0", [10, 256]), + ("generator/B3/bn2/condition/beta/kernel:0", [10, 256]), + ("generator/B3/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B3/same_conv2/bias:0", [256]), + ("generator/final_norm/condition/gamma/kernel:0", [10, 256]), + ("generator/final_norm/condition/beta/kernel:0", [10, 256]), + ("generator/final_conv/kernel:0", [3, 3, 256, 3]), + ("generator/final_conv/bias:0", [3]), + ] + actual_variables = [(v.name, v.shape.as_list()) + for v in tf.trainable_variables()] + for a in actual_variables: + logging.info(a) + for a, e in zip(actual_variables, expected_variables): + logging.info("actual: %s, expected: %s", a, e) + self.assertEqual(a, e) + self.assertEqual(len(actual_variables), len(expected_variables)) + + def testDefaultGeneratorWithSelfModulatedBatchNorm(self): + with tf.Graph().as_default(): + # Batch size 8, 32x32x3 images, 10 classes. + z = tf.zeros((8, 128)) + y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10) + generator = resnet_cifar.Generator( + image_shape=(32, 32, 3), + batch_norm_fn=arch_ops.self_modulated_batch_norm) + fake_images = generator(z, y=y, is_training=True, reuse=False) + self.assertEqual(fake_images.shape.as_list(), [8, 32, 32, 3]) + expected_variables = [ + # Name and shape. 
+ ("generator/fc_noise/kernel:0", [128, 4096]), + ("generator/fc_noise/bias:0", [4096]), + ("generator/B1/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B1/up_conv_shortcut/bias:0", [256]), + ("generator/B1/bn1/sbn/hidden/kernel:0", [128, 32]), + ("generator/B1/bn1/sbn/hidden/bias:0", [32]), + ("generator/B1/bn1/sbn/gamma/kernel:0", [32, 256]), + ("generator/B1/bn1/sbn/gamma/bias:0", [256]), + ("generator/B1/bn1/sbn/beta/kernel:0", [32, 256]), + ("generator/B1/bn1/sbn/beta/bias:0", [256]), + ("generator/B1/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B1/up_conv1/bias:0", [256]), + ("generator/B1/bn2/sbn/hidden/kernel:0", [128, 32]), + ("generator/B1/bn2/sbn/hidden/bias:0", [32]), + ("generator/B1/bn2/sbn/gamma/kernel:0", [32, 256]), + ("generator/B1/bn2/sbn/gamma/bias:0", [256]), + ("generator/B1/bn2/sbn/beta/kernel:0", [32, 256]), + ("generator/B1/bn2/sbn/beta/bias:0", [256]), + ("generator/B1/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B1/same_conv2/bias:0", [256]), + ("generator/B2/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B2/up_conv_shortcut/bias:0", [256]), + ("generator/B2/bn1/sbn/hidden/kernel:0", [128, 32]), + ("generator/B2/bn1/sbn/hidden/bias:0", [32]), + ("generator/B2/bn1/sbn/gamma/kernel:0", [32, 256]), + ("generator/B2/bn1/sbn/gamma/bias:0", [256]), + ("generator/B2/bn1/sbn/beta/kernel:0", [32, 256]), + ("generator/B2/bn1/sbn/beta/bias:0", [256]), + ("generator/B2/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B2/up_conv1/bias:0", [256]), + ("generator/B2/bn2/sbn/hidden/kernel:0", [128, 32]), + ("generator/B2/bn2/sbn/hidden/bias:0", [32]), + ("generator/B2/bn2/sbn/gamma/kernel:0", [32, 256]), + ("generator/B2/bn2/sbn/gamma/bias:0", [256]), + ("generator/B2/bn2/sbn/beta/kernel:0", [32, 256]), + ("generator/B2/bn2/sbn/beta/bias:0", [256]), + ("generator/B2/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B2/same_conv2/bias:0", [256]), + ("generator/B3/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B3/up_conv_shortcut/bias:0", [256]), + ("generator/B3/bn1/sbn/hidden/kernel:0", [128, 32]), + ("generator/B3/bn1/sbn/hidden/bias:0", [32]), + ("generator/B3/bn1/sbn/gamma/kernel:0", [32, 256]), + ("generator/B3/bn1/sbn/gamma/bias:0", [256]), + ("generator/B3/bn1/sbn/beta/kernel:0", [32, 256]), + ("generator/B3/bn1/sbn/beta/bias:0", [256]), + ("generator/B3/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B3/up_conv1/bias:0", [256]), + ("generator/B3/bn2/sbn/hidden/kernel:0", [128, 32]), + ("generator/B3/bn2/sbn/hidden/bias:0", [32]), + ("generator/B3/bn2/sbn/gamma/kernel:0", [32, 256]), + ("generator/B3/bn2/sbn/gamma/bias:0", [256]), + ("generator/B3/bn2/sbn/beta/kernel:0", [32, 256]), + ("generator/B3/bn2/sbn/beta/bias:0", [256]), + ("generator/B3/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B3/same_conv2/bias:0", [256]), + ("generator/final_norm/sbn/hidden/kernel:0", [128, 32]), + ("generator/final_norm/sbn/hidden/bias:0", [32]), + ("generator/final_norm/sbn/gamma/kernel:0", [32, 256]), + ("generator/final_norm/sbn/gamma/bias:0", [256]), + ("generator/final_norm/sbn/beta/kernel:0", [32, 256]), + ("generator/final_norm/sbn/beta/bias:0", [256]), + ("generator/final_conv/kernel:0", [3, 3, 256, 3]), + ("generator/final_conv/bias:0", [3]), + ] + actual_variables = [(v.name, v.shape.as_list()) + for v in tf.trainable_variables()] + for a in actual_variables: + logging.info(a) + for a, e in zip(actual_variables, expected_variables): + logging.info("actual: %s, expected: %s", a, e) + 
self.assertEqual(a, e) + self.assertEqual(len(actual_variables), len(expected_variables)) + + def testDefaultGeneratorWithSpectralNorm(self): + with tf.Graph().as_default(): + # Batch size 8, 32x32x3 images, 10 classes. + z = tf.zeros((8, 128)) + y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10) + generator = resnet_cifar.Generator( + image_shape=(32, 32, 3), + spectral_norm=True) + fake_images = generator(z, y=y, is_training=True, reuse=False) + self.assertEqual(fake_images.shape.as_list(), [8, 32, 32, 3]) + expected_variables = [ + # Name and shape. + ("generator/fc_noise/kernel:0", [128, 4096]), + ("generator/fc_noise/kernel/u_var:0", [128, 1]), + ("generator/fc_noise/bias:0", [4096]), + ("generator/B1/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B1/up_conv_shortcut/kernel/u_var:0", [3 * 3 * 256, 1]), + ("generator/B1/up_conv_shortcut/bias:0", [256]), + ("generator/B1/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B1/up_conv1/kernel/u_var:0", [3 * 3 * 256, 1]), + ("generator/B1/up_conv1/bias:0", [256]), + ("generator/B1/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B1/same_conv2/kernel/u_var:0", [3 * 3 * 256, 1]), + ("generator/B1/same_conv2/bias:0", [256]), + ("generator/B2/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B2/up_conv_shortcut/kernel/u_var:0", [3 * 3 * 256, 1]), + ("generator/B2/up_conv_shortcut/bias:0", [256]), + ("generator/B2/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B2/up_conv1/kernel/u_var:0", [3 * 3 * 256, 1]), + ("generator/B2/up_conv1/bias:0", [256]), + ("generator/B2/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B2/same_conv2/kernel/u_var:0", [3 * 3 * 256, 1]), + ("generator/B2/same_conv2/bias:0", [256]), + ("generator/B3/up_conv_shortcut/kernel:0", [3, 3, 256, 256]), + ("generator/B3/up_conv_shortcut/kernel/u_var:0", [3 * 3 * 256, 1]), + ("generator/B3/up_conv_shortcut/bias:0", [256]), + ("generator/B3/up_conv1/kernel:0", [3, 3, 256, 256]), + ("generator/B3/up_conv1/kernel/u_var:0", [3 * 3 * 256, 1]), + ("generator/B3/up_conv1/bias:0", [256]), + ("generator/B3/same_conv2/kernel:0", [3, 3, 256, 256]), + ("generator/B3/same_conv2/kernel/u_var:0", [3 * 3 * 256, 1]), + ("generator/B3/same_conv2/bias:0", [256]), + ("generator/final_conv/kernel:0", [3, 3, 256, 3]), + ("generator/final_conv/kernel/u_var:0", [3 * 3 * 256, 1]), + ("generator/final_conv/bias:0", [3]), + ] + actual_variables = [(v.name, v.shape.as_list()) + for v in tf.global_variables()] + for a in actual_variables: + logging.info(a) + for a, e in zip(actual_variables, expected_variables): + logging.info("actual: %s, expected: %s", a, e) + self.assertEqual(a, e) + self.assertEqual(len(actual_variables), len(expected_variables)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/compare_gan/architectures/resnet_ops.py b/compare_gan/architectures/resnet_ops.py new file mode 100644 index 0000000..5627d88 --- /dev/null +++ b/compare_gan/architectures/resnet_ops.py @@ -0,0 +1,219 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""ResNet specific operations.
+
+Defines the default ResNet generator and discriminator blocks and some helper
+operations such as unpooling.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+
+from compare_gan.architectures import abstract_arch
+from compare_gan.architectures import arch_ops as ops
+
+from six.moves import range
+import tensorflow as tf
+
+
+def unpool(value, name="unpool"):
+  """Unpooling operation.
+
+  N-dimensional version of the unpooling operation from
+  https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf
+  Taken from: https://github.com/tensorflow/tensorflow/issues/2169
+
+  Args:
+    value: a Tensor of shape [b, d0, d1, ..., dn, ch]
+    name: name of the op
+
+  Returns:
+    A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch]
+  """
+  with tf.name_scope(name) as scope:
+    sh = value.get_shape().as_list()
+    dim = len(sh[1:-1])
+    out = (tf.reshape(value, [-1] + sh[-dim:]))
+    for i in range(dim, 0, -1):
+      out = tf.concat([out, tf.zeros_like(out)], i)
+    out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]
+    out = tf.reshape(out, out_size, name=scope)
+  return out
+
+
+def validate_image_inputs(inputs, validate_power2=True):
+  """Validates that the input is a rank-4 tensor of square, defined size."""
+  inputs.get_shape().assert_has_rank(4)
+  inputs.get_shape()[1:3].assert_is_fully_defined()
+  if inputs.get_shape()[1] != inputs.get_shape()[2]:
+    raise ValueError("Input tensor does not have equal width and height: ",
+                     inputs.get_shape()[1:3])
+  width = inputs.get_shape().as_list()[1]
+  if validate_power2 and math.log(width, 2) != int(math.log(width, 2)):
+    raise ValueError("Input tensor `width` is not a power of 2: ", width)
+
+
+class ResNetBlock(object):
+  """ResNet block with options for various normalizations."""
+
+  def __init__(self,
+               name,
+               in_channels,
+               out_channels,
+               scale,
+               is_gen_block,
+               layer_norm=False,
+               spectral_norm=False,
+               batch_norm=None):
+    """Constructs a new ResNet block.
+
+    Args:
+      name: Scope name for the resnet block.
+      in_channels: Integer, the input channel size.
+      out_channels: Integer, the output channel size.
+      scale: Whether or not to scale up or down; one of "up", "down" or
+        "none".
+      is_gen_block: Boolean, deciding whether this is a generator or
+        discriminator block.
+      layer_norm: Apply layer norm before both convolutions.
+      spectral_norm: Use spectral normalization for all weights.
+      batch_norm: Function for batch normalization.
+    """
+    assert scale in ["up", "down", "none"]
+    self._name = name
+    self._in_channels = in_channels
+    self._out_channels = out_channels
+    self._scale = scale
+    # In the SN paper, the generator upscales in the first convolution of a
+    # block, while the discriminator downsamples after the second convolution.
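+    # E.g. a generator block with scale="up" uses scale1="up", scale2="none",
+    # while a discriminator block with scale="down" uses scale1="none",
+    # scale2="down".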
+    self._scale1 = scale if is_gen_block else "none"
+    self._scale2 = "none" if is_gen_block else scale
+    self._layer_norm = layer_norm
+    self._spectral_norm = spectral_norm
+    self.batch_norm = batch_norm
+
+  def __call__(self, inputs, z, y, is_training):
+    return self.apply(inputs=inputs, z=z, y=y, is_training=is_training)
+
+  def _get_conv(self, inputs, in_channels, out_channels, scale, suffix,
+                kernel_size=(3, 3), strides=(1, 1)):
+    """Performs a convolution in the ResNet block."""
+    if inputs.get_shape().as_list()[-1] != in_channels:
+      raise ValueError("Unexpected number of input channels.")
+    if scale not in ["up", "down", "none"]:
+      raise ValueError(
+          "Scale: got {}, expected 'up', 'down', or 'none'.".format(scale))
+
+    outputs = inputs
+    if scale == "up":
+      outputs = unpool(outputs)
+    outputs = ops.conv2d(
+        outputs,
+        output_dim=out_channels,
+        k_h=kernel_size[0], k_w=kernel_size[1],
+        d_h=strides[0], d_w=strides[1],
+        use_sn=self._spectral_norm,
+        name="{}_{}".format("same" if scale == "none" else scale, suffix))
+    if scale == "down":
+      outputs = tf.nn.pool(outputs, [2, 2], "AVG", "SAME", strides=[2, 2],
+                           name="pool_%s" % suffix)
+    return outputs
+
+  def apply(self, inputs, z, y, is_training):
+    """ResNet block containing possible down/up sampling, shared for G / D.
+
+    Args:
+      inputs: a 3d input tensor of feature map.
+      z: the latent vector for potential self-modulation. Can be None if
+        use_sbn is set to False.
+      y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
+        labels.
+      is_training: boolean, whether or not this is called during training.
+
+    Returns:
+      output: a 3d output tensor of feature map.
+    """
+    if inputs.get_shape().as_list()[-1] != self._in_channels:
+      raise ValueError("Unexpected number of input channels.")
+
+    with tf.variable_scope(self._name, values=[inputs]):
+      output = inputs
+
+      shortcut = self._get_conv(
+          output, self._in_channels, self._out_channels, self._scale,
+          suffix="conv_shortcut")
+
+      output = self.batch_norm(
+          output, z=z, y=y, is_training=is_training, name="bn1")
+      if self._layer_norm:
+        output = ops.layer_norm(output, is_training=is_training, scope="ln1")
+
+      output = tf.nn.relu(output)
+      output = self._get_conv(
+          output, self._in_channels, self._out_channels, self._scale1,
+          suffix="conv1")
+
+      output = self.batch_norm(
+          output, z=z, y=y, is_training=is_training, name="bn2")
+      if self._layer_norm:
+        output = ops.layer_norm(output, is_training=is_training, scope="ln2")
+
+      output = tf.nn.relu(output)
+      output = self._get_conv(
+          output, self._out_channels, self._out_channels, self._scale2,
+          suffix="conv2")
+
+      # Combine the skip connection with the convolved part.
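+      # The shortcut path was projected/resampled above so both branches have
+      # matching shapes.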
+      output += shortcut
+      return output
+
+
+class ResNetGenerator(abstract_arch.AbstractGenerator):
+  """Abstract base class for generators based on the ResNet architecture."""
+
+  def _resnet_block(self, name, in_channels, out_channels, scale):
+    """ResNet block for the generator."""
+    if scale not in ["up", "none"]:
+      raise ValueError(
+          "Unknown generator ResNet block scaling: {}.".format(scale))
+    return ResNetBlock(
+        name=name,
+        in_channels=in_channels,
+        out_channels=out_channels,
+        scale=scale,
+        is_gen_block=True,
+        spectral_norm=self._spectral_norm,
+        batch_norm=self.batch_norm)
+
+
+class ResNetDiscriminator(abstract_arch.AbstractDiscriminator):
+  """Abstract base class for discriminators based on the ResNet architecture."""
+
+  def _resnet_block(self, name, in_channels, out_channels, scale):
+    """ResNet block for the discriminator."""
+    if scale not in ["down", "none"]:
+      raise ValueError(
+          "Unknown discriminator ResNet block scaling: {}.".format(scale))
+    return ResNetBlock(
+        name=name,
+        in_channels=in_channels,
+        out_channels=out_channels,
+        scale=scale,
+        is_gen_block=False,
+        layer_norm=self._layer_norm,
+        spectral_norm=self._spectral_norm,
+        batch_norm=self.batch_norm)
diff --git a/compare_gan/architectures/resnet_stl.py b/compare_gan/architectures/resnet_stl.py
new file mode 100644
index 0000000..517568d
--- /dev/null
+++ b/compare_gan/architectures/resnet_stl.py
@@ -0,0 +1,108 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A deep neural architecture with residual blocks and skip connections.
+
+Based on Table 5 from "Spectral Normalization for Generative Adversarial
+Networks", Miyato T. et al., 2018. [https://arxiv.org/pdf/1802.05957.pdf].
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from compare_gan.architectures import arch_ops as ops
+from compare_gan.architectures import resnet_ops
+
+from six.moves import range
+import tensorflow as tf
+
+
+class Generator(resnet_ops.ResNetGenerator):
+  """ResNet generator, 3 blocks, supporting 48x48 resolution."""
+
+  def apply(self, z, y, is_training):
+    """Build the generator network for the given inputs.
+
+    Args:
+      z: `Tensor` of shape [batch_size, z_dim] with latent code.
+      y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
+        labels.
+      is_training: boolean, are we in train or eval mode.
+
+    Returns:
+      A tensor of size [batch_size, 48, 48, colors] with values in [0, 1].
+    """
+    ch = 64
+    colors = self._image_shape[2]
+    batch_size = z.get_shape().as_list()[0]
+    magic = [(8, 4), (4, 2), (2, 1)]
+    output = ops.linear(z, 6 * 6 * 512, scope="fc_noise")
+    output = tf.reshape(output, [batch_size, 6, 6, 512], name="fc_reshaped")
+    for block_idx in range(3):
+      block = self._resnet_block(
+          name="B{}".format(block_idx + 1),
+          in_channels=ch * magic[block_idx][0],
+          out_channels=ch * magic[block_idx][1],
+          scale="up")
+      output = block(output, z=z, y=y, is_training=is_training)
+    output = self.batch_norm(
+        output, z=z, y=y, is_training=is_training, scope="final_norm")
+    output = tf.nn.relu(output)
+    output = ops.conv2d(output, output_dim=colors, k_h=3, k_w=3, d_h=1, d_w=1,
+                        name="final_conv")
+    return tf.nn.sigmoid(output)
+
+
+class Discriminator(resnet_ops.ResNetDiscriminator):
+  """ResNet discriminator, 4 blocks, supports 48x48 resolution."""
+
+  def apply(self, x, y, is_training):
+    """Apply the discriminator on an input.
+
+    Args:
+      x: `Tensor` of shape [batch_size, 48, 48, ?] with real or fake images.
+      y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
+        labels.
+      is_training: Boolean, whether the architecture should be constructed for
+        training or inference.
+
+    Returns:
+      Tuple of 3 Tensors, the final prediction of the discriminator, the
+      logits before the final output activation function and the logits from
+      the second-to-last layer.
+    """
+    resnet_ops.validate_image_inputs(x, validate_power2=False)
+    colors = x.shape[-1].value
+    if colors not in [1, 3]:
+      raise ValueError("Number of color channels not supported: %s" % colors)
+    ch = 64
+    block = self._resnet_block(
+        name="B0", in_channels=colors, out_channels=ch, scale="down")
+    output = block(x, z=None, y=y, is_training=is_training)
+    magic = [(1, 2), (2, 4), (4, 8), (8, 16)]
+    for block_idx in range(4):
+      block = self._resnet_block(
+          name="B{}".format(block_idx + 1),
+          in_channels=ch * magic[block_idx][0],
+          out_channels=ch * magic[block_idx][1],
+          scale="down" if block_idx < 3 else "none")
+      output = block(output, z=None, y=y, is_training=is_training)
+    output = tf.nn.relu(output)
+    pre_logits = tf.reduce_mean(output, axis=[1, 2])
+    out_logit = ops.linear(pre_logits, 1, scope="disc_final_fc",
+                           use_sn=self._spectral_norm)
+    out = tf.nn.sigmoid(out_logit)
+    return out, out_logit, pre_logits
diff --git a/compare_gan/architectures/sndcgan.py b/compare_gan/architectures/sndcgan.py
new file mode 100644
index 0000000..9d86f6f
--- /dev/null
+++ b/compare_gan/architectures/sndcgan.py
@@ -0,0 +1,127 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of SNDCGAN generator and discriminator architectures."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from compare_gan.architectures import abstract_arch
+from compare_gan.architectures.arch_ops import conv2d
+from compare_gan.architectures.arch_ops import deconv2d
+from compare_gan.architectures.arch_ops import linear
+from compare_gan.architectures.arch_ops import lrelu
+
+import numpy as np
+import tensorflow as tf
+
+
+def conv_out_size_same(size, stride):
+  return int(np.ceil(float(size) / float(stride)))
+
+
+class Generator(abstract_arch.AbstractGenerator):
+  """SNDCGAN generator.
+
+  Details are available at https://openreview.net/pdf?id=B1QRgziT-.
+  """
+
+  def apply(self, z, y, is_training):
+    """Build the generator network for the given inputs.
+
+    Args:
+      z: `Tensor` of shape [batch_size, z_dim] with latent code.
+      y: `Tensor` of shape [batch_size, num_classes] of one hot encoded labels.
+      is_training: boolean, are we in train or eval mode.
+
+    Returns:
+      A tensor of size [batch_size] + self._image_shape with values in [0, 1].
+    """
+    batch_size = z.shape[0].value
+    s_h, s_w, colors = self._image_shape
+    s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
+    s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
+    s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
+
+    net = linear(z, s_h8 * s_w8 * 512, scope="g_fc1")
+    net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn1")
+    net = tf.nn.relu(net)
+    net = tf.reshape(net, [batch_size, s_h8, s_w8, 512])
+    net = deconv2d(net, [batch_size, s_h4, s_w4, 256], 4, 4, 2, 2, name="g_dc2")
+    net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn2")
+    net = tf.nn.relu(net)
+    net = deconv2d(net, [batch_size, s_h2, s_w2, 128], 4, 4, 2, 2, name="g_dc3")
+    net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn3")
+    net = tf.nn.relu(net)
+    net = deconv2d(net, [batch_size, s_h, s_w, 64], 4, 4, 2, 2, name="g_dc4")
+    net = self.batch_norm(net, z=z, y=y, is_training=is_training, name="g_bn4")
+    net = tf.nn.relu(net)
+    net = deconv2d(
+        net, [batch_size, s_h, s_w, colors], 3, 3, 1, 1, name="g_dc5")
+    out = tf.tanh(net)
+
+    # This normalization from [-1, 1] to [0, 1] is introduced for consistency
+    # with other models.
+    out = tf.div(out + 1.0, 2.0)
+    return out
+
+
+class Discriminator(abstract_arch.AbstractDiscriminator):
+  """SNDCGAN discriminator.
+
+  Details are available at https://openreview.net/pdf?id=B1QRgziT-.
+  """
+
+  def apply(self, x, y, is_training):
+    """Apply the discriminator on an input.
+
+    Args:
+      x: `Tensor` of shape [batch_size, ?, ?, ?] with real or fake images.
+      y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
+        labels.
+      is_training: Boolean, whether the architecture should be constructed for
+        training or inference.
+
+    Returns:
+      Tuple of 3 Tensors, the final prediction of the discriminator, the
+      logits before the final output activation function and the logits from
+      the second-to-last layer.
+    """
+    del is_training, y
+    use_sn = self._spectral_norm
+    # The compare_gan preprocessing normalizes image pixels to the range
+    # [0, 1], while the SNDCGAN authors used [-1, 1]. Rescale the input image
+    # here instead of changing our preprocessing function.
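+    # Maps pixel values from [0, 1] to [-1, 1].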
+ x = x * 2.0 - 1.0 + net = conv2d(x, 64, 3, 3, 1, 1, name="d_conv1", use_sn=use_sn) + net = lrelu(net, leak=0.1) + net = conv2d(net, 128, 4, 4, 2, 2, name="d_conv2", use_sn=use_sn) + net = lrelu(net, leak=0.1) + net = conv2d(net, 128, 3, 3, 1, 1, name="d_conv3", use_sn=use_sn) + net = lrelu(net, leak=0.1) + net = conv2d(net, 256, 4, 4, 2, 2, name="d_conv4", use_sn=use_sn) + net = lrelu(net, leak=0.1) + net = conv2d(net, 256, 3, 3, 1, 1, name="d_conv5", use_sn=use_sn) + net = lrelu(net, leak=0.1) + net = conv2d(net, 512, 4, 4, 2, 2, name="d_conv6", use_sn=use_sn) + net = lrelu(net, leak=0.1) + net = conv2d(net, 512, 3, 3, 1, 1, name="d_conv7", use_sn=use_sn) + net = lrelu(net, leak=0.1) + batch_size = x.shape.as_list()[0] + net = tf.reshape(net, [batch_size, -1]) + out_logit = linear(net, 1, scope="d_fc1", use_sn=use_sn) + out = tf.nn.sigmoid(out_logit) + return out, out_logit, net diff --git a/compare_gan/bin/compare_gan_prepare_datasets.sh b/compare_gan/bin/compare_gan_prepare_datasets.sh deleted file mode 100755 index 477c8b3..0000000 --- a/compare_gan/bin/compare_gan_prepare_datasets.sh +++ /dev/null @@ -1,37 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#!/bin/bash - -T2T_DATAGEN="$HOME/.local/bin/t2t-datagen" -DATASET_DIR="/tmp/datasets" -TMP_DIR="/tmp" - -if [ ! -f ${T2T_DATAGEN?} ]; then - echo "tensor2tensor not found!" - exit 1 -fi - -echo "Preparing datasets (mnist, fashionmnist, cifar10, celeba) in ${DATASET_DIR?}" -mkdir ${DATASET_DIR?} - -datasets=(image_mnist image_fashion_mnist image_cifar10 image_celeba) -for dataset in "${datasets[@]}"; do - echo "Dataset is ${dataset?}" - ${T2T_DATAGEN?} --data_dir=${DATASET_DIR?} --problem=${dataset?} --tmp_dir=${TMP_DIR?} -done - -echo "Getting inception model." -(cd ${DATASET_DIR?}; curl "http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05.tar.gz" | tar -zxvf - inceptionv1_for_inception_score.pb ) diff --git a/compare_gan/bin/compare_gan_run_one_task b/compare_gan/bin/compare_gan_run_one_task deleted file mode 100755 index d4e2000..0000000 --- a/compare_gan/bin/compare_gan_run_one_task +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-#!/usr/bin/env python
-
-from __future__ import absolute_import
-from __future__ import division
-
-import os
-
-import tensorflow as tf
-from google.protobuf import text_format
-from compare_gan.src import gan_lib
-from compare_gan.src import eval_gan_lib
-from compare_gan.src import simple_task_pb2
-from compare_gan.src import task_utils
-
-from compare_gan.src import fid_score
-
-flags = tf.flags
-FLAGS = flags.FLAGS
-logging = tf.logging
-
-flags.DEFINE_string("workdir", "", "The working directory for all tasks.")
-flags.DEFINE_integer("task_num", 0, "The task number to use.")
-flags.DEFINE_string("command", "all", "What command to execute: all, train, eval.")
-flags.DEFINE_bool("enable_tf_profile", False, "Whether to run TFProf.")
-
-def main(_):
-  tf.logging.set_verbosity(tf.logging.INFO)
-  task_workdir = os.path.join(FLAGS.workdir, str(FLAGS.task_num))
-  task = simple_task_pb2.Task()
-  with open(os.path.join(task_workdir, "task"), "r") as f:
-    text_format.Parse(f.read(), task)
-
-  options = task_utils.ParseOptions(task)
-  options["___workdir"] = task_workdir
-
-  task_string = text_format.MessageToString(task)
-  logging.info("\nWill run task\n%s\n\n", task_string)
-
-  if FLAGS.command in ["all", "train"]:
-    logging.info("\n====== Running training ======\n\n")
-    gan_lib.run_with_options(options, task_workdir)
-
-  if FLAGS.command in ["all", "eval"]:
-    logging.info("\n====== Running evaluation ======\n\n")
-    eval_gan_lib.RunTaskEval(
-        options, task_workdir,
-        tf.contrib.gan.eval.get_graph_def_from_disk(
-            os.path.join(FLAGS.dataset_root,
-                         "inceptionv1_for_inception_score.pb")))
-
-  logging.info("\n====== Task finished ======\n\n")
-
-
-if __name__ == "__main__":
-  tf.app.run()
diff --git a/compare_gan/datasets.py b/compare_gan/datasets.py
new file mode 100644
index 0000000..a407a7f
--- /dev/null
+++ b/compare_gan/datasets.py
@@ -0,0 +1,641 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Dataset loading utilities.
+
+Creates a thin wrapper around TensorFlow Datasets (TFDS) to enable seamless
+CPU/GPU/TPU workloads. The main entry point is 'get_dataset' which takes a
+dataset name and a random seed and returns the corresponding tf.data.Dataset
+object.
+
+Available datasets are defined in the DATASETS dictionary. To add any dataset
+supported by TFDS, simply extend the ImageDatasetV2 class as shown below with
+the MNIST example and add it to the DATASETS dictionary. Alternatively, you
+can extend the ImageDatasetV2 class and load the datasets from another source.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import functools
+import inspect
+
+from absl import flags
+from absl import logging
+from compare_gan.tpu import tpu_random
+import gin
+import numpy as np
+import tensorflow as tf
+import tensorflow_datasets as tfds
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string(
+    "tfds_data_dir", None,
+    "TFDS (TensorFlow Datasets) directory. If not set it will default to "
+    "'~/tensorflow_datasets'. If the directory does not contain the requested "
+    "dataset TFDS will download the dataset to this folder.")
+
+flags.DEFINE_boolean(
+    "data_fake_dataset", False,
+    "If True don't load datasets from disk but create fake values.")
+
+flags.DEFINE_integer(
+    "data_shuffle_buffer_size", 10000,
+    "Number of examples for the shuffle buffer.")
+
+
+class ImageDatasetV2(object):
+  """Interface for image datasets based on TFDS (TensorFlow Datasets).
+
+  This class handles both CPU/GPU and TPU data loading settings. If the flag
+  --data_fake_dataset is True the methods will create a small fake dataset from
+  in-memory NumPy arrays and not read from disk.
+
+  The pipeline of input operations is as follows:
+  1) Shuffle filenames (with seed).
+  2) Load file content from disk. Decode images.
+  Dataset content after this step is a dictionary.
+  3) Prefetch call here.
+  4) Filter examples (e.g. by size or label).
+  5) Parse example.
+  Dataset content after this step is a tuple of tensors (image, label).
+  6) train_only: Repeat dataset.
+  7) Transform (random cropping with seed, resizing).
+  8) Preprocess (adding sampled noise/labels with seed).
+  Dataset content after this step is a tuple (feature dictionary, label tensor).
+  9) train only: Shuffle examples (with seed).
+  10) Batch examples.
+  11) Prefetch examples.
+
+  Steps 1-3 are done by _load_dataset(), which wraps tfds.load().
+  Steps 4-11 are done by train_input_fn() and eval_input_fn().
+  """
+
+  def __init__(self,
+               name,
+               tfds_name,
+               resolution,
+               colors,
+               num_classes,
+               eval_test_samples,
+               seed):
+    logging.info("ImageDatasetV2(name=%s, tfds_name=%s, resolution=%d, "
+                 "colors=%d, num_classes=%s, eval_test_samples=%s, seed=%s)",
+                 name, tfds_name, resolution, colors, num_classes,
+                 eval_test_samples, seed)
+    self._name = name
+    self._tfds_name = tfds_name
+    self._resolution = resolution
+    self._colors = colors
+    self._num_classes = num_classes
+    self._eval_test_sample = eval_test_samples
+    self._seed = seed
+
+    self._train_split = tfds.Split.TRAIN
+    self._eval_split = tfds.Split.TEST
+
+  @property
+  def name(self):
+    """Name of the dataset."""
+    return self._name
+
+  @property
+  def num_classes(self):
+    return self._num_classes
+
+  @property
+  def eval_test_samples(self):
+    """Number of examples in the "test" split of this dataset."""
+    if FLAGS.data_fake_dataset:
+      return 100
+    return self._eval_test_sample
+
+  @property
+  def image_shape(self):
+    """Returns a tuple with the image shape."""
+    return (self._resolution, self._resolution, self._colors)
+
+  def _make_fake_dataset(self, split):
+    """Returns a fake data set with the correct shapes."""
+    np.random.seed(self._seed)
+    num_samples_per_epoch = 100
+    num_epochs = self.eval_test_samples // 100 if split == "test" else None
+    images_shape = [num_samples_per_epoch] + list(self.image_shape)
+    images = np.random.uniform(size=images_shape).astype(np.float32)
+    labels = np.ones((num_samples_per_epoch,), dtype=np.int32)
+    ds = tf.data.Dataset.from_tensor_slices((images, labels))
+    return ds.repeat(num_epochs)
+
+  def _get_per_host_random_seed(self, tpu_context=None):
+    """Returns the dataset seed according to the TPUContext.
+
+    On CPU/GPU it returns the default seed. For TPUs the input_fn is executed
+    on every host machine (if per-host input is set, which is set by default).
+    We use a different (but deterministically computed) random seed on each
+    host to ensure each host machine sees a different stream of input data.
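+    For example, with a dataset seed of 547, host 0 uses seed 547, host 1
+    uses 548, and so on (cf. the seed computation below).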
+
+    Args:
+      tpu_context: TPU execution context.
+
+    Returns:
+      The current seed if CPU/GPU and a host-specific seed for TPU.
+    """
+    if self._seed is None:
+      logging.warning("Dataset seed not set.")
+      return None
+    if tpu_context is None:
+      logging.warning("No TPUContext, using unmodified dataset seed %s.",
+                      self._seed)
+      return self._seed
+    seed = self._seed + tpu_context.current_host
+    logging.info("Running with %d hosts, modifying dataset seed for "
+                 "host %d to %s.", tpu_context.num_hosts,
+                 tpu_context.current_host, seed)
+    return seed
+
+  @gin.configurable("replace_labels", whitelist=["file_pattern"])
+  def _replace_labels(self, split, ds, file_pattern=None):
+    """Replaces the labels in the dataset with labels from separate files.
+
+    This functionality is used if one wants to either replace the labels with
+    soft labels (i.e. softmax over the logits) or label the instances with
+    a new classifier.
+
+    Args:
+      split: Dataset split (e.g. train/test/validation).
+      ds: The underlying TFDS object.
+      file_pattern: Path to the replacement files.
+
+    Returns:
+      An instance of tf.data.Dataset with the updated labels.
+    """
+    if not file_pattern:
+      return ds
+    file_pattern = file_pattern.format(split=split)
+    logging.warning("Using labels from %s for split %s.", file_pattern, split)
+    label_ds = tf.data.Dataset.list_files(file_pattern, shuffle=False)
+    label_ds = label_ds.interleave(
+        tf.data.TFRecordDataset,
+        cycle_length=FLAGS.data_reading_num_threads)
+    return tf.data.Dataset.zip((ds, label_ds)).map(self._replace_label)
+
+  def _replace_label(self, feature_dict, new_unparsed_label):
+    """Replaces the label from the feature_dict with the new label.
+
+    Furthermore, if the feature_dict contains a key for the file_name which
+    identifies an instance, we double-check that we are replacing the label
+    of the correct instance.
+
+    Args:
+      feature_dict: A serialized TFRecord containing the old label.
+      new_unparsed_label: A serialized TFRecord containing the new label.
+
+    Returns:
+      The feature_dict with the label replaced by the new label.
+    """
+    label_spec = {
+        "file_name": tf.FixedLenFeature((), tf.string),
+        "label": tf.FixedLenFeature((), tf.int64),
+    }
+    parsed_label = tf.parse_single_example(new_unparsed_label, label_spec)
+    with tf.control_dependencies([
+        tf.assert_equal(parsed_label["file_name"], feature_dict["file_name"])]):
+      feature_dict["label"] = tf.identity(parsed_label["label"])
+    return feature_dict
+
+  def _parse_fn(self, features):
+    image = tf.cast(features["image"], tf.float32) / 255.0
+    return image, features["label"]
+
+  def _load_dataset(self, split):
+    """Loads the underlying dataset split from disk.
+
+    Args:
+      split: Name of the split to load.
+
+    Returns:
+      A `tf.data.Dataset` object with a tuple of image and label tensor.
+    """
+    if FLAGS.data_fake_dataset:
+      return self._make_fake_dataset(split)
+    ds = tfds.load(
+        self._tfds_name,
+        split=split,
+        data_dir=FLAGS.tfds_data_dir,
+        as_dataset_kwargs={"shuffle_files": False})
+    ds = self._replace_labels(split, ds)
+    ds = ds.map(self._parse_fn)
+    return ds.prefetch(tf.contrib.data.AUTOTUNE)
+
+  def _train_filter_fn(self, image, label):
+    del image, label
+    return True
+
+  def _train_transform_fn(self, image, label, seed):
+    del seed
+    return image, label
+
+  def _eval_transform_fn(self, image, label, seed):
+    del seed
+    return image, label
+
+  def train_input_fn(self, params=None, preprocess_fn=None):
+    """Input function for reading data.
+
+    Args:
+      params: Python dictionary with parameters. Must contain the key
+        "batch_size". TPUEstimator will set this for you!
+      preprocess_fn: Function to process single examples. This is allowed to
+        have a `seed` argument.
+
+    Returns:
+      `tf.data.Dataset` with preprocessed and batched examples.
+    """
+    if params is None:
+      params = {}
+    seed = self._get_per_host_random_seed(params.get("context", None))
+    logging.info("train_input_fn(): params=%s seed=%s", params, seed)
+
+    ds = self._load_dataset(split=self._train_split)
+    ds = ds.filter(self._train_filter_fn)
+    ds = ds.repeat()
+    ds = ds.map(functools.partial(self._train_transform_fn, seed=seed))
+    if preprocess_fn is not None:
+      if "seed" in inspect.getargspec(preprocess_fn).args:
+        preprocess_fn = functools.partial(preprocess_fn, seed=seed)
+      ds = ds.map(preprocess_fn)
+    # Add a feature for the random offset of operations in tpu_random.py.
+    ds = tpu_random.add_random_offset_to_features(ds)
+    ds = ds.shuffle(FLAGS.data_shuffle_buffer_size, seed=seed)
+    if "batch_size" in params:
+      ds = ds.batch(params["batch_size"], drop_remainder=True)
+    return ds.prefetch(tf.contrib.data.AUTOTUNE)
+
+  def eval_input_fn(self, params=None, split=None):
+    """Input function for reading data.
+
+    Args:
+      params: Python dictionary with parameters. Must contain the key
+        "batch_size". TPUEstimator will set this for you!
+      split: Name of the split to use. If None will use the default eval split
+        of the dataset.
+
+    Returns:
+      `tf.data.Dataset` with preprocessed and batched examples.
+    """
+    if params is None:
+      params = {}
+    if split is None:
+      split = self._eval_split
+    seed = self._get_per_host_random_seed(params.get("context", None))
+    logging.info("eval_input_fn(): params=%s seed=%s", params, seed)
+
+    ds = self._load_dataset(split=split)
+    # No filter, no repeat.
+    ds = ds.map(functools.partial(self._eval_transform_fn, seed=seed))
+    # No shuffle.
+    if "batch_size" in params:
+      ds = ds.batch(params["batch_size"], drop_remainder=True)
+    return ds.prefetch(tf.contrib.data.AUTOTUNE)
+
+  # For backwards compatibility with the old ImageDataset API.
+  def input_fn(self, params, mode=tf.estimator.ModeKeys.TRAIN,
+               preprocess_fn=None):
+    assert mode == tf.estimator.ModeKeys.TRAIN, mode
+    return self.train_input_fn(params=params, preprocess_fn=preprocess_fn)
+
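+  # Illustrative usage sketch (not part of the original API surface):
+  #   dataset = get_dataset("cifar10")
+  #   ds = dataset.train_input_fn(params={"batch_size": 64})
+  #   features, labels = ds.make_one_shot_iterator().get_next()
+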
+  # For backwards compatibility with the old ImageDataset API.
+  def load_dataset(self, split_name):
+    assert split_name == "test", split_name
+    return self.eval_input_fn()
+
+
+class MnistDataset(ImageDatasetV2):
+  """Wrapper for the MNIST dataset from TFDS."""
+
+  def __init__(self, seed):
+    super(MnistDataset, self).__init__(
+        name="mnist",
+        tfds_name="mnist",
+        resolution=28,
+        colors=1,
+        num_classes=10,
+        eval_test_samples=10000,
+        seed=seed)
+
+
+class FashionMnistDataset(ImageDatasetV2):
+  """Wrapper for the Fashion-MNIST dataset from TFDS."""
+
+  def __init__(self, seed):
+    super(FashionMnistDataset, self).__init__(
+        name="fashion_mnist",
+        tfds_name="fashion_mnist",
+        resolution=28,
+        colors=1,
+        num_classes=10,
+        eval_test_samples=10000,
+        seed=seed)
+
+
+class Cifar10Dataset(ImageDatasetV2):
+  """Wrapper for the CIFAR10 dataset from TFDS."""
+
+  def __init__(self, seed):
+    super(Cifar10Dataset, self).__init__(
+        name="cifar10",
+        tfds_name="cifar10",
+        resolution=32,
+        colors=3,
+        num_classes=10,
+        eval_test_samples=10000,
+        seed=seed)
+
+
+class CelebaDataset(ImageDatasetV2):
+  """Wrapper for the CelebA dataset from TFDS."""
+
+  def __init__(self, seed):
+    super(CelebaDataset, self).__init__(
+        name="celeb_a",
+        tfds_name="celeb_a",
+        resolution=64,
+        colors=3,
+        num_classes=None,
+        eval_test_samples=10000,
+        seed=seed)
+
+  def _parse_fn(self, features):
+    """Returns 64x64x3 image and constant label."""
+    image = features["image"]
+    image = tf.image.resize_image_with_crop_or_pad(image, 160, 160)
+    # Note: possibly consider using NumPy's imresize(image, (64, 64)).
+    image = tf.image.resize_images(image, [64, 64])
+    image.set_shape(self.image_shape)
+    image = tf.cast(image, tf.float32) / 255.0
+    label = tf.constant(0, dtype=tf.int32)
+    return image, label
+
+
+class LsunBedroomDataset(ImageDatasetV2):
+  """Wrapper for the LSUN Bedrooms dataset from TFDS."""
+
+  def __init__(self, seed):
+    super(LsunBedroomDataset, self).__init__(
+        name="lsun-bedroom",
+        tfds_name="lsun/bedroom",
+        resolution=128,
+        colors=3,
+        num_classes=None,
+        eval_test_samples=30000,
+        seed=seed)
+
+    # As the official LSUN validation set only contains 300 samples, which is
+    # insufficient for FID computation, we're splitting off some training
+    # samples. The smallest percentage selectable through TFDS is 1%, so we're
+    # going to use that (corresponding roughly to 30000 samples).
+    # If you want to use fewer eval samples, just modify eval_test_samples.
+    self._train_split, self._eval_split = \
+        tfds.Split.TRAIN.subsplit([99, 1])
+
+  def _parse_fn(self, features):
+    """Returns a 128x128x3 Tensor with constant label 0."""
+    image = features["image"]
+    image = tf.image.resize_image_with_crop_or_pad(
+        image, target_height=128, target_width=128)
+    image = tf.cast(image, tf.float32) / 255.0
+    label = tf.constant(0, dtype=tf.int32)
+    return image, label
+
+
+def _transform_imagnet_image(image, target_image_shape, crop_method, seed):
+  """Preprocesses ImageNet images to have a target image shape.
+
+  Args:
+    image: 3-D tensor with a single image.
+    target_image_shape: List/Tuple with target image shape.
+    crop_method: Method for cropping the image, one of: distorted, random,
+      middle, none.
+    seed: Random seed, only used for `crop_method=distorted`.
+
+  Returns:
+    Image tensor with shape `target_image_shape`.
+ """ + if crop_method == "distorted": + begin, size, _ = tf.image.sample_distorted_bounding_box( + tf.shape(image), + tf.zeros([0, 0, 4], tf.float32), + aspect_ratio_range=[1.0, 1.0], + area_range=[0.5, 1.0], + use_image_if_no_bounding_boxes=True, + seed=seed) + image = tf.slice(image, begin, size) + # Unfortunately, the above operation loses the depth-dimension. So we need + # to restore it the manual way. + image.set_shape([None, None, target_image_shape[-1]]) + elif crop_method == "random": + tf.set_random_seed(seed) + shape = tf.shape(image) + h, w = shape[0], shape[1] + size = tf.minimum(h, w) + begin = [h - size, w - size] * tf.random.uniform([2], 0, 1) + begin = tf.cast(begin, tf.int32) + begin = tf.concat([begin, [0]], axis=0) # Add channel dimension. + image = tf.slice(image, begin, [size, size, 3]) + elif crop_method == "middle": + shape = tf.shape(image) + h, w = shape[0], shape[1] + size = tf.minimum(h, w) + begin = tf.cast([h - size, w - size], tf.float32) / 2.0 + begin = tf.cast(begin, tf.int32) + begin = tf.concat([begin, [0]], axis=0) # Add channel dimension. + image = tf.slice(image, begin, [size, size, 3]) + elif crop_method != "none": + raise ValueError("Unsupported crop method: {}".format(crop_method)) + image = tf.image.resize_images( + image, [target_image_shape[0], target_image_shape[1]]) + image.set_shape(target_image_shape) + return image + + +@gin.configurable("train_imagenet_transform", whitelist=["crop_method"]) +def _train_imagenet_transform(image, target_image_shape, seed, + crop_method="distorted"): + return _transform_imagnet_image( + image, + target_image_shape=target_image_shape, + crop_method=crop_method, + seed=seed) + + +@gin.configurable("eval_imagenet_transform", whitelist=["crop_method"]) +def _eval_imagenet_transform(image, target_image_shape, seed, + crop_method="middle"): + return _transform_imagnet_image( + image, + target_image_shape=target_image_shape, + crop_method=crop_method, + seed=seed) + + +class ImagenetDataset(ImageDatasetV2): + """ImageNet2012 as defined by TF Datasets.""" + + def __init__(self, resolution, seed, filter_unlabeled=False): + if resolution not in [128, 256, 512]: + raise ValueError("Unsupported resolution: {}".format(resolution)) + super(ImagenetDataset, self).__init__( + name="imagenet_{}".format(resolution), + tfds_name="imagenet2012", + resolution=resolution, + colors=3, + num_classes=1000, + eval_test_samples=50000, + seed=seed) + self._eval_split = tfds.Split.VALIDATION + self._filter_unlabeled = filter_unlabeled + + def _train_filter_fn(self, image, label): + del image + if not self._filter_unlabeled: + return True + logging.warning("Filtering unlabeled examples.") + return tf.math.greater_equal(label, 0) + + def _train_transform_fn(self, image, label, seed): + image = _train_imagenet_transform( + image=image, target_image_shape=self.image_shape, seed=seed) + return image, label + + def _eval_transform_fn(self, image, label, seed): + image = _eval_imagenet_transform( + image=image, target_image_shape=self.image_shape, seed=seed) + return image, label + + +class SizeFilteredImagenetDataset(ImagenetDataset): + """ImageNet from TFDS filtered by image size.""" + + def __init__(self, resolution, threshold, seed): + super(SizeFilteredImagenetDataset, self).__init__( + resolution=resolution, + seed=seed) + self._name = "imagenet_{}_hq{}".format(resolution, threshold) + self._threshold = threshold + + def _train_filter_fn(self, image, label): + """The minimum image dimension has to be larger than the threshold.""" + del 
label + size = tf.math.reduce_min(tf.shape(image)[:2]) + return tf.greater_equal(size, self._threshold) + + +class SingleClassImagenetDataset(ImagenetDataset): + """ImageNet from TFDS with all instances having a constant label 0. + + It can be used to simmulate the setting where no labels are provided. + """ + + def __init__(self, resolution, seed): + super(SingleClassImagenetDataset, self).__init__( + resolution=resolution, + seed=seed) + self._name = "single_class_" + self._name + self._num_classes = 1 + + def _parse_fn(self, features): + image, _ = super(SingleClassImagenetDataset, self)._parse_fn(features) + label = tf.constant(0, dtype=tf.int32) + return image, label + + +class RandomClassImagenetDataset(ImagenetDataset): + """ImageNet2012 dataset with random labels.""" + + def __init__(self, resolution, seed): + super(RandomClassImagenetDataset, self).__init__( + resolution=resolution, + seed=seed) + self._name = "random_class_" + self._name + self._num_classes = 1000 + + def _parse_fn(self, features): + image, _ = super(RandomClassImagenetDataset, self)._parse_fn(features) + label = tf.random.uniform(minval=0, maxval=1000, dtype=tf.int32) + return image, label + + +class SoftLabeledImagenetDataset(ImagenetDataset): + """ImageNet2012 dataset with soft labels.""" + + def __init__(self, resolution, seed): + super(SoftLabeledImagenetDataset, self).__init__( + resolution=resolution, + seed=seed) + self._name = "soft_labeled_" + self._name + + def _replace_label(self, feature_dict, new_unparsed_label): + """Replaces the label from the feature_dict with the new (soft) label. + + The function assumes that the new_unparsed_label contains a list of logits + which will be converted to a soft label using the softmax. + + Args: + feature_dict: A serialized TFRecord containing the old label. + new_unparsed_label: A serialized TFRecord containing the new label. + + Returns: + Updates the label in the label dict to the new soft label. + """ + label_spec = { + "file_name": tf.FixedLenFeature((), tf.string), + "label": tf.FixedLenFeature([self._num_classes], tf.float32) + } + parsed_label = tf.parse_single_example(new_unparsed_label, label_spec) + with tf.control_dependencies([ + tf.assert_equal(parsed_label["file_name"], feature_dict["file_name"])]): + feature_dict["label"] = tf.nn.softmax(logits=parsed_label["label"]) + return feature_dict + + +DATASETS = { + "celeb_a": CelebaDataset, + "cifar10": Cifar10Dataset, + "fashion-mnist": FashionMnistDataset, + "lsun-bedroom": LsunBedroomDataset, + "mnist": MnistDataset, + "imagenet_128": functools.partial(ImagenetDataset, resolution=128), + "imagenet_256": functools.partial(ImagenetDataset, resolution=256), + "imagenet_512": functools.partial(ImagenetDataset, resolution=512), + "imagenet_512_hq400": (functools.partial( + SizeFilteredImagenetDataset, resolution=512, threshold=400)), + "soft_labeled_imagenet_128": functools.partial( + SoftLabeledImagenetDataset, resolution=128), + "single_class_imagenet_128": functools.partial( + SingleClassImagenetDataset, resolution=128), + "random_class_imagenet_128": functools.partial( + RandomClassImagenetDataset, resolution=128), + "labeled_only_imagenet_128": functools.partial( + ImagenetDataset, resolution=128, filter_unlabeled=True), +} + + +@gin.configurable("dataset") +def get_dataset(name, seed=547): + """Instantiates a data set and sets the random seed.""" + if name not in DATASETS: + raise ValueError("Dataset %s is not available." 
% name) + return DATASETS[name](seed=seed) diff --git a/compare_gan/datasets_test.py b/compare_gan/datasets_test.py new file mode 100644 index 0000000..24eac61 --- /dev/null +++ b/compare_gan/datasets_test.py @@ -0,0 +1,114 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +from absl.testing import flagsaver +from absl.testing import parameterized +from compare_gan import datasets + +import tensorflow as tf + +FLAGS = flags.FLAGS + +_TPU_SUPPORTED_TYPES = { + tf.float32, tf.int32, tf.complex64, tf.int64, tf.bool, tf.bfloat16 +} + + +def _preprocess_fn_id(images, labels): + return {"images": images}, labels + + +def _preprocess_fn_add_noise(images, labels, seed=None): + del labels + tf.set_random_seed(seed) + noise = tf.random.uniform([128], maxval=1.0) + return {"images": images}, noise + + +class DatasetsTest(parameterized.TestCase, tf.test.TestCase): + + def setUp(self): + super(DatasetsTest, self).setUp() + FLAGS.data_shuffle_buffer_size = 100 + + def get_element_and_verify_shape(self, dataset_name, expected_shape): + dataset = datasets.get_dataset(dataset_name) + dataset = dataset.eval_input_fn() + image, label = dataset.make_one_shot_iterator().get_next() + # Check if shape is known at compile time, required for TPUs. 
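+    # (shape.as_list() must not contain None entries; TPU compilation
+    # rejects partially-defined shapes.)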
+    self.assertAllEqual(image.shape.as_list(), expected_shape)
+    self.assertEqual(image.dtype, tf.float32)
+    self.assertIn(label.dtype, _TPU_SUPPORTED_TYPES)
+    with self.cached_session() as session:
+      image = session.run(image)
+      self.assertEqual(image.shape, expected_shape)
+      self.assertGreaterEqual(image.min(), 0.0)
+      self.assertLessEqual(image.max(), 1.0)
+
+  def test_mnist(self):
+    self.get_element_and_verify_shape("mnist", (28, 28, 1))
+
+  def test_fashion_mnist(self):
+    self.get_element_and_verify_shape("fashion-mnist", (28, 28, 1))
+
+  def test_celeba(self):
+    self.get_element_and_verify_shape("celeb_a", (64, 64, 3))
+
+  def test_lsun(self):
+    self.get_element_and_verify_shape("lsun-bedroom", (128, 128, 3))
+
+  def _run_train_input_fn(self, dataset_name, preprocess_fn):
+    dataset = datasets.get_dataset(dataset_name)
+    with tf.Graph().as_default():
+      dataset = dataset.input_fn(params={"batch_size": 1},
+                                 preprocess_fn=preprocess_fn)
+      iterator = dataset.make_initializable_iterator()
+      with self.session() as sess:
+        sess.run(iterator.initializer)
+        next_batch = iterator.get_next()
+        return [sess.run(next_batch) for _ in range(5)]
+
+  @parameterized.named_parameters(
+      ("FakeCifar", _preprocess_fn_id),
+      ("FakeCifarWithRandomNoise", _preprocess_fn_add_noise),
+  )
+  @flagsaver.flagsaver
+  def test_train_input_fn_is_deterministic(self, preprocess_fn):
+    FLAGS.data_fake_dataset = True
+    batches1 = self._run_train_input_fn("cifar10", preprocess_fn)
+    batches2 = self._run_train_input_fn("cifar10", preprocess_fn)
+    for i in range(len(batches1)):
+      # Check that both runs got the same images/noise.
+      self.assertAllClose(batches1[i][0], batches2[i][0])
+      self.assertAllClose(batches1[i][1], batches2[i][1])
+
+  @flagsaver.flagsaver
+  def test_train_input_fn_noise_changes(self):
+    FLAGS.data_fake_dataset = True
+    batches = self._run_train_input_fn("cifar10", _preprocess_fn_add_noise)
+    for i in range(1, len(batches)):
+      self.assertNotAllClose(batches[0][1], batches[i][1])
+      self.assertNotAllClose(batches[i - 1][1], batches[i][1])
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/compare_gan/eval_gan_lib.py b/compare_gan/eval_gan_lib.py
new file mode 100644
index 0000000..aa84e59
--- /dev/null
+++ b/compare_gan/eval_gan_lib.py
@@ -0,0 +1,212 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Evaluation library."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+from absl import flags
+from absl import logging
+
+from compare_gan import datasets
+from compare_gan import eval_utils
+from compare_gan import utils
+
+import gin
+import numpy as np
+from six.moves import range
+import tensorflow as tf
+
+import tensorflow_hub as hub
+
+FLAGS = flags.FLAGS
+
+# Special value returned when a fake image generated by a GAN has NaNs.
+NAN_DETECTED = 31337.0
+
+
+@gin.configurable("eval_z", blacklist=["shape", "name"])
+def z_generator(shape, distribution_fn=tf.random.uniform,
+                minval=-1.0, maxval=1.0, stddev=1.0, name=None):
+  """Random noise distributions as TF op.
+
+  Args:
+    shape: A 1-D integer Tensor or Python array.
+    distribution_fn: Function that creates a Tensor. If the function has any
+      of the arguments 'minval', 'maxval' or 'stddev' these are passed to it.
+    minval: The lower bound on the range of random values to generate.
+    maxval: The upper bound on the range of random values to generate.
+    stddev: The standard deviation of a normal distribution.
+    name: A name for the operation.
+
+  Returns:
+    Tensor with the given shape and dtype tf.float32.
+  """
+  return utils.call_with_accepted_args(
+      distribution_fn, shape=shape, minval=minval, maxval=maxval,
+      stddev=stddev, name=name)
+
+
+def _update_bn_accumulators(sess, generated, num_accu_examples):
+  """Returns True if the accumulators for batch norm were updated.
+
+  Args:
+    sess: `tf.Session` object. Checkpoint should already be loaded.
+    generated: Output tensor of the generator.
+    num_accu_examples: How many examples should be used to update accumulators.
+
+  Returns:
+    True if there were accumulators.
+  """
+  # Create update ops for batch statistic updates for each batch normalization
+  # with accumulators.
+  update_accu_switches = [v for v in tf.global_variables()
+                          if "accu/update_accus" in v.name]
+  logging.info("update_accu_switches: %s", update_accu_switches)
+  if not update_accu_switches:
+    return False
+  sess.run([tf.assign(v, 1) for v in update_accu_switches])
+  batch_size = generated.shape[0].value
+  num_batches = num_accu_examples // batch_size
+  for i in range(num_batches):
+    if i % 500 == 0:
+      logging.info("Updating BN accumulators %d/%d steps.", i, num_batches)
+    sess.run(generated)
+  sess.run([tf.assign(v, 0) for v in update_accu_switches])
+  logging.info("Done updating BN accumulators.")
+  return True
+
+
+def evaluate_tfhub_module(module_spec, eval_tasks, use_tpu,
+                          num_averaging_runs):
+  """Evaluates the given TF-Hub module.
+
+  Args:
+    module_spec: string, path to a TF hub module.
+    eval_tasks: List of objects that inherit from EvalTask.
+    use_tpu: Whether to use TPUs.
+    num_averaging_runs: Determines how many times each metric is computed.
+
+  Returns:
+    Dict[Text, float] with all the computed results.
+
+  Raises:
+    NanFoundError: If generator output has any NaNs.
+  """
+  # Make sure that the same latent variables are used for each evaluation.
+  np.random.seed(42)
+  dataset = datasets.get_dataset()
+  num_test_examples = dataset.eval_test_samples
+
+  batch_size = 64
+  num_batches = int(np.ceil(num_test_examples / batch_size))
+
+  # Load and update the generator.
+  result_dict = {}
+  fake_dsets = []
+  with tf.Graph().as_default():
+    tf.set_random_seed(42)
+    with tf.Session() as sess:
+      if use_tpu:
+        sess.run(tf.contrib.tpu.initialize_system())
+      def sample_from_generator():
+        """Create graph for sampling images."""
+        generator = hub.Module(
+            module_spec,
+            name="gen_module",
+            tags={"gen", "bs{}".format(batch_size)})
+        logging.info("Generator inputs: %s", generator.get_input_info_dict())
+        z_dim = generator.get_input_info_dict()["z"].get_shape()[1].value
+        z = z_generator(shape=[batch_size, z_dim])
+        if "labels" in generator.get_input_info_dict():
+          # Conditional GAN.
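+          # The module expects int32 class ids in [0, num_classes), e.g.
+          # uniform values in [0, 1000) for ImageNet.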
+ assert dataset.num_classes + labels = tf.random.uniform( + [batch_size], maxval=dataset.num_classes, dtype=tf.int32) + inputs = dict(z=z, labels=labels) + else: + # Unconditional GAN. + assert "labels" not in generator.get_input_info_dict() + inputs = dict(z=z) + return generator(inputs=inputs, as_dict=True)["generated"] + if use_tpu: + + generated = tf.contrib.tpu.rewrite(sample_from_generator) + else: + generated = sample_from_generator() + + tf.global_variables_initializer().run() + + + if _update_bn_accumulators(sess, generated, num_accu_examples=204800): + saver = tf.train.Saver() + save_path = os.path.join(module_spec, "model-with-accu.ckpt") + checkpoint_path = saver.save( + sess, + save_path=save_path) + logging.info("Exported generator with accumulated batch stats to " + "%s.", checkpoint_path) + if not eval_tasks: + logging.error("Task list is empty, returning.") + return + for i in range(num_averaging_runs): + logging.info("Generating fake data set %d/%d.", i+1, num_averaging_runs) + fake_dset = eval_utils.EvalDataSample( + eval_utils.sample_fake_dataset(sess, generated, num_batches)) + fake_dsets.append(fake_dset) + logging.info("Computing inception features for generated data %d/%d.", + i+1, num_averaging_runs) + activations, logits = eval_utils.inception_transform_np( + fake_dset.images, batch_size) + fake_dset.set_inception_features( + activations=activations, logits=logits) + fake_dset.set_num_examples(num_test_examples) + if i != 0: + # Free up some memory by releasing additional fake data samples. + # For ImageNet128 50k images are ~9 GiB. This will blow up metrics + # (such as fractal dimension) if num_averaging_runs > 1. + fake_dset.discard_images() + + real_dset = eval_utils.EvalDataSample( + eval_utils.get_real_images( + dataset=dataset, num_examples=num_test_examples)) + logging.info("Getting Inception features for real images.") + real_dset.activations, _ = eval_utils.inception_transform_np( + real_dset.images, batch_size) + real_dset.set_num_examples(num_test_examples) + + # Run all the tasks and update the result dictionary with the task statistics. + result_dict = {} + for task in eval_tasks: + task_results_dicts = [ + task.run_after_session(fake_dset, real_dset) + for fake_dset in fake_dsets + ] + # Average the score for each key. + result_statistics = {} + for key in task_results_dicts[0].keys(): + scores_for_key = np.array([d[key] for d in task_results_dicts]) + mean, std = np.mean(scores_for_key), np.std(scores_for_key) + scores_as_string = "_".join([str(x) for x in scores_for_key]) + result_statistics[key + "_mean"] = mean + result_statistics[key + "_std"] = std + result_statistics[key + "_list"] = scores_as_string + logging.info("Computed results for task %s: %s", task, result_statistics) + + result_dict.update(result_statistics) + return result_dict diff --git a/compare_gan/eval_gan_lib_test.py b/compare_gan/eval_gan_lib_test.py new file mode 100644 index 0000000..094c983 --- /dev/null +++ b/compare_gan/eval_gan_lib_test.py @@ -0,0 +1,125 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for eval_gan_lib."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import os.path
+
+from absl import flags
+from absl.testing import flagsaver
+from absl.testing import parameterized
+
+from compare_gan import datasets
+from compare_gan import eval_gan_lib
+from compare_gan import eval_utils
+from compare_gan.gans import consts as c
+from compare_gan.gans.modular_gan import ModularGAN
+from compare_gan.metrics import fid_score
+from compare_gan.metrics import fractal_dimension
+from compare_gan.metrics import inception_score
+from compare_gan.metrics import ms_ssim_score
+
+import gin
+import mock
+import tensorflow as tf
+
+FLAGS = flags.FLAGS
+
+
+def create_fake_inception_graph():
+  """Creates a `GraphDef` that mocks the Inception V1 graph.
+
+  It takes the input, multiplies it through a matrix full of 0.00001 values,
+  and provides the results in the endpoints 'pool_3' and 'logits'. This
+  matches the tensor names in the real Inception V1 model.
+
+  Returns:
+    `tf.GraphDef` for the mocked Inception V1 graph.
+  """
+  fake_inception = tf.Graph()
+  with fake_inception.as_default():
+    inputs = tf.placeholder(
+        tf.float32, shape=[None, 299, 299, 3], name="Mul")
+    w = tf.ones(shape=[299 * 299 * 3, 10]) * 0.00001
+    outputs = tf.matmul(tf.layers.flatten(inputs), w)
+    tf.identity(outputs, name="pool_3")
+    tf.identity(outputs, name="logits")
+  return fake_inception.as_graph_def()
+
+
+class EvalGanLibTest(parameterized.TestCase, tf.test.TestCase):
+
+  def setUp(self):
+    super(EvalGanLibTest, self).setUp()
+    gin.clear_config()
+    FLAGS.data_fake_dataset = True
+    self.mock_get_graph = mock.patch.object(
+        eval_utils, "get_inception_graph_def").start()
+    self.mock_get_graph.return_value = create_fake_inception_graph()
+
+  @parameterized.parameters(c.ARCHITECTURES)
+  @flagsaver.flagsaver
+  def test_end2end_checkpoint(self, architecture):
+    """Takes a real GAN (trained for 1 step) and evaluates it."""
+    if architecture in {c.RESNET5_ABLATION, c.RESNET_STL, c.RESNET30_ARCH}:
+      # RESNET5_ABLATION is not supported by ModularGAN.
+      # RESNET_STL and RESNET30_ARCH do not support the CIFAR image shape.
+      return
+    gin.bind_parameter("dataset.name", "cifar10")
+    dataset = datasets.get_dataset("cifar10")
+    options = {
+        "architecture": architecture,
+        "z_dim": 120,
+        "disc_iters": 1,
+        "lambda": 1,
+    }
+    model_dir = os.path.join(tf.test.get_temp_dir(), self.id())
+    tf.logging.info("model_dir: %s" % model_dir)
+    run_config = tf.contrib.tpu.RunConfig(model_dir=model_dir)
+    gan = ModularGAN(dataset=dataset,
                     parameters=options,
                     conditional="biggan" in architecture,
                     model_dir=model_dir)
+    estimator = gan.as_estimator(run_config, batch_size=2, use_tpu=False)
+    estimator.train(input_fn=gan.input_fn, steps=1)
+    export_path = os.path.join(model_dir, "tfhub")
+    checkpoint_path = os.path.join(model_dir, "model.ckpt-1")
+    module_spec = gan.as_module_spec()
+    module_spec.export(export_path, checkpoint_path=checkpoint_path)
+
+    eval_tasks = [
+        fid_score.FIDScoreTask(),
+        fractal_dimension.FractalDimensionTask(),
+        inception_score.InceptionScoreTask(),
+        ms_ssim_score.MultiscaleSSIMTask()
+    ]
+    result_dict = eval_gan_lib.evaluate_tfhub_module(
+        export_path, eval_tasks, use_tpu=False, num_averaging_runs=1)
+    tf.logging.info("result_dict: %s", result_dict)
+    for score in ["fid_score", "fractal_dimension", "inception_score",
+                  "ms_ssim"]:
+      for stats in ["mean", "std", "list"]:
+        required_key = "%s_%s" % (score, stats)
+        self.assertIn(required_key, result_dict, "Missing: %s." % required_key)
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/compare_gan/eval_utils.py b/compare_gan/eval_utils.py
new file mode 100644
index 0000000..d0d8c34
--- /dev/null
+++ b/compare_gan/eval_utils.py
@@ -0,0 +1,206 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Data-related utility functions.
+
+Includes:
+- A helper class to hold images and Inception features for evaluation.
+- A method to load a dataset as NumPy array.
+- A method to sample from the generator and return the data as a NumPy array.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+from absl import logging
+
+import numpy as np
+from six.moves import range
+import tensorflow as tf
+import tensorflow_gan as tfgan
+
+
+# Special value returned when a fake image generated by a GAN has NaNs.
+NAN_DETECTED = 31337.0
+
+INCEPTION_URL = "http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05.tar.gz"
+INCEPTION_FROZEN_GRAPH = "inceptionv1_for_inception_score.pb"
+
+
+def get_inception_graph_def():
+  return tfgan.eval.get_graph_def_from_url_tarball(  # pylint: disable=unreachable
+      url=INCEPTION_URL,
+      filename=INCEPTION_FROZEN_GRAPH,
+      tar_filename=os.path.basename(INCEPTION_URL))
+
+
+class NanFoundError(Exception):
+  """Exception thrown when NaNs are present in the output."""
+
+
+class EvalDataSample(object):
+  """Helper class to hold images and Inception features for evaluation.
+
+  All properties are NumPy arrays. Images are in [0, 255].
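+
+  Typical usage (sketch):
+    sample = EvalDataSample(fake_images)
+    sample.set_inception_features(activations=acts, logits=logits)
+    sample.set_num_examples(10000)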
+ """ + + def __init__(self, images): + self.images = images + self.activations = None + self.logits = None + + def discard_images(self): + logging.info("Deleting references to images: %s", self.images.shape) + del self.images + + def set_inception_features(self, activations, logits): + self.activations = activations + self.logits = logits + + def set_num_examples(self, num_examples): + if self.images is not None: + assert self.images.shape[0] >= num_examples + self.images = self.images[:num_examples] + if self.activations is not None: + assert self.activations.shape[0] >= num_examples + self.activations = self.activations[:num_examples] + if self.logits is not None: + assert self.logits.shape[0] >= num_examples + self.logits = self.logits[:num_examples] + + +def get_real_images(dataset, + num_examples, + split=None, + failure_on_insufficient_examples=True): + """Get num_examples images from the given dataset/split. + + Args: + dataset: `ImageDataset` object. + num_examples: Number of images to read. + split: Split of the dataset to use. If None will use the default split for + eval defined by the dataset. + failure_on_insufficient_examples: If True raise an exception if the + dataset/split does not images. Otherwise will log to error and return + fewer images. + + Returns: + 4-D NumPy array with images with values in [0, 256]. + + Raises: + ValueError: If the dataset/split does not of the number of requested number + requested images and `failure_on_insufficient_examples` is True. + """ + logging.info("Start loading real data.") + with tf.Graph().as_default(): + ds = dataset.eval_input_fn(split=split) + # Get real images from the dataset. In the case of a 1-channel + # dataset (like MNIST) convert it to 3 channels. + next_batch = ds.make_one_shot_iterator().get_next()[0] + shape = [num_examples] + next_batch.shape.as_list() + is_single_channel = shape[-1] == 1 + if is_single_channel: + shape[-1] = 3 + real_images = np.empty(shape, dtype=np.float32) + with tf.Session() as sess: + for i in range(num_examples): + try: + b = sess.run(next_batch) + b *= 255.0 + if is_single_channel: + b = np.tile(b, [1, 1, 3]) + real_images[i] = b + except tf.errors.OutOfRangeError: + logging.error("Reached the end of dataset. Read: %d samples.", i) + break + + if real_images.shape[0] != num_examples: + if failure_on_insufficient_examples: + raise ValueError("Not enough examples in the dataset %s: %d / %d" % + (dataset, real_images.shape[0], num_examples)) + else: + logging.error("Not enough examples in the dataset %s: %d / %d", dataset, + real_images.shape[0], num_examples) + + logging.info("Done loading real data.") + return real_images + + +def sample_fake_dataset(sess, generator, num_batches): + """Returns a generated data set as a NumPy array.""" + logging.info("Generating a fake data set.") + samples = [] + for _ in range(num_batches): + x = sess.run(generator) + # If NaNs were generated, ignore this checkpoint and assign a very high + # FID score which we handle specially later. + if np.isnan(x).any(): + logging.error("Detected NaN in fake_images! Returning NaN.") + raise NanFoundError("Detected NaN in fake images.") + samples.append(x) + fake_images = np.concatenate(samples, axis=0) + fake_images *= 255.0 + # Convert 1-channel datasets (like MNIST) to 3 channels. 
+ if fake_images.shape[3] == 1: + fake_images = np.tile(fake_images, [1, 1, 1, 3]) + logging.info("Done sampling a generated data set.") + return fake_images + + +def inception_transform(inputs): + with tf.control_dependencies([ + tf.assert_greater_equal(inputs, 0.0), + tf.assert_less_equal(inputs, 255.0)]): + inputs = tf.identity(inputs) + preprocessed_inputs = tf.map_fn( + fn=tfgan.eval.preprocess_image, elems=inputs, back_prop=False) + return tfgan.eval.run_inception( + preprocessed_inputs, + graph_def=get_inception_graph_def(), + output_tensor=["pool_3:0", "logits:0"]) + + +def inception_transform_np(inputs, batch_size): + """Computes the inception features and logits for a given NumPy array. + + The inputs are first preprocessed to match the input shape required for + Inception. + + Args: + inputs: NumPy array of shape [-1, H, W, 3]. + batch_size: Batch size. + + Returns: + A tuple of NumPy arrays with Inception features and logits for each input. + """ + with tf.Session(graph=tf.Graph()) as sess: + inputs_placeholder = tf.placeholder( + dtype=tf.float32, shape=[None] + list(inputs[0].shape)) + features_and_logits = inception_transform(inputs_placeholder) + features = [] + logits = [] + num_batches = int(np.ceil(inputs.shape[0] / batch_size)) + for i in range(num_batches): + input_batch = inputs[i * batch_size:(i + 1) * batch_size] + x = sess.run( + features_and_logits, feed_dict={inputs_placeholder: input_batch}) + features.append(x[0]) + logits.append(x[1]) + features = np.vstack(features) + logits = np.vstack(logits) + return features, logits diff --git a/compare_gan/src/gans/__init__.py b/compare_gan/gans/__init__.py similarity index 100% rename from compare_gan/src/gans/__init__.py rename to compare_gan/gans/__init__.py diff --git a/compare_gan/gans/abstract_gan.py b/compare_gan/gans/abstract_gan.py new file mode 100644 index 0000000..5eec5c1 --- /dev/null +++ b/compare_gan/gans/abstract_gan.py @@ -0,0 +1,92 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Interface for GAN models that can be trained using the Estimator API.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc + +import six +import tensorflow as tf + + +@six.add_metaclass(abc.ABCMeta) +class AbstractGAN(object): + """Interface for GAN models that can be training using the Estimator API.""" + + def __init__(self, + dataset, + parameters, + model_dir): + super(AbstractGAN, self).__init__() + self._dataset = dataset + self._parameters = parameters + self._model_dir = model_dir + + def as_estimator(self, run_config, batch_size, use_tpu): + """Returns a TPUEstimator for this GAN.""" + return tf.contrib.tpu.TPUEstimator( + config=run_config, + use_tpu=use_tpu, + model_fn=self.model_fn, + train_batch_size=batch_size) + + @abc.abstractmethod + def as_module_spec(self, params, mode): + """Returns the generator network as TFHub module spec.""" + + @abc.abstractmethod + def input_fn(self, params, mode): + """Input function that retuns a `tf.data.Dataset` object. + + This function will be called once for each host machine. + + Args: + params: Python dictionary with parameters given to TPUEstimator. + Additional TPUEstimator will set the key `batch_size` with the batch + size for this host machine and `tpu_contextu` with a TPUContext + object. + mode: `tf.estimator.MoedeKeys` value. + + Returns: + A `tf.data.Dataset` object with batched features and labels. + """ + + @abc.abstractmethod + def model_fn(self, features, labels, params, mode): + """Constructs the model for the given features and mode. + + This interface only requires implementing the TRAIN mode. + + On TPUs the model_fn should construct a graph for a single TPU core. + Wrap the optimizer with a `tf.contrib.tpu.CrossShardOptimizer` to do + synchronous training with all TPU cores.c + + Args: + features: A dictionary with the feature tensors. + labels: Tensor will labels. Will be None if mode is PREDICT. + params: Dictionary with hyperparameters passed to TPUEstimator. + Additional TPUEstimator will set 3 keys: `batch_size`, `use_tpu`, + `tpu_context`. `batch_size` is the batch size for this core. + mode: `tf.estimator.ModeKeys` value (TRAIN, EVAL, PREDICT). The mode + should be passed to the TPUEstimatorSpec and your model should be + build this mode. + + Returns: + A `tf.contrib.tpu.TPUEstimatorSpec`. + """ diff --git a/compare_gan/src/gans/consts.py b/compare_gan/gans/consts.py similarity index 74% rename from compare_gan/src/gans/consts.py rename to compare_gan/gans/consts.py index b9684e1..9ee6eae 100644 --- a/compare_gan/src/gans/consts.py +++ b/compare_gan/gans/consts.py @@ -13,16 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Defines constants used across the code base.""" + from __future__ import absolute_import from __future__ import division from __future__ import print_function -GAN_WITH_PENALTY = "GAN_PENALTY" -WGAN_WITH_PENALTY = "WGAN_PENALTY" -LSGAN_WITH_PENALTY = "LSGAN_PENALTY" -MODELS_WITH_PENALTIES = [GAN_WITH_PENALTY, WGAN_WITH_PENALTY, - LSGAN_WITH_PENALTY] +NORMAL_INIT = "normal" +TRUNCATED_INIT = "truncated" +ORTHOGONAL_INIT = "orthogonal" +INITIALIZERS = [NORMAL_INIT, TRUNCATED_INIT, ORTHOGONAL_INIT] NO_NORMALIZATION = "none" BATCH_NORM = "batch_norm" @@ -32,32 +33,29 @@ NORMALIZERS = [NO_NORMALIZATION, BATCH_NORM, LAYER_NORM, SPECTRAL_NORM, WEIGHT_NORM] -NO_PENALTY = "no_penalty" -WGANGP_PENALTY = "wgangp_penalty" -DRAGAN_PENALTY = "dragan_penalty" -L2_PENALTY = "l2_penalty" -PENALTIES = [NO_PENALTY, WGANGP_PENALTY, DRAGAN_PENALTY, L2_PENALTY] - INFOGAN_ARCH = "infogan_arch" DCGAN_ARCH = "dcgan_arch" RESNET5_ARCH = "resnet5_arch" +RESNET5_BIGGAN_ARCH = "resnet5_biggan_arch" SNDCGAN_ARCH = "sndcgan_arch" RESNET_CIFAR = "resnet_cifar_arch" RESNET_STL = "resnet_stl_arch" -RESNET107_ARCH = "resnet107_arch" +RESNET30_ARCH = "resnet30_arch" RESNET5_ABLATION = "resnet5_ablation" # Only to compare changes in Resnet. ARCHITECTURES = [INFOGAN_ARCH, DCGAN_ARCH, RESNET_CIFAR, SNDCGAN_ARCH, - RESNET5_ARCH, RESNET107_ARCH, RESNET_STL, RESNET5_ABLATION] + RESNET5_ARCH, RESNET5_BIGGAN_ARCH, RESNET30_ARCH, RESNET_STL, + RESNET5_ABLATION] # This will be used when checking that the regularization was applied -# on a correct number of layers. Currently used only for L2 regularization. - +# on a correct number of layers. Currently used only for L2 regularization in +# penalty_lib. N_DISCRIMINATOR_LAYERS = { INFOGAN_ARCH: 4, DCGAN_ARCH: 5, SNDCGAN_ARCH: 8, RESNET_CIFAR: 13, RESNET5_ARCH: 19, - RESNET107_ARCH: 107, + RESNET5_BIGGAN_ARCH: 19, + RESNET30_ARCH: 107, RESNET5_ABLATION: 19, } diff --git a/compare_gan/gans/gpu_modular_gan.py b/compare_gan/gans/gpu_modular_gan.py new file mode 100644 index 0000000..0e7ce10 --- /dev/null +++ b/compare_gan/gans/gpu_modular_gan.py @@ -0,0 +1,181 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provides ModularGAN for GAN models with penalty loss.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +from absl import logging +from compare_gan import utils +from compare_gan.gans.modular_gan import ModularGAN +from compare_gan.tpu import tpu_summaries +import gin +import tensorflow as tf + + +@gin.configurable(blacklist=["dataset", "parameters", "model_dir"]) +class GPUModularGAN(ModularGAN): + """Base class for GANs models that support the Estimator API on GPU. + + ModularGAN unrolls the generator iterations as this increases efficiency on + TPU. However, on GPU this can lead to OOM errors when the model and/or batch + size is large, or when multiple discriminator iterations per generator + iteration are performed. 
GPUModularGAN omits unrolling and therefore
+  might prevent OOM errors on GPU.
+
+  IMPORTANT: In contrast to ModularGAN, GPUModularGAN uses the same batch
+  of generated samples to update the generator as used in the most recent
+  discriminator update. This leads to additional memory savings and is what
+  used to be done in compare_gan_v2 by default.
+  """
+
+  @property
+  def num_sub_steps(self):
+    # No unrolling, so the number of sub steps is always 1.
+    return 1
+
+  def model_fn(self, features, labels, params, mode):
+    """Constructs the model for the given features and mode.
+
+    Args:
+      features: A dictionary with the feature tensors.
+      labels: Tensor with labels. Will be None if mode is PREDICT.
+      params: Dictionary with hyperparameters passed to TPUEstimator.
+        Additionally TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
+        `tpu_context`. `batch_size` is the batch size for this core.
+      mode: `tf.estimator.ModeKeys` value (TRAIN, EVAL, PREDICT). The mode
+        should be passed to the TPUEstimatorSpec and your model should be
+        built for this mode.
+
+    Returns:
+      A `tf.contrib.tpu.TPUEstimatorSpec`.
+    """
+    logging.info("model_fn(): features=%s, labels=%s, mode=%s, params=%s",
+                 features, labels, mode, params)
+
+    if mode != tf.estimator.ModeKeys.TRAIN:
+      raise ValueError("Only training mode is supported.")
+
+    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
+    global_step = tf.train.get_or_create_global_step()
+    # Variable to count discriminator steps.
+    global_step_disc = tf.get_variable("global_step_discriminator",
+                                       dtype=tf.int32,
+                                       initializer=tf.constant(0),
+                                       trainable=False)
+
+    # Create ops for first D steps here to create the variables.
+    with tf.name_scope("disc_step"):
+      self.create_loss(features, labels, params,
+                       is_training=is_training, reuse=False)
+
+    # Divide trainable variables into a group for D and a group for G.
+    t_vars = tf.trainable_variables()
+    d_vars = [var for var in t_vars if "discriminator" in var.name]
+    g_vars = [var for var in t_vars if "generator" in var.name]
+    if len(t_vars) != len(d_vars) + len(g_vars):
+      logging.error("There are variables that are neither part of G nor D.")
+    self._check_variables(t_vars, d_vars, g_vars)
+
+    d_optimizer = self.d_optimizer(params["use_tpu"])
+    g_optimizer = self.g_optimizer(params["use_tpu"])
+
+    # In the following each sub-step (disc_iters steps on D + one step on G)
+    # depends on previous sub-steps. The optimizer ops for each step
+    # depend on all the update ops (from batch norm etc.). Each update op
+    # will still only be executed once.
+    deps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
+
+    # Discriminator training.
+    with tf.control_dependencies(deps):
+      deps.append(d_optimizer.minimize(
+          self.d_loss, var_list=d_vars, global_step=global_step_disc))
+
+    # Clean old summaries from previous calls to model_fn().
+    self._tpu_summary = tpu_summaries.TpuSummaries(self._model_dir)
+    self._tpu_summary.scalar("loss/d", self.d_loss)
+    with tf.name_scope("fake_images"):
+      z = features["z"]
+      sampled_y = None
+      if self.conditional:
+        sampled_y = self._get_one_hot_labels(features["sampled_labels"])
+      fake_images = self.generator(
+          z, y=sampled_y, is_training=True, reuse=True)
+    self._add_images_to_summary(fake_images, "fake_images", params)
+    self._add_images_to_summary(features["images"], "real_images", params)
+
+    # Generator training.
+    with tf.name_scope("gen_step"):
+      with tf.control_dependencies(deps):
+        self._tpu_summary.scalar("loss/g", self.g_loss)
+        deps.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
+      with tf.control_dependencies(deps):
+        if self._disc_iters == 1:
+          train_op = g_optimizer.minimize(self.g_loss, var_list=g_vars,
+                                          global_step=global_step)
+        else:
+          # We should only train the generator every self._disc_iters steps.
+          # We can do this using `tf.cond`. Both paths must return a tensor.
+          # Our true_fn will return a tensor that depends on training the
+          # generator, while the tensor from false_fn depends on nothing.
+          def do_train_generator():
+            actual_train_op = g_optimizer.minimize(self.g_loss, var_list=g_vars,
+                                                   global_step=global_step)
+            with tf.control_dependencies([actual_train_op]):
+              return tf.constant(0)
+          def do_not_train_generator():
+            return tf.constant(0)
+          train_op = tf.cond(
+              tf.equal(global_step_disc % self._disc_iters, 0),
+              true_fn=do_train_generator,
+              false_fn=do_not_train_generator,
+              name="").op
+    loss = self.g_loss
+
+    if self._g_use_ema:
+      with tf.name_scope("generator_ema"):
+        logging.info("Creating moving averages of weights: %s", g_vars)
+        def do_update_ema():
+          # The decay value is set to 0 if we're before the moving-average
+          # start point, so that the EMA vars will be the normal vars.
+          decay = self._ema_decay * tf.cast(
+              tf.greater_equal(global_step, self._ema_start_step), tf.float32)
+          ema = tf.train.ExponentialMovingAverage(decay=decay)
+          return ema.apply(g_vars)
+        def do_not_update_ema():
+          return tf.constant(0).op
+
+        with tf.control_dependencies([train_op]):
+          train_op = tf.cond(
+              tf.equal(global_step_disc % self._disc_iters, 0),
+              true_fn=do_update_ema,
+              false_fn=do_not_update_ema,
+              name="")
+
+    d_param_overview = utils.get_parameter_overview(d_vars, limit=None)
+    g_param_overview = utils.get_parameter_overview(g_vars, limit=None)
+    logging.info("Discriminator variables:\n%s", d_param_overview)
+    logging.info("Generator variables:\n%s", g_param_overview)
+
+    return tf.contrib.tpu.TPUEstimatorSpec(
+        mode=mode,
+        host_call=self._tpu_summary.get_host_call(),
+        # Estimator requires a loss which gets displayed on TensorBoard.
+        # The given Tensor is evaluated but not used to create gradients.
+        loss=loss,
+        train_op=train_op)
diff --git a/compare_gan/gans/loss_lib.py b/compare_gan/gans/loss_lib.py
new file mode 100644
index 0000000..1b82453
--- /dev/null
+++ b/compare_gan/gans/loss_lib.py
@@ -0,0 +1,154 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of popular GAN losses."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from compare_gan import utils
+import gin
+import tensorflow as tf
+
+
+def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):
+  """Checks the shapes and ranks of logits and prediction tensors.
+
+  Args:
+    d_real: prediction for real points, values in [0, 1], shape [batch_size, 1].
+ d_fake: prediction for fake points, values in [0, 1], shape [batch_size, 1]. + d_real_logits: logits for real points, shape [batch_size, 1]. + d_fake_logits: logits for fake points, shape [batch_size, 1]. + + Raises: + ValueError: if the ranks or shapes are mismatched. + """ + def _check_pair(a, b): + if a != b: + raise ValueError("Shape mismatch: %s vs %s." % (a, b)) + if len(a) != 2 or len(b) != 2: + raise ValueError("Rank: expected 2, got %s and %s" % (len(a), len(b))) + + if (d_real is not None) and (d_fake is not None): + _check_pair(d_real.shape.as_list(), d_fake.shape.as_list()) + if (d_real_logits is not None) and (d_fake_logits is not None): + _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list()) + if (d_real is not None) and (d_real_logits is not None): + _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list()) + + +@gin.configurable(whitelist=[]) +def non_saturating(d_real_logits, d_fake_logits, d_real=None, d_fake=None): + """Returns the discriminator and generator loss for Non-saturating loss. + + Args: + d_real_logits: logits for real points, shape [batch_size, 1]. + d_fake_logits: logits for fake points, shape [batch_size, 1]. + d_real: ignored. + d_fake: ignored. + + Returns: + A tuple consisting of the discriminator loss, discriminator's loss on the + real samples and fake samples, and the generator's loss. + """ + with tf.name_scope("non_saturating_loss"): + check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits) + d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( + logits=d_real_logits, labels=tf.ones_like(d_real_logits), + name="cross_entropy_d_real")) + d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( + logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits), + name="cross_entropy_d_fake")) + d_loss = d_loss_real + d_loss_fake + g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( + logits=d_fake_logits, labels=tf.ones_like(d_fake_logits), + name="cross_entropy_g")) + return d_loss, d_loss_real, d_loss_fake, g_loss + + +@gin.configurable(whitelist=[]) +def wasserstein(d_real_logits, d_fake_logits, d_real=None, d_fake=None): + """Returns the discriminator and generator loss for Wasserstein loss. + + Args: + d_real_logits: logits for real points, shape [batch_size, 1]. + d_fake_logits: logits for fake points, shape [batch_size, 1]. + d_real: ignored. + d_fake: ignored. + + Returns: + A tuple consisting of the discriminator loss, discriminator's loss on the + real samples and fake samples, and the generator's loss. + """ + with tf.name_scope("wasserstein_loss"): + check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits) + d_loss_real = -tf.reduce_mean(d_real_logits) + d_loss_fake = tf.reduce_mean(d_fake_logits) + d_loss = d_loss_real + d_loss_fake + g_loss = -d_loss_fake + return d_loss, d_loss_real, d_loss_fake, g_loss + + +@gin.configurable(whitelist=[]) +def least_squares(d_real, d_fake, d_real_logits=None, d_fake_logits=None): + """Returns the discriminator and generator loss for the least-squares loss. + + Args: + d_real: prediction for real points, values in [0, 1], shape [batch_size, 1]. + d_fake: prediction for fake points, values in [0, 1], shape [batch_size, 1]. + d_real_logits: ignored. + d_fake_logits: ignored. + + Returns: + A tuple consisting of the discriminator loss, discriminator's loss on the + real samples and fake samples, and the generator's loss. 
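+
+  With D(x) for real and D(G(z)) for fake points this computes
+    d_loss = 0.5 * (E[(D(x) - 1)^2] + E[D(G(z))^2])
+    g_loss = 0.5 * E[(D(G(z)) - 1)^2]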
+ """ + with tf.name_scope("least_square_loss"): + check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits) + d_loss_real = tf.reduce_mean(tf.square(d_real - 1.0)) + d_loss_fake = tf.reduce_mean(tf.square(d_fake)) + d_loss = 0.5 * (d_loss_real + d_loss_fake) + g_loss = 0.5 * tf.reduce_mean(tf.square(d_fake - 1.0)) + return d_loss, d_loss_real, d_loss_fake, g_loss + + +@gin.configurable(whitelist=[]) +def hinge(d_real_logits, d_fake_logits, d_real=None, d_fake=None): + """Returns the discriminator and generator loss for the hinge loss. + + Args: + d_real_logits: logits for real points, shape [batch_size, 1]. + d_fake_logits: logits for fake points, shape [batch_size, 1]. + d_real: ignored. + d_fake: ignored. + + Returns: + A tuple consisting of the discriminator loss, discriminator's loss on the + real samples and fake samples, and the generator's loss. + """ + with tf.name_scope("hinge_loss"): + check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits) + d_loss_real = tf.reduce_mean(tf.nn.relu(1.0 - d_real_logits)) + d_loss_fake = tf.reduce_mean(tf.nn.relu(1.0 + d_fake_logits)) + d_loss = d_loss_real + d_loss_fake + g_loss = - tf.reduce_mean(d_fake_logits) + return d_loss, d_loss_real, d_loss_fake, g_loss + + +@gin.configurable("loss", whitelist=["fn"]) +def get_losses(fn=non_saturating, **kwargs): + """Returns the losses for the discriminator and generator.""" + return utils.call_with_accepted_args(fn, **kwargs) diff --git a/compare_gan/gans/modular_gan.py b/compare_gan/gans/modular_gan.py new file mode 100644 index 0000000..69e341f --- /dev/null +++ b/compare_gan/gans/modular_gan.py @@ -0,0 +1,658 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Provides ModularGAN for GAN models with penalty loss.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import itertools + +from absl import flags +from absl import logging +from compare_gan import utils +from compare_gan.architectures import abstract_arch +from compare_gan.architectures import dcgan +from compare_gan.architectures import infogan +from compare_gan.architectures import resnet30 +from compare_gan.architectures import resnet5 +from compare_gan.architectures import resnet5_biggan +from compare_gan.architectures import resnet_cifar +from compare_gan.architectures import resnet_stl +from compare_gan.architectures import sndcgan +from compare_gan.gans import consts as c +from compare_gan.gans import loss_lib +from compare_gan.gans import penalty_lib +from compare_gan.gans.abstract_gan import AbstractGAN +from compare_gan.tpu import tpu_random +from compare_gan.tpu import tpu_summaries +import gin +import numpy as np +from six.moves import range +import tensorflow as tf +import tensorflow_gan as tfgan +import tensorflow_hub as hub + +FLAGS = flags.FLAGS + + +@gin.configurable(blacklist=["dataset", "parameters", "model_dir"]) +class ModularGAN(AbstractGAN): + """Base class for GANs models that support the Estimator API.""" + + def __init__(self, + dataset, + parameters, + model_dir, + deprecated_split_disc_calls=False, + experimental_joint_gen_for_disc=False, + g_use_ema=False, + ema_decay=0.9999, + ema_start_step=40000, + g_optimizer_fn=tf.train.AdamOptimizer, + d_optimizer_fn=None, + g_lr=0.0002, + d_lr=None, + conditional=False, + fit_label_distribution=False): + """ModularGAN is a Gin configurable implementation of AbstractGAN. + + Args: + dataset: `ImageDataset` object. If `conditional` the dataset must provide + labels and the number of classes bust known. + parameters: Legacy Python dictionary with additional parameters. This must + have the keys 'architecture', 'z_dim' and 'lambda'. + model_dir: Directory path for storing summary files. + deprecated_split_disc_calls: If True pass fake and real images separately + through the discriminator network. + experimental_joint_gen_for_disc: If True generate fake images for all D + iterations jointly. This increase the batch size in G when generating + fake images for D. The G step is stays the same. + g_use_ema: If True keep moving averages for weights in G and use them in + the TF-Hub module. + ema_decay: Decay rate for moving averages for G's weights. + ema_start_step: Start step for keeping moving averages. Before this the + decay rate is 0. + g_optimizer_fn: Function (or constructor) to return an optimizer for G. + d_optimizer_fn: Function (or constructor) to return an optimizer for D. + If None will call `g_optimizer_fn`. + g_lr: Learning rate for G. + d_lr: Learning rate for D. Defaults to `g_lr`. + conditional: Whether the GAN is conditional. If True both G and Y will + get passed labels. + fit_label_distribution: Whether to fit the label distribution. 
+ """ + super(ModularGAN, self).__init__( + dataset=dataset, parameters=parameters, model_dir=model_dir) + self._deprecated_split_disc_calls = deprecated_split_disc_calls + self._experimental_joint_gen_for_disc = experimental_joint_gen_for_disc + self._g_use_ema = g_use_ema + self._ema_decay = ema_decay + self._ema_start_step = ema_start_step + self._g_optimizer_fn = g_optimizer_fn + self._d_optimizer_fn = d_optimizer_fn + if self._d_optimizer_fn is None: + self._d_optimizer_fn = g_optimizer_fn + self._g_lr = g_lr + self._d_lr = g_lr if d_lr is None else d_lr + + if conditional and not self._dataset.num_classes: + raise ValueError( + "Option 'conditional' selected but dataset {} does not have " + "labels".format(self._dataset.name)) + self._conditional = conditional + self._fit_label_distribution = fit_label_distribution + + self._tpu_summary = tpu_summaries.TpuSummaries(model_dir) + + # Parameters that have not been ported to Gin. + self._architecture = parameters["architecture"] + self._z_dim = parameters["z_dim"] + self._lambda = parameters["lambda"] + + # Number of discriminator iterations per one iteration of the generator. + self._disc_iters = parameters.get("disc_iters", 1) + + # Will be set by create_loss(). + self.d_loss = None + self.g_loss = None + self.penalty_loss = None + + @property + def num_sub_steps(self): + # We are training D and G separately. Since TPU require use to have a single + # training operations we join multiple sub steps in one step. One sub step + # for each discriminator training step (disc_iters) and a separate sub step + # (with new inputs) for the generator training step. + return self._disc_iters + 1 + + @property + def conditional(self): + return self._conditional + + def as_estimator(self, run_config, batch_size, use_tpu): + """Returns a TPUEstimator for this GAN.""" + return tf.contrib.tpu.TPUEstimator( + config=run_config, + use_tpu=use_tpu, + model_fn=self.model_fn, + train_batch_size=batch_size * self.num_sub_steps) + + def _module_fn(self, model, batch_size): + """Module Function to create a TF Hub module spec. + + Args: + model: `tf.estimator.ModeKeys` value. + batch_size: batch size. + """ + if model not in {"gen", "disc"}: + raise ValueError("Model {} not support in module_fn()".format(model)) + placeholder_fn = tf.placeholder if batch_size is None else tf.zeros + is_training = False + inputs = {} + y = None + if model == "gen": + inputs["z"] = placeholder_fn( + shape=(batch_size, self._z_dim), + dtype=tf.float32, + name="z_for_eval") + elif model == "disc": + inputs["images"] = placeholder_fn( + shape=[batch_size] + list(self._dataset.image_shape), + dtype=tf.float32, + name="images_for_eval") + if self.conditional: + inputs["labels"] = placeholder_fn( + shape=(batch_size,), + dtype=tf.int32, + name="labels_for_eval") + y = self._get_one_hot_labels(inputs["labels"]) + else: + y = None + + logging.info("Creating module for model %s with inputs %s and y=%s", + model, inputs, y) + outputs = {} + if model == "disc": + outputs["prediction"], _, _ = self.discriminator( + inputs["images"], y=y, is_training=is_training) + else: + z = inputs["z"] + generated = self.generator(z, y=y, is_training=is_training) + if self._g_use_ema and not is_training: + g_vars = [var for var in tf.trainable_variables() + if "generator" in var.name] + ema = tf.train.ExponentialMovingAverage(decay=self._ema_decay) + # Create the variables that will be loaded from the checkpoint. 
+ ema.apply(g_vars) + def ema_getter(getter, name, *args, **kwargs): + var = getter(name, *args, **kwargs) + ema_var = ema.average(var) + if ema_var is None: + var_names_without_ema = {"u_var", "accu_mean", "accu_variance", + "accu_counter", "update_accus"} + if name.split("/")[-1] not in var_names_without_ema: + logging.warning("Could not find EMA variable for %s.", name) + return var + return ema_var + with tf.variable_scope("", values=[z, y], reuse=True, + custom_getter=ema_getter): + generated = self.generator(z, y=y, is_training=is_training) + outputs["generated"] = generated + + hub.add_signature(inputs=inputs, outputs=outputs) + + def as_module_spec(self): + """Returns the generator network as TFHub module spec.""" + models = ["gen", "disc"] + batch_sizes = [8, 16, 32, 64, 128] + if "resnet" in self._architecture: + # Only ResNet architectures support dynamic batch size. + batch_sizes.append(None) + tags_and_args = [(set(), {"model": "gen", "batch_size": 64})] + for model, bs in itertools.product(models, batch_sizes): + tags = {model, "bs{}".format(bs)} + args = {"model": model, "batch_size": bs} + tags_and_args.append((tags, args)) + return hub.create_module_spec( + self._module_fn, tags_and_args=tags_and_args, + drop_collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES]) + + def _samples_and_grid_shape(self, max_samples_per_replica, num_replicas): + total_samples = num_replicas + while total_samples < 64 or total_samples & (total_samples - 1) != 0: + total_samples += num_replicas + samples_per_replica = min(max_samples_per_replica, + total_samples // num_replicas) + num_summary_images = samples_per_replica * num_replicas + if num_summary_images & (num_summary_images - 1) != 0: + raise ValueError( + "Number of summary images must be a power of 2 to create a grid of " + "images but was {}.".format(num_summary_images)) + # Since b = 2^c we can use x = 2^(floor(c/2)) and y = 2^(ceil(c/2)). 
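+    # E.g. 64 summary images yield an 8x8 grid, 128 yield an 8x16 grid.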
+    x = 2 ** int(np.log2(num_summary_images) / 2)
+    y = num_summary_images // x
+    return samples_per_replica, (x, y)
+
+  def _add_images_to_summary(self, images, summary_name, params):
+    """Called from model_fn() to add a grid of images as summary."""
+    num_replicas = params["context"].num_replicas if "context" in params else 1
+    max_samples_per_replica = params["batch_size"] // self.num_sub_steps
+    samples_per_replica, grid_shape = (
+        self._samples_and_grid_shape(max_samples_per_replica, num_replicas))
+    def _merge_images_to_grid(tensor):
+      logging.info("Creating images summary for fake images: %s", tensor)
+      return tfgan.eval.image_grid(
+          tensor,
+          grid_shape=grid_shape,
+          image_shape=self._dataset.image_shape[:2],
+          num_channels=self._dataset.image_shape[2])
+    self._tpu_summary.image(summary_name,
+                            images[:samples_per_replica],
+                            reduce_fn=_merge_images_to_grid)
+
+  def _check_variables(self, t_vars, d_vars, g_vars):
+    """Make sure that every variable belongs to generator or discriminator."""
+    shared_vars = set(d_vars) & set(g_vars)
+    if shared_vars:
+      raise ValueError("Shared trainable variables: %s" % shared_vars)
+    unused_vars = set(t_vars) - set(d_vars) - set(g_vars)
+    if unused_vars:
+      raise ValueError("Unused trainable variables: %s" % unused_vars)
+
+  def _get_one_hot_labels(self, labels):
+    if not self.conditional:
+      raise ValueError(
+          "_get_one_hot_labels() called but GAN is not conditional.")
+    return tf.one_hot(labels, self._dataset.num_classes)
+
+  @gin.configurable("z", blacklist=["shape", "name"])
+  def z_generator(self, shape, distribution_fn=tf.random.uniform,
+                  minval=-1.0, maxval=1.0, stddev=1.0, name=None):
+    """Random noise distribution as TF op.
+
+    Args:
+      shape: A 1-D integer Tensor or Python array.
+      distribution_fn: Function that creates a Tensor. If the function has any
+        of the arguments 'minval', 'maxval' or 'stddev' these are passed to it.
+      minval: The lower bound on the range of random values to generate.
+      maxval: The upper bound on the range of random values to generate.
+      stddev: The standard deviation of a normal distribution.
+      name: A name for the operation.
+
+    Returns:
+      Tensor with the given shape and dtype tf.float32.
+    """
+    return utils.call_with_accepted_args(
+        distribution_fn, shape=shape, minval=minval, maxval=maxval,
+        stddev=stddev, name=name)
+
+  def label_generator(self, shape, name=None):
+    if not self.conditional:
+      raise ValueError("label_generator() called but GAN is not conditional.")
+    # Assume uniform label distribution.
+    return tf.random.uniform(shape, minval=0, maxval=self._dataset.num_classes,
+                             dtype=tf.int32, name=name)
+
+  def _preprocess_fn(self, images, labels, seed=None):
+    """Creates the feature dictionary with images and z."""
+    logging.info("_preprocess_fn(): images=%s, labels=%s, seed=%s",
+                 images, labels, seed)
+    tf.set_random_seed(seed)
+    features = {
+        "images": images,
+        "z": self.z_generator([self._z_dim], name="z"),
+    }
+    if self.conditional:
+      if self._fit_label_distribution:
+        features["sampled_labels"] = labels
+      else:
+        features["sampled_labels"] = self.label_generator(
+            shape=[], name="sampled_labels")
+    return features, labels
+
+  def input_fn(self, params, mode):
+    """Input function that returns a `tf.data.Dataset` object.
+
+    This function will be called once for each host machine.
+
+    Args:
+      params: Python dictionary with parameters given to TPUEstimator.
+        Additionally, TPUEstimator will set the key `batch_size` with the
+        batch size for this host machine and `tpu_context` with a TPUContext
+        object.
+      mode: `tf.estimator.ModeKeys` value.
+
+    Returns:
+      A `tf.data.Dataset` object with batched features and labels.
+    """
+    return self._dataset.input_fn(mode=mode, params=params,
+                                  preprocess_fn=self._preprocess_fn)
+
+  def model_fn(self, features, labels, params, mode):
+    """Constructs the model for the given features and mode.
+
+    Args:
+      features: A dictionary with the feature tensors.
+      labels: Tensor with labels. Will be None if mode is PREDICT.
+      params: Dictionary with hyperparameters passed to TPUEstimator.
+        Additionally, TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
+        `tpu_context`. `batch_size` is the batch size for this core.
+      mode: `tf.estimator.ModeKeys` value (TRAIN, EVAL, PREDICT). The mode
+        should be passed to the TPUEstimatorSpec and your model should be
+        built in this mode.
+
+    Returns:
+      A `tf.contrib.tpu.TPUEstimatorSpec`.
+    """
+    logging.info("model_fn(): features=%s, labels=%s, mode=%s, params=%s",
+                 features, labels, mode, params)
+
+    if mode != tf.estimator.ModeKeys.TRAIN:
+      raise ValueError("Only training mode is supported.")
+
+    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
+    global_step = tf.train.get_or_create_global_step()
+
+    def _create_sub_step_loss(sub_step_idx=0, reuse=True):
+      """Creates the loss for a slice of the current batch.
+
+      Sets self.d_loss and self.g_loss for the given slice.
+
+      Args:
+        sub_step_idx: Index of the slice of the batch to use to construct the
+          loss. If self.unroll_disc_iters is True this must be 0 and the whole
+          batch will be used.
+        reuse: Bool, whether to reuse existing variables for the models.
+          Should be False for the first call and True on all other calls.
+      """
+      logging.info("sub_step_idx: %s, params: %s", sub_step_idx, params)
+      # Set the random offset tensor for operations in tpu_random.py.
+      tpu_random.set_random_offset_from_features(fs[sub_step_idx])
+      self.create_loss(fs[sub_step_idx], ls[sub_step_idx], params,
+                       is_training=is_training, reuse=reuse)
+
+    # Split inputs for sub steps.
+    fs = [(k, tf.split(features[k], self.num_sub_steps)) for k in features]
+    fs = [{k: v[i] for k, v in fs} for i in range(self.num_sub_steps)]
+    ls = tf.split(labels, self.num_sub_steps)
+
+    # Only the last sub step changes the generator weights. Thus we can combine
+    # all forward passes through G to achieve better efficiency. The forward
+    # pass for G's own step needs to be separate since we compute gradients
+    # for it.
+    if self._experimental_joint_gen_for_disc:
+      logging.info("Running generator forward pass for all D steps.")
+      with tf.name_scope("gen_for_disc"):
+        bs = params["batch_size"] // self.num_sub_steps
+        # D steps.
+        z = features["z"][:-bs]
+        sampled_y = None
+        if self.conditional:
+          sampled_y = self._get_one_hot_labels(features["sampled_labels"][:-bs])
+        generated = tf.stop_gradient(self.generator(
+            z, y=sampled_y, is_training=is_training, reuse=False))
+        assert self.num_sub_steps - 1 == self._disc_iters
+        generated = tf.split(generated, self._disc_iters)
+        for i in range(self._disc_iters):
+          fs[i]["generated"] = generated[i]
+          del fs[i]["z"]
+        # G step.
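+        # The last batch slice below feeds the generator update; its forward
+        # pass is kept out of the stop_gradient so gradients can flow to G.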
+        z = features["z"][-bs:]
+        sampled_y = None
+        if self.conditional:
+          sampled_y = self._get_one_hot_labels(features["sampled_labels"][-bs:])
+        fs[-1]["generated"] = self.generator(
+            z, y=sampled_y, is_training=is_training, reuse=True)
+        del fs[-1]["z"]
+
+    logging.info("fs=%s, ls=%s", fs, ls)
+    # Create ops for the first D step here to create the variables.
+    with tf.name_scope("disc_step_1"):
+      _create_sub_step_loss(0, reuse=tf.AUTO_REUSE)
+      d_losses = [self.d_loss]
+
+    # Divide trainable variables into a group for D and a group for G.
+    t_vars = tf.trainable_variables()
+    d_vars = [var for var in t_vars if "discriminator" in var.name]
+    g_vars = [var for var in t_vars if "generator" in var.name]
+    if len(t_vars) != len(d_vars) + len(g_vars):
+      logging.error("There are variables that are neither part of G nor D.")
+    self._check_variables(t_vars, d_vars, g_vars)
+
+    d_optimizer = self.d_optimizer(params["use_tpu"])
+    g_optimizer = self.g_optimizer(params["use_tpu"])
+
+    # In the following, each sub step (disc_iters steps on D + one step on G)
+    # depends on the previous sub steps. The optimizer op for each step
+    # depends on all the update ops (from batch norm etc.). Each update op
+    # will still only be executed once.
+    deps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
+
+    # Discriminator training.
+    with tf.control_dependencies(deps):
+      deps.append(d_optimizer.minimize(
+          self.d_loss, var_list=d_vars))
+
+    for sub_step_idx in range(1, self._disc_iters):
+      with tf.name_scope("disc_step_{}".format(sub_step_idx + 1)):
+        with tf.control_dependencies(deps):
+          _create_sub_step_loss(sub_step_idx)
+          deps.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
+        with tf.control_dependencies(deps):
+          d_losses.append(self.d_loss)
+          deps.append(d_optimizer.minimize(
+              self.d_loss, var_list=d_vars))
+
+    # Clean old summaries from previous calls to model_fn().
+    self._tpu_summary = tpu_summaries.TpuSummaries(self._model_dir)
+    for i, d_loss in enumerate(d_losses):
+      self._tpu_summary.scalar("loss/d_{}".format(i), d_loss)
+    if self._experimental_joint_gen_for_disc:
+      fake_images = fs[0]["generated"]
+    else:
+      with tf.name_scope("fake_images"):
+        z = fs[0]["z"]
+        sampled_y = None
+        if self.conditional:
+          sampled_y = self._get_one_hot_labels(fs[0]["sampled_labels"])
+        fake_images = self.generator(
+            z, y=sampled_y, is_training=True, reuse=True)
+    self._add_images_to_summary(fake_images, "fake_images", params)
+    self._add_images_to_summary(fs[0]["images"], "real_images", params)
+
+    # Generator training.
+    with tf.name_scope("gen_step"):
+      with tf.control_dependencies(deps):
+        # This will use the same inputs as the last discriminator step above,
+        # but the new sub-graph will depend on the updates of the
+        # discriminator steps.
+        _create_sub_step_loss(self.num_sub_steps - 1)
+        self._tpu_summary.scalar("loss/g", self.g_loss)
+        deps.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
+      with tf.control_dependencies(deps):
+        train_op = g_optimizer.minimize(self.g_loss, var_list=g_vars,
+                                        global_step=global_step)
+        loss = self.g_loss
+
+    if self._g_use_ema:
+      with tf.name_scope("generator_ema"):
+        logging.info("Creating moving averages of weights: %s", g_vars)
+        # The decay value is set to 0 if we're before the moving-average start
+        # point, so that the EMA vars will be the normal vars.
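+        # I.e. decay = ema_decay * 1{global_step >= ema_start_step}.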
+ decay = self._ema_decay * tf.cast( + tf.greater_equal(global_step, self._ema_start_step), tf.float32) + ema = tf.train.ExponentialMovingAverage(decay=decay) + with tf.control_dependencies([train_op]): + train_op = ema.apply(g_vars) + + d_param_overview = utils.get_parameter_overview(d_vars, limit=None) + g_param_overview = utils.get_parameter_overview(g_vars, limit=None) + logging.info("Discriminator variables:\n%s", d_param_overview) + logging.info("Generator variables:\n%s", g_param_overview) + + return tf.contrib.tpu.TPUEstimatorSpec( + mode=mode, + host_call=self._tpu_summary.get_host_call(), + # Estimator requires a loss which gets displayed on TensorBoard. + # The given Tensor is evaluated but not used to create gradients. + loss=loss, + train_op=train_op) + + def generator(self, z, y, is_training, reuse=False): + """Returns the generator network.""" + architecture_fns = { + c.DCGAN_ARCH: dcgan.Generator, + c.INFOGAN_ARCH: infogan.Generator, + c.RESNET5_BIGGAN_ARCH: resnet5_biggan.Generator, + c.RESNET5_ARCH: resnet5.Generator, + c.RESNET30_ARCH: resnet30.Generator, + c.RESNET_STL: resnet_stl.Generator, + c.RESNET_CIFAR: resnet_cifar.Generator, + c.SNDCGAN_ARCH: sndcgan.Generator, + } + if self._architecture not in architecture_fns: + raise NotImplementedError( + "Architecture {} not implemented.".format(self._architecture)) + generator = architecture_fns[self._architecture]( + image_shape=self._dataset.image_shape) + assert isinstance(generator, abstract_arch.AbstractGenerator) + + # Functionality to learn the label distribution. + if self.conditional and self._fit_label_distribution: + with tf.variable_scope("label_counts", reuse=reuse): + label_counts = tf.get_variable( + "label_counts", + initializer=tf.constant_initializer(1e-6), + shape=self._dataset.num_classes, + dtype=tf.float32, + trainable=False) + num_active_labels = tf.reduce_sum(tf.cast(tf.greater(label_counts, .5), + tf.float32)) + self._tpu_summary.scalar("label_counts/active", num_active_labels) + if is_training: + logging.info("Learning the label distribution by counting.") + new_counts = tf.contrib.tpu.cross_replica_sum(tf.reduce_sum(y, axis=0)) + update_label_counts_op = tf.assign_add(label_counts, new_counts) + tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_label_counts_op) + else: + logging.info("Sampling labels using the label counts.") + normalized_counts = label_counts / tf.reduce_sum(label_counts) + dist = tf.distributions.Categorical(probs=normalized_counts) + sampled_labels = dist.sample(tf.shape(y)[0]) + with tf.control_dependencies([y]): + y = self._get_one_hot_labels(sampled_labels) + return generator(z=z, y=y, is_training=is_training, reuse=reuse) + + def discriminator(self, x, y, is_training, reuse=False): + """Returns the discriminator network.""" + architecture_fns = { + c.DCGAN_ARCH: dcgan.Discriminator, + c.INFOGAN_ARCH: infogan.Discriminator, + c.RESNET5_ARCH: resnet5.Discriminator, + c.RESNET5_BIGGAN_ARCH: resnet5_biggan.Discriminator, + c.RESNET30_ARCH: resnet30.Discriminator, + c.RESNET_STL: resnet_stl.Discriminator, + c.RESNET_CIFAR: resnet_cifar.Discriminator, + c.SNDCGAN_ARCH: sndcgan.Discriminator, + } + if self._architecture not in architecture_fns: + raise NotImplementedError( + "Architecture {} not implemented.".format(self._architecture)) + discriminator = architecture_fns[self._architecture]() + assert isinstance(discriminator, abstract_arch.AbstractDiscriminator) + return discriminator(x=x, y=y, is_training=is_training, reuse=reuse) + + def d_optimizer(self, use_tpu=True): + 
opt = self._d_optimizer_fn(self._d_lr, name="d_")
+    if use_tpu:
+      opt = tf.contrib.tpu.CrossShardOptimizer(opt)
+    return opt
+
+  def g_optimizer(self, use_tpu=True):
+    opt = self._g_optimizer_fn(self._g_lr, name="g_")
+    if use_tpu:
+      opt = tf.contrib.tpu.CrossShardOptimizer(opt)
+    return opt
+
+  def create_loss(self, features, labels, params, is_training=True,
+                  reuse=False):
+    """Build the loss tensors for discriminator and generator.
+
+    This method will set self.d_loss and self.g_loss.
+
+    Args:
+      features: Optional dictionary with inputs to the model ("images" should
+        contain the real images and "z" the noise for the generator).
+      labels: Tensor with labels. Use self._get_one_hot_labels(labels) to get
+        a one hot encoded tensor.
+      params: Dictionary with hyperparameters passed to TPUEstimator.
+        Additionally, TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
+        `tpu_context`. `batch_size` is the batch size for this core.
+      is_training: If True build the model in training mode. If False build the
+        model for inference mode (e.g. use trained averages for batch norm).
+      reuse: Bool, whether to reuse existing variables for the models.
+        This is only used for unrolling discriminator iterations when training
+        on TPU.
+
+    Raises:
+      ValueError: If set of meta/hyper parameters is not supported.
+    """
+    images = features["images"]  # Input images.
+    if self.conditional:
+      y = self._get_one_hot_labels(labels)
+      sampled_y = self._get_one_hot_labels(features["sampled_labels"])
+      all_y = tf.concat([y, sampled_y], axis=0)
+    else:
+      y = None
+      sampled_y = None
+      all_y = None
+
+    if self._experimental_joint_gen_for_disc:
+      assert "generated" in features
+      generated = features["generated"]
+    else:
+      logging.warning("Computing fake images for each sub step separately.")
+      z = features["z"]  # Noise vector.
+      generated = self.generator(
+          z, y=sampled_y, is_training=is_training, reuse=reuse)
+
+    if self._deprecated_split_disc_calls:
+      with tf.name_scope("disc_for_real"):
+        d_real, d_real_logits, _ = self.discriminator(
+            images, y=y, is_training=is_training, reuse=reuse)
+      with tf.name_scope("disc_for_fake"):
+        d_fake, d_fake_logits, _ = self.discriminator(
+            generated, y=sampled_y, is_training=is_training, reuse=True)
+    else:
+      # Compute discriminator output for real and fake images in one batch.
+      all_images = tf.concat([images, generated], axis=0)
+      d_all, d_all_logits, _ = self.discriminator(
+          all_images, y=all_y, is_training=is_training, reuse=reuse)
+      d_real, d_fake = tf.split(d_all, 2)
+      d_real_logits, d_fake_logits = tf.split(d_all_logits, 2)
+
+    self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
+        d_real=d_real, d_fake=d_fake, d_real_logits=d_real_logits,
+        d_fake_logits=d_fake_logits)
+
+    discriminator = functools.partial(self.discriminator, y=y)
+    penalty_loss = penalty_lib.get_penalty_loss(
+        x=images, x_fake=generated, is_training=is_training,
+        discriminator=discriminator, architecture=self._architecture)
+    self.d_loss += self._lambda * penalty_loss
+    self._tpu_summary.scalar("loss/penalty", penalty_loss)
diff --git a/compare_gan/gans/modular_gan_conditional_test.py b/compare_gan/gans/modular_gan_conditional_test.py
new file mode 100644
index 0000000..7a9a1eb
--- /dev/null
+++ b/compare_gan/gans/modular_gan_conditional_test.py
@@ -0,0 +1,109 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for GANs with different regularizers."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+from absl import flags
+from absl.testing import parameterized
+from compare_gan import datasets
+from compare_gan.gans import consts as c
+from compare_gan.gans import loss_lib
+from compare_gan.gans import penalty_lib
+from compare_gan.gans.modular_gan import ModularGAN
+import gin
+import tensorflow as tf
+
+
+FLAGS = flags.FLAGS
+TEST_ARCHITECTURES = [c.RESNET_CIFAR, c.RESNET5_ARCH, c.RESNET5_BIGGAN_ARCH]
+TEST_LOSSES = [loss_lib.non_saturating, loss_lib.wasserstein,
+               loss_lib.least_squares, loss_lib.hinge]
+TEST_PENALTIES = [penalty_lib.no_penalty, penalty_lib.dragan_penalty,
+                  penalty_lib.wgangp_penalty, penalty_lib.l2_penalty]
+
+
+class ModularGANConditionalTest(parameterized.TestCase, tf.test.TestCase):
+
+  def setUp(self):
+    super(ModularGANConditionalTest, self).setUp()
+    FLAGS.data_fake_dataset = True
+    self.model_dir = os.path.join(FLAGS.test_tmpdir, "model_dir")
+    if tf.gfile.Exists(self.model_dir):
+      tf.gfile.DeleteRecursively(self.model_dir)
+
+  def _runSingleTrainingStep(self, architecture, loss_fn, penalty_fn,
+                             labeled_dataset):
+    parameters = {
+        "architecture": architecture,
+        "lambda": 1,
+        "z_dim": 120,
+    }
+    with gin.unlock_config():
+      gin.bind_parameter("penalty.fn", penalty_fn)
+      gin.bind_parameter("loss.fn", loss_fn)
+    run_config = tf.contrib.tpu.RunConfig(
+        model_dir=self.model_dir,
+        tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
+    dataset = datasets.get_dataset("cifar10")
+    gan = ModularGAN(
+        dataset=dataset,
+        parameters=parameters,
+        conditional=True,
+        model_dir=self.model_dir)
+    estimator = gan.as_estimator(run_config, batch_size=2, use_tpu=False)
+    estimator.train(gan.input_fn, steps=1)
+
+  @parameterized.parameters(TEST_ARCHITECTURES)
+  def testSingleTrainingStepArchitectures(self, architecture):
+    self._runSingleTrainingStep(architecture, loss_lib.hinge,
+                                penalty_lib.no_penalty, True)
+
+  @parameterized.parameters(TEST_LOSSES)
+  def testSingleTrainingStepLosses(self, loss_fn):
+    self._runSingleTrainingStep(c.RESNET_CIFAR, loss_fn, penalty_lib.no_penalty,
+                                True)
+
+  @parameterized.parameters(TEST_PENALTIES)
+  def testSingleTrainingStepPenalties(self, penalty_fn):
+    self._runSingleTrainingStep(c.RESNET_CIFAR, loss_lib.hinge, penalty_fn,
+                                True)
+
+  def testUnlabeledDatasetRaisesError(self):
+    parameters = {
+        "architecture": c.RESNET_CIFAR,
+        "lambda": 1,
+        "z_dim": 120,
+    }
+    with gin.unlock_config():
+      gin.bind_parameter("loss.fn", loss_lib.hinge)
+    # Use dataset without labels.
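+    # celeb_a provides no label information, so constructing a conditional
+    # GAN on it is expected to raise a ValueError.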
+ dataset = datasets.get_dataset("celeb_a") + with self.assertRaises(ValueError): + gan = ModularGAN( + dataset=dataset, + parameters=parameters, + conditional=True, + model_dir=self.model_dir) + del gan + + +if __name__ == "__main__": + tf.test.main() diff --git a/compare_gan/gans/modular_gan_test.py b/compare_gan/gans/modular_gan_test.py new file mode 100644 index 0000000..4032f9b --- /dev/null +++ b/compare_gan/gans/modular_gan_test.py @@ -0,0 +1,230 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test various (unconditional) configurations of ModularGAN.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import datetime +import itertools +import os + +from absl import flags +from absl.testing import parameterized +from compare_gan import datasets +from compare_gan.gans import consts as c +from compare_gan.gans import loss_lib +from compare_gan.gans import penalty_lib +from compare_gan.gans.modular_gan import ModularGAN +import gin +import numpy as np +from six.moves import range +import tensorflow as tf + + +FLAGS = flags.FLAGS +TEST_ARCHITECTURES = [c.INFOGAN_ARCH, c.DCGAN_ARCH, c.RESNET_CIFAR, + c.SNDCGAN_ARCH, c.RESNET5_ARCH] +TEST_LOSSES = [loss_lib.non_saturating, loss_lib.wasserstein, + loss_lib.least_squares, loss_lib.hinge] +TEST_PENALTIES = [penalty_lib.no_penalty, penalty_lib.dragan_penalty, + penalty_lib.wgangp_penalty, penalty_lib.l2_penalty] +GENERATOR_TRAINED_IN_STEPS = [ + # disc_iters=1. + [True, True, True], + # disc_iters=2. + [True, False, True], + # disc_iters=3. 
+    [True, False, False],
+]
+
+
+class ModularGANTest(parameterized.TestCase, tf.test.TestCase):
+
+  def setUp(self):
+    super(ModularGANTest, self).setUp()
+    FLAGS.data_fake_dataset = True
+    gin.clear_config()
+    unused_sub_dir = str(datetime.datetime.now().microsecond)
+    self.model_dir = os.path.join(FLAGS.test_tmpdir, unused_sub_dir)
+    assert not tf.gfile.Exists(self.model_dir)
+    self.run_config = tf.contrib.tpu.RunConfig(
+        model_dir=self.model_dir,
+        tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
+
+  def _runSingleTrainingStep(self, architecture, loss_fn, penalty_fn):
+    parameters = {
+        "architecture": architecture,
+        "lambda": 1,
+        "z_dim": 128,
+    }
+    with gin.unlock_config():
+      gin.bind_parameter("penalty.fn", penalty_fn)
+      gin.bind_parameter("loss.fn", loss_fn)
+    dataset = datasets.get_dataset("cifar10")
+    gan = ModularGAN(
+        dataset=dataset,
+        parameters=parameters,
+        model_dir=self.model_dir,
+        conditional="biggan" in architecture)
+    estimator = gan.as_estimator(self.run_config, batch_size=2, use_tpu=False)
+    estimator.train(gan.input_fn, steps=1)
+
+  @parameterized.parameters(TEST_ARCHITECTURES)
+  def testSingleTrainingStepArchitectures(self, architecture):
+    self._runSingleTrainingStep(architecture, loss_lib.hinge,
+                                penalty_lib.no_penalty)
+
+  @parameterized.parameters(TEST_LOSSES)
+  def testSingleTrainingStepLosses(self, loss_fn):
+    self._runSingleTrainingStep(c.RESNET_CIFAR, loss_fn, penalty_lib.no_penalty)
+
+  @parameterized.parameters(TEST_PENALTIES)
+  def testSingleTrainingStepPenalties(self, penalty_fn):
+    self._runSingleTrainingStep(c.RESNET_CIFAR, loss_lib.hinge, penalty_fn)
+
+  def testSingleTrainingStepWithJointGenForDisc(self):
+    parameters = {
+        "architecture": c.RESNET5_BIGGAN_ARCH,
+        "lambda": 1,
+        "z_dim": 120,
+        "disc_iters": 2,
+    }
+    dataset = datasets.get_dataset("cifar10")
+    gan = ModularGAN(
+        dataset=dataset,
+        parameters=parameters,
+        model_dir=self.model_dir,
+        experimental_joint_gen_for_disc=True,
+        conditional=True)
+    estimator = gan.as_estimator(self.run_config, batch_size=2, use_tpu=False)
+    estimator.train(gan.input_fn, steps=1)
+
+  @parameterized.parameters([1, 2, 3])
+  def testSingleTrainingStepDiscItersWithEma(self, disc_iters):
+    parameters = {
+        "architecture": c.RESNET_CIFAR,
+        "lambda": 1,
+        "z_dim": 128,
+        "disc_iters": disc_iters,
+    }
+    gin.bind_parameter("ModularGAN.g_use_ema", True)
+    dataset = datasets.get_dataset("cifar10")
+    gan = ModularGAN(
+        dataset=dataset,
+        parameters=parameters,
+        model_dir=self.model_dir)
+    estimator = gan.as_estimator(self.run_config, batch_size=2, use_tpu=False)
+    estimator.train(gan.input_fn, steps=1)
+    # Check for moving average variables in checkpoint.
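+    # tf.train.ExponentialMovingAverage names its shadow variables
+    # <variable_name>/ExponentialMovingAverage, which is matched below.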
+    checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
+    ema_vars = sorted([v[0] for v in tf.train.list_variables(checkpoint_path)
+                       if v[0].endswith("ExponentialMovingAverage")])
+    tf.logging.info("ema_vars=%s", ema_vars)
+    expected_ema_vars = sorted([
+        "generator/fc_noise/kernel/ExponentialMovingAverage",
+        "generator/fc_noise/bias/ExponentialMovingAverage",
+        "generator/B1/up_conv_shortcut/kernel/ExponentialMovingAverage",
+        "generator/B1/up_conv_shortcut/bias/ExponentialMovingAverage",
+        "generator/B1/up_conv1/kernel/ExponentialMovingAverage",
+        "generator/B1/up_conv1/bias/ExponentialMovingAverage",
+        "generator/B1/same_conv2/kernel/ExponentialMovingAverage",
+        "generator/B1/same_conv2/bias/ExponentialMovingAverage",
+        "generator/B2/up_conv_shortcut/kernel/ExponentialMovingAverage",
+        "generator/B2/up_conv_shortcut/bias/ExponentialMovingAverage",
+        "generator/B2/up_conv1/kernel/ExponentialMovingAverage",
+        "generator/B2/up_conv1/bias/ExponentialMovingAverage",
+        "generator/B2/same_conv2/kernel/ExponentialMovingAverage",
+        "generator/B2/same_conv2/bias/ExponentialMovingAverage",
+        "generator/B3/up_conv_shortcut/kernel/ExponentialMovingAverage",
+        "generator/B3/up_conv_shortcut/bias/ExponentialMovingAverage",
+        "generator/B3/up_conv1/kernel/ExponentialMovingAverage",
+        "generator/B3/up_conv1/bias/ExponentialMovingAverage",
+        "generator/B3/same_conv2/kernel/ExponentialMovingAverage",
+        "generator/B3/same_conv2/bias/ExponentialMovingAverage",
+        "generator/final_conv/kernel/ExponentialMovingAverage",
+        "generator/final_conv/bias/ExponentialMovingAverage",
+    ])
+    self.assertAllEqual(ema_vars, expected_ema_vars)
+
+  @parameterized.parameters(
+      itertools.product([1, 2, 3], [False, True])
+  )
+  def testDiscItersIsUsedCorrectly(self, disc_iters, use_tpu):
+    if disc_iters > 1 and use_tpu:
+      # This combination is not supported here; skip it.
+      return
+    parameters = {
+        "architecture": c.RESNET_CIFAR,
+        "disc_iters": disc_iters,
+        "lambda": 1,
+        "z_dim": 128,
+    }
+    if not use_tpu:
+      parameters["unroll_disc_iters"] = False
+    run_config = tf.contrib.tpu.RunConfig(
+        model_dir=self.model_dir,
+        save_checkpoints_steps=1,
+        tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
+    dataset = datasets.get_dataset("cifar10")
+    gan = ModularGAN(
+        dataset=dataset,
+        parameters=parameters,
+        model_dir=self.model_dir)
+    estimator = gan.as_estimator(run_config, batch_size=2, use_tpu=use_tpu)
+    estimator.train(gan.input_fn, steps=3)
+
+    # Read checkpoints for each training step. If the generator weights
+    # changed, the generator was trained during that step.
+    previous_values = {}
+    generator_trained = []
+    for step in range(0, 4):
+      basename = os.path.join(self.model_dir, "model.ckpt-{}".format(step))
+      self.assertTrue(tf.gfile.Exists(basename + ".index"))
+      ckpt = tf.train.load_checkpoint(basename)
+
+      if step == 0:
+        for name in ckpt.get_variable_to_shape_map():
+          previous_values[name] = ckpt.get_tensor(name)
+        continue
+
+      d_trained = False
+      g_trained = False
+      for name in ckpt.get_variable_to_shape_map():
+        t = ckpt.get_tensor(name)
+        diff = np.abs(previous_values[name] - t).max()
+        previous_values[name] = t
+        if "discriminator" in name and diff > 1e-10:
+          d_trained = True
+        elif "generator" in name and diff > 1e-10:
+          if name.endswith("moving_mean") or name.endswith("moving_variance"):
+            # Note: Even when we don't train the generator the batch norm
+            # values still get updated.
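+            # (Their update ops run as dependencies of every sub step.)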
+            continue
+          tf.logging.info("step %d: %s changed up to %f", step, name, diff)
+          g_trained = True
+      self.assertTrue(d_trained)  # Discriminator is trained every step.
+      generator_trained.append(g_trained)
+
+    self.assertEqual(generator_trained,
+                     GENERATOR_TRAINED_IN_STEPS[disc_iters - 1])
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/compare_gan/gans/modular_gan_tpu_test.py b/compare_gan/gans/modular_gan_tpu_test.py
new file mode 100644
index 0000000..89c58d9
--- /dev/null
+++ b/compare_gan/gans/modular_gan_tpu_test.py
@@ -0,0 +1,98 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests TPU specific parts of ModularGAN."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+from absl import flags
+from absl import logging
+from absl.testing import parameterized
+from compare_gan import datasets
+from compare_gan.gans import consts as c
+from compare_gan.gans.modular_gan import ModularGAN
+import gin
+import tensorflow as tf
+
+
+FLAGS = flags.FLAGS
+
+
+class MockModularGAN(ModularGAN):
+
+  def __init__(self, **kwargs):
+    super(MockModularGAN, self).__init__(**kwargs)
+    self.gen_args = []
+    self.disc_args = []
+
+  def generator(self, z, y, **kwargs):
+    self.gen_args.append(dict(z=z, y=y, **kwargs))
+    return super(MockModularGAN, self).generator(z=z, y=y, **kwargs)
+
+  def discriminator(self, x, y, **kwargs):
+    self.disc_args.append(dict(x=x, y=y, **kwargs))
+    return super(MockModularGAN, self).discriminator(x=x, y=y, **kwargs)
+
+
+class ModularGANTest(parameterized.TestCase, tf.test.TestCase):
+
+  def setUp(self):
+    super(ModularGANTest, self).setUp()
+    FLAGS.data_fake_dataset = True
+    gin.clear_config()
+    self.model_dir = os.path.join(FLAGS.test_tmpdir, "model_dir")
+    if tf.gfile.Exists(self.model_dir):
+      tf.gfile.DeleteRecursively(self.model_dir)
+    self.run_config = tf.contrib.tpu.RunConfig(
+        model_dir=self.model_dir,
+        tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
+
+  @parameterized.parameters([1, 2, 5])
+  def testBatchSize(self, disc_iters, use_tpu=True):
+    parameters = {
+        "architecture": c.RESNET5_ARCH,
+        "lambda": 1,
+        "z_dim": 128,
+        "disc_iters": disc_iters,
+    }
+    batch_size = 16
+    dataset = datasets.get_dataset("cifar10")
+    gan = MockModularGAN(
+        dataset=dataset,
+        parameters=parameters,
+        model_dir=self.model_dir)
+    estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
+                                 use_tpu=use_tpu)
+    estimator.train(gan.input_fn, steps=1)
+    logging.info("gen_args: %s", "\n".join(str(a) for a in gan.gen_args))
+    logging.info("disc_args: %s", "\n".join(str(a) for a in gan.disc_args))
+    num_shards = 2 if use_tpu else 1
+    assert batch_size % num_shards == 0
+    gen_bs = batch_size // num_shards
+    disc_bs = gen_bs * 2  # merged discriminator calls.
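+    # G runs once per sub step (disc_iters D steps + 1 G step) plus once more
+    # for the image summary; D runs once per sub step on the merged
+    # real+fake batch.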
+    self.assertLen(gan.gen_args, disc_iters + 2)
+    for args in gan.gen_args:
+      self.assertAllEqual(args["z"].shape.as_list(), [gen_bs, 128])
+    self.assertLen(gan.disc_args, disc_iters + 1)
+    for args in gan.disc_args:
+      self.assertAllEqual(args["x"].shape.as_list(), [disc_bs, 32, 32, 3])
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/compare_gan/src/params_test.py b/compare_gan/gans/ops.py
similarity index 60%
rename from compare_gan/src/params_test.py
rename to compare_gan/gans/ops.py
index b9b1b7a..1faa0a7 100644
--- a/compare_gan/src/params_test.py
+++ b/compare_gan/gans/ops.py
@@ -13,26 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Tests for compare_gan.params."""
+"""Customized TensorFlow operations."""
 
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-from compare_gan.src import params
+from compare_gan.tpu import tpu_random
 
-import tensorflow as tf
-
-
-class ParamsTest(tf.test.TestCase):
-
-  def testParameterRanges(self):
-    training_parameters = params.GetParameters("WGAN", "wide")
-    self.assertEqual(len(list(training_parameters.keys())), 5)
-
-    training_parameters = params.GetParameters("BEGAN", "wide")
-    self.assertEqual(len(list(training_parameters.keys())), 6)
-
-
-if __name__ == "__main__":
-  tf.test.main()
+random_uniform = tpu_random.uniform
+random_normal = tpu_random.normal
diff --git a/compare_gan/gans/penalty_lib.py b/compare_gan/gans/penalty_lib.py
new file mode 100644
index 0000000..5a7c434
--- /dev/null
+++ b/compare_gan/gans/penalty_lib.py
@@ -0,0 +1,113 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of popular GAN penalties."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from compare_gan import utils
+from compare_gan.gans import consts
+from compare_gan.gans import ops
+import gin
+import tensorflow as tf
+
+
+@gin.configurable(whitelist=[])
+def no_penalty():
+  return tf.constant(0.0)
+
+
+@gin.configurable(whitelist=[])
+def dragan_penalty(x, discriminator, is_training):
+  """Returns the DRAGAN gradient penalty.
+
+  Args:
+    x: Samples from the true distribution, shape [bs, h, w, channels].
+    discriminator: A function mapping an input tensor to a triple (prediction
+      in [0, 1], logits of the last linear layer, and the last ReLU layer).
+    is_training: Boolean, are we in train or eval mode.
+
+  Returns:
+    A tensor with the computed penalty.
+ """ + with tf.name_scope("dragan_penalty"): + _, var = tf.nn.moments(x, axes=list(range(len(x.get_shape())))) + std = tf.sqrt(var) + x_noisy = x + std * (ops.random_uniform(x.shape) - 0.5) + x_noisy = tf.clip_by_value(x_noisy, 0.0, 1.0) + logits = discriminator(x_noisy, is_training=is_training, reuse=True)[1] + gradients = tf.gradients(logits, [x_noisy])[0] + slopes = tf.sqrt(0.0001 + tf.reduce_sum( + tf.square(gradients), reduction_indices=[1, 2, 3])) + gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0)) + return gradient_penalty + + +@gin.configurable(whitelist=[]) +def wgangp_penalty(x, x_fake, discriminator, is_training): + """Returns the WGAN gradient penalty. + + Args: + x: samples from the true distribution, shape [bs, h, w, channels]. + x_fake: samples from the fake distribution, shape [bs, h, w, channels]. + discriminator: A function mapping an imput tensor to a triplet: prediction + in [0, 1], logits of the last linear layer, and the last ReLu layer). + is_training: boolean, are we in train or eval model. + Returns: + A tensor with the computed penalty. + """ + with tf.name_scope("wgangp_penalty"): + alpha = ops.random_uniform(shape=[x.shape[0].value, 1, 1, 1], name="alpha") + interpolates = x + alpha * (x_fake - x) + logits = discriminator(interpolates, is_training=is_training, reuse=True)[1] + gradients = tf.gradients(logits, [interpolates])[0] + slopes = tf.sqrt(0.0001 + tf.reduce_sum( + tf.square(gradients), reduction_indices=[1, 2, 3])) + gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0)) + return gradient_penalty + + +@gin.configurable(whitelist=[]) +def l2_penalty(architecture): + """Returns the L2 penalty for each matrix/vector excluding biases. + + Assumes a specific tensor naming followed throughout the compare_gan library. + We penalize all fully connected, conv2d, and deconv2d layers. + + Args: + architecture: string, has to be in consts.ARCHITECTURES. + Returns: + scalar, the computed penalty. + Raises: + RuntimeError: if the number of layers doesn't match the expected number. + """ + with tf.name_scope("l2_penalty"): + t_vars = tf.trainable_variables() + d_vars = [var for var in t_vars if "discriminator" in var.name] + d_weights = [v for v in d_vars if v.name.endswith("/kernel:0")] + if len(d_weights) != consts.N_DISCRIMINATOR_LAYERS[architecture]: + raise RuntimeError( + "l2_penalty: got %d layers(%s), expected %d layers for %s." % + (len(d_weights), d_weights, + consts.N_DISCRIMINATOR_LAYERS[architecture], architecture)) + return tf.reduce_mean( + [tf.nn.l2_loss(i) for i in d_weights], name="l2_penalty") + + +@gin.configurable("penalty", whitelist=["fn"]) +def get_penalty_loss(fn=no_penalty, **kwargs): + """Returns the penalty loss.""" + return utils.call_with_accepted_args(fn, **kwargs) diff --git a/compare_gan/gans/s3gan.py b/compare_gan/gans/s3gan.py new file mode 100644 index 0000000..a8dafbc --- /dev/null +++ b/compare_gan/gans/s3gan.py @@ -0,0 +1,339 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of Self-Supervised GAN with auxiliary rotation loss."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl import flags
+from absl import logging
+from compare_gan.architectures import arch_ops as ops
+from compare_gan.architectures import resnet5
+from compare_gan.architectures import resnet5_biggan
+from compare_gan.architectures import resnet_cifar
+from compare_gan.architectures import sndcgan
+
+from compare_gan.gans import consts as c
+from compare_gan.gans import loss_lib
+from compare_gan.gans import modular_gan
+from compare_gan.gans import utils
+
+import gin
+import numpy as np
+import tensorflow as tf
+
+FLAGS = flags.FLAGS
+NUM_ROTATIONS = 4
+
+
+@gin.configurable(blacklist=["kwargs"])
+class S3GAN(modular_gan.ModularGAN):
+  """S3GAN which enables auxiliary heads for the modular GAN."""
+
+  def __init__(self, self_supervision="rotation",
+               rotated_batch_fraction=gin.REQUIRED,
+               weight_rotation_loss_d=1.0,
+               weight_rotation_loss_g=0.2,
+               project_y=False,
+               use_predictor=False,
+               use_soft_pred=False,
+               weight_class_loss=1.0,
+               use_soft_labels=False,
+               **kwargs):
+    """Instantiates the S3GAN.
+
+    Args:
+      self_supervision: One of [rotation, none].
+      rotated_batch_fraction: This must be a divisor of the total batch size.
+        The batch size divided by this fraction gives the size of the rotated
+        batch, i.e. the images for which we consider 4 rotations on each TPU
+        core. For GPU training #CORES is 1.
+      weight_rotation_loss_d: Weight for the rotation loss for the
+        discriminator on real images.
+      weight_rotation_loss_g: Weight for the rotation loss for the generator
+        on fake images.
+      project_y: Boolean, whether an embedding layer as in variant 1) should be
+        used.
+      use_predictor: Boolean, whether a predictor (classifier) should be used.
+      use_soft_pred: Boolean, whether soft labels should be used for the
+        predicted label vectors in 1).
+      weight_class_loss: Weight of the (predictor) classification loss added to
+        the discriminator loss.
+      use_soft_labels: Boolean, if true assumes the labels passed for real
+        examples are soft labels and accordingly does not transform them into
+        one-hot vectors.
+      **kwargs: Additional arguments passed to `ModularGAN` constructor.
+    """
+    super(S3GAN, self).__init__(**kwargs)
+    if use_predictor and not project_y:
+      raise ValueError("Using predictor requires projection.")
+    assert self_supervision in {"none", "rotation"}
+    self._self_supervision = self_supervision
+    self._rotated_batch_fraction = rotated_batch_fraction
+    self._weight_rotation_loss_d = weight_rotation_loss_d
+    self._weight_rotation_loss_g = weight_rotation_loss_g
+
+    self._project_y = project_y
+    self._use_predictor = use_predictor
+    self._use_soft_pred = use_soft_pred
+    self._weight_class_loss = weight_class_loss
+
+    self._use_soft_labels = use_soft_labels
+
+    assert not self._deprecated_split_disc_calls, \
+        "Splitting discriminator calls is not supported in S3GAN."
+
+  def discriminator(self, x, y, is_training, reuse=False,
+                    additional_outputs=False):
+    """Discriminator architecture with additional heads.
+
+    Possible heads built on top of feature representation of the discriminator:
+    (1) Classify the image to the correct class.
+    (2) Classify the rotation of the image.
+
+    Args:
+      x: An input image tensor.
+      y: One-hot encoded label. Passing all zeros implies no label was passed.
+      is_training: boolean, whether or not it is a training call.
+      reuse: boolean, whether or not to reuse the variables.
+      additional_outputs: If True, also return the auxiliary logits, the
+        rotation logits and an indicator of whether a label was available.
+
+    Returns:
+      Tuple of 5 Tensors: (1) discriminator predictions (in [0, 1]), (2) the
+      corresponding logits, (3) predictions (logits) of the rotation of x from
+      the auxiliary head, (4) logits of the class prediction from the auxiliary
+      head, (5) indicator vector identifying whether y contained a label.
+    """
+    is_label_available = tf.cast(tf.cast(
+        tf.reduce_sum(y, axis=1, keepdims=True), tf.float32) > 0.5, tf.float32)
+    d_probs, d_logits, x_rep = super(S3GAN, self).discriminator(
+        x, y=y, is_training=is_training, reuse=reuse)
+    assert x_rep.shape.ndims == 2, x_rep.shape
+
+    # Hack to determine if D uses spectral normalization.
+    discriminator = {
+        c.RESNET5_ARCH: resnet5.Discriminator,
+        c.RESNET5_BIGGAN_ARCH: resnet5_biggan.Discriminator,
+        c.RESNET_CIFAR: resnet_cifar.Discriminator,
+        c.SNDCGAN_ARCH: sndcgan.Discriminator,
+    }[self._architecture]()
+    use_sn = discriminator._spectral_norm  # pylint: disable=protected-access
+
+    # Predict the rotation of the image.
+    rotation_logits = None
+    if "rotation" in self._self_supervision:
+      with tf.variable_scope("discriminator_rotation", reuse=reuse):
+        rotation_logits = ops.linear(x_rep, NUM_ROTATIONS,
+                                     scope="score_classify", use_sn=use_sn)
+        logging.info("[Discriminator] rotation head %s -> %s",
+                     x_rep.shape, rotation_logits)
+
+    if not self._project_y:
+      if additional_outputs:
+        return d_probs, d_logits, rotation_logits, None, is_label_available
+      return d_probs, d_logits, x_rep
+
+    # Predict the class of the image.
+    aux_logits = None
+    if self._use_predictor:
+      with tf.variable_scope("discriminator_predictor", reuse=reuse):
+        aux_logits = ops.linear(x_rep, y.shape[1], use_bias=True,
+                                scope="predictor_linear", use_sn=use_sn)
+        # Apply the projection discriminator if needed.
+        if self._use_soft_pred:
+          y_predicted = tf.nn.softmax(aux_logits)
+        else:
+          y_predicted = tf.one_hot(
+              tf.arg_max(aux_logits, 1), aux_logits.shape[1])
+        y = (1.0 - is_label_available) * y_predicted + is_label_available * y
+        y = tf.stop_gradient(y)
+        logging.info("[Discriminator] %s -> aux_logits=%s, y_predicted=%s",
+                     x_rep.shape, aux_logits.shape, y_predicted.shape)
+
+    class_embedding = self.get_class_embedding(
+        y=y, embedding_dim=x_rep.shape[-1].value, reuse=reuse, use_sn=use_sn)
+    d_logits += tf.reduce_sum(class_embedding * x_rep, axis=1, keepdims=True)
+    d_probs = tf.nn.sigmoid(d_logits)
+    if additional_outputs:
+      return d_probs, d_logits, rotation_logits, aux_logits, is_label_available
+    return d_probs, d_logits, x_rep
+
+  def get_class_embedding(self, y, embedding_dim, reuse, use_sn):
+    with tf.variable_scope("discriminator_projection", reuse=reuse):
+      # We do not use ops.linear() below since it does not have an option to
+      # override the initializer.
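+      # Instead the kernel is created directly and spectral norm is applied
+      # to it when the discriminator uses spectral normalization.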
+      kernel = tf.get_variable(
+          "kernel", [y.shape[1], embedding_dim], tf.float32,
+          initializer=tf.initializers.glorot_normal())
+      if use_sn:
+        kernel = ops.spectral_norm(kernel)
+      embedded_y = tf.matmul(y, kernel)
+      logging.info("[Discriminator] embedded_y for projection: %s",
+                   embedded_y.shape)
+      return embedded_y
+
+  def merge_with_rotation_data(self, real, fake, real_labels, fake_labels,
+                               num_rot_examples):
+    """Returns the original data concatenated with the rotated version."""
+
+    # Put all rotation angles in a single batch, the first batch_size are
+    # the original up-right images, followed by rotated_batch_size * 3
+    # rotated images with 3 different angles. For NUM_ROTATIONS=4 and
+    # num_rot_examples=2 we have labels_rotated [0, 0, 1, 1, 2, 2, 3, 3].
+    real_to_rot, fake_to_rot = (
+        real[-num_rot_examples:], fake[-num_rot_examples:])
+    real_rotated = utils.rotate_images(real_to_rot, rot90_scalars=(1, 2, 3))
+    fake_rotated = utils.rotate_images(fake_to_rot, rot90_scalars=(1, 2, 3))
+    all_features = tf.concat([real, real_rotated, fake, fake_rotated], 0)
+    all_labels = None
+    if self.conditional:
+      real_rotated_labels = tf.tile(real_labels[-num_rot_examples:], [3, 1])
+      fake_rotated_labels = tf.tile(fake_labels[-num_rot_examples:], [3, 1])
+      all_labels = tf.concat([real_labels, real_rotated_labels,
+                              fake_labels, fake_rotated_labels], 0)
+    return all_features, all_labels
+
+  def create_loss(self, features, labels, params, is_training=True,
+                  reuse=False):
+    """Build the loss tensors for discriminator and generator.
+
+    This method will set self.d_loss and self.g_loss.
+
+    Args:
+      features: Optional dictionary with inputs to the model ("images" should
+        contain the real images and "z" the noise for the generator).
+      labels: Tensor with labels. These are class indices. Use
+        self._get_one_hot_labels(labels) to get a one hot encoded tensor.
+      params: Dictionary with hyperparameters passed to TPUEstimator.
+        Additionally, TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
+        `tpu_context`. `batch_size` is the batch size for this core.
+      is_training: If True build the model in training mode. If False build the
+        model for inference mode (e.g. use trained averages for batch norm).
+      reuse: Bool, whether to reuse existing variables for the models.
+        This is only used for unrolling discriminator iterations when training
+        on TPU.
+
+    Raises:
+      ValueError: If set of meta/hyper parameters is not supported.
+ """ + real_images = features["images"] + if self.conditional: + if self._use_soft_labels: + assert labels.shape[1] == self._dataset.num_classes, \ + ("Need soft labels of dimension {} but got dimension {}".format( + self._dataset.num_classes, labels.shape[1])) + real_labels = labels + else: + real_labels = self._get_one_hot_labels(labels) + fake_labels = self._get_one_hot_labels(features["sampled_labels"]) + if self._experimental_joint_gen_for_disc: + assert "generated" in features + fake_images = features["generated"] + else: + logging.warning("Computing fake images for every sub step separately.") + fake_images = self.generator( + features["z"], y=fake_labels, is_training=is_training, reuse=reuse) + + bs = real_images.shape[0].value + if self._self_supervision: + assert bs % self._rotated_batch_fraction == 0, ( + "Rotated batch fraction is invalid: %d doesn't divide %d" % + self._rotated_batch_fraction, bs) + rotated_bs = bs // self._rotated_batch_fraction + num_rot_examples = rotated_bs // NUM_ROTATIONS + logging.info("bs=%s, rotated_bs=%s, num_rot_examples=%s", bs, rotated_bs, + num_rot_examples) + assert num_rot_examples > 0 + + # Append the data obtained by rotating the last 'num_rotated_samples' + # from the true and the fake data. + if self._self_supervision == "rotation": + assert num_rot_examples <= bs, (num_rot_examples, bs) + all_features, all_labels = self.merge_with_rotation_data( + real_images, fake_images, real_labels, fake_labels, num_rot_examples) + else: + all_features = tf.concat([real_images, fake_images], 0) + all_labels = None + if self.conditional: + all_labels = tf.concat([real_labels, fake_labels], axis=0) + + d_predictions, d_logits, rot_logits, aux_logits, is_label_available = ( + self.discriminator(all_features, y=all_labels, is_training=is_training, + reuse=reuse, additional_outputs=True)) + + expected_batch_size = 2 * bs + if self._self_supervision == "rotation": + expected_batch_size += 2 * (NUM_ROTATIONS - 1) * num_rot_examples + + if d_logits.shape[0].value != expected_batch_size: + raise ValueError("Batch size unexpected: got %r expected %r" % ( + d_logits.shape[0].value, expected_batch_size)) + + prob_real, prob_fake = tf.split(d_predictions, 2) + prob_real, prob_fake = prob_real[:bs], prob_fake[:bs] + + logits_real, logits_fake = tf.split(d_logits, 2) + logits_real, logits_fake = logits_real[:bs], logits_fake[:bs] + + # Get the true/fake GAN loss. + self.d_loss, _, _, self.g_loss = loss_lib.get_losses( + d_real=prob_real, d_fake=prob_fake, + d_real_logits=logits_real, d_fake_logits=logits_fake) + + # At this point we have the classic GAN loss with possible regularization. + # We now add the rotation loss and summaries if required. + if self._self_supervision == "rotation": + # Extract logits for the rotation task. 
+ rot_real_logits, rot_fake_logits = tf.split(rot_logits, 2) + rot_real_logits = rot_real_logits[-rotated_bs:] + rot_fake_logits = rot_fake_logits[-rotated_bs:] + labels_rotated = tf.constant(np.repeat( + np.arange(NUM_ROTATIONS, dtype=np.int32), num_rot_examples)) + rot_onehot = tf.one_hot(labels_rotated, NUM_ROTATIONS) + rot_real_logp = tf.log(tf.nn.softmax(rot_real_logits) + 1e-10) + rot_fake_logp = tf.log(tf.nn.softmax(rot_fake_logits) + 1e-10) + real_loss = -tf.reduce_mean(tf.reduce_sum(rot_onehot * rot_real_logp, 1)) + fake_loss = -tf.reduce_mean(tf.reduce_sum(rot_onehot * rot_fake_logp, 1)) + self.d_loss += real_loss * self._weight_rotation_loss_d + self.g_loss += fake_loss * self._weight_rotation_loss_g + + rot_real_labels = tf.one_hot( + tf.arg_max(rot_real_logits, 1), NUM_ROTATIONS) + rot_fake_labels = tf.one_hot( + tf.arg_max(rot_fake_logits, 1), NUM_ROTATIONS) + accuracy_real = tf.metrics.accuracy(rot_onehot, rot_real_labels) + accuracy_fake = tf.metrics.accuracy(rot_onehot, rot_fake_labels) + + self._tpu_summary.scalar("loss/real_loss", real_loss) + self._tpu_summary.scalar("loss/fake_loss", fake_loss) + self._tpu_summary.scalar("accuracy/real", accuracy_real) + self._tpu_summary.scalar("accuracy/fake", accuracy_fake) + + # Training the predictor on the features of real data and real labels. + if self._use_predictor: + real_aux_logits, _ = tf.split(aux_logits, 2) + real_aux_logits = real_aux_logits[:bs] + + is_label_available, _ = tf.split(is_label_available, 2) + is_label_available = tf.squeeze(is_label_available[:bs]) + + class_loss_real = tf.losses.softmax_cross_entropy( + real_labels, real_aux_logits, weights=is_label_available) + + # Add the loss to the discriminator + self.d_loss += self._weight_class_loss * class_loss_real + self._tpu_summary.scalar("loss/class_loss_real", class_loss_real) + self._tpu_summary.scalar("label_frac", tf.reduce_mean(is_label_available)) diff --git a/compare_gan/gans/s3gan_test.py b/compare_gan/gans/s3gan_test.py new file mode 100644 index 0000000..4aa7682 --- /dev/null +++ b/compare_gan/gans/s3gan_test.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for S3GANs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import datetime +import os + +from absl import flags +from absl.testing import parameterized +from compare_gan import datasets +from compare_gan.gans import consts as c +from compare_gan.gans import loss_lib +from compare_gan.gans.s3gan import S3GAN +import gin +import tensorflow as tf + +FLAGS = flags.FLAGS + + +class S3GANTest(parameterized.TestCase, tf.test.TestCase): + + def setUp(self): + super(S3GANTest, self).setUp() + FLAGS.data_fake_dataset = True + gin.clear_config() + unused_sub_dir = str(datetime.datetime.now().microsecond) + self.model_dir = os.path.join(FLAGS.test_tmpdir, unused_sub_dir) + assert not tf.gfile.Exists(self.model_dir) + self.run_config = tf.contrib.tpu.RunConfig( + model_dir=self.model_dir, + tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1)) + + @parameterized.parameters( + {"use_predictor": False, "project_y": False}, # unsupervised. + {"use_predictor": False}, # fully supervised. + {"use_predictor": True}, # only oracle. + {"use_predictor": True, "self_supervision": "rotation"}, # oracle + SS. + {"use_predictor": False, "self_supervision": "rotation"}, # only SS. + ) + def testSingleTrainingStepArchitectures( + self, use_predictor, project_y=True, self_supervision="none"): + parameters = { + "architecture": c.RESNET5_BIGGAN_ARCH, + "discriminator_normalization": c.SPECTRAL_NORM, + "lambda": 1, + "z_dim": 120, + } + with gin.unlock_config(): + gin.bind_parameter("ModularGAN.conditional", True) + gin.bind_parameter("loss.fn", loss_lib.hinge) + gin.bind_parameter("S3GAN.use_predictor", use_predictor) + gin.bind_parameter("S3GAN.project_y", project_y) + gin.bind_parameter("S3GAN.self_supervision", self_supervision) + # Fake ImageNet dataset by overriding the properties. + dataset = datasets.get_dataset("imagenet_128") + gan = S3GAN( + dataset=dataset, + parameters=parameters, + model_dir=self.model_dir, + g_optimizer_fn=tf.train.AdamOptimizer, + g_lr=0.0002, + rotated_batch_fraction=2) + estimator = gan.as_estimator(self.run_config, batch_size=8, use_tpu=False) + estimator.train(gan.input_fn, steps=1) + + +if __name__ == "__main__": + tf.test.main() + diff --git a/compare_gan/gans/ssgan.py b/compare_gan/gans/ssgan.py new file mode 100644 index 0000000..114095f --- /dev/null +++ b/compare_gan/gans/ssgan.py @@ -0,0 +1,261 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""Implementation of Self-Supervised GAN with auxiliary rotation loss."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import functools
+
+from absl import flags
+from absl import logging
+from compare_gan.architectures import resnet5
+from compare_gan.architectures import resnet5_biggan
+from compare_gan.architectures import resnet_cifar
+from compare_gan.architectures import sndcgan
+from compare_gan.architectures.arch_ops import linear
+from compare_gan.gans import consts as c
+from compare_gan.gans import loss_lib
+from compare_gan.gans import modular_gan
+from compare_gan.gans import penalty_lib
+from compare_gan.gans import utils
+
+import gin
+import numpy as np
+import tensorflow as tf
+
+FLAGS = flags.FLAGS
+NUM_ROTATIONS = 4
+
+
+@gin.configurable(blacklist=["kwargs"])
+class SSGAN(modular_gan.ModularGAN):
+  """Self-Supervised GAN.
+
+  http://arxiv.org/abs/1811.11212
+  """
+
+  def __init__(self,
+               self_supervision="rotation_gan",
+               rotated_batch_size=gin.REQUIRED,
+               weight_rotation_loss_d=1.0,
+               weight_rotation_loss_g=0.2,
+               **kwargs):
+    """Creates a new Self-Supervised GAN.
+
+    Args:
+      self_supervision: One of [rotation_gan, rotation_only, None]. When it is
+        rotation_only, no GAN loss is used and the model degenerates to a pure
+        rotation classifier.
+      rotated_batch_size: The total number of images per batch for the rotation
+        loss. This must be a multiple of (4 * #CORES) since we consider 4
+        rotations of each image on each TPU core. For GPU training #CORES is 1.
+      weight_rotation_loss_d: Weight for the rotation loss for the
+        discriminator on real images.
+      weight_rotation_loss_g: Weight for the rotation loss for the generator
+        on fake images.
+      **kwargs: Additional arguments passed to `ModularGAN` constructor.
+    """
+    super(SSGAN, self).__init__(**kwargs)
+
+    self._self_supervision = self_supervision
+    self._rotated_batch_size = rotated_batch_size
+    self._weight_rotation_loss_d = weight_rotation_loss_d
+    self._weight_rotation_loss_g = weight_rotation_loss_g
+
+  def discriminator(self, x, y, is_training, reuse=False, rotation_head=False):
+    """Discriminator network with augmented auxiliary predictions.
+
+    Args:
+      x: an input image tensor.
+      y: Tensor with label indices.
+      is_training: boolean, whether or not it is a training call.
+      reuse: boolean, whether or not to reuse the variables.
+      rotation_head: If True add a rotation head on top of the discriminator
+        logits.
+
+    Returns:
+      real_probs: the [0, 1] probability tensor of x being real images.
+      real_scores: the unbounded score tensor of x being real images.
+      rotation_scores: the categorical probability of x being rotated in one of
+        the four directions.
+    """
+    if not rotation_head:
+      return super(SSGAN, self).discriminator(
+          x, y=y, is_training=is_training, reuse=reuse)
+
+    real_probs, real_scores, final = super(SSGAN, self).discriminator(
+        x, y=y, is_training=is_training, reuse=reuse)
+
+    # Hack to get whether to use spectral norm for the rotation head below.
+    # Spectral norm is configured on the architecture (AbstractGenerator or
+    # AbstractDiscriminator). The layer below is part of the architecture.
+    discriminator = {
+        c.RESNET5_ARCH: resnet5.Discriminator,
+        c.RESNET5_BIGGAN_ARCH: resnet5_biggan.Discriminator,
+        c.RESNET_CIFAR: resnet_cifar.Discriminator,
+        c.SNDCGAN_ARCH: sndcgan.Discriminator,
+    }[self._architecture]()
+    use_sn = discriminator._spectral_norm  # pylint: disable=protected-access
+
+    with tf.variable_scope("discriminator_rotation", reuse=reuse):
+      rotation_scores = linear(tf.reshape(final, (tf.shape(x)[0], -1)),
+                               NUM_ROTATIONS,
+                               scope="score_classify",
+                               use_sn=use_sn)
+    return real_probs, real_scores, rotation_scores
+
+  def create_loss(self, features, labels, params, is_training=True,
+                  reuse=False):
+    """Build the loss tensors for discriminator and generator.
+
+    This method will set self.d_loss and self.g_loss.
+
+    Args:
+      features: Optional dictionary with inputs to the model ("images" should
+        contain the real images and "z" the noise for the generator).
+      labels: Tensor with labels. These are class indices. Use
+        self._get_one_hot_labels(labels) to get a one-hot encoded tensor.
+      params: Dictionary with hyperparameters passed to TPUEstimator.
+        Additionally, TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
+        `tpu_context`. `batch_size` is the batch size for this core.
+      is_training: If True build the model in training mode. If False build the
+        model for inference mode (e.g. use trained averages for batch norm).
+      reuse: Bool, whether to reuse existing variables for the models.
+        This is only used for unrolling discriminator iterations when training
+        on TPU.
+
+    Raises:
+      ValueError: If set of meta/hyper parameters is not supported.
+    """
+    images = features["images"]  # Input images.
+    if self.conditional:
+      y = self._get_one_hot_labels(labels)
+      sampled_y = self._get_one_hot_labels(features["sampled_labels"])
+    else:
+      y = None
+      sampled_y = None
+      all_y = None
+
+    if self._experimental_joint_gen_for_disc:
+      assert "generated" in features
+      generated = features["generated"]
+    else:
+      logging.warning("Computing fake images for every sub step separately.")
+      z = features["z"]  # Noise vector.
+      generated = self.generator(
+          z, y=sampled_y, is_training=is_training, reuse=reuse)
+
+    # Batch size per core.
+    bs = params["batch_size"] // self.num_sub_steps
+    num_replicas = params["context"].num_replicas if "context" in params else 1
+    assert self._rotated_batch_size % num_replicas == 0
+    # Rotated batch size per core.
+    rotated_bs = self._rotated_batch_size // num_replicas
+    assert rotated_bs % 4 == 0
+    # Number of images to rotate. Each image gets rotated 3 times.
+    num_rotated_examples = rotated_bs // 4
+    logging.info("num_replicas=%s, bs=%s, rotated_bs=%s, "
+                 "num_rotated_examples=%s, params=%s",
+                 num_replicas, bs, rotated_bs, num_rotated_examples, params)
+
+    # Augment the images with rotation.
+    if "rotation" in self._self_supervision:
+      # Put all rotation angles in a single batch: the first batch_size images
+      # are the original up-right images, followed by rotated_batch_size * 3
+      # rotated images with 3 different angles.
+      assert num_rotated_examples <= bs, (num_rotated_examples, bs)
+      images_rotated = utils.rotate_images(
+          images[-num_rotated_examples:], rot90_scalars=(1, 2, 3))
+      generated_rotated = utils.rotate_images(
+          generated[-num_rotated_examples:], rot90_scalars=(1, 2, 3))
+      # Labels for rotation loss (unrotated and 3 rotated versions). For
+      # NUM_ROTATIONS=4 and num_rotated_examples=2 this is:
+      # [0, 0, 1, 1, 2, 2, 3, 3]
+      rotate_labels = tf.constant(
+          np.repeat(np.arange(NUM_ROTATIONS, dtype=np.int32),
+                    num_rotated_examples))
+      rotate_labels_onehot = tf.one_hot(rotate_labels, NUM_ROTATIONS)
+      all_images = tf.concat([images, images_rotated,
+                              generated, generated_rotated], 0)
+      if self.conditional:
+        y_rotated = tf.tile(y[-num_rotated_examples:], [3, 1])
+        sampled_y_rotated = tf.tile(sampled_y[-num_rotated_examples:], [3, 1])
+        all_y = tf.concat([y, y_rotated, sampled_y, sampled_y_rotated], 0)
+    else:
+      all_images = tf.concat([images, generated], 0)
+      if self.conditional:
+        all_y = tf.concat([y, sampled_y], axis=0)
+
+    # Compute discriminator output for real and fake images in one batch.
+    d_all, d_all_logits, c_all_logits = self.discriminator(
+        all_images, y=all_y, is_training=is_training, reuse=reuse,
+        rotation_head=True)
+    d_real, d_fake = tf.split(d_all, 2)
+    d_real_logits, d_fake_logits = tf.split(d_all_logits, 2)
+    c_real_logits, c_fake_logits = tf.split(c_all_logits, 2)
+
+    # Separate the true/fake scores from the whole rotation batch.
+    d_real_logits = d_real_logits[:bs]
+    d_fake_logits = d_fake_logits[:bs]
+    d_real = d_real[:bs]
+    d_fake = d_fake[:bs]
+
+    self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
+        d_real=d_real, d_fake=d_fake, d_real_logits=d_real_logits,
+        d_fake_logits=d_fake_logits)
+
+    discriminator = functools.partial(self.discriminator, y=y)
+    penalty_loss = penalty_lib.get_penalty_loss(
+        x=images, x_fake=generated, is_training=is_training,
+        discriminator=discriminator, architecture=self._architecture)
+    self.d_loss += self._lambda * penalty_loss
+
+    # Add rotation augmented loss.
+    if "rotation" in self._self_supervision:
+      # Take an equal number of examples for each rotation angle.
+      assert len(c_real_logits.shape.as_list()) == 2, c_real_logits.shape
+      assert len(c_fake_logits.shape.as_list()) == 2, c_fake_logits.shape
+      c_real_logits = c_real_logits[-rotated_bs:]
+      c_fake_logits = c_fake_logits[-rotated_bs:]
+      preds_onreal = tf.cast(tf.argmax(c_real_logits, -1), rotate_labels.dtype)
+      accuracy = tf.reduce_mean(
+          tf.cast(tf.equal(rotate_labels, preds_onreal), tf.float32))
+      c_real_probs = tf.nn.softmax(c_real_logits)
+      c_fake_probs = tf.nn.softmax(c_fake_logits)
+      c_real_loss = - tf.reduce_mean(
+          tf.reduce_sum(rotate_labels_onehot * tf.log(c_real_probs + 1e-10), 1))
+      c_fake_loss = - tf.reduce_mean(
+          tf.reduce_sum(rotate_labels_onehot * tf.log(c_fake_probs + 1e-10), 1))
+      if self._self_supervision == "rotation_only":
+        self.d_loss *= 0.0
+        self.g_loss *= 0.0
+      self.d_loss += c_real_loss * self._weight_rotation_loss_d
+      self.g_loss += c_fake_loss * self._weight_rotation_loss_g
+    else:
+      c_real_loss = 0.0
+      c_fake_loss = 0.0
+      accuracy = tf.zeros([])
+
+    self._tpu_summary.scalar("loss/c_real_loss", c_real_loss)
+    self._tpu_summary.scalar("loss/c_fake_loss", c_fake_loss)
+    self._tpu_summary.scalar("accuracy/d_rotation", accuracy)
+    self._tpu_summary.scalar("loss/d", self.d_loss)
+    self._tpu_summary.scalar("loss/g", self.g_loss)
+    self._tpu_summary.scalar("loss/penalty", penalty_loss)
+
diff --git a/compare_gan/gans/ssgan_test.py b/compare_gan/gans/ssgan_test.py
new file mode 100644
index 0000000..973c673
--- /dev/null
+++ b/compare_gan/gans/ssgan_test.py
@@ -0,0 +1,88 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for SSGANs.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +from absl.testing import parameterized +from compare_gan import datasets +from compare_gan.gans import consts as c +from compare_gan.gans import loss_lib +from compare_gan.gans import penalty_lib +from compare_gan.gans.ssgan import SSGAN +import gin +import tensorflow as tf + +FLAGS = flags.FLAGS +TEST_ARCHITECTURES = [c.RESNET_CIFAR, c.SNDCGAN_ARCH, c.RESNET5_ARCH] +TEST_LOSSES = [loss_lib.non_saturating, loss_lib.hinge] +TEST_PENALTIES = [penalty_lib.no_penalty, penalty_lib.wgangp_penalty] + + +class SSGANTest(parameterized.TestCase, tf.test.TestCase): + + def setUp(self): + super(SSGANTest, self).setUp() + FLAGS.data_fake_dataset = True + self.model_dir = os.path.join(FLAGS.test_tmpdir, "model_dir") + if tf.gfile.Exists(self.model_dir): + tf.gfile.DeleteRecursively(self.model_dir) + + def _runSingleTrainingStep(self, architecture, loss_fn, penalty_fn): + parameters = { + "architecture": architecture, + "discriminator_normalization": c.SPECTRAL_NORM, + "lambda": 1, + "z_dim": 128, + } + with gin.unlock_config(): + gin.bind_parameter("penalty.fn", penalty_fn) + gin.bind_parameter("loss.fn", loss_fn) + run_config = tf.contrib.tpu.RunConfig( + model_dir=self.model_dir, + tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1)) + dataset = datasets.get_dataset("cifar10") + gan = SSGAN( + dataset=dataset, + parameters=parameters, + model_dir=self.model_dir, + g_optimizer_fn=tf.train.AdamOptimizer, + g_lr=0.0002, + rotated_batch_size=4) + estimator = gan.as_estimator(run_config, batch_size=2, use_tpu=False) + estimator.train(gan.input_fn, steps=1) + + @parameterized.parameters(TEST_ARCHITECTURES) + def testSingleTrainingStepArchitectures(self, architecture): + self._runSingleTrainingStep(architecture, loss_lib.hinge, + penalty_lib.no_penalty) + + @parameterized.parameters(TEST_LOSSES) + def testSingleTrainingStepLosses(self, loss_fn): + self._runSingleTrainingStep(c.RESNET_CIFAR, loss_fn, penalty_lib.no_penalty) + + @parameterized.parameters(TEST_PENALTIES) + def testSingleTrainingStepPenalties(self, penalty_fn): + self._runSingleTrainingStep(c.RESNET_CIFAR, loss_lib.hinge, penalty_fn) + + +if __name__ == "__main__": + tf.test.main() diff --git a/compare_gan/gans/utils.py b/compare_gan/gans/utils.py new file mode 100644 index 0000000..dc78660 --- /dev/null +++ b/compare_gan/gans/utils.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities library."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import scipy.misc
+import tensorflow as tf
+
+
+def check_folder(log_dir):
+  if not tf.gfile.IsDirectory(log_dir):
+    tf.gfile.MakeDirs(log_dir)
+  return log_dir
+
+
+def save_images(images, image_path):
+  with tf.gfile.Open(image_path, "wb") as f:
+    scipy.misc.imsave(f, images * 255.0)
+
+
+def rotate_images(images, rot90_scalars=(0, 1, 2, 3)):
+  """Return the input image and its 90, 180, and 270 degree rotations."""
+  images_rotated = [
+      images,  # 0 degrees
+      tf.image.flip_up_down(tf.image.transpose_image(images)),  # 90 degrees
+      tf.image.flip_left_right(tf.image.flip_up_down(images)),  # 180 degrees
+      tf.image.transpose_image(tf.image.flip_up_down(images))  # 270 degrees
+  ]
+
+  results = tf.stack([images_rotated[i] for i in rot90_scalars])
+  results = tf.reshape(results,
+                       [-1] + images.get_shape().as_list()[1:])
+  return results
+
+
+def gaussian(batch_size, n_dim, mean=0., var=1.):
+  return np.random.normal(mean, var, (batch_size, n_dim)).astype(np.float32)
diff --git a/compare_gan/hooks.py b/compare_gan/hooks.py
new file mode 100644
index 0000000..c41a81d
--- /dev/null
+++ b/compare_gan/hooks.py
@@ -0,0 +1,148 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains SessionRunHooks for training."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import time
+
+from absl import logging
+import tensorflow as tf
+
+
+class AsyncCheckpointSaverHook(tf.contrib.tpu.AsyncCheckpointSaverHook):
+  """Saves checkpoints every N steps in an asynchronous thread.
+
+  This is the same as tf.contrib.tpu.AsyncCheckpointSaverHook but guarantees
+  that there will be a checkpoint every `save_steps` steps. This helps to have
+  eval results at fixed step counts, even when training is paused between
+  regular checkpoint intervals.
+  """
+
+  def after_create_session(self, session, coord):
+    super(AsyncCheckpointSaverHook, self).after_create_session(session, coord)
+    # Interruptions to the training job can cause non-regular checkpoints
+    # (between every_steps). Modify last triggered step to point to the last
+    # regular checkpoint step to make sure we trigger on the next regular
+    # checkpoint step.
+    step = session.run(self._global_step_tensor)
+    every_steps = self._timer._every_steps  # pylint: disable=protected-access
+    last_triggered_step = step - step % every_steps
+    self._timer.update_last_triggered_step(last_triggered_step)
+
+
+class EveryNSteps(tf.train.SessionRunHook):
+  """Base class for hooks that execute callbacks every N steps.
+
+  class MyHook(EveryNSteps):
+    def __init__(self, every_n_steps):
+      super(MyHook, self).__init__(every_n_steps)
+
+    def every_n_steps_after_run(self, step, run_context, run_values):
+      # Your Implementation
+
+  If you overwrite begin(), end(), before_run() or after_run(), make sure to
+  call super() at the beginning.
+  """
+
+  def __init__(self, every_n_steps):
+    """Initializes an `EveryNSteps` hook.
+
+    Args:
+      every_n_steps: `int`, the number of steps to allow between callbacks.
+    """
+    self._timer = tf.train.SecondOrStepTimer(every_steps=every_n_steps)
+    self._global_step_tensor = None
+
+  def begin(self):
+    self._global_step_tensor = tf.train.get_global_step()
+    if self._global_step_tensor is None:
+      raise RuntimeError("Global step must be created to use EveryNSteps.")
+
+  def before_run(self, run_context):  # pylint: disable=unused-argument
+    """Overrides `SessionRunHook.before_run`.
+
+    Args:
+      run_context: A `SessionRunContext` object.
+
+    Returns:
+      None or a `SessionRunArgs` object.
+    """
+    return tf.train.SessionRunArgs({"global_step": self._global_step_tensor})
+
+  def after_run(self, run_context, run_values):
+    """Overrides `SessionRunHook.after_run`.
+
+    Args:
+      run_context: A `SessionRunContext` object.
+      run_values: A SessionRunValues object.
+    """
+    step = run_values.results["global_step"]
+    if self._timer.should_trigger_for_step(step):
+      self.every_n_steps_after_run(step, run_context, run_values)
+      self._timer.update_last_triggered_step(step)
+
+  def end(self, sess):
+    step = sess.run(self._global_step_tensor)
+    self.every_n_steps_after_run(step, None, None)
+
+  def every_n_steps_after_run(self, step, run_context, run_values):
+    """Callback after every n-th call to run().
+
+    Args:
+      step: Current global_step value.
+      run_context: A `SessionRunContext` object.
+      run_values: A SessionRunValues object.
+    """
+    raise NotImplementedError("Subclasses of EveryNSteps should implement "
+                              "every_n_steps_after_run().")
+
+
+class ReportProgressHook(EveryNSteps):
+  """SessionRunHook that reports progress to a `TaskManager` instance."""
+
+  def __init__(self, task_manager, max_steps, every_n_steps=100):
+    """Create a new instance of ReportProgressHook.
+
+    Args:
+      task_manager: A `TaskManager` instance that implements report_progress().
+      max_steps: Maximum number of training steps.
+      every_n_steps: How frequently the hook should report progress.
+    """
+    super(ReportProgressHook, self).__init__(every_n_steps=every_n_steps)
+    logging.info("Creating ReportProgressHook to report progress every %d "
+                 "steps.", every_n_steps)
+    self.max_steps = max_steps
+    self.task_manager = task_manager
+    self.start_time = None
+    self.start_step = None
+
+  def every_n_steps_after_run(self, step, run_context, run_values):
+    if self.start_time is None:
+      # First call.
+      self.start_time = time.time()
+      self.start_step = step
+      return
+
+    time_elapsed = time.time() - self.start_time
+    steps_per_sec = float(step - self.start_step) / time_elapsed
+    eta_seconds = (self.max_steps - step) / (steps_per_sec + 0.0000001)
+    message = "{:.1f}% @{:d}, {:.1f} steps/s, ETA: {:.0f} min".format(
+        100 * step / self.max_steps, step, steps_per_sec, eta_seconds / 60)
+    logging.info("Reporting progress: %s", message)
+    self.task_manager.report_progress(message)
diff --git a/compare_gan/main.py b/compare_gan/main.py
new file mode 100644
index 0000000..4b397cf
--- /dev/null
+++ b/compare_gan/main.py
@@ -0,0 +1,134 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
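`EveryNSteps` factors the global-step and timer bookkeeping out of its subclasses, so a new hook only has to provide `every_n_steps_after_run`. A minimal sketch of a custom subclass and of attaching it to training, assuming TF 1.x (`LogStepHook` and the estimator usage are illustrative, not part of the patch):

    import tensorflow as tf
    from compare_gan import hooks

    class LogStepHook(hooks.EveryNSteps):
      """Logs the global step every N steps."""

      def every_n_steps_after_run(self, step, run_context, run_values):
        del run_context, run_values  # Unused.
        tf.logging.info("Reached global step %d.", step)

    # Attached like any SessionRunHook, e.g.:
    # estimator.train(input_fn, max_steps=10000,
    #                 hooks=[LogStepHook(every_n_steps=100)])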
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Binary to train and evaluate one GAN configuration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+# pylint: disable=unused-import
+
+from absl import app
+from absl import flags
+from absl import logging
+
+from compare_gan import datasets
+from compare_gan import runner_lib
+# Import GAN types so that they can be used in Gin configs without module names.
+from compare_gan.gans.gpu_modular_gan import GPUModularGAN
+from compare_gan.gans.modular_gan import ModularGAN
+from compare_gan.gans.s3gan import S3GAN
+from compare_gan.gans.ssgan import SSGAN
+
+# Required import to configure core TF classes and functions.
+import gin
+import gin.tf.external_configurables
+import tensorflow as tf
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string("model_dir", None, "Where to store files.")
+flags.DEFINE_string(
+    "schedule", "train",
+    "Schedule to run. Options: train, continuous_eval.")
+flags.DEFINE_multi_string(
+    "gin_config", [],
+    "List of paths to the config files.")
+flags.DEFINE_multi_string(
+    "gin_bindings", [],
+    "Newline separated list of Gin parameter bindings.")
+flags.DEFINE_string(
+    "score_filename", "scores.csv",
+    "Name of the CSV file with evaluation results in model_dir.")
+
+flags.DEFINE_integer(
+    "num_eval_averaging_runs", 3,
+    "How many times to average FID and IS.")
+flags.DEFINE_integer(
+    "eval_every_steps", 5000,
+    "Evaluate only checkpoints whose step is divisible by this integer.")
+
+flags.DEFINE_bool("use_tpu", None, "Whether running on TPU or not.")
+
+
+def _get_cluster():
+  if not FLAGS.use_tpu:  # pylint: disable=unreachable
+    return None
+  if "TPU_NAME" not in os.environ:
+    raise ValueError("Could not find a TPU. Set TPU_NAME.")
+  return tf.contrib.cluster_resolver.TPUClusterResolver(
+      tpu=os.environ["TPU_NAME"],
+      zone=os.environ.get("TPU_ZONE", None))
+
+
+@gin.configurable("run_config")
+def _get_run_config(tf_random_seed=None,
+                    single_core=False,
+                    iterations_per_loop=1000,
+                    save_checkpoints_steps=5000,
+                    keep_checkpoint_max=1000):
+  """Return `RunConfig` for TPUs."""
+  tpu_config = tf.contrib.tpu.TPUConfig(
+      num_shards=1 if single_core else None,  # None = all cores.
+      iterations_per_loop=iterations_per_loop)
+  return tf.contrib.tpu.RunConfig(
+      model_dir=FLAGS.model_dir,
+      tf_random_seed=tf_random_seed,
+      save_checkpoints_steps=save_checkpoints_steps,
+      keep_checkpoint_max=keep_checkpoint_max,
+      cluster=_get_cluster(),
+      tpu_config=tpu_config)
+
+
+def _get_task_manager():
+  """Returns a TaskManager for this experiment."""
+  score_file = os.path.join(FLAGS.model_dir, FLAGS.score_filename)
+  return runner_lib.TaskManagerWithCsvResults(
+      model_dir=FLAGS.model_dir, score_file=score_file)
+
+
+def main(unused_argv):
+  logging.info("Gin config: %s\nGin bindings: %s",
+               FLAGS.gin_config, FLAGS.gin_bindings)
+  gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_bindings)
+
+  if FLAGS.use_tpu is None:
+    FLAGS.use_tpu = bool(os.environ.get("TPU_NAME", ""))
+    if FLAGS.use_tpu:
+      logging.info("Found TPU %s.", os.environ["TPU_NAME"])
+  run_config = _get_run_config()
+  task_manager = _get_task_manager()
+  options = runner_lib.get_options_dict()
+  runner_lib.run_with_schedule(
+      schedule=FLAGS.schedule,
+      run_config=run_config,
+      task_manager=task_manager,
+      options=options,
+      use_tpu=FLAGS.use_tpu,
+      num_eval_averaging_runs=FLAGS.num_eval_averaging_runs,
+      eval_every_steps=FLAGS.eval_every_steps)
+  logging.info("I'm done with my work, ciao!")
+
+
+if __name__ == "__main__":
+  flags.mark_flag_as_required("model_dir")
+  app.run(main)
diff --git a/compare_gan/src/multi_gan/__init__.py b/compare_gan/metrics/__init__.py
similarity index 82%
rename from compare_gan/src/multi_gan/__init__.py
rename to compare_gan/metrics/__init__.py
index c116485..39eaf76 100644
--- a/compare_gan/src/multi_gan/__init__.py
+++ b/compare_gan/metrics/__init__.py
@@ -13,7 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Init.py."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+# coding=utf-8
diff --git a/compare_gan/metrics/accuracy.py b/compare_gan/metrics/accuracy.py
new file mode 100644
index 0000000..27e335d
--- /dev/null
+++ b/compare_gan/metrics/accuracy.py
@@ -0,0 +1,145 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Discriminator accuracy.
+
+Computes the discriminator's accuracy on (a subset of) the training dataset,
+the test dataset, and a generated dataset. The score is averaged over
+multiple generated datasets and subsets of the training data.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl import logging
+
+from compare_gan import datasets
+from compare_gan import eval_utils
+from compare_gan.metrics import eval_task
+
+import numpy as np
+
+
+class AccuracyTask(eval_task.EvalTask):
+  """Evaluation Task for computing and reporting accuracy."""
+
+  def metric_list(self):
+    return frozenset([
+        "train_accuracy", "test_accuracy", "fake_accuracy", "train_d_loss",
+        "test_d_loss"
+    ])
+
+  def run_in_session(self, options, sess, gan, real_images):
+    del options
+    return compute_accuracy_loss(sess, gan, real_images)
+
+
+def compute_accuracy_loss(sess,
+                          gan,
+                          test_images,
+                          max_train_examples=50000,
+                          num_repeat=5):
+  """Compute discriminator's accuracy and loss on a given dataset.
+
+  Args:
+    sess: Tf.Session object.
+    gan: Any AbstractGAN instance.
+    test_images: numpy array with test images.
+    max_train_examples: How many "train" examples to get from the dataset.
+      In each round, some of them will be randomly selected
+      to evaluate train set accuracy.
+    num_repeat: How many times to repeat the computation.
+      The mean of all the results is reported.
+  Returns:
+    Dict[Text, float] with all the computed scores.
+
+  Raises:
+    ValueError: If the number of test_images is greater than the number of
+      training images returned by the dataset.
+  """
+  logging.info("Evaluating training and test accuracy...")
+  train_images = eval_utils.get_real_images(
+      dataset=datasets.get_dataset(),
+      num_examples=max_train_examples,
+      split="train",
+      failure_on_insufficient_examples=False)
+  if train_images.shape[0] < test_images.shape[0]:
+    raise ValueError("num_train %d must be larger than num_test %d." %
+                     (train_images.shape[0], test_images.shape[0]))
+
+  num_batches = int(np.floor(test_images.shape[0] / gan.batch_size))
+  if num_batches * gan.batch_size < test_images.shape[0]:
+    logging.error("Ignoring the last batch with %d samples / %d epoch size.",
+                  test_images.shape[0] - num_batches * gan.batch_size,
+                  gan.batch_size)
+
+  ret = {
+      "train_accuracy": [],
+      "test_accuracy": [],
+      "fake_accuracy": [],
+      "train_d_loss": [],
+      "test_d_loss": []
+  }
+
+  for _ in range(num_repeat):
+    idx = np.random.choice(train_images.shape[0], test_images.shape[0])
+    bs = gan.batch_size
+    train_subset = [train_images[i] for i in idx]
+    train_predictions, test_predictions, fake_predictions = [], [], []
+    train_d_losses, test_d_losses = [], []
+
+    for i in range(num_batches):
+      z_sample = gan.z_generator(gan.batch_size, gan.z_dim)
+      start_idx = i * bs
+      end_idx = start_idx + bs
+      test_batch = test_images[start_idx : end_idx]
+      train_batch = train_subset[start_idx : end_idx]
+
+      test_prediction, test_d_loss, fake_images = sess.run(
+          [gan.discriminator_output, gan.d_loss, gan.fake_images],
+          feed_dict={
+              gan.inputs: test_batch, gan.z: z_sample
+          })
+      train_prediction, train_d_loss = sess.run(
+          [gan.discriminator_output, gan.d_loss],
+          feed_dict={
+              gan.inputs: train_batch,
+              gan.z: z_sample
+          })
+      fake_prediction = sess.run(
+          gan.discriminator_output,
+          feed_dict={gan.inputs: fake_images})[0]
+
+      train_predictions.append(train_prediction[0])
+      test_predictions.append(test_prediction[0])
+      fake_predictions.append(fake_prediction)
+      train_d_losses.append(train_d_loss)
+      test_d_losses.append(test_d_loss)
+
+    train_predictions = [x >= 0.5 for x in train_predictions]
+    test_predictions = [x >= 0.5 for x in test_predictions]
+    fake_predictions = [x < 0.5 for x in fake_predictions]
+
ret["train_accuracy"].append(np.array(train_predictions).mean()) + ret["test_accuracy"].append(np.array(test_predictions).mean()) + ret["fake_accuracy"].append(np.array(fake_predictions).mean()) + ret["train_d_loss"].append(np.mean(train_d_losses)) + ret["test_d_loss"].append(np.mean(test_d_losses)) + + for key in ret: + ret[key] = np.mean(ret[key]) + + return ret diff --git a/compare_gan/metrics/eval_task.py b/compare_gan/metrics/eval_task.py new file mode 100644 index 0000000..178e87a --- /dev/null +++ b/compare_gan/metrics/eval_task.py @@ -0,0 +1,76 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Abstract class that describes a single evaluation task. + +The tasks can be run in or after session. Each task can result +in a set of metrics. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc + +from absl import flags +import six +import tensorflow as tf + +FLAGS = flags.FLAGS + + +@six.add_metaclass(abc.ABCMeta) +class EvalTask(object): + """Class that describes a single evaluation task. + + For example: compute inception score or compute accuracy. + The classes that inherit from it, should implement the methods below. + """ + + _LABEL = None + + def metric_list(self): + """List of metrics that this class generates. + + These are the only keys that RunXX methods can return in + their output maps. + Returns: + frozenset of strings, which are the names of the metrics that task + computes. + """ + return frozenset(self._LABEL) + + def _create_session(self): + try: + target = FLAGS.master + except AttributeError: + return tf.Session() + return tf.Session(target) + + @abc.abstractmethod + def run_after_session(self, fake_dset, real_dset): + """Runs the task after all the generator calls, after session was closed. + + WARNING: the images here, are in 0..255 range, with 3 color channels. + + Args: + fake_dset: `EvalDataSample` with fake images and inception features. + real_dset: `EvalDataSample` with real images and inception features. + + Returns: + Dict with metric values. The keys must be contained in the set that + "MetricList" method above returns. + """ diff --git a/compare_gan/metrics/fid_score.py b/compare_gan/metrics/fid_score.py new file mode 100644 index 0000000..735f5da --- /dev/null +++ b/compare_gan/metrics/fid_score.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""Implementation of the Frechet Inception Distance.
+
+Implemented as a wrapper around the TF-GAN library (imported below as
+tensorflow_gan). The details can be found in "GANs Trained by a Two
+Time-Scale Update Rule Converge to a Local Nash Equilibrium", Heusel et al.
+[https://arxiv.org/abs/1706.08500].
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl import logging
+
+from compare_gan.metrics import eval_task
+
+import tensorflow as tf
+import tensorflow_gan as tfgan
+
+
+# Special value returned when the FID computation raised an exception.
+FID_CODE_FAILED = 4242.0
+
+
+class FIDScoreTask(eval_task.EvalTask):
+  """Evaluation task for the FID score."""
+
+  _LABEL = "fid_score"
+
+  def run_after_session(self, fake_dset, real_dset):
+    logging.info("Calculating FID.")
+    with tf.Graph().as_default():
+      fake_activations = tf.convert_to_tensor(fake_dset.activations)
+      real_activations = tf.convert_to_tensor(real_dset.activations)
+      fid = tfgan.eval.frechet_classifier_distance_from_activations(
+          real_activations=real_activations,
+          generated_activations=fake_activations)
+      with self._create_session() as sess:
+        fid = sess.run(fid)
+      logging.info("Frechet Inception Distance: %.3f.", fid)
+      return {self._LABEL: fid}
+
+
+def compute_fid_from_activations(fake_activations, real_activations):
+  """Returns the FID based on activations.
+
+  Args:
+    fake_activations: NumPy array with fake activations.
+    real_activations: NumPy array with real activations.
+  Returns:
+    A float, the Frechet Inception Distance.
+  """
+  logging.info("Computing FID score.")
+  assert fake_activations.shape == real_activations.shape
+  with tf.Session(graph=tf.Graph()) as sess:
+    fake_activations = tf.convert_to_tensor(fake_activations)
+    real_activations = tf.convert_to_tensor(real_activations)
+    fid = tfgan.eval.frechet_classifier_distance_from_activations(
+        real_activations=real_activations,
+        generated_activations=fake_activations)
+    return sess.run(fid)
diff --git a/compare_gan/bin/compare_gan_generate_tasks b/compare_gan/metrics/fid_score_test.py
old mode 100755
new mode 100644
similarity index 54%
rename from compare_gan/bin/compare_gan_generate_tasks
rename to compare_gan/metrics/fid_score_test.py
index 19a3532..27e2939
--- a/compare_gan/bin/compare_gan_generate_tasks
+++ b/compare_gan/metrics/fid_score_test.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # coding=utf-8
 # Copyright 2018 Google LLC & Hwalsuk Lee.
 #
@@ -14,35 +13,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-#!/usr/bin/env python
+"""Tests for the FID score."""
 
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import copy
-import collections
-import os
+from compare_gan.metrics import fid_score as fid_score_lib
+import numpy as np
 import tensorflow as tf
 
-from compare_gan.src import generate_tasks_lib
-from compare_gan.src import task_utils
 
-tf.flags.DEFINE_string(
-    "workdir", "/tmp/workdir",
-    "Path to the workdir. This flag is required.")
+class FIDScoreTest(tf.test.TestCase):
 
-tf.flags.DEFINE_string(
-    "experiment", "test",
-    "Type of experiment to generate. This flag is required.")
-
-FLAGS = tf.flags.FLAGS
-
-def main(_):
-  print("Running Experiments Generator for experiment %s" % FLAGS.experiment)
-  tasks = generate_tasks_lib.GetTasks(FLAGS.experiment)
-  task_utils.WriteTasksToDirectories(FLAGS.workdir, tasks)
+  def test_fid_computation(self):
+    real_data = np.ones((100, 2))
+    real_data[:50, 0] = 2
+    gen_data = np.ones((100, 2)) * 9
+    gen_data[50:, 0] = 2
+    # mean(real_data) = [1.5, 1]
+    # Cov(real_data) = [[ 0.2525, 0], [0, 0]]
+    # mean(gen_data) = [5.5, 9]
+    # Cov(gen_data) = [[12.37, 0], [0, 0]]
+    result = fid_score_lib.compute_fid_from_activations(real_data, gen_data)
+    self.assertNear(result, 89.091, 1e-4)
 
 if __name__ == "__main__":
-  tf.app.run()
+  tf.test.main()
diff --git a/compare_gan/metrics/fractal_dimension.py b/compare_gan/metrics/fractal_dimension.py
new file mode 100644
index 0000000..e78c1c9
--- /dev/null
+++ b/compare_gan/metrics/fractal_dimension.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of the fractal dimension metric."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from compare_gan.metrics import eval_task
+
+import numpy as np
+import scipy.spatial
+
+
+class FractalDimensionTask(eval_task.EvalTask):
+  """Fractal dimension metric."""
+
+  _LABEL = "fractal_dimension"
+
+  def run_after_session(self, options, eval_data_fake, eval_data_real=None):
+    print(eval_data_fake)
+    score = compute_fractal_dimension(eval_data_fake.images)
+    return {self._LABEL: score}
+
+
+def compute_fractal_dimension(fake_images,
+                              num_fd_seeds=100,
+                              n_bins=1000,
+                              scale=0.1):
+  """Compute the fractal dimension of fake_images.
+
+  Args:
+    fake_images: an np array of datapoints; the dimensionality and scaling of
+      the images can be arbitrary.
+    num_fd_seeds: number of random centers from which the fractal dimension
+      computation is performed.
+    n_bins: number of bins to split the range of distance values into.
+    scale: the scale of the y interval in the log-log plot for which we apply
+      a linear regression fit.
+
+  Returns:
+    fractal dimension of the dataset.
+  """
+  assert len(fake_images.shape) >= 2
+  assert fake_images.shape[0] >= num_fd_seeds
+
+  num_images = fake_images.shape[0]
+  # To apply the scipy distance function we flatten each datapoint, so the
+  # array becomes 2-dimensional.
+  fake_images = np.reshape(fake_images, (num_images, -1))
+  fake_images_subset = fake_images[np.random.randint(
+      num_images, size=num_fd_seeds)]
+
+  distances = scipy.spatial.distance.cdist(fake_images,
+                                           fake_images_subset).flatten()
+  min_distance = np.min(distances[np.nonzero(distances)])
+  max_distance = np.max(distances)
+  buckets = min_distance * (
+      (max_distance / min_distance)**np.linspace(0, 1, n_bins))
+  # Create a table where the first column corresponds to distances r
+  # and the second column corresponds to the number of points N(r) that lie
+  # within distance r from the random seeds.
+  fd_result = np.zeros((n_bins - 1, 2))
+  fd_result[:, 0] = buckets[1:]
+  fd_result[:, 1] = np.sum(np.less.outer(distances, buckets[1:]), axis=0)
+
+  # We compute the slope of the log-log plot at the middle y value
+  # which is stored in y_val; the linear regression fit is computed on
+  # the part of the plot that corresponds to an interval around y_val
+  # whose size is 2*scale*(total width of the y axis).
+  max_y = np.log(num_images * num_fd_seeds)
+  min_y = np.log(num_fd_seeds)
+  x = np.log(fd_result[:, 0])
+  y = np.log(fd_result[:, 1])
+  y_width = max_y - min_y
+  y_val = min_y + 0.5 * y_width
+
+  start = np.argmax(y > y_val - scale * y_width)
+  end = np.argmax(y > y_val + scale * y_width)
+
+  slope = np.linalg.lstsq(
+      a=np.vstack([x[start:end], np.ones(end - start)]).transpose(),
+      b=y[start:end].reshape(end - start, 1))[0][0][0]
+  return slope
diff --git a/compare_gan/metrics/fractal_dimension_test.py b/compare_gan/metrics/fractal_dimension_test.py
new file mode 100644
index 0000000..2af8bb2
--- /dev/null
+++ b/compare_gan/metrics/fractal_dimension_test.py
@@ -0,0 +1,40 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
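The regression above is a correlation-dimension estimate: on a log-log plot of the pair count N(r) against the radius r, the slope in the scaling region approximates the dimension of the point set. A toy, self-contained version of the same idea, restricted to mid-range radii to avoid the saturated tail (the percentile bounds are an illustrative choice, not taken from the code above):

    import numpy as np
    import scipy.spatial

    points = np.random.uniform(size=(2000, 2))  # Fills the unit square.
    d = scipy.spatial.distance.pdist(points)
    # Fit only where N(r) ~ r**dim plausibly holds.
    lo, hi = np.percentile(d, [1, 30])
    radii = np.logspace(np.log10(lo), np.log10(hi), 20)
    counts = np.array([(d < r).sum() for r in radii])
    slope = np.polyfit(np.log(radii), np.log(counts), 1)[0]
    print(slope)  # Roughly 2 for a filled 2-D square.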
+
+"""Tests for the fractal dimension metric."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from compare_gan.metrics import fractal_dimension as fractal_dimension_lib
+
+import numpy as np
+import tensorflow as tf
+
+
+class FractalDimensionTest(tf.test.TestCase):
+
+  def test_straight_line(self):
+    """The fractal dimension of a 1D line must lie near 1.0."""
+    self.assertAllClose(
+        fractal_dimension_lib.compute_fractal_dimension(
+            np.random.uniform(size=(10000, 1))), 1.0, atol=0.05)
+
+  def test_square(self):
+    """The fractal dimension of a 2D square must lie near 2.0."""
+    self.assertAllClose(
+        fractal_dimension_lib.compute_fractal_dimension(
+            np.random.uniform(size=(10000, 2))), 2.0, atol=0.1)
diff --git a/compare_gan/src/gilbo.py b/compare_gan/metrics/gilbo.py
similarity index 60%
rename from compare_gan/src/gilbo.py
rename to compare_gan/metrics/gilbo.py
index 26ac26d..2e08327 100644
--- a/compare_gan/src/gilbo.py
+++ b/compare_gan/metrics/gilbo.py
@@ -15,150 +15,96 @@
 """Implements GILBO score functions.
 
-See https://arxiv.org/abs/1802.04874 for details.
+Details are available in "GILBO: One Metric to Measure Them All", Alemi and
+Fischer [https://arxiv.org/abs/1802.04874].
 
-See eval_gan_lib.py for working code that calls into this code.
-
-It should be sufficient to call in the following manner:
-```python
-
-dataset = datasets.load_mnist(...)
-gan = gans.GAN(...)
-
-sess = tf.Session(...)
-
-# Train GAN...
-
-outdir = '/desired/output/path'
-checkpoint_path = '/path/to/gan/checkpoints'
-options = dict(
-    'gilbo_learning_rate': 4e-4,
-    ...  # Other GILBO-specific options visible in this file.
-)
-
-gilbo, = TrainGILBO(
-    gan, sess, outdir, checkpoint_path, dataset, options)
-```
+Changelist:
+(6 Feb 2019): Removed VAE.
+(5 Jan 2019): The code is not supported in the latest compare_gan due to major
+  interface changes.
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function -import cPickle as pickle import os -from compare_gan.src import gan_lib -from compare_gan.src.gans.VAE import VAE +from compare_gan import datasets +from compare_gan.metrics import eval_task import matplotlib -matplotlib.use('Agg') +matplotlib.use("Agg") import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top import numpy as np from pstar import plist import scipy.misc +import six.moves.cPickle as pickle import tensorflow as tf - -layers = tf.layers -ds = tf.contrib.distributions +import tensorflow_probability as tfp -def _Regressor(x, z_dim=64, gan_type='GAN'): +layers = tf.layers +ds = tfp.distributions + + +class GILBOTask(eval_task.EvalTask): + """Compute GILBO metric and related consistency metrics.""" + + def __init__(self, outdir, task_workdir, dataset_name): + self.outdir = outdir + self.task_workdir = task_workdir + self.dataset = dataset_name + + def metric_list(self): + return frozenset([ + "gilbo", + "gilbo_train_consistency", + "gilbo_eval_consistency", + "gilbo_self_consistency", + ]) + + def run_in_session(self, options, sess, gan, eval_data_real): + del eval_data_real + result_dict = {} + if options.get("compute_gilbo", False): + (gilbo, gilbo_train_consistency, + gilbo_eval_consistency, gilbo_self_consistency) = train_gilbo( + gan, sess, self.outdir, self.task_workdir, self.dataset, options) + result_dict["gilbo"] = gilbo + result_dict["gilbo_train_consistency"] = gilbo_train_consistency + result_dict["gilbo_eval_consistency"] = gilbo_eval_consistency + result_dict["gilbo_self_consistency"] = gilbo_self_consistency + return result_dict + + +def _build_regressor(x, z_dim=64): """Make the GILBO regressor, which is based off of the GAN discriminator.""" - assert gan_type in ['GAN', 'VAE'] - net = tf.cast(x, tf.float32) net = layers.conv2d(net, 64, 4, 2, activation=tf.nn.leaky_relu) net = layers.conv2d(net, 128, 4, 2, activation=tf.nn.leaky_relu) net = layers.flatten(net) net = layers.dense(net, 1024, activation=tf.nn.leaky_relu) net = layers.dense(net, 2 * z_dim) - - if gan_type == 'VAE': - encoding = ds.Normal( - loc=net[..., :z_dim], - scale=tf.nn.softplus(net[..., z_dim:])) - dist = ds.Independent(encoding, 1) - return dist - else: - # a is the alpha parameter and b is the beta parameter of the Beta - # distribution. - a, b = net[..., :z_dim], net[..., z_dim:2 * z_dim] - - a = 1 + tf.nn.softplus(a - 5) - b = 1 + tf.nn.softplus(b - 5) - - dist = ds.Independent(ds.Beta(a, b), 1) - bijector = ds.bijectors.Affine(-1.0, 2.0) - tdist = ds.TransformedDistribution(distribution=dist, bijector=bijector) - return tdist - - -def _SaveImage(img, filename): - # If img is [H W] or [H W 1], stack into [H W 3] for scipy's api. 
-  if len(img.shape) == 2 or img.shape[-1] == 1:
-    img = np.stack((img.squeeze(),) * 3, -1)
-  with tf.gfile.Open(filename, 'w') as f:
-    scipy.misc.toimage(img, cmin=0.0, cmax=1.0).save(f)
-
-
-def _SaveZHistograms(gan, z_sample, z_pred_dist, gan_type, outdir, step):
-  """Save a histogram for each z dimension as an png in outdir."""
-  fig, axs = plt.subplots(8, 8, figsize=(15, 10))
-
-  pk = 0
-  bins = np.linspace(-1, 1, 70)
-
-  samp = z_sample.eval()
-  z_pred_samp = z_pred_dist.sample(10000).eval({gan.z: samp})
-
-  try:
-    for j in range(64):
-      if gan_type == 'VAE':
-        bins = np.linspace(np.nan_to_num(z_pred_samp[:, pk, j]).min(),
-                           np.nan_to_num(z_pred_samp[:, pk, j]).max(), 70)
-      axs.flat[j].hist(z_pred_samp[:, pk, j], bins, histtype='stepfilled',
-                       normed=True)
-      axs.flat[j].vlines(samp[pk, j], 0, 1.0, linestyle='dashed')
-
-    plt.tight_layout()
-    filename = os.path.join(outdir, 'z_hist_%03d.png' % step)
-    tf.logging.info('Saving z histogram: %s' % filename)
-    with tf.gfile.Open(filename, 'w') as f:
-      fig.savefig(f, dpi='figure')
-  except Exception as e:  # pylint: disable=broad-except
-    tf.logging.info('Caught %r while rendering chart. Ignoring.\n%s\n'
-                    % (type(e), str(e)))
+  # a and b correspond to the alpha and beta parameters of the Beta
+  # distribution.
+  a, b = net[..., :z_dim], net[..., z_dim:2 * z_dim]
+  a = 1 + tf.nn.softplus(a - 5)
+  b = 1 + tf.nn.softplus(b - 5)
+  dist = ds.Independent(ds.Beta(a, b), 1)
+  bijector = ds.bijectors.Affine(-1.0, 2.0)
+  tdist = ds.TransformedDistribution(distribution=dist, bijector=bijector)
+  return tdist
 
 
-def _SaveGILBO(saver, sess, learning_rate, gilbo_step, step, lr, outdir):
-  """Save GILBO model checkpoints, including the current step and lr.
-
-  Args:
-    saver: tf.train.Saver.
-    sess: tf.Session.
-    learning_rate: tf.Variable for the learning rate.
-    gilbo_step: tf.Variable for the current training step.
-    step: integer for the current step, to be saved in the checkpoint.
-    lr: float for the current learning rate, to be saved in the checkpoint.
-    outdir: output directory.
-  """
-  # Save the current learning rate and gilbo training step with the checkpoint.
-  learning_rate.assign(lr).eval()
-  gilbo_step.assign(step).eval()
-  filename = os.path.join(outdir, 'gilbo_model')
-  saver.save(sess, filename, global_step=step)
-
-
-def TrainGILBO(gan, sess, outdir, checkpoint_path, dataset, options):
+def train_gilbo(gan, sess, outdir, checkpoint_path, dataset, options):
   """Build and train GILBO model.
 
   Args:
     gan: GAN object.
     sess: tf.Session.
    outdir: Output directory. A pickle file will be written there.
     checkpoint_path: Path where gan's checkpoints are written. Only used to
       ensure that GILBO files are written to a unique subdirectory of outdir.
     dataset: Name of dataset used to train the GAN.
@@ -166,7 +112,7 @@ def TrainGILBO(gan, sess, outdir, checkpoint_path, dataset, options):
   Returns:
     mean_eval_info: Mean GILBO computed over a large number of images generated
-      by the trained GAN or VAE.
+      by the trained GAN.
     mean_train_consistency: Mean consistency of the trained GILBO model with
       data from the training set.
     mean_eval_consistency: Same consistency measure for the trained model with
@@ -180,64 +126,45 @@ def TrainGILBO(gan, sess, outdir, checkpoint_path, dataset, options):
   """
   uninitialized = sess.run(tf.report_uninitialized_variables())
   if uninitialized:
-    raise ValueError('Model has uninitialized variables!\n%r' % uninitialized)
+    raise ValueError("Model has uninitialized variables!\n%r" % uninitialized)
 
-  outdir = os.path.join(outdir, checkpoint_path.replace('/', '_'))
-  if isinstance(gan, VAE):
-    gan_type = 'VAE'
-  else:
-    gan_type = 'GAN'
+  outdir = os.path.join(outdir, checkpoint_path.replace("/", "_"))
   tf.gfile.MakeDirs(outdir)
 
-  with tf.variable_scope('gilbo'):
+  with tf.variable_scope("gilbo"):
     ones = tf.ones((gan.batch_size, gan.z_dim))
-    # Get a distribution for the prior, depending on whether the model is a VAE
-    # or a GAN.
-    if gan_type == 'VAE':
-      z_dist = ds.Independent(ds.Normal(0.0 * ones, ones), 1)
-    else:
-      z_dist = ds.Independent(ds.Uniform(-ones, ones), 1)
-
+    # Get a distribution for the prior.
+    z_dist = ds.Independent(ds.Uniform(-ones, ones), 1)
     z_sample = z_dist.sample()
-
-    epsneg = np.finfo('float32').epsneg
-
-    if gan_type == 'VAE':
-      ganz_clip = gan.z
-    else:
-      # Clip samples from the GAN uniform prior because the Beta distribution
-      # doesn't include the top endpoint and has issues with the bottom endpoint
-      ganz_clip = tf.clip_by_value(gan.z, -(1 - epsneg), 1 - epsneg)
+    epsneg = np.finfo("float32").epsneg
+    # Clip samples from the GAN uniform prior because the Beta distribution
+    # doesn't include the top endpoint and has issues with the bottom endpoint.
+    ganz_clip = tf.clip_by_value(gan.z, -(1 - epsneg), 1 - epsneg)
 
     # Get generated images from the model.
     fake_images = gan.fake_images
 
     # Build the regressor distribution that encodes images back to predicted
     # samples from the prior.
-    with tf.variable_scope('regressor'):
-      z_pred_dist = _Regressor(fake_images, gan.z_dim, gan_type)
-
+    with tf.variable_scope("regressor"):
+      z_pred_dist = _build_regressor(fake_images, gan.z_dim)
     # Capture the parameters of the distributions for later analysis.
-    if gan_type == 'VAE':
-      dist_p1 = z_pred_dist.distribution.loc
-      dist_p2 = z_pred_dist.distribution.scale
-    else:
-      dist_p1 = z_pred_dist.distribution.distribution.concentration0
-      dist_p2 = z_pred_dist.distribution.distribution.concentration1
+    dist_p1 = z_pred_dist.distribution.distribution.concentration0
+    dist_p2 = z_pred_dist.distribution.distribution.concentration1
 
     # info and avg_info compute the GILBO.
     info = z_pred_dist.log_prob(ganz_clip) - z_dist.log_prob(ganz_clip)
    avg_info = tf.reduce_mean(info)
 
     # Set up training of the GILBO model.
-    lr = options.get('gilbo_learning_rate', 4e-4)
+    lr = options.get("gilbo_learning_rate", 4e-4)
     learning_rate = tf.get_variable(
-        'learning_rate', initializer=lr, trainable=False)
-    gilbo_step = tf.get_variable('gilbo_step', dtype=tf.int32, initializer=0,
+        "learning_rate", initializer=lr, trainable=False)
+    gilbo_step = tf.get_variable("gilbo_step", dtype=tf.int32, initializer=0,
                                  trainable=False)
 
     opt = tf.train.AdamOptimizer(learning_rate)
-    regressor_vars = tf.contrib.framework.get_variables('gilbo/regressor')
+    regressor_vars = tf.contrib.framework.get_variables("gilbo/regressor")
     train_op = opt.minimize(-info, var_list=regressor_vars)
 
   # Initialize the variables we just created.
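The encoder distribution built by `_build_regressor` above is a per-dimension Beta on (0, 1), pushed through an affine bijector onto the prior's support (-1, 1); the GILBO is then the mean of log q(z|x) - log p(z) under that encoder. A standalone sketch of the same construction, assuming an older tensorflow_probability release where `tfp.bijectors.Affine` still exists (shapes are illustrative):

    import tensorflow as tf
    import tensorflow_probability as tfp

    ds = tfp.distributions
    # Concentration parameters, as produced by the regressor head above.
    a = 1 + tf.nn.softplus(tf.zeros([8, 64]) - 5)  # alpha
    b = 1 + tf.nn.softplus(tf.zeros([8, 64]) - 5)  # beta
    beta = ds.Independent(ds.Beta(a, b), 1)
    # Affine(shift=-1, scale=2) maps (0, 1) onto the prior support (-1, 1).
    tdist = ds.TransformedDistribution(
        distribution=beta, bijector=tfp.bijectors.Affine(-1.0, 2.0))
    z = tdist.sample()         # shape [8, 64], values in (-1, 1)
    log_q = tdist.log_prob(z)  # the log q(z|x) term of the GILBO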
@@ -251,58 +178,50 @@ def TrainGILBO(gan, sess, outdir, checkpoint_path, dataset, options):
     checkpoint_path = tf.train.latest_checkpoint(outdir)
     saver.restore(sess, checkpoint_path)
   except ValueError:
     # Failing to restore just indicates that we don't have a valid checkpoint,
     # so we will just start training a fresh GILBO model.
     pass
+  _train_gilbo(sess, gan, saver, learning_rate, gilbo_step, z_sample, avg_info,
+               z_pred_dist, train_op, outdir, options)
 
-  _TrainGILBO(sess, gan, saver, learning_rate, gilbo_step, z_sample, avg_info,
-              z_pred_dist, train_op, gan_type, outdir, options)
-
-  mean_eval_info = _EvalGILBO(
-      sess, gan, z_sample, avg_info, dist_p1, dist_p2, fake_images, outdir,
-      options)
-
+  mean_eval_info = _eval_gilbo(sess, gan, z_sample, avg_info,
+                               dist_p1, dist_p2, fake_images, outdir, options)
   # Collect encoded distributions on the training and eval set in order to do
   # kl-nearest-neighbors on generated samples and measure consistency.
-  train_images = gan_lib.load_dataset(
-      dataset, split_name='train'
-  ).apply(
-      tf.contrib.data.batch_and_drop_remainder(gan.batch_size)
-  ).make_one_shot_iterator().get_next()[0]
-  train_images = tf.reshape(train_images, fake_images.shape)
-
-  eval_images = gan_lib.load_dataset(
-      dataset, split_name='test'
-  ).apply(
-      tf.contrib.data.batch_and_drop_remainder(gan.batch_size)
-  ).make_one_shot_iterator().get_next()[0]
-  eval_images = tf.reshape(eval_images, fake_images.shape)
-
-  mean_train_consistency = _RunGILBOConsistency(
-      train_images, 'train', extract_input_images=0,
+  dataset = datasets.get_dataset(dataset)
+  x_train = dataset.load_dataset(split_name="train", num_threads=1)
+  x_train = x_train.batch(gan.batch_size, drop_remainder=True)
+  x_train = x_train.make_one_shot_iterator().get_next()[0]
+  x_train = tf.reshape(x_train, fake_images.shape)
+
+  x_eval = dataset.load_dataset(split_name="test", num_threads=1)
+  x_eval = x_eval.batch(gan.batch_size, drop_remainder=True)
+  x_eval = x_eval.make_one_shot_iterator().get_next()[0]
+  x_eval = tf.reshape(x_eval, fake_images.shape)
+
+  mean_train_consistency = _run_gilbo_consistency(
+      x_train, "train", extract_input_images=0,
       save_consistency_images=20, num_batches=5, **locals())
-  mean_eval_consistency = _RunGILBOConsistency(
-      eval_images, 'eval', extract_input_images=0,
+  mean_eval_consistency = _run_gilbo_consistency(
+      x_eval, "eval", extract_input_images=0,
       save_consistency_images=20, num_batches=5, **locals())
-  mean_self_consistency = _RunGILBOConsistency(
-      fake_images, 'self', extract_input_images=20,
+  mean_self_consistency = _run_gilbo_consistency(
+      fake_images, "self", extract_input_images=20,
       save_consistency_images=20, num_batches=5, **locals())
-
-  return (mean_eval_info,
-          mean_train_consistency,
-          mean_eval_consistency,
+  return (mean_eval_info, mean_train_consistency, mean_eval_consistency,
           mean_self_consistency)
 
 
-def _TrainGILBO(sess, gan, saver, learning_rate, gilbo_step, z_sample, avg_info,
-                z_pred_dist, train_op, gan_type, outdir, options):
+def _train_gilbo(sess, gan, saver, learning_rate, gilbo_step, z_sample,
+                 avg_info, z_pred_dist, train_op, outdir, options):
+
+  """Run the training process."""
-  lr_scale = options.get('gilbo_lr_scale', 0.5)
-  min_lr = options.get('gilbo_min_lr', 1e-8)
-  min_ai_step_scale = options.get('gilbo_min_ai_step_scale', 0.75)
-  min_ai_step_value = options.get('gilbo_min_ai_step_value', 0.5)
-  max_train_cycles = options.get('gilbo_max_train_cycles', 50)
-  train_steps_per_cycle = options.get('gilbo_train_steps_per_cycle', 10000)
+  lr_scale = options.get("gilbo_lr_scale", 0.5)
+  min_lr = options.get("gilbo_min_lr", 1e-8)
+  min_ai_step_scale = options.get("gilbo_min_ai_step_scale", 0.75)
+  min_ai_step_value = options.get("gilbo_min_ai_step_value", 0.5)
+  max_train_cycles = options.get("gilbo_max_train_cycles", 50)
+  train_steps_per_cycle = options.get("gilbo_train_steps_per_cycle", 10000)
 
   ais = [0.0]  # average gilbos (i is for info)
   min_ai = -2.0
@@ -311,18 +230,18 @@ def _TrainGILBO(sess, gan, saver, learning_rate, gilbo_step, z_sample, avg_info,
     if lr < min_lr:
       break
 
-    _SaveGILBO(saver, sess, learning_rate, gilbo_step, i, lr, outdir)
+    _save_gilbo(saver, sess, learning_rate, gilbo_step, i, lr, outdir)
 
     ai = 0.0
     for j in range(train_steps_per_cycle):
       if j % (train_steps_per_cycle // 10) == 0:
-        tf.logging.info('step:%d, gilbo:%.3f' % (j, ai))
+        tf.logging.info("step:%d, gilbo:%.3f" % (j, ai))
       samp = sess.run(z_sample)
       _, z_info = sess.run(
          [train_op, avg_info], feed_dict={gan.z: samp, learning_rate: lr})
      ai += (z_info - ai) / (j + 1)
 
-    tf.logging.info('cycle:%d gilbo:%.3f min next gilbo:%.3f learning rate:%.3f'
+    tf.logging.info("cycle:%d gilbo:%.3f min next gilbo:%.3f learning rate:%.3f"
                     % (i, ai, min_ai, lr))
 
     if ai < min_ai:
@@ -330,7 +249,7 @@ def _TrainGILBO(sess, gan, saver, learning_rate, gilbo_step, z_sample, avg_info,
       if lr < min_lr:
         break
     if np.isnan(ai):
-      tf.logging.info('NaN GILBO at cycle %d, stopping training early.' % i)
+      tf.logging.info("NaN GILBO at cycle %d, stopping training early." % i)
      break
 
     ais.append(ai)
@@ -344,14 +263,12 @@ def _TrainGILBO(sess, gan, saver, learning_rate, gilbo_step, z_sample, avg_info,
                 (ai - ais[-2]) * min_ai_step_scale)
         )
     )
-
-    _SaveGILBO(saver, sess, learning_rate, gilbo_step, i, lr, outdir)
-
-    _SaveZHistograms(gan, z_sample, z_pred_dist, gan_type, outdir, i)
+    _save_gilbo(saver, sess, learning_rate, gilbo_step, i, lr, outdir)
+    _save_z_histograms(gan, z_sample, z_pred_dist, outdir, i)
 
 
-def _EvalGILBO(sess, gan, z_sample, avg_info, dist_p1, dist_p2, fake_images,
-                outdir, options):
+def _eval_gilbo(sess, gan, z_sample, avg_info, dist_p1, dist_p2, fake_images,
+                outdir, options):
   """Evaluate GILBO on new data from the generative model.
 
   Args:
@@ -371,7 +288,7 @@ def _EvalGILBO(sess, gan, z_sample, avg_info, dist_p1, dist_p2, fake_images,
     The mean GILBO on the evaluation set. Also writes a pickle file saving
     distribution parameters and generated images for later analysis.
""" - eval_steps = options.get('gilbo_eval_steps', 10000) + eval_steps = options.get("gilbo_eval_steps", 10000) z_infos = np.zeros(eval_steps, np.float32) z_dist_p1s, z_dist_p2s, z_fake_images = [], [], [] mean_eval_info = 0 @@ -389,7 +306,7 @@ def _EvalGILBO(sess, gan, z_sample, avg_info, dist_p1, dist_p2, fake_images, z_infos[i] = sess.run(avg_info, feed_dict={gan.z: samp}) if i % (eval_steps // 10) == 0: - tf.logging.info('eval step:%d gilbo:%3.1f' % (i, z_infos[i])) + tf.logging.info("eval step:%d gilbo:%3.1f" % (i, z_infos[i])) if eval_steps: mean_eval_info = np.mean(np.nan_to_num(z_infos)) @@ -398,19 +315,17 @@ def _EvalGILBO(sess, gan, z_sample, avg_info, dist_p1, dist_p2, fake_images, dist_p2=np.array(z_dist_p2s).reshape([-1, 64]), images=np.array(z_fake_images).reshape( [-1] + list(z_fake_images[0].shape[1:]))) - with tf.gfile.Open(os.path.join(outdir, 'eval_dists.p'), 'w') as f: + with tf.gfile.Open(os.path.join(outdir, "eval_dists.p"), "w") as f: pickle.dump(eval_dists, f) - tf.logging.info('eval gilbo:%3.1f' % mean_eval_info) + tf.logging.info("eval gilbo:%3.1f" % mean_eval_info) return mean_eval_info -def _RunGILBOConsistency(input_images, mode, dist_p1, dist_p2, z_pred_dist, - z_sample, gan, sess, outdir, gan_type, dataset, - extract_input_images=0, - save_consistency_images=0, - num_batches=3000, - **unused_kw): +def _run_gilbo_consistency( + input_images, mode, dist_p1, dist_p2, z_pred_dist, + z_sample, gan, sess, outdir, dataset, extract_input_images=0, + save_consistency_images=0, num_batches=3000, **unused_kw): """Measure consistency of the gilbo estimator with the GAN or VAE. Arguments without documentation are variables from the calling function needed @@ -418,7 +333,7 @@ def _RunGILBOConsistency(input_images, mode, dist_p1, dist_p2, z_pred_dist, Args: input_images: Tensor. Dataset images, or images generated by the GAN or VAE. - mode: 'train', 'eval', or 'self'. Which consistency measure to compute. + mode: "train", "eval", or "self". Which consistency measure to compute. dist_p1: dist_p2: z_pred_dist: @@ -426,7 +341,6 @@ def _RunGILBOConsistency(input_images, mode, dist_p1, dist_p2, z_pred_dist, gan: sess: outdir: - gan_type: dataset: extract_input_images: Number of batches to extract, -1 for all. Default: 0. save_consistency_images: Num batches to save, -1 for all. Default: 0. @@ -437,39 +351,22 @@ def _RunGILBOConsistency(input_images, mode, dist_p1, dist_p2, z_pred_dist, Symmetric consistency KL. Additionally saves distribution parameters as a pickle as well as any requested images as pngs to outdir. 
""" - with tf.variable_scope('gilbo'): - with tf.variable_scope('regressor', reuse=True): - z_pred_dist_train = _Regressor(input_images, gan.z_dim, gan_type) - + with tf.variable_scope("gilbo"): + with tf.variable_scope("regressor", reuse=True): + z_pred_dist_train = _build_regressor(input_images, gan.z_dim) z_sample_train = z_pred_dist_train.sample() - dist_p1_ph = tf.placeholder(tf.float32, dist_p1.shape) dist_p2_ph = tf.placeholder(tf.float32, dist_p2.shape) consist_dist_p1_ph = tf.placeholder(tf.float32, dist_p1.shape) consist_dist_p2_ph = tf.placeholder(tf.float32, dist_p2.shape) - - if gan_type == 'VAE': - dist_p1 = z_pred_dist_train.distribution.loc - dist_p2 = z_pred_dist_train.distribution.scale - - consist_z_dist_p1 = z_pred_dist.distribution.loc - consist_z_dist_p2 = z_pred_dist.distribution.scale - - base_dist = ds.Normal - else: - dist_p1 = z_pred_dist_train.distribution.distribution.concentration0 - dist_p2 = z_pred_dist_train.distribution.distribution.concentration1 - - consist_z_dist_p1 = z_pred_dist.distribution.distribution.concentration0 - consist_z_dist_p2 = z_pred_dist.distribution.distribution.concentration1 - - base_dist = ds.Beta - - kl_dist_p = ds.Independent( - base_dist(dist_p1_ph, dist_p2_ph), 1) + dist_p1 = z_pred_dist_train.distribution.distribution.concentration0 + dist_p2 = z_pred_dist_train.distribution.distribution.concentration1 + consist_z_dist_p1 = z_pred_dist.distribution.distribution.concentration0 + consist_z_dist_p2 = z_pred_dist.distribution.distribution.concentration1 + base_dist = ds.Beta + kl_dist_p = ds.Independent(base_dist(dist_p1_ph, dist_p2_ph), 1) kl_dist_q = ds.Independent( base_dist(consist_dist_p1_ph, consist_dist_p2_ph), 1) - consistency_kl = kl_dist_p.kl_divergence(kl_dist_q) consistency_rkl = kl_dist_q.kl_divergence(kl_dist_p) @@ -481,7 +378,6 @@ def _RunGILBOConsistency(input_images, mode, dist_p1, dist_p2, z_pred_dist, while i < num_batches: try: samp = sess.run(z_sample) - z_dist_p1, z_dist_p2, images, train_samp = sess.run( [dist_p1, dist_p2, input_images, z_sample_train], feed_dict={gan.z: samp}) @@ -514,37 +410,37 @@ def _RunGILBOConsistency(input_images, mode, dist_p1, dist_p2, z_pred_dist, filename = os.path.join( outdir, - 'consistency_image_%s_%06d_%06d.png' + "consistency_image_%s_%06d_%06d.png" % (mode, i * gan.batch_size, (i + 1) * gan.batch_size - 1)) img = consistency_images.reshape( [gan.batch_size * consistency_images.shape[1], consistency_images.shape[2], -1]) - _SaveImage(img, filename) + _save_image(img, filename) if extract_input_images: extract_input_images -= 1 - if mode == 'self': + if mode == "self": filename = os.path.join( outdir, - '%s_image_%06d_%06d.png' + "%s_image_%06d_%06d.png" % (mode, i * gan.batch_size, (i + 1) * gan.batch_size - 1)) img = images.reshape( [gan.batch_size * consistency_images.shape[1], consistency_images.shape[2], -1]) - _SaveImage(img, filename) + _save_image(img, filename) else: for j in range(gan.batch_size): filename = os.path.join( - outdir, '..', dataset, - '%s_image_%06d.png' % (mode, i * gan.batch_size + j)) - _SaveImage(images[j], filename) + outdir, "..", dataset, + "%s_image_%06d.png" % (mode, i * gan.batch_size + j)) + _save_image(images[j], filename) if i % 100 == 0: tf.logging.info( - '%s: step:%d consistency KL:%3.1f' % + "%s: step:%d consistency KL:%3.1f" % (mode, i, np.mean(consistency_skls))) i += 1 @@ -561,8 +457,59 @@ def _RunGILBOConsistency(input_images, mode, dist_p1, dist_p2, z_pred_dist, consistency_skl=np.reshape(consistency_skls, [-1, gan.batch_size]), ) 
with tf.gfile.Open( - os.path.join(outdir, '%s_consistency_dists.p' % mode), 'w') as f: + os.path.join(outdir, "%s_consistency_dists.p" % mode), "w") as f: pickle.dump(out_dists, f) return np.mean(consistency_skls) + +def _save_image(img, filename): + # If img is [H W] or [H W 1], stack into [H W 3] for scipy's API. + if len(img.shape) == 2 or img.shape[-1] == 1: + img = np.stack((img.squeeze(),) * 3, -1) + with tf.gfile.Open(filename, "w") as f: + scipy.misc.toimage(img, cmin=0.0, cmax=1.0).save(f) + + +def _save_z_histograms(gan, z_sample, z_pred_dist, outdir, step): + """Save a histogram for each z dimension as a png in outdir.""" + fig, axs = plt.subplots(8, 8, figsize=(15, 10)) + + pk = 0 + bins = np.linspace(-1, 1, 70) + + samp = z_sample.eval() + z_pred_samp = z_pred_dist.sample(10000).eval({gan.z: samp}) + + try: + for j in range(64): + axs.flat[j].hist(z_pred_samp[:, pk, j], bins, histtype="stepfilled", + normed=True) + axs.flat[j].vlines(samp[pk, j], 0, 1.0, linestyle="dashed") + plt.tight_layout() + filename = os.path.join(outdir, "z_hist_%03d.png" % step) + tf.logging.info("Saving z histogram: %s" % filename) + with tf.gfile.Open(filename, "w") as f: + fig.savefig(f, dpi="figure") + except Exception as e: # pylint: disable=broad-except + tf.logging.info("Caught %r while rendering chart. Ignoring.\n%s\n" + % (type(e), str(e))) + + +def _save_gilbo(saver, sess, learning_rate, gilbo_step, step, lr, outdir): + """Save GILBO model checkpoints, including the current step and lr. + + Args: + saver: tf.train.Saver. + sess: tf.Session. + learning_rate: tf.Variable for the learning rate. + gilbo_step: tf.Variable for the current training step. + step: integer for the current step, to be saved in the checkpoint. + lr: float for the current learning rate, to be saved in the checkpoint. + outdir: output directory. + """ + # Save the current learning rate and gilbo training step with the checkpoint. + learning_rate.assign(lr).eval() + gilbo_step.assign(step).eval() + filename = os.path.join(outdir, "gilbo_model") + saver.save(sess, filename, global_step=step) diff --git a/compare_gan/src/image_similarity.py b/compare_gan/metrics/image_similarity.py similarity index 89% rename from compare_gan/src/image_similarity.py rename to compare_gan/metrics/image_similarity.py index 799c31f..fe39d18 100644 --- a/compare_gan/src/image_similarity.py +++ b/compare_gan/metrics/image_similarity.py @@ -19,8 +19,6 @@ used for monitoring how various models learn to reconstruct images or may be used directly as the final objective to be optimized. -This file was adapted from here to suit generative model evaluation: -cs/research/vision/piedpiper/brain/lib/image/metrics.py How to use these metrics: * MS-SSIM: For GANS, MS-SSIM can be used to detect mode collapse by looking at @@ -42,7 +40,7 @@ class samples are being compared. In the unconditional setting, it can be import tensorflow as tf -def VerifyCompatibleImageShapes(img1, img2): +def verify_compatible_shapes(img1, img2): """Checks if two image tensors are compatible for metric computation. This function checks if two sets of images have ranks at least 3, and if the @@ -84,7 +82,7 @@ def VerifyCompatibleImageShapes(img1, img2): _SSIM_K2 = 0.03 -def _SSIMHelper(x, y, reducer, max_val, compensation=1.0): +def _ssim_helper(x, y, reducer, max_val, compensation=1.0): r"""Helper function for SSIM. SSIM estimates covariances with weighted sums, e.g., normalized Gaussian blur.
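  (Standard SSIM factors, restated here for reference: with means mu_x, mu_y, variances sigma_x**2, sigma_y**2 and covariance cov_xy estimated by those weighted sums, the helper returns luminance = (2*mu_x*mu_y + c1) / (mu_x**2 + mu_y**2 + c1) and cs = (2*cov_xy + c2) / (sigma_x**2 + sigma_y**2 + c2), where c1 = (_SSIM_K1 * max_val)**2 and c2 = (_SSIM_K2 * max_val)**2.)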
@@ -139,7 +137,7 @@ def _SSIMHelper(x, y, reducer, max_val, compensation=1.0): return luminance, cs -def FSpecialGauss(size, sigma): +def f_special_gauss(size, sigma): """Function to mimic the 'fspecial' gaussian MATLAB function.""" size = tf.convert_to_tensor(size, tf.int32) sigma = tf.convert_to_tensor(sigma) @@ -156,7 +154,7 @@ def FSpecialGauss(size, sigma): return tf.reshape(g, shape=[size, size, 1, 1]) -def _SSIMIndexPerChannel( +def _ssim_index_per_channel( img1, img2, filter_size, filter_width, max_val=255.0): """Computes SSIM index between img1 and img2 per color channel. @@ -184,19 +182,13 @@ def _SSIMIndexPerChannel( shape1, shape2 = tf.shape_n([img1, img2]) - # If the images are bigger than the filter size, reduce the filter size - # to the smallest image size. - # This is consistent with the implementation in - # third_party/tensorflow_models/compression/msssim.py - # but is not present in the original algorithm. - filter_size = tf.reduce_min( tf.concat([tf.expand_dims(filter_size, axis=0), shape1[-3:-1], shape2[-3:-1]], axis=0)) - kernel = FSpecialGauss(filter_size, filter_sigma) + kernel = f_special_gauss(filter_size, filter_sigma) kernel = tf.tile(kernel, multiples=[1, 1, shape1[-1], 1]) # The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`, @@ -210,7 +202,7 @@ def reducer(x): # pylint: disable=invalid-name y = tf.nn.depthwise_conv2d(x, kernel, strides=[1] * 4, padding='VALID') return tf.reshape(y, tf.concat([shape[:-3], tf.shape(y)[1:]], 0)) - luminance, cs = _SSIMHelper(img1, img2, reducer, max_val, compensation) + luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation) # Average over the second and the third from the last: height, width. axes = tf.constant([-3, -2], dtype=tf.int32) @@ -224,27 +216,27 @@ def reducer(x): # pylint: disable=invalid-name _MSSSIM_WEIGHTS = (.0448, 0.2856, 0.3001, 0.2363, 0.1333) -def MultiscaleSSIM( +def multiscale_ssim( img1, img2, filter_size=11, filter_width=1.5, max_val=255.0): """Computes MS-SSIM with power factors from Wang paper.""" - return _MultiscaleSSIMHelper(img1, img2, - filter_size=filter_size, - filter_width=filter_width, - max_val=max_val, - power_factors=_MSSSIM_WEIGHTS) + return _multiscale_ssim_helper(img1, img2, + filter_size=filter_size, + filter_width=filter_width, + max_val=max_val, + power_factors=_MSSSIM_WEIGHTS) -def MultiscaleSSIMUnweighted( +def multiscale_ssim_unweighted( img1, img2, filter_size=11, filter_width=1.5, max_val=255.0): """Computes unweighted MS-SSIM with power factors from Zhao paper.""" - return _MultiscaleSSIMHelper(img1, img2, - filter_size=filter_size, - filter_width=filter_width, - max_val=max_val, - power_factors=[1, 1, 1, 1, 1]) + return _multiscale_ssim_helper(img1, img2, + filter_size=filter_size, + filter_width=filter_width, + max_val=max_val, + power_factors=[1, 1, 1, 1, 1]) -def _MultiscaleSSIMHelper( +def _multiscale_ssim_helper( img1, img2, filter_size, filter_width, power_factors, max_val=255.0): """Computes the MS-SSIM between img1 and img2. @@ -273,7 +265,7 @@ def _MultiscaleSSIMHelper( shape1[-3:].merge_with(shape2[-3:]) with tf.name_scope(None, 'MS-SSIM', [img1, img2]): - shape1, shape2, checks = VerifyCompatibleImageShapes(img1, img2) + shape1, shape2, checks = verify_compatible_shapes(img1, img2) with tf.control_dependencies(checks): img1 = tf.identity(img1) @@ -324,7 +316,7 @@ def do_pad(images, remainder): # pylint: disable=invalid-name ] # Overwrite previous ssim value since we only need the last one. 
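  # (For reference: the per-scale values collected here are combined in the
  #  standard MS-SSIM way, multiplying cs at every scale and the full SSIM at
  #  the coarsest scale, each raised to the corresponding entry of
  #  power_factors.)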
- ssim, cs = _SSIMIndexPerChannel( + ssim, cs = _ssim_index_per_channel( *imgs, filter_size=filter_size, filter_width=filter_width, max_val=max_val) diff --git a/compare_gan/metrics/inception_score.py b/compare_gan/metrics/inception_score.py new file mode 100644 index 0000000..7ec94d7 --- /dev/null +++ b/compare_gan/metrics/inception_score.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Implementation of the Inception Score. + +Implemented as a wrapper around the tensorflow_gan library. The details can be +found in "Improved Techniques for Training GANs", Salimans et al. +[https://arxiv.org/abs/1606.03498]. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging + +from compare_gan.metrics import eval_task +import tensorflow as tf +import tensorflow_gan as tfgan + + +class InceptionScoreTask(eval_task.EvalTask): + """Task that computes inception score for the generated images.""" + + _LABEL = "inception_score" + + def run_after_session(self, fake_dset, real_dset): + del real_dset + logging.info("Computing inception score.") + with tf.Graph().as_default(): + fake_logits = tf.convert_to_tensor(fake_dset.logits) + inception_score = tfgan.eval.classifier_score_from_logits(fake_logits) + with self._create_session() as sess: + inception_score = sess.run(inception_score) + logging.info("Inception score: %.3f", inception_score) + return {self._LABEL: inception_score} diff --git a/compare_gan/src/jacobian_conditioning.py b/compare_gan/metrics/jacobian_conditioning.py similarity index 60% rename from compare_gan/src/jacobian_conditioning.py rename to compare_gan/metrics/jacobian_conditioning.py index b73be8d..2c39100 100644 --- a/compare_gan/src/jacobian_conditioning.py +++ b/compare_gan/metrics/jacobian_conditioning.py @@ -13,20 +13,78 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Utils to compute the Jacobian of a function and condition number. +"""Implementation of the Jacobian Conditioning metrics. -Used to compute the diagnistic statistics from Odena et al. (2018), -https://arxiv.org/pdf/1802.08768.pdf. +The details can be found in "Is Generator Conditioning Causally Related to +GAN Performance?", Odena et al. [https://arxiv.org/abs/1802.08768]. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from compare_gan.metrics import eval_task + import numpy as np import tensorflow as tf +class GeneratorConditionNumberTask(eval_task.EvalTask): + """Computes the generator condition number. + + Computes the condition number for the metric tensor of the generator Jacobian. + This condition number is computed locally for each z sample in a minibatch. + Returns the mean log condition number and standard deviation across the + minibatch. + + Follows the methods in https://arxiv.org/abs/1802.08768.
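+
+  (Here the metric tensor is J^T J for the generator Jacobian J = dG(z)/dz, and
+  the reported quantity is its log condition number,
+  log(lambda_max / lambda_min), computed separately at each z sample; see
+  compute_generator_condition_number below.)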
+ """ + + _CONDITION_NUMBER_COUNT = "log_condition_number_count" + _CONDITION_NUMBER_MEAN = "log_condition_number_mean" + _CONDITION_NUMBER_STD = "log_condition_number_std" + + def metric_list(self): + return frozenset([ + self._CONDITION_NUMBER_COUNT, self._CONDITION_NUMBER_MEAN, + self._CONDITION_NUMBER_STD + ]) + + def run_in_session(self, options, sess, gan, real_images): + del options, real_images + result_dict = {} + result = compute_generator_condition_number(sess, gan) + result_dict[self._CONDITION_NUMBER_COUNT] = len(result) + result_dict[self._CONDITION_NUMBER_MEAN] = np.mean(result) + result_dict[self._CONDITION_NUMBER_STD] = np.std(result) + return result_dict + + +def compute_generator_condition_number(sess, gan): + """Computes the generator condition number. + + Computes the Jacobian of the generator in session, then postprocesses to get + the condition number. + + Args: + sess: tf.Session object. + gan: AbstractGAN object, that is already present in the current tf.Graph. + + Returns: + A list of length gan.batch_size. Each element is the condition number + computed at a single z sample within a minibatch. + """ + shape = gan.fake_images.get_shape().as_list() + flat_generator_output = tf.reshape( + gan.fake_images, [gan.batch_size, np.prod(shape[1:])]) + tf_jacobian = compute_jacobian( + xs=gan.z, fx=flat_generator_output) + z_sample = gan.z_generator(gan.batch_size, gan.z_dim) + np_jacobian = sess.run(tf_jacobian, feed_dict={gan.z: z_sample}) + result_dict = analyze_jacobian(np_jacobian) + return result_dict["metric_tensor"]["log_condition_number"] + + def compute_jacobian(xs, fx): """Computes df/dx matrix. @@ -109,9 +167,7 @@ def analyze_jacobian(jacobian_array): # Reshapes to have a dummy batch dimension. mean_metric_tensor = np.reshape(mean_metric_tensor, (1,) + metric_tensor.shape[1:]) - return { "metric_tensor": _analyze_metric_tensor(metric_tensor), "mean_metric_tensor": _analyze_metric_tensor(mean_metric_tensor) } - diff --git a/compare_gan/src/jacobian_conditioning_test.py b/compare_gan/metrics/jacobian_conditioning_test.py similarity index 90% rename from compare_gan/src/jacobian_conditioning_test.py rename to compare_gan/metrics/jacobian_conditioning_test.py index 07ea754..3c2e0ef 100644 --- a/compare_gan/src/jacobian_conditioning_test.py +++ b/compare_gan/metrics/jacobian_conditioning_test.py @@ -19,16 +19,17 @@ from __future__ import division from __future__ import print_function -from compare_gan.src import jacobian_conditioning - +from compare_gan.metrics import jacobian_conditioning import mock import numpy as np +from six.moves import range import tensorflow as tf + _BATCH_SIZE = 32 -def slow_jacobian(xs, fx): +def SlowJacobian(xs, fx): """Computes df/dx matrix. As jacobian_conditioning.compute_jacobian, but explicitly loops over @@ -41,7 +42,6 @@ def slow_jacobian(xs, fx): Returns: df/dx tensor. 
""" - fxs = tf.unstack(fx, axis=-1) grads = [tf.gradients(fx_i, xs) for fx_i in fxs] grads = [grad[0] for grad in grads] @@ -51,7 +51,7 @@ def slow_jacobian(xs, fx): class JacobianConditioningTest(tf.test.TestCase): - def testJacobianSimpleCase(self): + def test_jacobian_simple_case(self): x = tf.random_normal([_BATCH_SIZE, 2]) W = tf.constant([[2., -1.], [1.5, 1.]]) # pylint: disable=invalid-name f = tf.matmul(x, W) @@ -64,13 +64,13 @@ def testJacobianSimpleCase(self): expected = tf.tile([[[2, 1.5], [-1, 1]]], [_BATCH_SIZE, 1, 1]) self.assertAllClose(jacobian, expected) - def testJacobianAgainstSlowVersion(self): + def test_jacobian_against_slow_version(self): x = tf.random_normal([_BATCH_SIZE, 2]) h1 = tf.contrib.layers.fully_connected(x, 20) h2 = tf.contrib.layers.fully_connected(h1, 20) f = tf.contrib.layers.fully_connected(h2, 10) - j_slow_tensor = slow_jacobian(xs=x, fx=f) + j_slow_tensor = SlowJacobian(xs=x, fx=f) j_fast_tensor = jacobian_conditioning.compute_jacobian(xs=x, fx=f) with tf.Session() as sess: @@ -78,7 +78,7 @@ def testJacobianAgainstSlowVersion(self): j_fast, j_slow = sess.run([j_fast_tensor, j_slow_tensor]) self.assertAllClose(j_fast, j_slow) - def testJacobianNumerically(self): + def test_jacobian_numerically(self): x = tf.random_normal([_BATCH_SIZE, 2]) h1 = tf.contrib.layers.fully_connected(x, 20) h2 = tf.contrib.layers.fully_connected(h1, 20) @@ -92,7 +92,7 @@ def testJacobianNumerically(self): # Test 10 random elements. for _ in range(10): - # Pick a random element of jacobian to test. + # Pick a random element of Jacobian to test. batch_idx = np.random.randint(_BATCH_SIZE) x_idx = np.random.randint(2) f_idx = np.random.randint(10) @@ -114,8 +114,8 @@ def testJacobianNumerically(self): rtol=1e-3, atol=1e-3) - def testAnalyzeMetricTensor(self): - # Assumes numpy works, just tests that output shapes are as expected. + def test_analyze_metric_tensor(self): + # Assumes NumPy works, just tests that output shapes are as expected. jacobian = np.random.normal(0, 1, (_BATCH_SIZE, 2, 10)) metric_tensor = np.matmul(np.transpose(jacobian, [0, 2, 1]), jacobian) result_dict = jacobian_conditioning._analyze_metric_tensor(metric_tensor) @@ -124,7 +124,7 @@ def testAnalyzeMetricTensor(self): self.assertAllEqual(result_dict['log_condition_number'].shape, [_BATCH_SIZE]) - def testAnalyzeJacobian(self): + def test_analyze_jacobian(self): m = mock.patch.object( jacobian_conditioning, '_analyze_metric_tensor', new=lambda x: x) m.start() diff --git a/compare_gan/metrics/kid_score.py b/compare_gan/metrics/kid_score.py new file mode 100644 index 0000000..dc4fa02 --- /dev/null +++ b/compare_gan/metrics/kid_score.py @@ -0,0 +1,149 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Implementation of the KID score. + +The details can be found in "Demystifying MMD GANs", Binkowski et al. +[https://arxiv.org/abs/1801.01401]. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +from compare_gan.metrics import eval_task + +import numpy as np +import tensorflow as tf + + +class KIDScoreTask(eval_task.EvalTask): + """Evaluation task for the KID score.""" + + _LABEL = "kid_score" + + def run_after_session(self, fake_dset, real_dset): + score = kid(fake_dset.activations, real_dset.activations) + return {self._LABEL: score} + + +def kid(fake_activations, + real_activations, + max_batch_size=1024, + dtype=None, + return_stderr=False): + """Unbiased estimator of the Kernel Inception Distance. + + As defined by https://arxiv.org/abs/1801.01401. + + If return_stderr, also returns an estimate of the standard error, i.e. the + standard deviation of the KID estimator. Returns nan if the number + of batches is too small (< 5); for more reliable estimates, one could use + the asymptotic variance estimate given in https://arxiv.org/abs/1611.04488. + + Uses a block estimator, as in https://arxiv.org/abs/1307.1954, with blocks + no larger than max_batch_size. This is slightly different than the authors' + provided code, but is also unbiased (and provides more-valid a variance + estimate). + + NOTE: the blocking code assumes that real_activations and + fake_activations are in random order. If real_activations is sorted + in a meaningful order, the estimator will be biased. + + Args: + fake_activations: [batch, num_features] tensor with inception features. + real_activations: [batch, num_features] tensor with inception features. + max_batch_size: Batches to compute the KID. + dtype: Type used by the computations. + return_stderr: If true, also returns the std_error from the KID computation. + + Returns: + KID score (and optionally std error). + """ + real_activations.get_shape().assert_has_rank(2) + fake_activations.get_shape().assert_has_rank(2) + + # need to know dimension for the kernel, and batch size to split things + real_activations.get_shape().assert_is_fully_defined() + fake_activations.get_shape().assert_is_fully_defined() + + n_real, dim = real_activations.get_shape().as_list() + n_gen, dim2 = fake_activations.get_shape().as_list() + assert dim2 == dim + + # tensorflow_gan forces doubles for FID, but I don't think we need that here + if dtype is None: + dtype = real_activations.dtype + assert fake_activations.dtype == dtype + else: + real_activations = tf.cast(real_activations, dtype) + fake_activations = tf.cast(fake_activations, dtype) + + # split into largest approximately-equally-sized blocks + n_bins = int(math.ceil(max(n_real, n_gen) / max_batch_size)) + bins_r = np.full(n_bins, int(math.ceil(n_real / n_bins))) + bins_g = np.full(n_bins, int(math.ceil(n_gen / n_bins))) + bins_r[:(n_bins * bins_r[0]) - n_real] -= 1 + bins_g[:(n_bins * bins_r[0]) - n_gen] -= 1 + assert bins_r.min() >= 2 + assert bins_g.min() >= 2 + + inds_r = tf.constant(np.r_[0, np.cumsum(bins_r)]) + inds_g = tf.constant(np.r_[0, np.cumsum(bins_g)]) + + dim_ = tf.cast(dim, dtype) + + def get_kid_batch(i): + """Computes KID on a given batch of features. + + Takes real_activations[ind_r[i] : ind_r[i+1]] and + fake_activations[ind_g[i] : ind_g[i+1]]. + + Args: + i: is the index of the batch. + + Returns: + KID for the given batch. 
+ """ + r_s = inds_r[i] + r_e = inds_r[i + 1] + r = real_activations[r_s:r_e] + m = tf.cast(r_e - r_s, dtype) + + g_s = inds_g[i] + g_e = inds_g[i + 1] + g = fake_activations[g_s:g_e] + n = tf.cast(r_e - r_s, dtype) + + # Could probably do this a bit faster... + k_rr = (tf.matmul(r, r, transpose_b=True) / dim_ + 1)**3 + k_rg = (tf.matmul(r, g, transpose_b=True) / dim_ + 1)**3 + k_gg = (tf.matmul(g, g, transpose_b=True) / dim_ + 1)**3 + return ( + -2 * tf.reduce_mean(k_rg) + (tf.reduce_sum(k_rr) - tf.trace(k_rr)) / + (m * (m - 1)) + (tf.reduce_sum(k_gg) - tf.trace(k_gg)) / (n * (n - 1))) + + ests = tf.map_fn( + get_kid_batch, np.arange(n_bins), dtype=dtype, back_prop=False) + + if return_stderr: + if n_bins < 5: + return tf.reduce_mean(ests), np.nan + mn, var = tf.nn.moments(ests, [0]) + return mn, tf.sqrt(var / n_bins) + else: + return tf.reduce_mean(ests) diff --git a/compare_gan/src/ms_ssim_score.py b/compare_gan/metrics/ms_ssim_score.py similarity index 56% rename from compare_gan/src/ms_ssim_score.py rename to compare_gan/metrics/ms_ssim_score.py index 16d3dc3..17cd304 100644 --- a/compare_gan/src/ms_ssim_score.py +++ b/compare_gan/metrics/ms_ssim_score.py @@ -13,34 +13,67 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""MS-SSIM metrics for image diversity evaluation. +"""Implementation of the MS-SSIM metric. -More details could be found from section 5.3: -https://arxiv.org/pdf/1710.08446.pdf +The details on the application of this metric to GANs can be found in +Section 5.3 of "Many Paths to Equilibrium: GANs Do Not Need to Decrease a +Divergence At Every Step", Fedus*, Rosca* et al. +[https://arxiv.org/abs/1710.08446]. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import itertools +from absl import logging -from compare_gan.src import image_similarity +from compare_gan.metrics import eval_task +from compare_gan.metrics import image_similarity import numpy as np from six.moves import range import tensorflow as tf -logging = tf.logging +class MultiscaleSSIMTask(eval_task.EvalTask): + """Task that computes MSSIMScore for generated images.""" -def get_metric_function(generated_images, num_batches): + _LABEL = "ms_ssim" + + def run_after_session(self, options, eval_data_fake, eval_data_real=None): + del options, eval_data_real + score = _compute_multiscale_ssim_score(eval_data_fake.images) + return {self._LABEL: score} + + +def _compute_multiscale_ssim_score(fake_images): + """Compute ms-ssim score .""" + batch_size = 64 + with tf.Graph().as_default(): + fake_images_batch = tf.train.shuffle_batch( + [tf.convert_to_tensor(fake_images, dtype=tf.float32)], + capacity=16*batch_size, + min_after_dequeue=8*batch_size, + num_threads=4, + enqueue_many=True, + batch_size=batch_size) + + # Following section 5.3 of https://arxiv.org/pdf/1710.08446.pdf, we only + # evaluate 5 batches of the generated images. + eval_fn = compute_msssim( + generated_images=fake_images_batch, num_batches=5) + with tf.train.MonitoredTrainingSession() as sess: + score = eval_fn(sess) + return score + + +def compute_msssim(generated_images, num_batches): """Get a fn returning the ms ssim score for generated images. Args: generated_images: TF Tensor of shape [batch_size, dim, dim, 3] which evaluates to a batch of generated images. Should be in range [0..255]. - num_eval_images: Number of (generated/ground_truth) images to evaluate. + num_batches: Number of batches to consider. 
Returns: eval_fn: a function which takes a session as an argument and returns the @@ -60,13 +93,13 @@ def get_metric_function(generated_images, num_batches): # Compute the mean of the scores (but ignore the 'identical' images - which # should get 1.0 from the MultiscaleSSIM) - score = tf.reduce_sum(image_similarity.MultiscaleSSIM(pair1, - pair2)) - batch_size + score = tf.reduce_sum(image_similarity.multiscale_ssim(pair1, pair2)) + score -= batch_size score = tf.div(score, batch_size * batch_size - batch_size) # Define a function which wraps some session.run calls to generate a large # number of images and compute multiscale ssim metric on them. - def eval_fn(session): + def _eval_fn(session): """Function which wraps session.run calls to compute given metric.""" logging.info("Computing MS-SSIM score...") scores = [] @@ -75,5 +108,4 @@ def eval_fn(session): result = np.mean(scores) return result - - return eval_fn + return _eval_fn diff --git a/compare_gan/src/ms_ssim_score_test.py b/compare_gan/metrics/ms_ssim_score_test.py similarity index 89% rename from compare_gan/src/ms_ssim_score_test.py rename to compare_gan/metrics/ms_ssim_score_test.py index 008eb34..0776f8e 100644 --- a/compare_gan/src/ms_ssim_score_test.py +++ b/compare_gan/metrics/ms_ssim_score_test.py @@ -13,11 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Tests for the MS-SSIM score.""" + from __future__ import absolute_import from __future__ import division from __future__ import print_function -from compare_gan.src import ms_ssim_score +from compare_gan.metrics import ms_ssim_score import tensorflow as tf @@ -31,8 +33,7 @@ def test_on_one_vs_07_vs_zero_images(self): tf.ones([64, 64, 3]) * 0.7, tf.zeros([64, 64, 3]), ]) - metric = ms_ssim_score.get_metric_function(generated_images, 1) - + metric = ms_ssim_score.compute_msssim(generated_images, 1) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) result = metric(sess) diff --git a/compare_gan/src/prd_score.py b/compare_gan/metrics/prd_score.py similarity index 98% rename from compare_gan/src/prd_score.py rename to compare_gan/metrics/prd_score.py index 04d2aee..4e21937 100644 --- a/compare_gan/src/prd_score.py +++ b/compare_gan/metrics/prd_score.py @@ -17,8 +17,10 @@ Given a sample from the true and the fake distribution embedded in some feature space (say, Inception), it computes the precision and recall via the algorithm -presented in [arxiv.org/abs/1806.00035]. Finally, one can plot the resulting -curves for different models. +presented in "Assessing Generative Models via Precision and Recall", +Sajjadi et al. [https://arxiv.org/abs/1806.00035]. + +Finally, one can plot the resulting curves for different models. Typical usage example: diff --git a/compare_gan/src/prd_score_test.py b/compare_gan/metrics/prd_score_test.py similarity index 99% rename from compare_gan/src/prd_score_test.py rename to compare_gan/metrics/prd_score_test.py index b02313f..bb544c9 100644 --- a/compare_gan/src/prd_score_test.py +++ b/compare_gan/metrics/prd_score_test.py @@ -20,7 +20,7 @@ from __future__ import print_function import unittest -from compare_gan.src import prd_score as prd +from compare_gan.metrics import prd_score as prd import numpy as np diff --git a/compare_gan/runner_lib.py b/compare_gan/runner_lib.py new file mode 100644 index 0000000..2aad6a3 --- /dev/null +++ b/compare_gan/runner_lib.py @@ -0,0 +1,354 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Binary to train and evaluate a single GAN configuration.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import csv +import os +import re +import time + +from absl import flags +from absl import logging +from compare_gan import datasets +from compare_gan import eval_gan_lib +from compare_gan import hooks +from compare_gan.gans import utils +from compare_gan.metrics import fid_score as fid_score_lib +from compare_gan.metrics import inception_score as inception_score_lib +import gin.tf +import numpy as np +import six +import tensorflow as tf + + +FLAGS = flags.FLAGS + + +class _DummyParserDelegate(gin.config_parser.ParserDelegate): + """Dummy class required to parse Gin configs. + + Our use case (just get the config as dictionary) does not require real + implementations of the two methods. + """ + + def configurable_reference(self, scoped_name, evaluate): + return scoped_name + + def macro(self, scoped_name): + return scoped_name + + +def _parse_gin_config(config_path): + """Parses a Gin config into a dictionary. All values are strings.""" + with tf.gfile.Open(config_path) as f: + config_str = f.read() + parser = gin.config_parser.ConfigParser(config_str, _DummyParserDelegate()) + config = {} + for statement in parser: + if not isinstance(statement, gin.config_parser.ImportStatement): + name = statement.scope + "/" if statement.scope else "" + name = name + statement.selector + "." + statement.arg_name + config[name] = statement.value + return config + + +@gin.configurable("options") +def get_options_dict(batch_size=gin.REQUIRED, + gan_class=gin.REQUIRED, + architecture=gin.REQUIRED, + training_steps=gin.REQUIRED, + discriminator_normalization=None, + lamba=1, + disc_iters=1, + z_dim=128): + """Parse legacy options from Gin configurations into a Python dict. + + Args: + batch_size: The (global) batch size to use. On TPUs each core will get a + fraction of this. + gan_class: Reference to the GAN class to use. This must implement the + AbstractGAN interface. + architecture: Name of the architecture to use for G and D. This should be + a value from consts.ARCHITECTURES and be supported by the GAN class. + training_steps: The number of training steps. These are discriminator steps. + discriminator_normalization: Deprecated. Ignored, but kept to read old + configs. + lamba: Weight for gradient penalty. + disc_iters: How often the discriminator is trained before training G for one + step. G will be trained for `training_steps // disc_iters` steps. + z_dim: Length of the latent variable z fed to the generator. + + Returns: + A Python dictionary with the options. + """ + del discriminator_normalization + return { + "use_tpu": FLAGS.use_tpu, # For compatibility with AbstractGAN. + "batch_size": batch_size, + "gan_class": gan_class, + "architecture": architecture, + "training_steps": training_steps, + "lambda": lamba, # Different spelling intended.
+ "disc_iters": disc_iters, + "z_dim": z_dim, + } + + +class TaskManager(object): + """Interface for managing a task.""" + + def __init__(self, model_dir): + self._model_dir = model_dir + + @property + def model_dir(self): + return self._model_dir + + def mark_training_done(self): + with tf.gfile.Open(os.path.join(self.model_dir, "TRAIN_DONE"), "w") as f: + f.write("") + + def is_training_done(self): + return tf.gfile.Exists(os.path.join(self.model_dir, "TRAIN_DONE")) + + def add_eval_result(self, checkpoint_path, result_dict, default_value): + pass + + def get_checkpoints_with_results(self): + return set() + + def unevaluated_checkpoints(self, timeout=0, eval_every_steps=None): + """Generator for checkpoints without evaluation results. + + Args: + timeout: Optional timeout for waiting for new checkpoints. Set this to + do continious evaluation. + eval_every_steps: Only evaluate checkpoints from steps divisible by this + integer. + + Yields: + Path to checkpoints that have not yet been evaluated. + """ + logging.info("Looking for checkpoints in %s", self._model_dir) + evaluated_checkpoints = self.get_checkpoints_with_results() + last_eval = time.time() + while True: + unevaluated_checkpoints = [] + checkpoint_state = tf.train.get_checkpoint_state(self.model_dir) + if checkpoint_state: + checkpoints = set(checkpoint_state.all_model_checkpoint_paths) + # Remove already evaluated checkpoints and sort ascending by step + # number. + unevaluated_checkpoints = checkpoints - evaluated_checkpoints + step_and_ckpt = sorted( + [(int(x.split("-")[-1]), x) for x in unevaluated_checkpoints]) + if eval_every_steps: + step_and_ckpt = [(step, ckpt) for step, ckpt in step_and_ckpt + if step > 0 and step % eval_every_steps == 0] + unevaluated_checkpoints = [ckpt for _, ckpt in step_and_ckpt] + logging.info( + "Found checkpoints: %s\nEvaluated checkpoints: %s\n" + "Unevaluated checkpoints: %s", checkpoints, evaluated_checkpoints, + unevaluated_checkpoints) + for checkpoint_path in unevaluated_checkpoints: + yield checkpoint_path + if unevaluated_checkpoints: + evaluated_checkpoints |= set(unevaluated_checkpoints) + last_eval = time.time() + continue + # No new checkpoints, timeout or stop if training finished. Otherwise + # wait 1 minute. 
+ if time.time() - last_eval > timeout or self.is_training_done(): + break + time.sleep(60) + + def report_progress(self, message): + pass + + +class TaskManagerWithCsvResults(TaskManager): + """Task Manager that writes results to a CSV file.""" + + def __init__(self, model_dir, score_file=None): + super(TaskManagerWithCsvResults, self).__init__(model_dir) + if score_file is None: + score_file = os.path.join(model_dir, "scores.csv") + self._score_file = score_file + + def _get_config_for_step(self, step): + """Returns the latest operative config for the global step as a dictionary.""" + saved_configs = tf.gfile.Glob( + os.path.join(self.model_dir, "operative_config-*.gin")) + get_step = lambda fn: int(re.findall(r"operative_config-(\d+).gin", fn)[0]) + config_steps = [get_step(fn) for fn in saved_configs] + assert config_steps + last_config_step = sorted([s for s in config_steps if s <= step])[-1] + config_path = os.path.join( + self.model_dir, "operative_config-{}.gin".format(last_config_step)) + return _parse_gin_config(config_path) + + def add_eval_result(self, checkpoint_path, result_dict, default_value): + step = os.path.basename(checkpoint_path).split("-")[-1] + config = self._get_config_for_step(step) + csv_header = ( + ["checkpoint_path", "step"] + sorted(result_dict) + sorted(config)) + write_header = not tf.gfile.Exists(self._score_file) + if write_header: + with tf.gfile.Open(self._score_file, "w") as f: + writer = csv.DictWriter(f, fieldnames=csv_header, extrasaction="ignore") + writer.writeheader() + row = dict(checkpoint_path=checkpoint_path, step=step, **config) + for k, v in six.iteritems(result_dict): + if isinstance(v, float): + v = "{:.3f}".format(v) + row[k] = v + with tf.gfile.Open(self._score_file, "a") as f: + writer = csv.DictWriter(f, fieldnames=csv_header, extrasaction="ignore") + writer.writerow(row) + + def get_checkpoints_with_results(self): + if not tf.gfile.Exists(self._score_file): + return set() + with tf.gfile.Open(self._score_file) as f: + reader = csv.DictReader(f) + return {r["checkpoint_path"] for r in reader} + + +def _run_eval(module_spec, checkpoints, task_manager, run_config, + use_tpu, num_averaging_runs): + """Evaluates the given checkpoints and adds results to a result writer. + + Args: + module_spec: `ModuleSpec` of the model. + checkpoints: Generator for checkpoint paths. + task_manager: `TaskManager`. init_eval() will be called before adding + results. + run_config: `RunConfig` to use. Values for master and tpu_config are + currently ignored. + use_tpu: Whether to use TPU for evaluation. + num_averaging_runs: Determines how many times each metric is computed. + """ + # By default, we compute FID and Inception scores. Other tasks defined in + # the metrics folder (such as the one in metrics/kid_score.py) can be added + # to this list if desired.
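+  # For example, KID could be computed as well by importing
+  # compare_gan.metrics.kid_score as kid_score_lib alongside the metric
+  # imports above and adding kid_score_lib.KIDScoreTask() to this list
+  # (illustrative; not enabled by default).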
+ eval_tasks = [ + inception_score_lib.InceptionScoreTask(), + fid_score_lib.FIDScoreTask() + ] + logging.info("eval_tasks: %s", eval_tasks) + + for checkpoint_path in checkpoints: + step = os.path.basename(checkpoint_path).split("-")[-1] + if step == 0: + continue + export_path = os.path.join(run_config.model_dir, "tfhub", str(step)) + if not tf.gfile.Exists(export_path): + module_spec.export(export_path, checkpoint_path=checkpoint_path) + default_value = -1.0 + try: + result_dict = eval_gan_lib.evaluate_tfhub_module( + export_path, eval_tasks, use_tpu=use_tpu, + num_averaging_runs=num_averaging_runs) + except ValueError as nan_found_error: + result_dict = {} + logging.exception(nan_found_error) + default_value = eval_gan_lib.NAN_DETECTED + + logging.info("Evaluation result for checkpoint %s: %s (default value: %s)", + checkpoint_path, result_dict, default_value) + task_manager.add_eval_result(checkpoint_path, result_dict, default_value) + + +def run_with_schedule(schedule, run_config, task_manager, options, use_tpu, + num_eval_averaging_runs=1, eval_every_steps=-1): + """Run the schedule with the given options. + + Available schedules: + - train: Train up to options["training_steps"], continuing from existing + checkpoints if available. + - eval_after_train: First train up to options["training_steps"] then + evaluate all checkpoints. + - continuous_eval: Wait for new checkpoints and evaluate them as they + become available. This is meant to run in parallel with a job running + the training schedule but can also run after it. + + Args: + schedule: Schedule to run. One of: train, eval_after_train, continuous_eval. + run_config: `tf.contrib.tpu.RunConfig` to use. + task_manager: `TaskManager` for this run. + options: Python dictionary with run parameters. + use_tpu: Boolean whether to use TPU. + num_eval_averaging_runs: Determines how many times each metric is computed. + eval_every_steps: Integer determining which checkpoints to evaluate. + """ + logging.info("Running schedule '%s' with options: %s", schedule, options) + if run_config.tf_random_seed: + logging.info("Setting NumPy random seed to %s.", run_config.tf_random_seed) + np.random.seed(run_config.tf_random_seed) + + result_dir = os.path.join(run_config.model_dir, "result") + utils.check_folder(result_dir) + + dataset = datasets.get_dataset() + gan = options["gan_class"](dataset=dataset, + parameters=options, + model_dir=run_config.model_dir) + + if schedule not in {"train", "eval_after_train", "continuous_eval"}: + raise ValueError("Schedule {} not supported.".format(schedule)) + if schedule in {"train", "eval_after_train"}: + train_hooks = [ + gin.tf.GinConfigSaverHook(run_config.model_dir), + hooks.ReportProgressHook(task_manager, + max_steps=options["training_steps"]), + ] + if run_config.save_checkpoints_steps: + # This replaces the default checkpoint saver hook in the estimator. + logging.info("Using AsyncCheckpointSaverHook.") + train_hooks.append( + hooks.AsyncCheckpointSaverHook( + checkpoint_dir=run_config.model_dir, + save_steps=run_config.save_checkpoints_steps)) + # (b/122782388): Remove hotfix. + run_config = run_config.replace(save_checkpoints_steps=1000000) + estimator = gan.as_estimator( + run_config, batch_size=options["batch_size"], use_tpu=use_tpu) + estimator.train( + input_fn=gan.input_fn, + max_steps=options["training_steps"], + hooks=train_hooks) + task_manager.mark_training_done() + + if schedule == "continuous_eval": + # Continuous eval with up to 24 hours between checkpoints.
+ checkpoints = task_manager.unevaluated_checkpoints( + timeout=24 * 3600, eval_every_steps=eval_every_steps) + if schedule == "eval_after_train": + checkpoints = task_manager.unevaluated_checkpoints( + eval_every_steps=eval_every_steps) + if schedule in {"continuous_eval", "eval_after_train"}: + _run_eval( + gan.as_module_spec(), + checkpoints=checkpoints, + task_manager=task_manager, + run_config=run_config, + use_tpu=use_tpu, + num_averaging_runs=num_eval_averaging_runs) diff --git a/compare_gan/runner_lib_test.py b/compare_gan/runner_lib_test.py new file mode 100644 index 0000000..e1d38b2 --- /dev/null +++ b/compare_gan/runner_lib_test.py @@ -0,0 +1,290 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests high-level behavior of runner_lib.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +from absl import logging +from absl.testing import flagsaver +from absl.testing import parameterized + +from compare_gan import eval_gan_lib +from compare_gan import eval_utils +from compare_gan import runner_lib +from compare_gan.architectures import arch_ops +from compare_gan.gans.modular_gan import ModularGAN + +import gin +import numpy as np +import tensorflow as tf + +FLAGS = flags.FLAGS + + +class RunnerLibTest(parameterized.TestCase, tf.test.TestCase): + + def _create_fake_inception_graph(self): + """Creates a graph that mocks inception. + + It takes the input, multiplies it through a small constant matrix and + returns the result as logits. It makes sure to match the tensor names of + the real inception model. + + Returns: + tf.Graph object with a simple mock inception inside.
+ """ + fake_inception = tf.Graph() + with fake_inception.as_default(): + graph_input = tf.placeholder( + tf.float32, shape=[None, 299, 299, 3], name="Mul") + matrix = tf.ones(shape=[299 * 299 * 3, 10]) * 0.00001 + output = tf.matmul(tf.layers.flatten(graph_input), matrix) + output = tf.identity(output, name="pool_3") + output = tf.identity(output, name="logits") + return fake_inception + + def setUp(self): + super(RunnerLibTest, self).setUp() + FLAGS.data_fake_dataset = True + gin.clear_config() + inception_graph = self._create_fake_inception_graph() + eval_utils.INCEPTION_GRAPH = inception_graph.as_graph_def() + + @parameterized.named_parameters([ + ("SameSeeds", 42, 42), + ("DifferentSeeds", 1, 42), + ("NoSeeds", None, None), + ]) + def testWeightInitialization(self, seed1, seed2): + gin.bind_parameter("dataset.name", "cifar10") + gin.bind_parameter("ModularGAN.g_optimizer_fn", + tf.train.GradientDescentOptimizer) + options = { + "architecture": "resnet_cifar_arch", + "batch_size": 2, + "disc_iters": 1, + "gan_class": ModularGAN, + "lambda": 1, + "training_steps": 1, + "z_dim": 128, + } + for i in range(2): + seed = seed1 if i == 0 else seed2 + model_dir = os.path.join(FLAGS.test_tmpdir, str(i)) + if tf.gfile.Exists(model_dir): + tf.gfile.DeleteRecursively(model_dir) + run_config = tf.contrib.tpu.RunConfig( + model_dir=model_dir, tf_random_seed=seed) + task_manager = runner_lib.TaskManager(model_dir) + runner_lib.run_with_schedule( + "train", + run_config=run_config, + task_manager=task_manager, + options=options, + use_tpu=False) + + checkpoint_path_0 = os.path.join(FLAGS.test_tmpdir, "0/model.ckpt-0") + checkpoint_path_1 = os.path.join(FLAGS.test_tmpdir, "1/model.ckpt-0") + checkpoint_reader_0 = tf.train.load_checkpoint(checkpoint_path_0) + checkpoint_reader_1 = tf.train.load_checkpoint(checkpoint_path_1) + for name, _ in tf.train.list_variables(checkpoint_path_0): + tf.logging.info(name) + t0 = checkpoint_reader_0.get_tensor(name) + t1 = checkpoint_reader_1.get_tensor(name) + zero_initialized_vars = [ + "bias", "biases", "beta", "moving_mean", "global_step" + ] + one_initialized_vars = ["gamma", "moving_variance"] + if any(name.endswith(e) for e in zero_initialized_vars): + # Variables that are always initialized to 0. + self.assertAllClose(t0, np.zeros_like(t0)) + self.assertAllClose(t1, np.zeros_like(t1)) + elif any(name.endswith(e) for e in one_initialized_vars): + # Variables that are always initialized to 1. + self.assertAllClose(t0, np.ones_like(t0)) + self.assertAllClose(t1, np.ones_like(t1)) + elif seed1 is not None and seed1 == seed2: + # Same random seed. + self.assertAllClose(t0, t1) + else: + # Different random seeds. 
+ logging.info("name=%s, t0=%s, t1=%s", name, t0, t1) + self.assertNotAllClose(t0, t1) + + @parameterized.named_parameters([ + ("WithRealData", False), + ("WithFakeData", True), + ]) + @flagsaver.flagsaver + def testTrainingIsDeterministic(self, fake_dataset): + FLAGS.data_fake_dataset = fake_dataset + gin.bind_parameter("dataset.name", "cifar10") + options = { + "architecture": "resnet_cifar_arch", + "batch_size": 2, + "disc_iters": 1, + "gan_class": ModularGAN, + "lambda": 1, + "training_steps": 3, + "z_dim": 128, + } + for i in range(2): + model_dir = os.path.join(FLAGS.test_tmpdir, str(i)) + if tf.gfile.Exists(model_dir): + tf.gfile.DeleteRecursively(model_dir) + run_config = tf.contrib.tpu.RunConfig( + model_dir=model_dir, tf_random_seed=3) + task_manager = runner_lib.TaskManager(model_dir) + runner_lib.run_with_schedule( + "train", + run_config=run_config, + task_manager=task_manager, + options=options, + use_tpu=False, + num_eval_averaging_runs=1) + + checkpoint_path_0 = os.path.join(FLAGS.test_tmpdir, "0/model.ckpt-3") + checkpoint_path_1 = os.path.join(FLAGS.test_tmpdir, "1/model.ckpt-3") + checkpoint_reader_0 = tf.train.load_checkpoint(checkpoint_path_0) + checkpoint_reader_1 = tf.train.load_checkpoint(checkpoint_path_1) + for name, _ in tf.train.list_variables(checkpoint_path_0): + tf.logging.info(name) + t0 = checkpoint_reader_0.get_tensor(name) + t1 = checkpoint_reader_1.get_tensor(name) + self.assertAllClose(t0, t1, msg=name) + + @parameterized.parameters([ + {"use_tpu": False}, + # {"use_tpu": True}, + ]) + def testTrainAndEval(self, use_tpu): + gin.bind_parameter("dataset.name", "cifar10") + options = { + "architecture": "resnet_cifar_arch", + "batch_size": 2, + "disc_iters": 1, + "gan_class": ModularGAN, + "lambda": 1, + "training_steps": 1, + "z_dim": 128, + } + model_dir = FLAGS.test_tmpdir + if tf.gfile.Exists(model_dir): + tf.gfile.DeleteRecursively(model_dir) + run_config = tf.contrib.tpu.RunConfig( + model_dir=model_dir, + tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1)) + task_manager = runner_lib.TaskManager(model_dir) + runner_lib.run_with_schedule( + "eval_after_train", + run_config=run_config, + task_manager=task_manager, + options=options, + use_tpu=use_tpu, + num_eval_averaging_runs=1, + eval_every_steps=None) + expected_files = [ + "TRAIN_DONE", "checkpoint", "model.ckpt-0.data-00000-of-00001", + "model.ckpt-0.index", "model.ckpt-0.meta", + "model.ckpt-1.data-00000-of-00001", "model.ckpt-1.index", + "model.ckpt-1.meta", "operative_config-0.gin", "tfhub"] + self.assertAllInSet(expected_files, tf.gfile.ListDirectory(model_dir)) + + def testTrainAndEvalWithSpectralNormAndEma(self): + gin.bind_parameter("dataset.name", "cifar10") + gin.bind_parameter("ModularGAN.g_use_ema", True) + gin.bind_parameter("G.spectral_norm", True) + options = { + "architecture": "resnet_cifar_arch", + "batch_size": 2, + "disc_iters": 1, + "gan_class": ModularGAN, + "lambda": 1, + "training_steps": 1, + "z_dim": 128, + } + model_dir = FLAGS.test_tmpdir + if tf.gfile.Exists(model_dir): + tf.gfile.DeleteRecursively(model_dir) + run_config = tf.contrib.tpu.RunConfig( + model_dir=model_dir, + tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1)) + task_manager = runner_lib.TaskManager(model_dir) + runner_lib.run_with_schedule( + "eval_after_train", + run_config=run_config, + task_manager=task_manager, + options=options, + use_tpu=False, + num_eval_averaging_runs=1, + eval_every_steps=None) + expected_files = [ + "TRAIN_DONE", "checkpoint", 
"model.ckpt-0.data-00000-of-00001", + "model.ckpt-0.index", "model.ckpt-0.meta", + "model.ckpt-1.data-00000-of-00001", "model.ckpt-1.index", + "model.ckpt-1.meta", "operative_config-0.gin", "tfhub"] + self.assertAllInSet(expected_files, tf.gfile.ListDirectory(model_dir)) + + def testTrainAndEvalWithBatchNormAccu(self): + gin.bind_parameter("dataset.name", "cifar10") + gin.bind_parameter("standardize_batch.use_moving_averages", False) + gin.bind_parameter("G.batch_norm_fn", arch_ops.batch_norm) + options = { + "architecture": "resnet_cifar_arch", + "batch_size": 2, + "disc_iters": 1, + "gan_class": ModularGAN, + "lambda": 1, + "training_steps": 1, + "z_dim": 128, + } + model_dir = FLAGS.test_tmpdir + if tf.gfile.Exists(model_dir): + tf.gfile.DeleteRecursively(model_dir) + run_config = tf.contrib.tpu.RunConfig( + model_dir=model_dir, + tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1)) + task_manager = runner_lib.TaskManager(model_dir) + # Wrap _UpdateBnAccumulators to only perform one accumulator update step. + # Otherwise the test case would time out. + orig_update_bn_accumulators = eval_gan_lib._update_bn_accumulators + def mock_update_bn_accumulators(sess, generated, num_accu_examples): + del num_accu_examples + return orig_update_bn_accumulators(sess, generated, num_accu_examples=64) + eval_gan_lib._update_bn_accumulators = mock_update_bn_accumulators + runner_lib.run_with_schedule( + "eval_after_train", + run_config=run_config, + task_manager=task_manager, + options=options, + use_tpu=False, + num_eval_averaging_runs=1, + eval_every_steps=None) + expected_tfhub_files = [ + "checkpoint", "model-with-accu.ckpt.data-00000-of-00001", + "model-with-accu.ckpt.index", "model-with-accu.ckpt.meta"] + self.assertAllInSet( + expected_tfhub_files, + tf.gfile.ListDirectory(os.path.join(model_dir, "tfhub/0"))) + + +if __name__ == "__main__": + tf.test.main() diff --git a/compare_gan/src/datasets.py b/compare_gan/src/datasets.py deleted file mode 100644 index 16108ff..0000000 --- a/compare_gan/src/datasets.py +++ /dev/null @@ -1,253 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Dataset loading code for compare_gan.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import numpy as np -from six.moves import range -import tensorflow as tf - -flags = tf.flags -FLAGS = flags.FLAGS - -flags.DEFINE_string( - "dataset_root", - "/tmp/datasets/gan_compare/", - "Folder which contains all datasets.") - - -def load_convex(dataset_name): - folder_path = os.path.join(FLAGS.dataset_root, "convex") - file_path = "%s/%s.npz" % (folder_path, dataset_name) - with tf.gfile.Open(file_path, "rb") as infile: - data = np.load(infile) - features, labels = data["x"], data["y"] - return features, labels - - -def unpack_png_image(image_data): - """Returns an image and a label. 
0-1 range.""" - value = tf.parse_single_example( - image_data, - features={"image/encoded": tf.FixedLenFeature([], tf.string), - "image/class/label": tf.FixedLenFeature([], tf.int64)}) - image = tf.image.decode_png(value["image/encoded"]) - image = tf.cast(image, tf.float32) / 255.0 - label = tf.cast(value["image/class/label"], tf.int32) - return image, label - - -def unpack_raw_image(image_data): - """Returns an image and a label. 0-1 range.""" - value = tf.parse_single_example( - image_data, - features={"image": tf.FixedLenFeature([], tf.string)}) - image = tf.decode_raw(value["image"], out_type=tf.uint8) - image = tf.reshape(image, [128, 128, 3]) - image = tf.cast(image, tf.float32) / 255.0 - return image, tf.constant(0, dtype=tf.int32) - - -def unpack_celeba_image(image_data): - """Returns 64x64x3 image and constant label.""" - value = tf.parse_single_example( - image_data, - features={"image/encoded": tf.FixedLenFeature([], tf.string)}) - image = tf.image.decode_png(value["image/encoded"]) - image = tf.image.resize_image_with_crop_or_pad(image, 160, 160) - # Note: possibly consider using NumPy's imresize(image, (64, 64)) - image = tf.image.resize_images(image, [64, 64]) - image = tf.cast(image, tf.float32) / 255.0 - label = tf.constant(0, dtype=tf.int32) - return image, label - - -def get_sharded_filenames(prefix, num_shards, range_start=0, range_end=None): - """Retrieves sharded file names.""" - if range_end is None: - range_end = num_shards - return [ - os.path.join(FLAGS.dataset_root, - "%s-%05d-of-%05d" % (prefix, i, num_shards)) - for i in range(range_start, range_end) - ] - - -def load_triangles_and_squares(dataset_name, split_name, num_threads, - buffer_size): - """Loads the triangle and squares dataset.""" - del num_threads, buffer_size - features, labels = load_convex(dataset_name) - assert features.shape[0] == 80000 - assert labels.shape[0] == features.shape[0] - if split_name == "train": - return tf.data.Dataset.from_tensor_slices( - (features[:60000], labels[:60000])) - elif split_name == "test": - return tf.data.Dataset.from_tensor_slices( - (features[-20000:-10000], labels[-20000:-10000])) - else: - return tf.data.Dataset.from_tensor_slices( - (features[-10000:], labels[-10000:])) - - -def load_fake(dataset_name, split_name, num_threads, buffer_size): - del dataset_name, split_name - del num_threads, buffer_size - # Fake dataset for unittests. 
- return tf.data.Dataset.zip(( - tf.data.Dataset.from_tensor_slices(np.random.uniform( - size=(100, 64, 64, 1))), - tf.data.Dataset.from_tensor_slices(np.zeros(shape=(100))))) - - -def load_mnist(dataset_name, split_name, num_threads, buffer_size): - del dataset_name - if split_name == "train": - filenames = get_sharded_filenames("image_mnist-train", 10) - else: - filenames = get_sharded_filenames("image_mnist-dev", 1) - # Image dim: 28,28,1 range: 0..1 label: int32 - return tf.data.TFRecordDataset( - filenames, - buffer_size=buffer_size, - num_parallel_reads=num_threads).map( - unpack_png_image, num_parallel_calls=num_threads) - - -def load_fashion_mnist(dataset_name, split_name, num_threads, buffer_size): - del dataset_name - if split_name == "train": - filenames = get_sharded_filenames("image_fashion_mnist-train", 10) - else: - filenames = get_sharded_filenames("image_fashion_mnist-dev", 1) - # Image dim: 28,28,1 range: 0..1 label: int32 - return tf.data.TFRecordDataset( - filenames, - buffer_size=buffer_size, - num_parallel_reads=num_threads).map( - unpack_png_image, num_parallel_calls=num_threads) - - -def load_cifar10(dataset_name, split_name, num_threads, buffer_size): - del dataset_name - if split_name == "train": - filenames = get_sharded_filenames("image_cifar10-train", 10) - else: - filenames = get_sharded_filenames("image_cifar10-dev", 1) - # Image dim: 32,32,3 range: 0..1 label: int32 - return tf.data.TFRecordDataset( - filenames, - buffer_size=buffer_size, - num_parallel_reads=num_threads).map( - unpack_png_image, num_parallel_calls=num_threads) - - -def load_celeba(dataset_name, split_name, num_threads, buffer_size): - """Returns Celeba-HQ as a TFRecordDataset.""" - del dataset_name - if split_name == "train": - filenames = get_sharded_filenames("image_celeba-train", 100) - else: - filenames = get_sharded_filenames("image_celeba-dev", 10) - # Image dim: 64,64,3 range: 0..1 label: 0 (fixed) - return tf.data.TFRecordDataset( - filenames, - buffer_size=buffer_size, - num_parallel_reads=num_threads).map( - unpack_celeba_image, num_parallel_calls=num_threads) - - -def unpack_lsun_image(image_data): - """Returns a LSUN-Bedrooms image as a 128x128x3 Tensor, with label 0.""" - value = tf.parse_single_example( - image_data, - features={"image/encoded": tf.FixedLenFeature([], tf.string)}) - image = tf.image.decode_jpeg(value["image/encoded"], channels=3) - image = tf.image.resize_image_with_crop_or_pad( - image, target_height=128, target_width=128) - image = tf.cast(image, tf.float32) / 255.0 - label = tf.constant(0, dtype=tf.int32) - return image, label - - -def load_lsun(dataset_name, split_name, num_threads, buffer_size): - """Returns LSUN Bedrooms as a TFRecordDataset.""" - del dataset_name - # The eval set is too small (only 300 examples) so we're using the last - # shard from the training set as our eval (approx. 30k examples). 
- if split_name == "train": - range_start = 0 - range_end = 99 - else: - range_start = 99 - range_end = 100 - filenames = get_sharded_filenames( - "image_lsun_bedrooms-train", num_shards=100, - range_start=range_start, range_end=range_end) - # Image dim: 128, 128 ,3 range: 0..1 label: 0 (fixed) - return tf.data.TFRecordDataset( - filenames, - buffer_size=buffer_size, - num_parallel_reads=num_threads).map( - unpack_lsun_image, num_parallel_calls=num_threads) - - -def unpack_celebahq_image(record): - """Returns a Celeba-HQ image as a Tensor, with label 0.""" - record = tf.parse_single_example( - record, - features={ - "image/encoded": tf.FixedLenFeature((), tf.string, default_value=""), - }) - image = tf.image.decode_png(record["image/encoded"]) - image = tf.cast(image, tf.float32) / 255.0 - label = tf.constant(0, dtype=tf.int32) - return image, label - - -CELEBA_AVAILABLE_RESOLUTIONS = ["128", "256"] - - -def load_celebahq(dataset_name, split_name, num_threads, buffer_size): - """Returns Celeba-HQ as a TFRecordDataset.""" - resolution = dataset_name[-3:] - - if resolution not in CELEBA_AVAILABLE_RESOLUTIONS: - raise ValueError("Resolution not available for CelebaHQ: %s" % dataset_name) - - # No default split for train/valid for CelebA-HQ. - if split_name == "train": - range_start = 0 - range_end = 90 - else: - range_start = 90 - range_end = 100 - filenames = get_sharded_filenames("image_celebahq-%s" % resolution, - num_shards=100, range_start=range_start, - range_end=range_end) - # Image dim: 128, 128, 3 range: 0..1 label: 0 (fixed) - # Image dim: 256, 256, 3 range: 0..1 label: 0 (fixed) - return tf.data.TFRecordDataset( - filenames, - buffer_size=buffer_size, - num_parallel_reads=num_threads).map( - unpack_celebahq_image, num_parallel_calls=num_threads) diff --git a/compare_gan/src/datasets_test.py b/compare_gan/src/datasets_test.py deleted file mode 100644 index e40129c..0000000 --- a/compare_gan/src/datasets_test.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
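Note on the deleted loaders above: each load_* function returns a tf.data.Dataset of (image, label) pairs with image values already scaled to [0, 1]. A minimal sketch of how such a loader would be wired into a batched TF1-style input pipeline follows; the shuffle and batch parameters are illustrative assumptions, not values taken from this patch, and FLAGS.dataset_root is assumed to point at the TFRecord shards:

import tensorflow as tf

# `load_cifar10` uses the signature shared by the loaders above:
# (dataset_name, split_name, num_threads, buffer_size).
dataset = load_cifar10("cifar10", "train", num_threads=4,
                       buffer_size=16 * 1024 * 1024)
# Shuffle, repeat indefinitely, and batch for training.
dataset = dataset.shuffle(buffer_size=10000).repeat().batch(64)
images, labels = dataset.make_one_shot_iterator().get_next()

with tf.train.MonitoredTrainingSession() as sess:
  image_batch, label_batch = sess.run([images, labels])
  # image_batch has shape [64, 32, 32, 3] with values in [0, 1].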
- -"""Tests for datasets.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -from compare_gan.src import datasets - -import tensorflow as tf - -FLAGS = tf.flags.FLAGS - -pkg_dir, _ = os.path.split(__file__) -_TESTDATA = os.path.join(pkg_dir, "test_data") - - -class DatasetsTest(tf.test.TestCase): - - def setUp(self): - FLAGS.dataset_root = _TESTDATA - - def test_just_create_dataset(self): - datasets.load_fake("fake", "train", 1, 10) - datasets.load_mnist("mnist", "train", 1, 10) - datasets.load_fashion_mnist("fashion_mnist", "train", 1, 10) - datasets.load_cifar10("cifar10", "train", 1, 10) - datasets.load_celeba("celeba", "train", 1, 10) - datasets.load_lsun("lsun", "train", 1, 10) - datasets.load_celebahq("celebahq128", "train", 1, 10) - - def get_element_and_verify_shape(self, dataset, expected_shape): - element = dataset.make_one_shot_iterator().get_next() - with tf.Session() as session: - image, _ = session.run(element) - self.assertEqual(image.shape, expected_shape) - self.assertGreaterEqual(image.min(), 0.0) - self.assertLessEqual(image.max(), 1.0) - - def test_mnist(self): - self.get_element_and_verify_shape( - datasets.load_mnist("mnist", "dev", 1, 10), - (28, 28, 1)) - - def test_fashion_mnist(self): - self.get_element_and_verify_shape( - datasets.load_fashion_mnist("fashion_mnist", "dev", 1, 10), - (28, 28, 1)) - - def test_cifar10(self): - self.get_element_and_verify_shape( - datasets.load_cifar10("cifar10", "dev", 1, 10), - (32, 32, 3)) - - def test_celeba(self): - self.get_element_and_verify_shape( - datasets.load_celeba("celeba", "dev", 1, 10), - (64, 64, 3)) - - def test_lsun(self): - self.get_element_and_verify_shape( - datasets.load_lsun("lsun", "dev", 1, 10), - (128, 128, 3)) - - def test_celebahq(self): - self.get_element_and_verify_shape( - datasets.load_celebahq("celebahq128", "dev", 1, 10), - (128, 128, 3)) - - -if __name__ == "__main__": - tf.test.main() diff --git a/compare_gan/src/eval_gan_lib.py b/compare_gan/src/eval_gan_lib.py deleted file mode 100644 index a82e477..0000000 --- a/compare_gan/src/eval_gan_lib.py +++ /dev/null @@ -1,929 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
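For reference before the evaluation library below: the FID it reports is the Frechet distance between Gaussian fits to Inception features of real and generated images, FID = ||mu_r - mu_g||^2 + Tr(C_r + C_g - 2 (C_r C_g)^(1/2)). The deleted code delegates this computation to tf.contrib.gan.eval; the following is a minimal NumPy/SciPy sketch of the same quantity, for illustration only (fid_from_features is a hypothetical helper name, not part of the library):

import numpy as np
import scipy.linalg

def fid_from_features(real_feats, gen_feats):
  """Frechet distance between Gaussians fitted to two feature sets.

  real_feats, gen_feats: float arrays of shape [num_samples, dim],
  e.g. Inception pool_3 activations. Illustrative only.
  """
  mu_r, mu_g = real_feats.mean(axis=0), gen_feats.mean(axis=0)
  cov_r = np.cov(real_feats, rowvar=False)
  cov_g = np.cov(gen_feats, rowvar=False)
  # Matrix square root of the covariance product; numerics can introduce
  # a tiny imaginary component, which we drop.
  covmean, _ = scipy.linalg.sqrtm(cov_r.dot(cov_g), disp=False)
  diff = mu_r - mu_g
  return float(diff.dot(diff) + np.trace(cov_r + cov_g - 2.0 * covmean.real))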
- -"""Evaluation for GAN tasks.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import abc -import csv -import os - -from compare_gan.src import fid_score as fid_score_lib -from compare_gan.src import gan_lib -from compare_gan.src import gilbo as gilbo_lib -from compare_gan.src import jacobian_conditioning as conditioning_lib -from compare_gan.src import kid_score as kid_score_lib -from compare_gan.src import ms_ssim_score -from compare_gan.src import params -from compare_gan.src.gans import consts - -import numpy as np -import pandas as pd -import scipy.spatial -from six.moves import range -import tensorflow as tf - -flags = tf.flags -logging = tf.logging -FLAGS = flags.FLAGS - -# This stores statistics for the real data and should be computed only -# once for the whole execution. -MU_REAL, SIGMA_REAL = None, None - -# Special value returned when fake image generated by GAN has nans. -NAN_DETECTED = 31337.0 - -# Special value returned when FID code returned exception. -FID_CODE_FAILED = 4242.0 - -# If the given param was not specified in the model, use this default. -# This is mostly for COLAB, which tries to automatically infer the type -# of the column -DEFAULT_VALUES = { - "weight_clipping": -1.0, - "y_dim": -1, - "lambda": -1.0, - "disc_iters": -1, - "beta1": -1.0, - "beta2": -1.0, - "gamma": -1.0, - "penalty_type": "none", - "discriminator_spectralnorm": False, - "architecture": "unknown", -} - -# Inception batch size. -INCEPTION_BATCH = 50 - -# List of models that we consider. -SUPPORTED_GANS = [ - "GAN", "GAN_MINMAX", "WGAN", "WGAN_GP", "DRAGAN", "LSGAN", "VAE", "BEGAN" -] + consts.MODELS_WITH_PENALTIES - - -def GetAllTrainingParams(): - all_params = set(["architecture"]) - for gan_type in SUPPORTED_GANS: - for _ in ["mnist", "fashion-mnist", "cifar10", "celeba"]: - p = params.GetParameters(gan_type, "wide") - all_params.update(p.keys()) - logging.info("All training parameter exported: %s", sorted(all_params)) - return sorted(all_params) - - -# Fake images are already re-scaled to [0, 255] range. -def GetInceptionScore(fake_images, inception_graph): - """Compute the Inception score.""" - assert fake_images.shape[3] == 3 - num_images = fake_images.shape[0] - assert num_images % INCEPTION_BATCH == 0 - - with tf.Graph().as_default(): - images = tf.constant(fake_images) - inception_score_op = fid_score_lib.inception_score_fn( - images, - num_batches=num_images // INCEPTION_BATCH, - inception_graph=inception_graph) - with tf.train.MonitoredSession() as sess: - inception_score = sess.run(inception_score_op) - return inception_score - - -# Images must have the same resolution and pixels must be in 0..255 range. 
-def ComputeKIDScore(fake_images, real_images, inception_graph): - """Compute KID score using the kid_score library.""" - assert fake_images.shape[3] == 3 - assert real_images.shape[3] == 3 - bs_real = real_images.shape[0] - bs_fake = fake_images.shape[0] - assert bs_real % INCEPTION_BATCH == 0 - assert bs_fake % INCEPTION_BATCH == 0 - assert bs_real >= bs_fake and bs_real % bs_fake == 0 - ratio = bs_real // bs_fake - logging.info("Ratio of real/fake images is: %d", ratio) - with tf.Graph().as_default(): - fake_images_batch = tf.train.batch( - [tf.convert_to_tensor(fake_images, dtype=tf.float32)], - enqueue_many=True, - batch_size=INCEPTION_BATCH) - real_images_batch = tf.train.batch( - [tf.convert_to_tensor(real_images, dtype=tf.float32)], - enqueue_many=True, - batch_size=INCEPTION_BATCH * ratio) - eval_fn = kid_score_lib.get_kid_function( - gen_image_tensor=fake_images_batch, - real_image_tensor=real_images_batch, - num_gen_images=fake_images.shape[0], - num_eval_images=real_images.shape[0], - image_range="0_255", - inception_graph=inception_graph) - with tf.train.MonitoredTrainingSession() as sess: - kid_score = eval_fn(sess) - return kid_score - - -# Images must have the same resolution and pixels must be in 0..255 range. -def ComputeTFGanFIDScore(fake_images, real_images, inception_graph): - """Compute FID score using TF.Gan library.""" - assert fake_images.shape[3] == 3 - assert real_images.shape[3] == 3 - bs_real = real_images.shape[0] - bs_fake = fake_images.shape[0] - assert bs_real % INCEPTION_BATCH == 0 - assert bs_fake % INCEPTION_BATCH == 0 - assert bs_real >= bs_fake and bs_real % bs_fake == 0 - ratio = bs_real // bs_fake - logging.info("Ratio of real/fake images is: %d", ratio) - with tf.Graph().as_default(): - fake_images_batch = tf.train.batch( - [tf.convert_to_tensor(fake_images, dtype=tf.float32)], - enqueue_many=True, - batch_size=INCEPTION_BATCH) - real_images_batch = tf.train.batch( - [tf.convert_to_tensor(real_images, dtype=tf.float32)], - enqueue_many=True, - batch_size=INCEPTION_BATCH * ratio) - eval_fn = fid_score_lib.get_fid_function( - gen_image_tensor=fake_images_batch, - real_image_tensor=real_images_batch, - num_gen_images=fake_images.shape[0], - num_eval_images=real_images.shape[0], - image_range="0_255", - inception_graph=inception_graph) - with tf.train.MonitoredTrainingSession() as sess: - fid_score = eval_fn(sess) - return fid_score - - -def GetRealImages(dataset, - split_name, - num_examples, - failure_on_insufficient_examples=True): - """Get num_examples images from the given dataset/split.""" - # Multithread and buffer could improve the training speed by 20%, however it - # consumes more memory. In evaluation, we used single thread without buffer - # to avoid using too much memory. - dataset_content = gan_lib.load_dataset( - dataset, - split_name=split_name, - num_threads=1, - buffer_size=0) - # Get real images from the dataset. In the case of a 1-channel - # dataset (like mnist) convert it to 3 channels. - data_x = [] - with tf.Graph().as_default(): - get_next = dataset_content.make_one_shot_iterator().get_next() - with tf.train.MonitoredTrainingSession() as sess: - for i in range(num_examples): - try: - data_x.append(sess.run(get_next[0])) - except tf.errors.OutOfRangeError: - logging.error("Reached the end of dataset. Read: %d samples." 
% i) - break - - real_images = np.array(data_x) - if real_images.shape[0] != num_examples: - if failure_on_insufficient_examples: - raise ValueError("Not enough examples in the dataset %s: %d / %d" % - (dataset, real_images.shape[0], num_examples)) - else: - logging.error("Not enough examples in the dataset %s: %d / %d", dataset, - real_images.shape[0], num_examples) - - real_images *= 255.0 - return real_images - - -def ShouldRunAccuracyLossTrainVsTest(options): - """Only run the accuracy test for the NS GAN.""" - return options["gan_type"] == consts.GAN_WITH_PENALTY - - -def ComputeAccuracyLoss(options, - sess, - gan, - test_images, - max_train_examples=50000, - num_repeat=5): - """Compute discriminator accuracy/loss on train/test/fake set. - - Args: - options: Dict[Text, Text] with all parameters for the current trial. - sess: tf.Session object. - gan: Any AbstractGAN instance. - test_images: numpy array with test images. - max_train_examples: How many "train" examples to get from the dataset. - In each round, some of them will be randomly selected - to evaluate train set accuracy. - num_repeat: How many times to repeat the computation. - The mean of all the results is reported. - Returns: - Dict[Text, float] with all the computed scores. - - Raises: - ValueError: If the number of test_images is greater than the number of - training images returned by the dataset. - """ - train_images = GetRealImages( - options["dataset"], - split_name="train", - num_examples=max_train_examples, - failure_on_insufficient_examples=False) - if train_images.shape[0] < test_images.shape[0]: - raise ValueError("num_train %d must be larger than num_test %d." % - (train_images.shape[0], test_images.shape[0])) - - logging.info("Evaluating training and test accuracy...") - - num_batches = int(np.floor(test_images.shape[0] / gan.batch_size)) - if num_batches * gan.batch_size < test_images.shape[0]: - logging.error("Ignoring the last batch with %d samples / %d epoch size.", - test_images.shape[0] - num_batches * gan.batch_size, - gan.batch_size) - - train_accuracies = [] - test_accuracies = [] - fake_accuracies = [] - train_d_losses = [] - test_d_losses = [] - for repeat in range(num_repeat): - idx = np.random.choice(train_images.shape[0], test_images.shape[0]) - train_subset = [train_images[i] for i in idx] - - train_predictions = [] - test_predictions = [] - fake_predictions = [] - train_d_losses = [] - test_d_losses = [] - - for i in range(num_batches): - z_sample = gan.z_generator(gan.batch_size, gan.z_dim) - start_idx = i * gan.batch_size - end_idx = start_idx + gan.batch_size - test_batch = test_images[start_idx:end_idx] - train_batch = train_subset[start_idx:end_idx] - - test_prediction, test_d_loss, fake_images = sess.run( - [gan.discriminator_output, gan.d_loss, gan.fake_images], - feed_dict={ - gan.inputs: test_batch, - gan.z: z_sample - }) - test_predictions.append(test_prediction[0]) - test_d_losses.append(test_d_loss) - - train_prediction, train_d_loss = sess.run( - [gan.discriminator_output, gan.d_loss], - feed_dict={ - gan.inputs: train_batch, - gan.z: z_sample - }) - train_predictions.append(train_prediction[0]) - train_d_losses.append(train_d_loss) - - fake_prediction = sess.run( - gan.discriminator_output, feed_dict={gan.inputs: fake_images})[0] - fake_predictions.append(fake_prediction) - - discriminator_threshold = 0.5 - train_predictions = [ - x >= discriminator_threshold for x in train_predictions - ] - test_predictions = [x >= discriminator_threshold for x in test_predictions] -
fake_predictions = [x < discriminator_threshold for x in fake_predictions] - - train_accuracy = sum(train_predictions) / float(len(train_predictions)) - test_accuracy = sum(test_predictions) / float(len(test_predictions)) - fake_accuracy = sum(fake_predictions) / float(len(fake_predictions)) - train_d_loss = np.mean(train_d_losses) - test_d_loss = np.mean(test_d_losses) - print("repeat %d: train_accuracy: %.3f, test_accuracy: %.3f, " - "fake_accuracy: %.3f, train_d_loss: %.3f, test_d_loss: %.3f" % - (repeat, train_accuracy, test_accuracy, fake_accuracy, train_d_loss, - test_d_loss)) - - train_accuracies.append(train_accuracy) - test_accuracies.append(test_accuracy) - fake_accuracies.append(fake_accuracy) - train_d_losses.append(train_d_loss) - test_d_losses.append(test_d_loss) - - result_dict = {} - result_dict["train_accuracy"] = np.mean(train_accuracies) - result_dict["test_accuracy"] = np.mean(test_accuracies) - result_dict["fake_accuracy"] = np.mean(fake_accuracies) - result_dict["train_d_loss"] = np.mean(train_d_losses) - result_dict["test_d_loss"] = np.mean(test_d_losses) - return result_dict - - -def ShouldRunMultiscaleSSIM(options): - msssim_datasets = ["celeba", "celebahq128"] - return options["dataset"] in msssim_datasets - - -def ComputeMultiscaleSSIMScore(fake_images): - """Compute the MS-SSIM score.""" - batch_size = 64 - with tf.Graph().as_default(): - fake_images_batch = tf.train.shuffle_batch( - [tf.convert_to_tensor(fake_images, dtype=tf.float32)], - capacity=16*batch_size, - min_after_dequeue=8*batch_size, - num_threads=4, - enqueue_many=True, - batch_size=batch_size) - - # Following section 5.3 of https://arxiv.org/pdf/1710.08446.pdf, we only - # evaluate 5 batches of the generated images. - eval_fn = ms_ssim_score.get_metric_function( - generated_images=fake_images_batch, num_batches=5) - with tf.train.MonitoredTrainingSession() as sess: - score = eval_fn(sess) - return score - - -class EvalTask(object): - """Class that describes a single evaluation task. - - For example: compute the inception score or compute accuracy. - The classes that inherit from it should implement the methods below. - """ - __metaclass__ = abc.ABCMeta - - @abc.abstractmethod - def MetricsList(self): - """List of metrics that this class generates. - - These are the only keys that RunXX methods can return in - their output maps. - Returns: - frozenset of strings, which are the names of the metrics that the task - computes. - """ - return frozenset() - - def RunInSession(self, options, sess, gan, real_images): - """Runs the task inside the session, which allows access to the tf Graph. - - This code is run after all images were generated, but the session - is still active. It allows looking into the contents of the graph. - - WARNING: real_images might have 1 or 3 channels (depending on the dataset). - - Args: - options: Dict, containing all parameters for the current trial. - sess: tf.Session object. - gan: AbstractGAN object that is already present in the current tf.Graph. - real_images: numpy array with real 'train' images. - - Returns: - Dict with metric values. The keys must be contained in the set that - the "MetricsList" method above returns. - """ - del options, sess, gan, real_images - return {} - - def RunAfterSession(self, options, fake_images, real_images): - """Runs the task after all the generator calls, after the session was closed. - - WARNING: the images here are in 0..255 range, with 3 color channels. Args: - options: Dict, containing all parameters for the current trial.
- fake_images: numpy array with generated images. Expanded to 3 channels, - values 0..255. dtype: float. - real_images: numpy array with real 'train' images, expanded to 3 - channels. dtype: float. - - Returns: - Dict with metric values. The keys must be contained in the set that - the "MetricsList" method above returns. - """ - del options, fake_images, real_images - return {} - - -class InceptionScoreTask(EvalTask): - """Task that computes the inception score for the generated images.""" - - def __init__(self, inception_graph): - self._inception_graph = inception_graph - - _INCEPTION_SCORE = "inception_score" - - def MetricsList(self): - return frozenset([self._INCEPTION_SCORE]) - - # 'RunInSession' is not needed for this task. - - def RunAfterSession(self, options, fake_images, real_images): - del options, real_images - logging.info("Computing inception score.") - result_dict = {} - result_dict[self._INCEPTION_SCORE] = GetInceptionScore( - fake_images, self._inception_graph) - logging.info("Inception score computed: %.3f", - result_dict[self._INCEPTION_SCORE]) - return result_dict - - -class FIDScoreTask(EvalTask): - """Task that computes the FID score for the generated images.""" - - def __init__(self, inception_graph): - self._inception_graph = inception_graph - - _FID_SCORE = "fid_score" - - def MetricsList(self): - return frozenset([self._FID_SCORE]) - - # 'RunInSession' is not needed for this task. - - def RunAfterSession(self, options, fake_images, real_images): - del options - logging.info("Computing FID score.") - result_dict = {} - result_dict[self._FID_SCORE] = ComputeTFGanFIDScore( - fake_images, real_images, self._inception_graph) - logging.info("Frechet Inception Distance is %.3f", - result_dict[self._FID_SCORE]) - return result_dict - - -class KIDScoreTask(EvalTask): - """Task that computes the KID score for the generated images.""" - - def __init__(self, inception_graph): - self._inception_graph = inception_graph - - _KID_SCORE = "kid_score" - - def MetricsList(self): - return frozenset([self._KID_SCORE]) - - # 'RunInSession' is not needed for this task. - - def RunAfterSession(self, options, fake_images, real_images): - result_dict = {} - if options.get("compute_kid_score", False): - logging.info("Computing KID score.") - result_dict[self._KID_SCORE] = ComputeKIDScore(fake_images, real_images, - self._inception_graph) - logging.info("KID score is %.3f", result_dict[self._KID_SCORE]) - return result_dict - - -class MultiscaleSSIMTask(EvalTask): - """Task that computes the MS-SSIM score for generated images.""" - - _MS_SSIM = "ms_ssim" - - def MetricsList(self): - return frozenset([self._MS_SSIM]) - - # 'RunInSession' is not needed for this task. - - def RunAfterSession(self, options, fake_images, real_images): - del real_images - result_dict = {} - if ShouldRunMultiscaleSSIM(options): - result_dict[self._MS_SSIM] = ComputeMultiscaleSSIMScore(fake_images) - logging.info("MS-SSIM score computed: %.3f", result_dict[self._MS_SSIM]) - return result_dict - - -class ComputeAccuracyTask(EvalTask): - """Task that computes the accuracy over test/train/fake data.""" - - def MetricsList(self): - return frozenset([ - "train_accuracy", "test_accuracy", "fake_accuracy", "train_d_loss", - "test_d_loss" - ]) - - # RunAfterSession is not needed for this task.
- - def RunInSession(self, options, sess, gan, real_images): - if ShouldRunAccuracyLossTrainVsTest(options): - return ComputeAccuracyLoss(options, sess, gan, real_images) - else: - return {} - - -def ComputeFractalDimension(fake_images, - num_fd_seeds=100, - n_bins=1000, - scale=0.1): - """Compute Fractal Dimension of fake_images. - - Args: - fake_images: an np array of datapoints, the dimensionality and scaling of - images can be arbitrary - num_fd_seeds: number of random centers from which fractal dimension - computation is performed - n_bins: number of bins to split the range of distance values into - scale: the scale of the y interval in the log-log plot for which we apply a - linear regression fit - - Returns: - fractal dimension of the dataset. - """ - assert len(fake_images.shape) >= 2 - assert fake_images.shape[0] >= num_fd_seeds - - num_images = fake_images.shape[0] - # In order to apply scipy function we need to flatten the number of dimensions - # to 2 - fake_images = np.reshape(fake_images, (num_images, -1)) - fake_images_subset = fake_images[np.random.randint( - num_images, size=num_fd_seeds)] - - distances = scipy.spatial.distance.cdist(fake_images, - fake_images_subset).flatten() - min_distance = np.min(distances[np.nonzero(distances)]) - max_distance = np.max(distances) - buckets = min_distance * ( - (max_distance / min_distance)**np.linspace(0, 1, n_bins)) - # Create a table where first column corresponds to distances r - # and second column corresponds to number of points N(r) that lie - # within distance r from the random seeds - fd_result = np.zeros((n_bins - 1, 2)) - fd_result[:, 0] = buckets[1:] - fd_result[:, 1] = np.sum(np.less.outer(distances, buckets[1:]), axis=0) - - # We compute the slope of the log-log plot at the middle y value - # which is stored in y_val; the linear regression fit is computed on - # the part of the plot that corresponds to an interval around y_val - # whose size is 2*scale*(total width of the y axis) - max_y = np.log(num_images * num_fd_seeds) - min_y = np.log(num_fd_seeds) - x = np.log(fd_result[:, 0]) - y = np.log(fd_result[:, 1]) - y_width = max_y - min_y - y_val = min_y + 0.5 * y_width - - start = np.argmax(y > y_val - scale * y_width) - end = np.argmax(y > y_val + scale * y_width) - - slope = np.linalg.lstsq( - a=np.vstack([x[start:end], np.ones(end - start)]).transpose(), - b=y[start:end].reshape(end - start, 1))[0][0][0] - return slope - - -class FractalDimensionTask(EvalTask): - """Fractal dimension metric.""" - - _FRACTAL_DIMENSION = "fractal_dimension" - - def MetricsList(self): - return frozenset([self._FRACTAL_DIMENSION]) - - def RunAfterSession(self, options, fake_images, real_images): - del real_images - result_dict = {} - if options.get("compute_fractal_dimension", False): - result_dict[self._FRACTAL_DIMENSION] = ComputeFractalDimension( - fake_images) - return result_dict - - -class GILBOTask(EvalTask): - """Compute GILBO metric and related consistency metrics.""" - - def __init__(self, outdir, task_workdir, dataset_name): - self.outdir = outdir - self.task_workdir = task_workdir - self.dataset = dataset_name - - def MetricsList(self): - return frozenset([ - "gilbo", - "gilbo_train_consistency", - "gilbo_eval_consistency", - "gilbo_self_consistency", - ]) - - def RunInSession(self, options, sess, gan, real_images): - del real_images - result_dict = {} - if options.get("compute_gilbo", False): - (gilbo, gilbo_train_consistency, - gilbo_eval_consistency, gilbo_self_consistency) = gilbo_lib.TrainGILBO( - gan, sess, self.outdir, 
self.task_workdir, self.dataset, options) - result_dict["gilbo"] = gilbo - result_dict["gilbo_train_consistency"] = gilbo_train_consistency - result_dict["gilbo_eval_consistency"] = gilbo_eval_consistency - result_dict["gilbo_self_consistency"] = gilbo_self_consistency - - return result_dict - - -def ComputeGeneratorConditionNumber(sess, gan): - """Computes the generator condition number. - - Computes the Jacobian of the generator in session, then postprocesses to get - the condition number. - - Args: - sess: tf.Session object. - gan: AbstractGAN object that is already present in the current tf.Graph. - - Returns: - A list of length gan.batch_size. Each element is the condition number - computed at a single z sample within a minibatch. - """ - shape = gan.fake_images.get_shape().as_list() - flat_generator_output = tf.reshape( - gan.fake_images, [gan.batch_size, np.prod(shape[1:])]) - tf_jacobian = conditioning_lib.compute_jacobian( - xs=gan.z, fx=flat_generator_output) - z_sample = gan.z_generator(gan.batch_size, gan.z_dim) - np_jacobian = sess.run(tf_jacobian, feed_dict={gan.z: z_sample}) - result_dict = conditioning_lib.analyze_jacobian(np_jacobian) - return result_dict["metric_tensor"]["log_condition_number"] - - -class GeneratorConditionNumberTask(EvalTask): - """Computes the generator condition number. - - Computes the condition number for the metric Tensor of the generator - Jacobian. This condition number is computed locally for each z sample in a - minibatch. Returns the mean log condition number and standard deviation - across the minibatch. - - Follows the methods in https://arxiv.org/abs/1802.08768. - """ - - _CONDITION_NUMBER_COUNT = "log_condition_number_count" - _CONDITION_NUMBER_MEAN = "log_condition_number_mean" - _CONDITION_NUMBER_STD = "log_condition_number_std" - - def MetricsList(self): - return frozenset([ - self._CONDITION_NUMBER_COUNT, self._CONDITION_NUMBER_MEAN, - self._CONDITION_NUMBER_STD - ]) - - def RunInSession(self, options, sess, gan, real_images): - del real_images - result_dict = {} - if options.get("compute_generator_condition_number", False): - result = ComputeGeneratorConditionNumber(sess, gan) - result_dict[self._CONDITION_NUMBER_COUNT] = len(result) - result_dict[self._CONDITION_NUMBER_MEAN] = np.mean(result) - result_dict[self._CONDITION_NUMBER_STD] = np.std(result) - return result_dict - - -class NanFoundError(Exception): - """Exception thrown when NaNs are present in the output.""" - - -def RunCheckpointEval(checkpoint_path, task_workdir, options, tasks_to_run): - """Evaluate model at given checkpoint_path. - - Args: - checkpoint_path: string, path to the single checkpoint to evaluate. - task_workdir: directory where results and logs can be written. - options: Dict[Text, Text] with all parameters for the current trial. - tasks_to_run: List of objects that inherit from EvalTask. - - Returns: - Dict[Text, float] with all the computed results. - - Raises: - NanFoundError: If the GAN output has generated any NaNs. - ValueError: If options["gan_type"] is not supported. - """ - - # Make sure that the same latent variables are used for each evaluation. - np.random.seed(42) - - checkpoint_dir = os.path.join(task_workdir, "checkpoint") - result_dir = os.path.join(task_workdir, "result") - gan_log_dir = os.path.join(task_workdir, "logs") - - gan_type = options["gan_type"] - if gan_type not in SUPPORTED_GANS: - raise ValueError("Gan type %s is not supported."
% gan_type) - - dataset = options["dataset"] - dataset_params = params.GetDatasetParameters(dataset) - dataset_params.update(options) - num_test_examples = dataset_params.get("eval_test_samples", 10000) - - if num_test_examples % INCEPTION_BATCH != 0: - logging.info("Padding number of examples to fit inception batch.") - num_test_examples -= num_test_examples % INCEPTION_BATCH - - real_images = GetRealImages( - options["dataset"], - split_name="test", - num_examples=num_test_examples) - logging.info("Real data processed.") - - result_dict = {} - # Get fake images from the generator. - samples = [] - logging.info("Running eval for dataset %s, checkpoint: %s, num_examples: %d ", - dataset, checkpoint_path, num_test_examples) - with tf.Graph().as_default(): - with tf.Session() as sess: - gan = gan_lib.create_gan( - gan_type=gan_type, - dataset=dataset, - dataset_content=None, - options=options, - checkpoint_dir=checkpoint_dir, - result_dir=result_dir, - gan_log_dir=gan_log_dir) - - gan.build_model(is_training=False) - - tf.global_variables_initializer().run() - saver = tf.train.Saver() - saver.restore(sess, checkpoint_path) - - # Make sure we generate at least as many examples as in the test set. - num_batches = int(np.ceil(num_test_examples / gan.batch_size)) - for _ in range(num_batches): - z_sample = gan.z_generator(gan.batch_size, gan.z_dim) - x = sess.run(gan.fake_images, feed_dict={gan.z: z_sample}) - # If NaNs were generated, ignore this checkpoint and assign a very high - # FID score which we handle specially later. - if np.isnan(x).any(): - logging.error("Detected NaN in fake_images! Returning NaN.") - raise NanFoundError("Detected NaN in fake images.") - samples.append(x) - - print("Fake data generated, running tasks in session.") - for task in tasks_to_run: - result_dict.update(task.RunInSession(options, sess, gan, real_images)) - - fake_images = np.concatenate(samples, axis=0) - # Adjust the number of fake images to the number of images in the test set. - fake_images = fake_images[:num_test_examples, :, :, :] - - assert fake_images.shape == real_images.shape - - # In case we use a 1-channel dataset (like mnist) - convert it to 3 channels. - if fake_images.shape[3] == 1: - fake_images = np.tile(fake_images, [1, 1, 1, 3]) - # change the real_images' shape too - so that it keeps matching - # fake_images' shape. - real_images = np.tile(real_images, [1, 1, 1, 3]) - - fake_images *= 255.0 - - logging.info("Fake data processed. Starting tasks for checkpoint: %s.", - checkpoint_path) - - for task in tasks_to_run: - result_dict.update(task.RunAfterSession(options, fake_images, real_images)) - - return result_dict - - -def RunTaskEval(options, task_workdir, inception_graph, out_file="scores.csv"): - """Evaluates all checkpoints for the given task. - - Fetches all the checkpoints that exist in the workdir and evaluates each one. - Final scores are written into the out_file and the best fid_score is also - stored in the "value" file in the task_workdir. - - Args: - options: Dict[Text, Text] with all parameters for the current trial. - task_workdir: Directory where checkpoints are present. All scores will be - written there too. - inception_graph: GraphDef that contains inception model (used for FID - computation). - out_file: name of the file to store final outputs.
- """ - outdir = options.get("eval_outdir", task_workdir) - - tasks_to_run = [ - InceptionScoreTask(inception_graph), - FIDScoreTask(inception_graph), - MultiscaleSSIMTask(), - ComputeAccuracyTask(), - FractalDimensionTask(), - KIDScoreTask(inception_graph), - GeneratorConditionNumberTask(), - GILBOTask(outdir, task_workdir, options["dataset"]), - ] - - # If the output file doesn't exist, create it. - csv_header = [ - "checkpoint_path", - "model", - "dataset", - "tf_seed", - "sample_id", - ] - task_headers = [] - for task in tasks_to_run: - task_headers.extend(sorted(task.MetricsList())) - csv_header.extend(task_headers) - - train_params = GetAllTrainingParams() - csv_header.extend(train_params) - - scores_path = os.path.join(outdir, out_file) - - if not tf.gfile.Exists(scores_path): - with tf.gfile.Open(scores_path, "w") as f: - writer = csv.DictWriter(f, fieldnames=csv_header) - writer.writeheader() - - # Get the list of records that were already computed, to not re-do them. - finished_checkpoints = set() - try: - with tf.gfile.Open(scores_path, "r") as f: - reader = csv.DictReader(f) - for row in reader: - if sorted(csv_header) != sorted(list(row.keys())): - raise ValueError("wrong csv keys.") - finished_checkpoints.add(row["checkpoint_path"]) - except ValueError: - logging.error("CSV headers no longer match. Recomputing all results.") - finished_checkpoints = {} - with tf.gfile.Open(scores_path, "w") as f: - writer = csv.DictWriter(f, fieldnames=csv_header) - writer.writeheader() - - # Compute all records not done yet. - with tf.gfile.Open(scores_path, "a") as f: - writer = csv.writer(f) - checkpoint_dir = os.path.join(task_workdir, "checkpoint") - # Fetch checkpoint to eval. - checkpoint_state = tf.train.get_checkpoint_state(checkpoint_dir) - - all_checkpoint_paths = checkpoint_state.all_model_checkpoint_paths - for checkpoint_path in all_checkpoint_paths: - if checkpoint_path in finished_checkpoints: - logging.info("Skipping already computed path: %s", checkpoint_path) - continue - - # Write the FID score and all training params. - default_value = -1.0 - try: - result_dict = RunCheckpointEval(checkpoint_path, task_workdir, options, - tasks_to_run) - except NanFoundError as nan_found_error: - result_dict = {} - - logging.error(nan_found_error) - default_value = NAN_DETECTED - - logging.info(result_dict) - tf_seed = str(options.get("tf_seed", -1)) - sample_id = str(options.get("sample_id", -1)) - output_row = [ - checkpoint_path, options["gan_type"], options["dataset"], tf_seed, - sample_id - ] - - for task_metric in task_headers: - output_row.append("%.3f" % result_dict.get(task_metric, default_value)) - - for param in train_params: - if param in options: - output_row.append(options[param]) - else: - output_row.append(str(DEFAULT_VALUES[param])) - writer.writerow(output_row) - - f.flush() - - -def SaveFinalEvaluationScore(scores_path, metric_name, output_path): - """Get final evaluation score (lowest "metric_name") and save in the file. - - Reads the lowest "metric_name" value from the scores and - writes it in the output_path. - Args: - scores_path: string, CSV file with the scores. - metric_name: string, name of the metric/column in CSV. - output_path: string, file to write the result to. 
- """ - data = pd.read_csv(tf.gfile.Open(scores_path), sep=",") - min_score = min(data[metric_name]) - with tf.gfile.Open(output_path, mode="w") as f: - f.write(str(min_score)) diff --git a/compare_gan/src/eval_gan_lib_test.py b/compare_gan/src/eval_gan_lib_test.py deleted file mode 100644 index 06a30da..0000000 --- a/compare_gan/src/eval_gan_lib_test.py +++ /dev/null @@ -1,435 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import csv -import os -import os.path - -from compare_gan.src import eval_gan_lib -from compare_gan.src import fid_score as fid_score_lib -from compare_gan.src import gan_lib -from compare_gan.src.gans import abstract_gan -from compare_gan.src.gans import consts -import mock -import numpy as np -import tensorflow as tf - - -class EvalGanLibTest(tf.test.TestCase): - - def _create_fake_inception_graph(self): - """Creates a graph that mocks inception. - - It takes the input, multiplies it through a matrix full of 0.001 values - and returns the result as logits. It makes sure to match the tensor names of - the real inception model. - - Returns: - tf.Graph object with a simple mock inception inside. - """ - fake_inception = tf.Graph() - with fake_inception.as_default(): - graph_input = tf.placeholder( - tf.float32, shape=[None, 299, 299, 3], name="Mul") - matrix = tf.ones(shape=[299 * 299 * 3, 10]) * 0.001 - output = tf.matmul(tf.layers.flatten(graph_input), matrix) - output = tf.identity(output, name="pool_3") - output = tf.identity(output, name="logits") - return fake_inception - - def _create_checkpoint(self, variable_name, checkpoint_path): - """Creates a checkpoint with a single variable stored inside. - - There must be at least one variable in the checkpoint for tf.Saver - to work correctly. - - Args: - variable_name: string, name of the tf variable to save in the checkpoint. - checkpoint_path: string, path to where checkpoint will be written. - """ - # Create a checkpoint with a single variable.
- with tf.Graph().as_default(): - tf.get_variable(variable_name, shape=[1]) - saver = tf.train.Saver() - with tf.Session() as sess: - sess.run(tf.global_variables_initializer()) - saver.save(sess, checkpoint_path) - - def _get_scores(self, workdir): - """Returns all the scores from the csv file in a list.""" - with tf.gfile.FastGFile(os.path.join(workdir, "scores.csv")) as csvfile: - return list(csv.DictReader(csvfile)) - - def test_end2end_checkpoint(self): - """Takes real GAN (trained for 1 step) and evaluate it.""" - workdir = os.path.join(tf.test.get_temp_dir(), self.id()) - tf.logging.info("Workdir: %s" % workdir) - options = { - "gan_type": "GAN", - "dataset": "fake", - "training_steps": 1, - "save_checkpoint_steps": 10, - "learning_rate": 0.001, - "discriminator_normalization": consts.NO_NORMALIZATION, - "eval_test_samples": 50, - } - gan_lib.run_with_options(options, workdir) - fake_inception = self._create_fake_inception_graph() - - eval_gan_lib.RunTaskEval( - options, workdir, inception_graph=fake_inception.as_graph_def()) - - rows = self._get_scores(workdir) - - self.assertEquals(1, len(rows)) - # The fid score should exist (and be quite large). - self.assertGreater(rows[0]["fid_score"], 100.0) - - _FAKE_FID_SCORE = 18.3 - - def test_mock_gan_and_mock_fid(self): - """Mocks out the GAN and eval tasks and check that eval still works.""" - workdir = os.path.join(tf.test.get_temp_dir(), self.id()) - tf.logging.info("Workdir: %s" % workdir) - options = { - "gan_type": "GAN", "dataset": "fake", "eval_test_samples": 50, - "gilbo_max_train_cycles": 5, "gilbo_train_steps_per_cycle": 10, - "gilbo_eval_steps": 10, "compute_gilbo": True, - } - checkpoint_path = os.path.join(workdir, "model") - self._create_checkpoint("foo", checkpoint_path) - - with mock.patch.object(gan_lib, "create_gan", autospec=True) as mock_cls: - with mock.patch.object( - fid_score_lib, "get_fid_function", autospec=True) as fid_mock: - fid_mock.return_value.return_value = self._FAKE_FID_SCORE - - mock_gan = mock_cls.return_value - mock_gan.batch_size = 16 - - def create_mock_gan(is_training): - """Creates a minimal graph that has all the required GAN nodes. - - It also has a single (unused) variable inside, to make sure that - tf.Saver is happy. - - Args: - is_training: unused, but required by mock. 
- """ - del is_training - z = tf.placeholder(tf.float32) - mock_gan.z = z - tf.get_variable("foo", shape=[1]) - fake_images = tf.ones([16, 64, 64, 1]) - mock_gan.fake_images = fake_images - mock_gan.z_dim = 64 - - mock_gan.build_model.side_effect = create_mock_gan - fake_inception = self._create_fake_inception_graph() - inception_graph = fake_inception.as_graph_def() - - tasks_to_run = [ - eval_gan_lib.InceptionScoreTask(inception_graph), - eval_gan_lib.FIDScoreTask(inception_graph), - eval_gan_lib.MultiscaleSSIMTask(), - eval_gan_lib.ComputeAccuracyTask(), - eval_gan_lib.GILBOTask(workdir, workdir, options["dataset"]), - ] - - result_dict = eval_gan_lib.RunCheckpointEval(checkpoint_path, workdir, - options, tasks_to_run) - self.assertEquals(result_dict["fid_score"], self._FAKE_FID_SCORE) - - print(result_dict) - self.assertNear(result_dict["gilbo"], 0.0, 5.0, - "GILBO should be pretty close to 0!") - - def test_mock_gan_and_nan(self): - """Tests the case, where GAN starts returning NaNs.""" - workdir = os.path.join(tf.test.get_temp_dir(), self.id()) - tf.logging.info("Workdir: %s" % workdir) - options = {"gan_type": "GAN", "dataset": "fake", "eval_test_samples": 50} - checkpoint_path = os.path.join(workdir, "model") - self._create_checkpoint("foo", checkpoint_path) - - with mock.patch.object(gan_lib, "create_gan", autospec=True) as mock_cls: - mock_gan = mock_cls.return_value - mock_gan.batch_size = 16 - - def create_mock_gan(is_training): - """Creates a minimal graph that has all the required GAN nodes. - - It will return a NaN values in the output. - - Args: - is_training: unused but required by mock. - """ - del is_training - z = tf.placeholder(tf.float32) - mock_gan.z = z - tf.get_variable("foo", shape=[1]) - fake_images = tf.ones([16, 64, 64, 1]) * float("NaN") - mock_gan.fake_images = fake_images - - mock_gan.build_model.side_effect = create_mock_gan - with self.assertRaises(eval_gan_lib.NanFoundError): - eval_gan_lib.RunCheckpointEval( - checkpoint_path, workdir, options, tasks_to_run=[]) - - def test_accuracy_loss(self): - """Evaluates accuracy loss metric on mock graph.""" - options = { - "gan_type": "GAN", - "dataset": "fake", - } - mock_gan = mock.create_autospec(abstract_gan.AbstractGAN) - mock_gan.batch_size = 100 - with tf.Graph().as_default(): - with tf.Session() as sess: - test_images = np.random.random(size=(100, 64, 64, 1)) - mock_gan.z = tf.placeholder(tf.float32) - mock_gan.inputs = tf.placeholder(tf.float32) - mock_gan.fake_images = tf.ones([100, 64, 64, 1]) - mock_gan.discriminator_output = [tf.constant(0.3)] - mock_gan.d_loss = tf.constant(1.1) - mock_gan.z_dim = 15 - mock_gan.z_generator.return_value = [1.0, 2.0, 3.0] - result_dict = eval_gan_lib.ComputeAccuracyLoss( - options, - sess, - mock_gan, - test_images, - max_train_examples=100, - num_repeat=1) - # Model always returns 0.3 (so it thinks that image is rather fake) - # On test/train, it means 0% accuracy as the threshold is 0.5. 
- # On fake, it is going to have 100% accuracy. - self.assertNear(result_dict["train_accuracy"], 0.0, 0.01) - self.assertNear(result_dict["test_accuracy"], 0.0, 0.01) - self.assertNear(result_dict["fake_accuracy"], 1.0, 0.01) - self.assertNear(result_dict["train_d_loss"], 1.1, 0.01) - self.assertNear(result_dict["test_d_loss"], 1.1, 0.01) - # Make sure that the keys are matching the MetricsList from the task - self.assertSetEqual(eval_gan_lib.ComputeAccuracyTask().MetricsList(), - set(result_dict.keys())) - - def test_condition_number_for_mock(self): - """Tests that condition number task runs end to end without crashing.""" - workdir = os.path.join(tf.test.get_temp_dir(), self.id()) - tf.logging.info("Workdir: %s" % workdir) - options = { - "gan_type": "GAN", - "dataset": "fake", - "eval_test_samples": 50, - "compute_generator_condition_number": True - } - checkpoint_path = os.path.join(workdir, "model") - self._create_checkpoint("foo", checkpoint_path) - - with mock.patch.object(gan_lib, "create_gan", autospec=True) as mock_cls: - mock_gan = mock_cls.return_value - mock_gan.batch_size = 16 - mock_gan.z_dim = 2 - - def z_generator(batch_size, z_dim): - return np.random.uniform( - -1, 1, size=(batch_size, z_dim)).astype(np.float32) - - mock_gan.z_generator = z_generator - - def create_mock_gan(is_training): - """Creates a minimal graph that has all the required GAN nodes. - - It also has a single (unused) variable inside, to make sure that - tf.Saver is happy. - - Args: - is_training: unused, but required by mock. - """ - del is_training - z = tf.placeholder( - tf.float32, shape=(mock_gan.batch_size, mock_gan.z_dim)) - mock_gan.z = z - # Trivial function from z to fake images to compute Jacobian of. - fake_images = tf.tile( - tf.reshape(mock_gan.z, [16, 2, 1, 1]), [1, 32, 64, 1]) - mock_gan.fake_images = fake_images - tf.get_variable("foo", shape=[1]) - - mock_gan.build_model.side_effect = create_mock_gan - - tasks_to_run = [ - eval_gan_lib.GeneratorConditionNumberTask(), - ] - - result_dict = eval_gan_lib.RunCheckpointEval(checkpoint_path, workdir, - options, tasks_to_run) - self.assertEquals(result_dict["log_condition_number_count"], 16) - self.assertEquals(result_dict["log_condition_number_mean"], 0) - self.assertEquals(result_dict["log_condition_number_std"], 0) - - def test_csv_writing(self): - """Verifies that results are correctly written to the final CSV file.""" - workdir = os.path.join(tf.test.get_temp_dir(), self.id()) - checkpoint_path = os.path.join(workdir, "checkpoint/") - tf.gfile.MakeDirs(checkpoint_path) - - options = { - "gan_type": "GAN", - "dataset": "fake", - "discriminator_normalization": consts.NO_NORMALIZATION, - "learning_rate": 0.001, - } - # Create 10 checkpoints.
- with tf.Graph().as_default(): - tf.get_variable("foo", shape=[1]) - saver = tf.train.Saver(max_to_keep=1000) - with tf.Session() as sess: - sess.run(tf.global_variables_initializer()) - for x in range(10): - saver.save(sess, checkpoint_path, global_step=x) - - with mock.patch.object( - eval_gan_lib, "RunCheckpointEval", autospec=True) as mock_cls: - result_dict = {"inception_score": 12.0, "train_d_loss": 1.3} - mock_cls.return_value = result_dict - eval_gan_lib.RunTaskEval(options, workdir, inception_graph=None) - rows = self._get_scores(workdir) - self.assertEquals(10, len(rows)) - self.assertNear(float(rows[0]["inception_score"]), 12.0, 0.01) - self.assertNear(float(rows[1]["train_d_loss"]), 1.3, 0.01) - self.assertNear(float(rows[1]["test_accuracy"]), -1.0, 0.01) - - def test_csv_append(self): - workdir = os.path.join(tf.test.get_temp_dir(), self.id()) - checkpoint_path = os.path.join(workdir, "checkpoint/") - tf.gfile.MakeDirs(checkpoint_path) - - options = { - "gan_type": "GAN", - "dataset": "fake", - "discriminator_normalization": consts.NO_NORMALIZATION, - "learning_rate": 0.001, - } - - # Start by creating first 2 checkpoints. - with tf.Graph().as_default(): - tf.get_variable("foo", shape=[1]) - saver = tf.train.Saver(max_to_keep=1000) - with tf.Session() as sess: - sess.run(tf.global_variables_initializer()) - for x in range(2): - saver.save(sess, checkpoint_path, global_step=x) - - with mock.patch.object( - eval_gan_lib, "RunCheckpointEval", autospec=True) as mock_cls: - mock_cls.return_value = {"inception_score": 12.0, "train_d_loss": 1.3} - eval_gan_lib.RunTaskEval(options, workdir, inception_graph=None) - - rows = self._get_scores(workdir) - self.assertEquals(2, len(rows)) - self.assertNear(float(rows[0]["inception_score"]), 12.0, 0.01) - self.assertNear(float(rows[1]["train_d_loss"]), 1.3, 0.01) - self.assertNear(float(rows[1]["test_accuracy"]), -1.0, 0.01) - - # Now create 2 more checkpoints. - for x in range(3, 5): - saver.save(sess, checkpoint_path, global_step=x) - mock_cls.return_value = {"inception_score": 14.0, "train_d_loss": 1.5} - eval_gan_lib.RunTaskEval(options, workdir, inception_graph=None) - rows = self._get_scores(workdir) - self.assertEquals(4, len(rows)) - # old scores should stay intact. - self.assertNear(float(rows[0]["inception_score"]), 12.0, 0.01) - self.assertNear(float(rows[1]["train_d_loss"]), 1.3, 0.01) - self.assertNear(float(rows[1]["test_accuracy"]), -1.0, 0.01) - # New entries should have new values. - self.assertNear(float(rows[2]["inception_score"]), 14.0, 0.01) - self.assertNear(float(rows[3]["train_d_loss"]), 1.5, 0.01) - - self.assertNotIn("new_metric", rows[0]) - - # Now assume that metric names have changed. - with mock.patch.object( - eval_gan_lib, "MultiscaleSSIMTask", autospec=True) as mock_task: - mock_task.return_value.MetricsList.return_value = [ - "ms_ssim", "new_metric" - ] - # Now create 2 more checkpoints. - for x in range(5, 7): - saver.save(sess, checkpoint_path, global_step=x) - mock_cls.return_value = { - "inception_score": 16.0, - "train_d_loss": 1.7, - "new_metric": 20.0 - } - eval_gan_lib.RunTaskEval(options, workdir, inception_graph=None) - rows = self._get_scores(workdir) - self.assertEquals(6, len(rows)) - - # As CSV header has changed, all the results should have been - # recomputed. 
- for x in range(6): - self.assertNear(float(rows[x]["inception_score"]), 16.0, 0.01) - self.assertNear(float(rows[x]["new_metric"]), 20.0, 0.01) - self.assertNear(float(rows[x]["test_accuracy"]), -1.0, 0.01) - self.assertNear(float(rows[x]["train_d_loss"]), 1.7, 0.01) - - def test_save_final(self): - workdir = os.path.join(tf.test.get_temp_dir(), self.id()) - tf.gfile.MakeDirs(workdir) - scores_path = os.path.join(workdir, "scores") - value_path = os.path.join(workdir, "value") - - with tf.gfile.FastGFile(scores_path, "w") as csvfile: - writer = csv.DictWriter(csvfile, ["fid_score", "other_score"]) - writer.writeheader() - writer.writerow({"fid_score": 13.0, "other_score": 20.0}) - writer.writerow({"fid_score": 17.0, "other_score": 10.0}) - - eval_gan_lib.SaveFinalEvaluationScore(scores_path, "other_score", - value_path) - - with tf.gfile.FastGFile(value_path) as f: - self.assertEquals(f.read(), "10.0") - - -class FractalDimensionTest(tf.test.TestCase): - - def test_straight_line(self): - """The fractal dimension of a 1D line must lie near 1.0.""" - - self.assertAllClose( - eval_gan_lib.ComputeFractalDimension( - np.random.uniform(size=(10000, 1))), - 1.0, - atol=0.05) - - def test_square(self): - """The fractal dimension of a 2D square must lie near 2.0.""" - - self.assertAllClose( - eval_gan_lib.ComputeFractalDimension( - np.random.uniform(size=(10000, 2))), - 2.0, - atol=0.1) - - -if __name__ == "__main__": - tf.test.main() diff --git a/compare_gan/src/fid_score.py b/compare_gan/src/fid_score.py deleted file mode 100644 index 5e2f511..0000000 --- a/compare_gan/src/fid_score.py +++ /dev/null @@ -1,179 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Library for evaluating GAN models using Frechet Inception distance.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools - -import numpy as np -from six.moves import range -import tensorflow as tf - -logging = tf.logging -tfgan_eval = tf.contrib.gan.eval - - -def get_fid_function(real_image_tensor, gen_image_tensor, num_gen_images, - num_eval_images, image_range, inception_graph): - """Get a fn returning the FID between distributions defined by two tensors. - - Wraps session.run calls to generate num_eval_images images from both - gen_image_tensor and real_image_tensor (as num_eval_images is often much - larger than the training batch size). Then finds the FID between these two - groups of images. - - Args: - real_image_tensor: Tensor of shape [batch_size, dim, dim, 3] which evaluates - to a batch of real eval images. Should be in range [0..255]. - gen_image_tensor: Tensor of shape [batch_size, dim, dim, 3] which evaluates - to a batch of gen images. Should be in range [0..255]. - num_gen_images: Number of generated images to evaluate FID between. - num_eval_images: Number of real images to evaluate FID between. - image_range: Range of values in the images. Accepted values: "0_255".
- inception_graph: GraphDef with frozen inception model. - - Returns: - eval_fn: a function which takes a session as an argument and returns the - FID between num_eval_images images generated from the distributions - defined by gen_image_tensor and real_image_tensor - """ - - assert image_range == "0_255" - # Set up graph for generating features to pass to FID eval. - batch_size_gen = gen_image_tensor.get_shape().as_list()[0] - batch_size_real = real_image_tensor.get_shape().as_list()[0] - - # We want to cover only the case that the real data is bigger than - # generated (50k vs 10k for CIFAR to be comparable with SN GAN) - assert batch_size_real >= batch_size_gen - assert batch_size_real % batch_size_gen == 0 - - # We preprocess images and extract inception features as soon as they're - # generated. This is to maintain memory efficiency if the images are large. - # For example, for ImageNet, the inception features are much smaller than - # the images. - eval_features_tensor = get_inception_features(real_image_tensor, - inception_graph) - gen_features_tensor = get_inception_features(gen_image_tensor, - inception_graph) - - num_gen_images -= num_gen_images % batch_size_gen - num_eval_images -= num_eval_images % batch_size_real - logging.info("Evaluating %d real images to match batch size %d", - num_eval_images, batch_size_real) - logging.info("Evaluating %d generated images to match batch size %d", - num_gen_images, batch_size_gen) - # Make sure we run the same number of batches, as this is what TFGAN code - # assumes. - assert num_eval_images // batch_size_real == num_gen_images // batch_size_gen - num_batches = num_eval_images // batch_size_real - - # Set up another subgraph for calculating FID from fed images. - feed_gen_features = tf.placeholder( - dtype=tf.float32, shape=[num_gen_images] + - gen_features_tensor.get_shape().as_list()[1:]) - feed_eval_features = tf.placeholder( - dtype=tf.float32, shape=[num_eval_images] + - eval_features_tensor.get_shape().as_list()[1:]) - - # Create the tensor which stores the computed FID. We have extracted the - # features at the point of image generation so classifier_fn=tf.identity. - fid_tensor = tfgan_eval.frechet_classifier_distance( - classifier_fn=tf.identity, - real_images=feed_eval_features, - generated_images=feed_gen_features, - num_batches=num_batches) - - # Define a function which wraps some session.run calls to generate a large - # number of images and compute FID on them. - def eval_fn(session): - """Function which wraps session.run calls to evaluate FID.""" - logging.info("Evaluating.....") - logging.info("Generating images to feed") - eval_features_np = [] - gen_features_np = [] - for _ in range(num_batches): - e, g = session.run([eval_features_tensor, gen_features_tensor]) - eval_features_np.append(e) - gen_features_np.append(g) - - logging.info("Generated images successfully.") - eval_features_np = np.concatenate(eval_features_np) - gen_features_np = np.concatenate(gen_features_np) - - logging.info("Computing FID with generated images...") - fid_result = session.run(fid_tensor, feed_dict={ - feed_eval_features: eval_features_np, - feed_gen_features: gen_features_np}) - - logging.info("Computed FID: %f", fid_result) - return fid_result - - return eval_fn - - -def preprocess_for_inception(images): - """Preprocess images for inception. - - Args: - images: images minibatch. Shape [batch size, width, height, - channels]. Values are in [0..255]. - - Returns: - preprocessed_images - """ - - # Images should have 3 channels. 
- assert images.shape[3].value == 3 - - # tfgan_eval.preprocess_image function takes values in [0, 255] - with tf.control_dependencies([tf.assert_greater_equal(images, 0.0), - tf.assert_less_equal(images, 255.0)]): - images = tf.identity(images) - - preprocessed_images = tf.map_fn( - fn=tfgan_eval.preprocess_image, - elems=images, - back_prop=False - ) - - return preprocessed_images - - -def get_inception_features(inputs, inception_graph, layer_name="pool_3:0"): - """Compose the preprocess_for_inception function with TFGAN run_inception.""" - - preprocessed = preprocess_for_inception(inputs) - return tfgan_eval.run_inception( - preprocessed, - graph_def=inception_graph, - output_tensor=layer_name) - - -def run_inception(images, inception_graph): - preprocessed = tfgan_eval.preprocess_image(images) - logits = tfgan_eval.run_inception(preprocessed, graph_def=inception_graph) - return logits - - -def inception_score_fn(images, num_batches, inception_graph): - return tfgan_eval.classifier_score( - images, num_batches=num_batches, - classifier_fn=functools.partial(run_inception, - inception_graph=inception_graph)) diff --git a/compare_gan/src/fid_score_test.py b/compare_gan/src/fid_score_test.py deleted file mode 100644 index e4cebe1..0000000 --- a/compare_gan/src/fid_score_test.py +++ /dev/null @@ -1,61 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src import fid_score -import tensorflow as tf - - -class FidScoreTest(tf.test.TestCase): - - def _create_fake_inception_graph(self): - """Creates a graph that mocks inception. - - It takes the input, multiplies it by a matrix full of 0.001 values - and returns the result as logits. It makes sure to match the tensor names of - the real inception model. - - Returns: - tf.Graph object with a simple mock inception inside.
- """ - fake_inception = tf.Graph() - with fake_inception.as_default(): - graph_input = tf.placeholder( - tf.float32, shape=[None, 299, 299, 3], name="Mul") - matrix = tf.ones(shape=[299 * 299 * 3, 10]) * 0.001 - output = tf.matmul(tf.layers.flatten(graph_input), matrix) - output = tf.identity(output, name="pool_3") - output = tf.identity(output, name="logits") - return fake_inception - - def test_fid_function(self): - with tf.Graph().as_default(): - real_data = tf.zeros((10, 4, 4, 3)) - gen_data = tf.ones((10, 4, 4, 3)) - inception_graph = self._create_fake_inception_graph() - - eval_fn = fid_score.get_fid_function(real_data, gen_data, 18, 19, "0_255", - inception_graph.as_graph_def()) - with tf.Session() as sess: - sess.run(tf.global_variables_initializer()) - result = eval_fn(sess) - self.assertNear(result, 43.8, 0.1) - - -if __name__ == "__main__": - tf.test.main() diff --git a/compare_gan/src/gan_lib.py b/compare_gan/src/gan_lib.py deleted file mode 100644 index 6bf39d5..0000000 --- a/compare_gan/src/gan_lib.py +++ /dev/null @@ -1,256 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Library used for training various flavors of GANs on various datasets.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import contextlib -import os - -from compare_gan.src import datasets -from compare_gan.src import params -from compare_gan.src.gans import ops -from compare_gan.src.gans.BEGAN import BEGAN -from compare_gan.src.gans.DRAGAN import DRAGAN -from compare_gan.src.gans.GAN import GAN -from compare_gan.src.gans.GAN import GAN_MINMAX -from compare_gan.src.gans.gans_with_penalty import GAN_PENALTY -from compare_gan.src.gans.gans_with_penalty import LSGAN_PENALTY -from compare_gan.src.gans.gans_with_penalty import WGAN_PENALTY -from compare_gan.src.gans.LSGAN import LSGAN -from compare_gan.src.gans.VAE import VAE -from compare_gan.src.gans.WGAN import WGAN -from compare_gan.src.gans.WGAN_GP import WGAN_GP - -import numpy as np -from six.moves import range -import tensorflow as tf - - -MODELS = { - "GAN": GAN, - "GAN_MINMAX": GAN_MINMAX, - "WGAN": WGAN, - "WGAN_GP": WGAN_GP, - "DRAGAN": DRAGAN, - "LSGAN": LSGAN, - "BEGAN": BEGAN, - "VAE": VAE, - "SN_GAN": GAN_PENALTY, - "GAN_PENALTY": GAN_PENALTY, - "LSGAN_PENALTY": LSGAN_PENALTY, - "WGAN_PENALTY": WGAN_PENALTY, -} - -DATASETS = {"mnist": datasets.load_mnist, - "fashion-mnist": datasets.load_fashion_mnist, - "triangles": datasets.load_triangles_and_squares, - "squares": datasets.load_triangles_and_squares, - "cifar10": datasets.load_cifar10, - "celeba": datasets.load_celeba, - "fake": datasets.load_fake, - "celebahq128": datasets.load_celebahq, - "lsun-bedroom": datasets.load_lsun} - -flags = tf.flags -FLAGS = flags.FLAGS -logging = tf.logging - -flags.DEFINE_string("master", "local", - "Estimator only: BNS name of the TensorFlow master to use.") -flags.DEFINE_integer("iterations_per_loop", 
1000, - "TPU only: Number of iterations per TPU training loop.") - -flags.DEFINE_integer("data_reading_num_threads", 4, - "The number of threads used to read the dataset.") -flags.DEFINE_integer("data_reading_buffer_bytes", 128 * 1024, - "The buffer size used to read the dataset.") - - -# Seed for shuffling dataset. -DEFAULT_DATASET_SEED = 547 - - -def load_dataset(dataset_name, - split_name="train", - num_threads=None, - buffer_size=None): - """Reads files and returns a TFDataset object.""" - if not num_threads: - num_threads = FLAGS.data_reading_num_threads - if not buffer_size: - buffer_size = FLAGS.data_reading_buffer_bytes - if split_name not in ["test", "train", "val"]: - raise ValueError("Invalid split name.") - if dataset_name not in DATASETS: - raise ValueError("Dataset %s is not available." % dataset_name) - - return DATASETS[dataset_name](dataset_name, - split_name=split_name, - num_threads=num_threads, - buffer_size=buffer_size) - - -def create_gan(gan_type, dataset, dataset_content, options, - checkpoint_dir, result_dir, gan_log_dir): - """Instantiates a GAN with the requested options.""" - - if gan_type not in MODELS: - raise Exception("[!] Unrecognized GAN type: %s" % gan_type) - - if dataset not in DATASETS: - raise ValueError("Dataset %s is not available." % dataset) - - # We use the same batch size and latent space dimension for all GANs. - parameters = { - "batch_size": 64, - "z_dim": 64, - } - # Get the default parameters for the dataset. - parameters.update(params.GetDatasetParameters(dataset)) - # Get the parameters provided in the argument. - parameters.update(options) - - assert parameters["training_steps"] >= 1, ( - "Number of steps has to be positive.") - assert parameters["save_checkpoint_steps"] >= 1, ( - "The number of steps per eval should be positive") - assert parameters["batch_size"] >= 1, "Batch size has to be positive." - assert parameters["z_dim"] >= 1, ("Number of latent dimensions has to be " - "positive.") - - # Runtime settings for GANs. - runtime_info = collections.namedtuple( - 'RuntimeInfo', ['checkpoint_dir', 'result_dir', 'log_dir']) - - runtime_info.checkpoint_dir = checkpoint_dir - runtime_info.result_dir = result_dir - runtime_info.log_dir = gan_log_dir - - return MODELS[gan_type]( - dataset_content=dataset_content, - parameters=parameters, - runtime_info=runtime_info) - - -@contextlib.contextmanager -def profile_context(tfprofile_dir): - if "enable_tf_profile" in FLAGS and FLAGS.enable_tf_profile: - with tf.contrib.tfprof.ProfileContext( - tfprofile_dir, trace_steps=list(range(100, 200, 1)), dump_steps=[200]): - yield - else: - yield - - -def run_with_options(options, task_workdir, progress_reporter=None, - warm_start_from=None): - """Runs the task with arbitrary options. - - Args: - options: Dictionary with meta and hyper parameters. - task_workdir: Directory to save logs, checkpoints, samples etc. If the - subdirectory "checkpoint" contains checkpoints the method will attempt - to load the latest checkpoint. - progress_reporter: Callback function to report progress (parameters: - step, steps_per_sec, progress, eta_minutes). - warm_start_from: `tf.estimator.WarmStartSettings`. Only supported for - estimator training. - - Raises: - ValueError: For infeasible combinations of options. 
- """ - checkpoint_dir = os.path.join(task_workdir, "checkpoint") - tfprofile_dir = os.path.join(task_workdir, "tfprofile") - result_dir = os.path.join(task_workdir, "result") - gan_log_dir = os.path.join(task_workdir, "logs") - - gan_type = options["gan_type"] - dataset = options["dataset"] - - logging.info("Running tasks with gan_type: %s dataset: %s with parameters %s", - gan_type, dataset, str(options)) - logging.info("Checkpoint dir: %s result_dir: %s gan_log_dir: %s", - checkpoint_dir, result_dir, gan_log_dir) - - ops.check_folder(checkpoint_dir) - ops.check_folder(tfprofile_dir) - ops.check_folder(result_dir) - ops.check_folder(gan_log_dir) - - if "tf_seed" in options: - logging.info("Setting np random seed to %s", options["tf_seed"]) - np.random.seed(options["tf_seed"]) - - # Set the dataset shuffling seed if specified in options. - dataset_seed = DEFAULT_DATASET_SEED - if "dataset_seed" in options: - logging.info("Seeting dataset seed to %d", options["dataset_seed"]) - dataset_seed = options["dataset_seed"] - - dataset_content = load_dataset(dataset, split_name="train") - dataset_content = dataset_content.repeat().shuffle(10000, seed=dataset_seed) - - if options.get("use_estimator", options.get("use_tpu", False)): - # Estimator mode supports CPU, GPU and TPU training. - gan = create_gan( - gan_type=gan_type, - dataset=dataset, - dataset_content=dataset_content, - options=options, - gan_log_dir=gan_log_dir, - result_dir=result_dir, - checkpoint_dir=checkpoint_dir) - config = tf.contrib.tpu.RunConfig( - model_dir=checkpoint_dir, - tf_random_seed=options.get("tf_seed", None), - save_checkpoints_steps=int(options["save_checkpoint_steps"]), - keep_checkpoint_max=gan.max_checkpoints_to_keep, - master=FLAGS.master, - evaluation_master=FLAGS.master, - tpu_config=tf.contrib.tpu.TPUConfig( - iterations_per_loop=FLAGS.iterations_per_loop)) - print(" [*] Training started!") - gan.train_with_estimator(config=config, warm_start_from=warm_start_from) - print(" [*] Training finished!") - else: - if options.get("use_tpu", False): - raise ValueError("TPU experiments must run with use_estimator=True.") - if warm_start_from: - raise ValueError("Warm starting is only supported for estimator.") - with tf.Graph().as_default(): - if "tf_seed" in options: - logging.info("Setting tf random seed to %s", options["tf_seed"]) - tf.set_random_seed(options["tf_seed"]) - # NumPy random seed is already set above. - with profile_context(tfprofile_dir): - config = tf.ConfigProto(allow_soft_placement=True) - with tf.Session(config=config) as sess: - gan = create_gan( - gan_type=gan_type, - dataset=dataset, - dataset_content=dataset_content, - options=options, - checkpoint_dir=checkpoint_dir, - result_dir=result_dir, - gan_log_dir=gan_log_dir) - gan.build_model() - print(" [*] Training started!") - gan.train(sess, progress_reporter) - print(" [*] Training finished!") diff --git a/compare_gan/src/gan_lib_test.py b/compare_gan/src/gan_lib_test.py deleted file mode 100644 index 0648252..0000000 --- a/compare_gan/src/gan_lib_test.py +++ /dev/null @@ -1,135 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for compare_gan.gan_lib.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import random -import string - -from absl import flags -from absl.testing import parameterized - -from compare_gan.src import gan_lib -from compare_gan.src.gans import consts - -import numpy as np -from six.moves import range -import tensorflow as tf - -FLAGS = flags.FLAGS - - -class GanLibTest(parameterized.TestCase, tf.test.TestCase): - - def testLoadingTriangles(self): - with tf.Graph().as_default(): - iterator = gan_lib.load_dataset("triangles").batch( - 32).make_one_shot_iterator().get_next() - with tf.Session() as sess: - image, label = sess.run(iterator) - self.assertEqual(image.shape, (32, 28, 28, 1)) - self.assertEqual(label.shape, (32,)) - self.assertEqual(label[4], 3) - with tf.Graph().as_default(): - iterator = gan_lib.load_dataset( - "triangles", split_name="test").make_one_shot_iterator().get_next() - with tf.Session() as sess: - image, label = sess.run(iterator) - self.assertEqual(image.shape, (28, 28, 1)) - self.assertEqual(label.shape, ()) - with tf.Graph().as_default(): - iterator = gan_lib.load_dataset( - "triangles", split_name="val").make_one_shot_iterator( - ).get_next() - with tf.Session() as sess: - image, label = sess.run(iterator) - self.assertEqual(image.shape, (28, 28, 1)) - self.assertEqual(label.shape, ()) - - def testLoadingMnist(self): - with tf.Graph().as_default(): - dataset = gan_lib.load_dataset("mnist") - iterator = dataset.make_one_shot_iterator().get_next() - with tf.Session() as sess: - image, label = sess.run(iterator) - self.assertEqual(image.shape, (28, 28, 1)) - self.assertEqual(label.shape, ()) - - def trainSingleStep(self, tf_seed): - """Train a GAN for a single training step and return the checkpoint.""" - parameters = { - "tf_seed": tf_seed, - "learning_rate": 0.0002, - "z_dim": 64, - "batch_size": 2, - "training_steps": 1, - "disc_iters": 1, - "save_checkpoint_steps": 5000, - "discriminator_normalization": consts.NO_NORMALIZATION, - "dataset": "fake", - "gan_type": "GAN", - "penalty_type": consts.NO_PENALTY, - "architecture": consts.RESNET_CIFAR, - "lambda": 0.1, - } - random.seed(None) - exp_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(16)) - task_workdir = os.path.join(FLAGS.test_tmpdir, exp_name) - gan_lib.run_with_options(parameters, task_workdir) - ckpt_fn = os.path.join(task_workdir, "checkpoint/{}.model-0".format( - parameters["gan_type"])) - tf.logging.info("ckpt_fn: %s", ckpt_fn) - self.assertTrue(tf.gfile.Exists(ckpt_fn + ".index")) - return tf.train.load_checkpoint(ckpt_fn) - - def testSameTFRandomSeed(self): - # Setting the same tf_seed should give the same initial values at each run. - # In practice longer training runs can still diverge from each other due to - # non-deterministic behavior in certain operations at the hardware level - # (e.g. cuDNN optimizations).
- ckpt_1 = self.trainSingleStep(tf_seed=42) - ckpt_2 = self.trainSingleStep(tf_seed=42) - - for name in ckpt_1.get_variable_to_shape_map(): - self.assertTrue(ckpt_2.has_tensor(name)) - t1 = ckpt_1.get_tensor(name) - t2 = ckpt_2.get_tensor(name) - np.testing.assert_almost_equal(t1, t2) - - @parameterized.named_parameters([ - ('Given', 1, 2), - ('OneNone', 1, None), - ('BothNone', None, None), - ]) - def testDifferentTFRandomSeed(self, seed_1, seed_2): - ckpt_1 = self.trainSingleStep(tf_seed=seed_1) - ckpt_2 = self.trainSingleStep(tf_seed=seed_2) - - diff_counter = 0 - for name in ckpt_1.get_variable_to_shape_map(): - self.assertTrue(ckpt_2.has_tensor(name)) - t1 = ckpt_1.get_tensor(name) - t2 = ckpt_2.get_tensor(name) - if np.abs(t1 - t2).sum() > 0: - diff_counter += 1 - self.assertGreater(diff_counter, 0) - - -if __name__ == "__main__": - tf.test.main() diff --git a/compare_gan/src/gans/BEGAN.py b/compare_gan/src/gans/BEGAN.py deleted file mode 100644 index 2b5fe40..0000000 --- a/compare_gan/src/gans/BEGAN.py +++ /dev/null @@ -1,171 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implementation of the BEGAN algorithm (https://arxiv.org/abs/1703.10717).""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src.gans import consts -from compare_gan.src.gans.abstract_gan import AbstractGAN -from compare_gan.src.gans.ops import batch_norm, linear, conv2d, deconv2d, lrelu - -import tensorflow as tf - - -class BEGAN(AbstractGAN): - """Boundary Equilibrium Generative Adversarial Networks.""" - - def __init__(self, **kwargs): - super(BEGAN, self).__init__("BEGAN", **kwargs) - - self.gamma = self.parameters["gamma"] - self.lambd = self.parameters["lambda"] - - def discriminator(self, x, is_training, reuse=False): - """BEGAN discriminator (auto-encoder). - - This implementation doesn't match the one from the paper, but is similar - to our "standard" discriminator (same 2 conv layers, using lrelu). - However, it still has fewer parameters (1.3M vs 8.5M) because of the huge - linear layer in the standard discriminator. - - Args: - x: input images, shape [bs, h, w, channels] - is_training: boolean, are we in train or eval mode. - reuse: boolean, should params be re-used.
- - Returns: - out: a float in [0, 1] with the discriminator prediction - recon_error: L1 reconstruction error of the auto-encoder - code: the representation (bottleneck layer of the auto-encoder) - """ - height = self.input_height - width = self.input_width - sn = self.discriminator_normalization == consts.SPECTRAL_NORM - with tf.variable_scope("discriminator", reuse=reuse): - # Encoding step (Mapping from [bs, h, w, c] to [bs, 64]) - net = conv2d( - x, 64, 4, 4, 2, 2, name="d_conv1", use_sn=sn) # [bs, h/2, w/2, 64] - net = lrelu(net) - net = conv2d( - net, 128, 4, 4, 2, 2, name="d_conv2", - use_sn=sn) # [bs, h/4, w/4, 128] - net = tf.reshape(net, [self.batch_size, -1]) # [bs, h * w * 8] - code = linear(net, 64, scope="d_fc6", use_sn=sn) # [bs, 64] - if self.discriminator_normalization == consts.BATCH_NORM: - code = batch_norm(code, is_training=is_training, scope="d_bn1") - code = lrelu(code) - - # Decoding step (Mapping from [bs, 64] to [bs, h, w, c]) - net = linear( - code, 128 * (height // 4) * (width // 4), scope="d_fc1", - use_sn=sn) # [bs, h/4 * w/4 * 128] - if self.discriminator_normalization == consts.BATCH_NORM: - net = batch_norm(net, is_training=is_training, scope="d_bn2") - net = lrelu(net) - net = tf.reshape(net, [ - self.batch_size, height // 4, width // 4, 128]) # [bs, h/4, w/4, 128] - net = deconv2d(net, [self.batch_size, height // 2, width // 2, 64], - 4, 4, 2, 2, name="d_deconv1") # [bs, h/2, w/2, 64] - if self.discriminator_normalization == consts.BATCH_NORM: - net = batch_norm(net, is_training=is_training, scope="d_bn3") - net = lrelu(net) - net = deconv2d(net, [self.batch_size, height, width, self.c_dim], - 4, 4, 2, 2, name="d_deconv2") # [bs, h, w, c] - out = tf.nn.sigmoid(net) - - # Reconstruction loss. - recon_error = tf.reduce_mean(tf.abs(out - x)) - return out, recon_error, code - - def build_model(self, is_training=True): - image_dims = [self.input_height, self.input_width, self.c_dim] - batch_size = self.batch_size - - # BEGAN parameter. - self.k = tf.Variable(0., trainable=False) - - # Input images. - self.inputs = tf.placeholder( - tf.float32, [batch_size] + image_dims, name="real_images") - # Noise vector. - self.z = tf.placeholder(tf.float32, [batch_size, self.z_dim], name="z") - - # Discriminator loss for real images. - D_real_img, D_real_err, D_real_code = self.discriminator( - self.inputs, is_training=is_training, reuse=False) - - # Discriminator loss for fake images. - G = self.generator(self.z, is_training=is_training, reuse=False) - D_fake_img, D_fake_err, D_fake_code = self.discriminator( - G, is_training=is_training, reuse=True) - - # Total discriminator loss. - self.d_loss = D_real_err - self.k * D_fake_err - - # Total generator loss. - self.g_loss = D_fake_err - - # Convergence metric. - self.M = D_real_err + tf.abs(self.gamma * D_real_err - D_fake_err) - - # Op for updating k. - self.update_k = self.k.assign(self.k + self.lambd * - (self.gamma * D_real_err - D_fake_err)) - - # Divide trainable variables into a group for D and a group for G. - t_vars = tf.trainable_variables() - d_vars = [var for var in t_vars if "discriminator" in var.name] - g_vars = [var for var in t_vars if "generator" in var.name] - self.check_variables(t_vars, d_vars, g_vars) - - # Define optimization ops.
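The update_k op above implements BEGAN's proportional control rule, k_{t+1} = k_t + lambda * (gamma * L(x) - L(G(z))), with convergence measure M = L(x) + |gamma * L(x) - L(G(z))|. A toy NumPy-style restatement (note the paper additionally clips k to [0, 1], which this implementation leaves out):

def began_update(k, d_real_err, d_fake_err, gamma=0.5, lambd=0.001):
  # Grow k when the auto-encoder does comparatively better on real data.
  k_new = k + lambd * (gamma * d_real_err - d_fake_err)
  m = d_real_err + abs(gamma * d_real_err - d_fake_err)  # convergence metric
  return k_new, m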
- with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): - self.d_optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1, name="d_adam").minimize( - self.d_loss, var_list=d_vars) - self.g_optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1, name="g_adam").minimize( - self.g_loss, var_list=g_vars) - - # Store testing images. - self.fake_images = self.generator(self.z, is_training=False, reuse=True) - - # Setup summaries. - d_loss_real_sum = tf.summary.scalar("d_error_real", D_real_err) - d_loss_fake_sum = tf.summary.scalar("d_error_fake", D_fake_err) - d_loss_sum = tf.summary.scalar("d_loss", self.d_loss) - g_loss_sum = tf.summary.scalar("g_loss", self.g_loss) - M_sum = tf.summary.scalar("M", self.M) - k_sum = tf.summary.scalar("k", self.k) - - self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum]) - self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum]) - self.p_sum = tf.summary.merge([M_sum, k_sum]) - - def after_training_step_hook(self, sess, features, counter): - batch_images = features["images"] - batch_z = features["z_for_disc_step"] - # Update k. - summary_str = sess.run( - [self.update_k, self.p_sum, self.M, self.k], - feed_dict={ - self.inputs: batch_images, - self.z: batch_z - })[1] - # Write summary. - self.writer.add_summary(summary_str, counter) diff --git a/compare_gan/src/gans/DRAGAN.py b/compare_gan/src/gans/DRAGAN.py deleted file mode 100644 index 162aae1..0000000 --- a/compare_gan/src/gans/DRAGAN.py +++ /dev/null @@ -1,122 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implementation of the DRAGAN algorithm (https://arxiv.org/abs/1705.07215).""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src.gans.abstract_gan import AbstractGAN - -import numpy as np -import tensorflow as tf - - -class DRAGAN(AbstractGAN): - """How to Train Your DRAGAN.""" - - def __init__(self, **kwargs): - super(DRAGAN, self).__init__("DRAGAN", **kwargs) - - # Higher value: more stable, but slower convergence. - self.lambd = self.parameters["lambda"] - - def get_perturbed_batch(self, minibatch): - return minibatch + 0.5 * minibatch.std() * np.random.random(minibatch.shape) - - def build_model(self, is_training=True): - image_dims = [self.input_height, self.input_width, self.c_dim] - batch_size = self.batch_size - - # Input images. - self.inputs = tf.placeholder( - tf.float32, [batch_size] + image_dims, name="real_images") - self.inputs_p = tf.placeholder( - tf.float32, [batch_size] + image_dims, name="real_perturbed_images") - - # Noise vector. - self.z = tf.placeholder(tf.float32, [batch_size, self.z_dim], name="z") - - # Discriminator output for real images. - D_real, D_real_logits, _ = self.discriminator( - self.inputs, is_training=is_training, reuse=False) - - # Discriminator output for fake images. 
- G = self.generator(self.z, is_training=is_training, reuse=False) - D_fake, D_fake_logits, _ = self.discriminator( - G, is_training=is_training, reuse=True) - - # Loss on real and fake data. - d_loss_real = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( - logits=D_real_logits, labels=tf.ones_like(D_real))) - d_loss_fake = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( - logits=D_fake_logits, labels=tf.zeros_like(D_fake))) - - # Total discriminator loss. - self.d_loss = d_loss_real + d_loss_fake - - # Total generator loss. - self.g_loss = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( - logits=D_fake_logits, labels=tf.ones_like(D_fake))) - - # Gradient penalty. - alpha = tf.random_uniform(shape=[batch_size, 1, 1, 1], minval=0., maxval=1.) - differences = self.inputs_p - self.inputs - interpolates = self.inputs + (alpha * differences) - D_inter, _, _ = self.discriminator( - interpolates, is_training=is_training, reuse=True) - gradients = tf.gradients(D_inter, [interpolates])[0] - slopes = tf.sqrt(0.0001 + tf.reduce_sum( - tf.square(gradients), reduction_indices=[1, 2, 3])) - gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.)) - self.d_loss += self.lambd * gradient_penalty - - # Divide trainable variables into a group for D and group for G. - t_vars = tf.trainable_variables() - d_vars = [var for var in t_vars if "discriminator" in var.name] - g_vars = [var for var in t_vars if "generator" in var.name] - self.check_variables(t_vars, d_vars, g_vars) - - # Define optimization ops. - with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): - self.d_optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1, name="d_adam").minimize( - self.d_loss, var_list=d_vars) - self.g_optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1, name="g_adam").minimize( - self.g_loss, var_list=g_vars) - - # Store testing images. - self.fake_images = self.generator(self.z, is_training=False, reuse=True) - - # Setup summaries. - d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real) - d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake) - d_loss_sum = tf.summary.scalar("d_loss", self.d_loss) - g_loss_sum = tf.summary.scalar("g_loss", self.g_loss) - - self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum]) - self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum]) - - def discriminator_feed_dict(self, features, labels): - del labels - return { - self.inputs: features["images"], - self.inputs_p: self.get_perturbed_batch(features["images"]), - self.z: features["z_for_disc_step"], - } diff --git a/compare_gan/src/gans/GAN.py b/compare_gan/src/gans/GAN.py deleted file mode 100644 index fe8e6c3..0000000 --- a/compare_gan/src/gans/GAN.py +++ /dev/null @@ -1,121 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Implementation of the MM-GAN and NS-GAN (https://arxiv.org/abs/1406.2661).""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src.gans.abstract_gan import AbstractGAN - -import tensorflow as tf - - -class ClassicGAN(AbstractGAN): - """Original Generative Adverserial Networks.""" - - def __init__(self, model_name, **kwargs): - super(ClassicGAN, self).__init__(model_name, **kwargs) - - def build_model(self, is_training=True): - image_dims = [self.input_height, self.input_width, self.c_dim] - batch_size = self.batch_size - - # Input images. - self.inputs = tf.placeholder( - tf.float32, [batch_size] + image_dims, name="real_images") - - # Noise vector. - self.z = tf.placeholder(tf.float32, [batch_size, self.z_dim], name="z") - - # Discriminator output for real images. - D_real, D_real_logits, _ = self.discriminator( - self.inputs, is_training=is_training, reuse=False) - - # Discriminator output for fake images. - G = self.generator(self.z, is_training=is_training, reuse=False) - D_fake, D_fake_logits, _ = self.discriminator( - G, is_training=is_training, reuse=True) - - # Loss on real and fake data. - d_loss_real = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( - logits=D_real_logits, labels=tf.ones_like(D_real))) - d_loss_fake = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( - logits=D_fake_logits, labels=tf.zeros_like(D_fake))) - - # Total discriminator loss. - self.d_loss = d_loss_real + d_loss_fake - - # Total generator loss. - if self.model_name == "GAN": - self.g_loss = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( - logits=D_fake_logits, labels=tf.ones_like(D_fake))) - elif self.model_name == "GAN_MINMAX": - self.g_loss = -d_loss_fake - else: - assert False, "Unknown GAN model_name: %s" % self.model_name - - # Divide trainable variables into a group for D and group for G. - t_vars = tf.trainable_variables() - d_vars = [var for var in t_vars if "discriminator" in var.name] - g_vars = [var for var in t_vars if "generator" in var.name] - self.check_variables(t_vars, d_vars, g_vars) - - # Define optimization ops. - with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): - self.d_optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1, name="d_adam").minimize( - self.d_loss, var_list=d_vars) - self.g_optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1, name="g_adam").minimize( - self.g_loss, var_list=g_vars) - - # Store testing images. - self.fake_images = self.generator(self.z, is_training=False, reuse=True) - - # Setup summaries. - d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real) - d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake) - d_loss_sum = tf.summary.scalar("d_loss", self.d_loss) - g_loss_sum = tf.summary.scalar("g_loss", self.g_loss) - - self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum]) - self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum]) - - -class GAN(ClassicGAN): - """Non-saturating Generative Adverserial Networks. - - The loss for the generator is computed using the log trick. That is, - G_loss = -log(D(fake_images)) [maximizes log(D)] - """ - - def __init__(self, **kwargs): - super(GAN, self).__init__(model_name="GAN", **kwargs) - - -class GAN_MINMAX(ClassicGAN): - """Generative Adverserial Networks with the standard min-max loss. 
- - The loss for the generator is computed as: - G_loss = log(1 - D(fake_images)) [minimizes log(1 - D)] - """ - - def __init__(self, **kwargs): - super(GAN_MINMAX, self).__init__(model_name="GAN_MINMAX", **kwargs) diff --git a/compare_gan/src/gans/LSGAN.py b/compare_gan/src/gans/LSGAN.py deleted file mode 100644 index a90e03b..0000000 --- a/compare_gan/src/gans/LSGAN.py +++ /dev/null @@ -1,89 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implementation of the LS-GAN algorithm (https://arxiv.org/abs/1611.04076).""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src.gans.abstract_gan import AbstractGAN - -import tensorflow as tf - - -class LSGAN(AbstractGAN): - """Least Squares Generative Adversarial Networks.""" - - def __init__(self, **kwargs): - super(LSGAN, self).__init__("LSGAN", **kwargs) - - def build_model(self, is_training=True): - image_dims = [self.input_height, self.input_width, self.c_dim] - batch_size = self.batch_size - - # Input images. - self.inputs = tf.placeholder( - tf.float32, [batch_size] + image_dims, name="real_images") - - # Noise vector. - self.z = tf.placeholder(tf.float32, [batch_size, self.z_dim], name="z") - - # Discriminator output for real images. - D_real, D_real_logits, _ = self.discriminator( - self.inputs, is_training=is_training, reuse=False) - - # Discriminator output for fake images. - G = self.generator(self.z, is_training=is_training, reuse=False) - D_fake, D_fake_logits, _ = self.discriminator( - G, is_training=is_training, reuse=True) - - # Total discriminator loss. - D_real = tf.squeeze(D_real) - D_fake = tf.squeeze(D_fake) - assert D_real.get_shape() == (batch_size,) - assert D_fake.get_shape() == (batch_size,) - d_loss_real = tf.reduce_mean(tf.square(D_real - 1.0)) - d_loss_fake = tf.reduce_mean(tf.square(D_fake)) - self.d_loss = 0.5 * (d_loss_real + d_loss_fake) - - # Total generator loss. - self.g_loss = 0.5 * tf.reduce_mean(tf.square(D_fake - 1)) - - # Divide trainable variables into a group for D and a group for G. - t_vars = tf.trainable_variables() - d_vars = [var for var in t_vars if "discriminator" in var.name] - g_vars = [var for var in t_vars if "generator" in var.name] - self.check_variables(t_vars, d_vars, g_vars) - - # Define optimization ops. - with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): - self.d_optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1, name="d_adam").minimize( - self.d_loss, var_list=d_vars) - self.g_optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1, name="g_adam").minimize( - self.g_loss, var_list=g_vars) - - # Store testing images. - self.fake_images = self.generator(self.z, is_training=False, reuse=True) - - # Setup summaries.
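Returning to the two ClassicGAN variants above: the generator objectives differ only in which sigmoid cross-entropy they reuse. A side-by-side sketch with hypothetical helper names:

import tensorflow as tf


def g_loss_ns(d_fake_logits):
  # Non-saturating "log trick": minimize -log D(G(z)).
  return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
      logits=d_fake_logits, labels=tf.ones_like(d_fake_logits)))


def g_loss_minmax(d_fake_logits):
  # Original min-max objective: minimize log(1 - D(G(z))), i.e. -d_loss_fake.
  return -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
      logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits)))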
- d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real) - d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake) - d_loss_sum = tf.summary.scalar("d_loss", self.d_loss) - g_loss_sum = tf.summary.scalar("g_loss", self.g_loss) - - self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum]) - self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum]) diff --git a/compare_gan/src/gans/VAE.py b/compare_gan/src/gans/VAE.py deleted file mode 100644 index 5a3db05..0000000 --- a/compare_gan/src/gans/VAE.py +++ /dev/null @@ -1,154 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implementation of the VAE algorithm (https://arxiv.org/abs/1312.6114).""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src.gans import consts -from compare_gan.src.gans.abstract_gan import AbstractGAN -from compare_gan.src.gans.ops import lrelu, conv2d, deconv2d, batch_norm, linear, gaussian - -import tensorflow as tf - - -class VAE(AbstractGAN): - """Vanilla Variational Autoencoder.""" - - def __init__(self, **kwargs): - super(VAE, self).__init__("VAE", **kwargs) - - def encoder(self, x, is_training, reuse=False): - """Implements the Gaussian Encoder.""" - - sn = self.discriminator_normalization == consts.SPECTRAL_NORM - with tf.variable_scope("encoder", reuse=reuse): - net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name="en_conv1", use_sn=sn)) - net = conv2d(net, 128, 4, 4, 2, 2, name="en_conv2", use_sn=sn) - if self.discriminator_normalization == consts.BATCH_NORM: - net = batch_norm(net, is_training=is_training, scope="en_bn2") - net = lrelu(net) - net = tf.reshape(net, [self.batch_size, -1]) - net = linear(net, 1024, scope="en_fc3", use_sn=sn) - if self.discriminator_normalization == consts.BATCH_NORM: - net = batch_norm(net, is_training=is_training, scope="en_bn3") - net = lrelu(net) - - gaussian_params = linear(net, 2 * self.z_dim, scope="en_fc4", use_sn=sn) - mean = gaussian_params[:, :self.z_dim] - stddev = 1e-6 + tf.nn.softplus(gaussian_params[:, self.z_dim:]) - return mean, stddev - - def decoder(self, z, is_training, reuse=False): - """Implements the Bernoulli decoder.""" - height = self.input_height - width = self.input_width - with tf.variable_scope("decoder", reuse=reuse): - net = tf.nn.relu( - batch_norm( - linear(z, 1024, scope="de_fc1"), - is_training=is_training, - scope="de_bn1")) - net = tf.nn.relu( - batch_norm( - linear(net, 128 * (height // 4) * (width // 4), scope="de_fc2"), - is_training=is_training, - scope="de_bn2")) - net = tf.reshape(net, [self.batch_size, height // 4, width // 4, 128]) - net = tf.nn.relu( - batch_norm( - deconv2d( - net, [self.batch_size, height // 2, width // 2, 64], - 4, 4, 2, 2, name="de_dc3"), - is_training=is_training, scope="de_bn3")) - out = tf.nn.sigmoid( - deconv2d(net, [self.batch_size, height, width, self.c_dim], - 4, 4, 2, 2, name="de_dc4")) - return out - - def build_model(self, is_training=True): - 
image_dims = [self.input_height, self.input_width, self.c_dim] - batch_size = self.batch_size - - # Input images. - self.inputs = tf.placeholder( - tf.float32, [batch_size] + image_dims, name="real_images") - - # Noise vector. - self.z = tf.placeholder(tf.float32, [batch_size, self.z_dim], name="z") - - # Encoding. - self.mu, sigma = self.encoder( - self.inputs, is_training=is_training, reuse=False) - - # Sampling using the re-parameterization trick. - z = self.mu + sigma * tf.random_normal( - tf.shape(self.mu), 0, 1, dtype=tf.float32) - - # Decoding. - out = self.decoder(z, is_training=is_training, reuse=False) - self.out = tf.clip_by_value(out, 1e-8, 1 - 1e-8) - - # Loss function. - marginal_likelihood = tf.reduce_sum( - self.inputs * tf.log(self.out) + - (1 - self.inputs) * tf.log(1 - self.out), [1, 2]) - kl_divergence = 0.5 * tf.reduce_sum( - tf.square(self.mu) + tf.square(sigma) - - tf.log(1e-8 + tf.square(sigma)) - 1, [1]) - - self.neg_loglikelihood = -tf.reduce_mean(marginal_likelihood) - self.kl_divergence = tf.reduce_mean(kl_divergence) - - self.loss = self.neg_loglikelihood + self.kl_divergence - - # Define optimization ops. - t_vars = tf.trainable_variables() - with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): - self.optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1).minimize( - self.loss, var_list=t_vars) - # Store testing images. - self.fake_images = self.decoder(self.z, is_training=False, reuse=True) - - # Setup summaries. - nll_sum = tf.summary.scalar("nll", self.neg_loglikelihood) - kl_sum = tf.summary.scalar("kl", self.kl_divergence) - loss_sum = tf.summary.scalar("loss", self.loss) - - self.merged_summary_op = tf.summary.merge([nll_sum, kl_sum, loss_sum]) - - def z_generator(self, batch_size, z_dim): - return gaussian(batch_size, z_dim) - - def z_tf_generator(self, batch_size, z_dim, name=None): - """Returns the z-generator, as a TensorFlow op.""" - return tf.random_normal((batch_size, z_dim), name=name) - - def run_single_train_step(self, batch_images, batch_z, counter, g_loss, sess): - # Update the autoencoder. - _, summary_str, _, nll_loss, kl_loss = sess.run([ - self.optim, self.merged_summary_op, self.loss, - self.neg_loglikelihood, self.kl_divergence], feed_dict={ - self.inputs: batch_images, self.z: batch_z - }) - - # Write summary. - self.writer.add_summary(summary_str, counter) - return kl_loss, nll_loss - - def visualize_results(self, step, sess): - super(VAE, self).visualize_results(step, sess, z_distribution=gaussian) diff --git a/compare_gan/src/gans/WGAN.py b/compare_gan/src/gans/WGAN.py deleted file mode 100644 index aa6e4a3..0000000 --- a/compare_gan/src/gans/WGAN.py +++ /dev/null @@ -1,106 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
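The kl_divergence term in the VAE above is the closed-form KL between the encoder's diagonal Gaussian and the unit Gaussian prior. Per example, in NumPy:

import numpy as np


def gaussian_kl(mu, sigma):
  # KL(N(mu, diag(sigma^2)) || N(0, I)), summed over latent dimensions.
  return 0.5 * np.sum(
      np.square(mu) + np.square(sigma) - np.log(1e-8 + np.square(sigma)) - 1.0,
      axis=1)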
- -"""Implementation of the WGAN algorithm (https://arxiv.org/abs/1701.07875).""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src.gans.abstract_gan import AbstractGAN - -import tensorflow as tf - - -class WGAN(AbstractGAN): - """Wasserstein GAN.""" - - def __init__(self, **kwargs): - super(WGAN, self).__init__("WGAN", **kwargs) - - self.clip = self.parameters["weight_clipping"] - # If the optimizer wasn't specified, use Adam to be consistent with - # other GANs. - self.optimizer = self.parameters.get("optimizer", "adam") - - def get_optimizer(self, name_prefix): - if self.optimizer == "adam": - print("Using Adam optimizer.") - return tf.train.AdamOptimizer( - self.learning_rate, - beta1=self.beta1, - name=name_prefix + self.optimizer) - elif self.optimizer == "rmsprop": - print("Using RMSProp optimizer.") - return tf.train.RMSPropOptimizer( - self.learning_rate, name=name_prefix + self.optimizer) - else: - raise ValueError("Unknown optimizer: %s" % self.optimizer) - - def build_model(self, is_training=True): - image_dims = [self.input_height, self.input_width, self.c_dim] - batch_size = self.batch_size - - # Input images. - self.inputs = tf.placeholder( - tf.float32, [batch_size] + image_dims, name="real_images") - - # Noise vector. - self.z = tf.placeholder(tf.float32, [batch_size, self.z_dim], name="z") - - # Discriminator output for real images. - _, D_real_logits, _ = self.discriminator( - self.inputs, is_training=is_training, reuse=False) - - # Discriminator output for fake images. - G = self.generator(self.z, is_training=is_training, reuse=False) - _, D_fake_logits, _ = self.discriminator( - G, is_training=is_training, reuse=True) - - # Total discriminator loss. - d_loss_real = -tf.reduce_mean(D_real_logits) - d_loss_fake = tf.reduce_mean(D_fake_logits) - self.d_loss = d_loss_real + d_loss_fake - - # Total generator loss. - self.g_loss = -d_loss_fake - - # Divide trainable variables into a group for D and group for G. - t_vars = tf.trainable_variables() - d_vars = [var for var in t_vars if "discriminator" in var.name] - g_vars = [var for var in t_vars if "generator" in var.name] - self.check_variables(t_vars, d_vars, g_vars) - - # Define optimization ops. - with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): - self.d_optim = self.get_optimizer("d_").minimize(self.d_loss, var_list=d_vars) - self.g_optim = self.get_optimizer("g_").minimize(self.g_loss, var_list=g_vars) - - # Weight clipping. - self.clip_D = [ - p.assign(tf.clip_by_value(p, -self.clip, self.clip)) for p in d_vars] - - self.d_optim = tf.group(*[self.d_optim, self.clip_D]) - - # Store testing images. - self.fake_images = self.generator(self.z, is_training=False, reuse=True) - - # Setup summaries. - d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real) - d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake) - d_loss_sum = tf.summary.scalar("d_loss", self.d_loss) - g_loss_sum = tf.summary.scalar("g_loss", self.g_loss) - - self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum]) - self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum]) diff --git a/compare_gan/src/gans/WGAN_GP.py b/compare_gan/src/gans/WGAN_GP.py deleted file mode 100644 index 696e387..0000000 --- a/compare_gan/src/gans/WGAN_GP.py +++ /dev/null @@ -1,105 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implementation of WGAN-GP algorithm (https://arxiv.org/abs/1704.00028).""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src.gans.abstract_gan import AbstractGAN - -import tensorflow as tf - - -class WGAN_GP(AbstractGAN): - """Improved Training of Wasserstein GANs.""" - - def __init__(self, **kwargs): - super(WGAN_GP, self).__init__("WGAN_GP", **kwargs) - - # Higher value: more stable, but slower convergence. - self.lambd = self.parameters["lambda"] - - def build_model(self, is_training=True): - image_dims = [self.input_height, self.input_width, self.c_dim] - batch_size = self.batch_size - - # Input images. - self.inputs = tf.placeholder( - tf.float32, [batch_size] + image_dims, name="real_images") - - # Noise vector. - self.z = tf.placeholder(tf.float32, [batch_size, self.z_dim], name="z") - - # Discriminator output for real images. - _, D_real_logits, _ = self.discriminator( - self.inputs, is_training=is_training, reuse=False) - - # Discriminator output for fake images. - G = self.generator(self.z, is_training=is_training, reuse=False) - _, D_fake_logits, _ = self.discriminator( - G, is_training=is_training, reuse=True) - - # Loss on real and fake data. - d_loss_real = -tf.reduce_mean(D_real_logits) - d_loss_fake = tf.reduce_mean(D_fake_logits) - - # Total discriminator loss. - self.d_loss = d_loss_real + d_loss_fake - - # Generator loss. - self.g_loss = -d_loss_fake - - # Gradient penalty. - alpha = tf.random_uniform(shape=[batch_size, 1, 1, 1], minval=0., maxval=1.) - differences = G - self.inputs - interpolates = self.inputs + (alpha * differences) - _, D_inter_logits, _ = self.discriminator( - interpolates, is_training=is_training, reuse=True) - gradients = tf.gradients(D_inter_logits, [interpolates])[0] - slopes = tf.sqrt(0.0001 + tf.reduce_sum( - tf.square(gradients), reduction_indices=[1, 2, 3])) - gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.)) - self.d_loss += self.lambd * gradient_penalty - - # Divide trainable variables into a group for D and group for G. - t_vars = tf.trainable_variables() - d_vars = [var for var in t_vars if "discriminator" in var.name] - g_vars = [var for var in t_vars if "generator" in var.name] - self.check_variables(t_vars, d_vars, g_vars) - - # Define optimization ops. - with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): - self.d_optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1, name="d_adam").minimize( - self.d_loss, var_list=d_vars) - self.g_optim = tf.train.AdamOptimizer( - self.learning_rate, beta1=self.beta1, name="g_adam").minimize( - self.g_loss, var_list=g_vars) - - # Store testing images. - self.fake_images = self.generator(self.z, is_training=False, reuse=True) - - # Setup summaries. 
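WGAN-GP's penalty above is structurally identical to DRAGAN's earlier in this patch; the only difference is the second endpoint of the interpolation (generator samples here, a noise-perturbed real batch in DRAGAN). A unified sketch of that shared skeleton (hypothetical helper, not part of either deleted class):

import tensorflow as tf


def gradient_penalty(disc_logits_fn, x_real, x_other, batch_size):
  # x_other: G(z) for WGAN-GP, a perturbed real batch for DRAGAN.
  alpha = tf.random_uniform([batch_size, 1, 1, 1], minval=0., maxval=1.)
  interpolates = x_real + alpha * (x_other - x_real)
  logits = disc_logits_fn(interpolates)
  grads = tf.gradients(logits, [interpolates])[0]
  slopes = tf.sqrt(1e-4 + tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
  return tf.reduce_mean(tf.square(slopes - 1.))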
- d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real) - d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake) - d_loss_sum = tf.summary.scalar("d_loss", self.d_loss) - g_loss_sum = tf.summary.scalar("g_loss", self.g_loss) - - self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum]) - self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum]) - - def resample_before_generator(self): - return True diff --git a/compare_gan/src/gans/ablation_resnet_architecture.py b/compare_gan/src/gans/ablation_resnet_architecture.py deleted file mode 100644 index e830780..0000000 --- a/compare_gan/src/gans/ablation_resnet_architecture.py +++ /dev/null @@ -1,318 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This file is only used for ablation study for Resnet. - -Don't use this implementation in other experiments. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from math import log - -from compare_gan.src.gans import consts -from compare_gan.src.gans import ops - -import numpy as np -from six.moves import range -import tensorflow as tf - -slim = tf.contrib.slim -tfgan = tf.contrib.gan - - -def _validate_image_inputs(inputs, validate_power2=True): - inputs.get_shape().assert_has_rank(4) - inputs.get_shape()[1:3].assert_is_fully_defined() - if inputs.get_shape()[1] != inputs.get_shape()[2]: - raise ValueError("Input tensor does not have equal width and height: ", - inputs.get_shape()[1:3]) - width = inputs.get_shape().as_list()[1] - if validate_power2 and log(width, 2) != int(log(width, 2)): - raise ValueError("Input tensor `width` is not a power of 2: ", width) - - -def batch_norm_resnet(input_, is_training, scope, epsilon=1e-5): - return tf.contrib.layers.batch_norm( - input_, - decay=0.9, - updates_collections=None, - epsilon=epsilon, - scale=True, - fused=False, # Interesting. - is_training=is_training, - scope=scope) - - -# From https://github.com/tensorflow/tensorflow/issues/2169 -def unpool(value, name="unpool"): - """Unpooling operation. 
- - N-dimensional version of the unpooling operation from - https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf - Taken from: https://github.com/tensorflow/tensorflow/issues/2169 - - Args: - value: a Tensor of shape [b, d0, d1, ..., dn, ch] - name: name of the op - - Returns: - A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch] - """ - with tf.name_scope(name) as scope: - sh = value.get_shape().as_list() - dim = len(sh[1:-1]) - out = (tf.reshape(value, [-1] + sh[-dim:])) - for i in range(dim, 0, -1): - out = tf.concat([out, tf.zeros_like(out)], i) - out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]] - out = tf.reshape(out, out_size, name=scope) - return out - - -def get_conv(inputs, in_channels, out_channels, scale, suffix, use_sn): - """Return convolution for the resnet block.""" - if inputs.get_shape().as_list()[-1] != in_channels: - raise ValueError("Unexpected number of input channels.") - - if scale == "up": - output = unpool(inputs) - output = ops.conv2d( - output, output_dim=out_channels, k_h=3, k_w=3, - d_h=1, d_w=1, name="up_%s" % suffix, use_sn=use_sn) - return output - elif scale == "none": - return ops.conv2d( - inputs, output_dim=out_channels, k_h=3, k_w=3, - d_h=1, d_w=1, name="same_%s" % suffix, use_sn=use_sn) - elif scale == "down": - output = ops.conv2d( - inputs, output_dim=out_channels, k_h=3, k_w=3, - d_h=1, d_w=1, name="down_%s" % suffix, use_sn=use_sn) - output = tf.nn.pool( - output, [2, 2], "AVG", "SAME", strides=[2, 2], name="pool_%s" % suffix) - return output - return None # Should not happen! - - -def resnet_block(inputs, in_channels, out_channels, scale, - block_scope, is_training, reuse, discriminator_normalization, - is_gen_block): - assert scale in ["up", "down", "none"] - if inputs.get_shape().as_list()[-1] != in_channels: - raise ValueError("Unexpected number of input channels.") - - # In SN paper, if they upscale in generator they do this in the first conv. - # For discriminator downsampling happens after second conv. - if is_gen_block: - # Generator block - scale1 = scale # "up" or "none" - scale2 = "none" - else: - # Discriminator block. - scale1 = "none" - scale2 = scale # "down" or "none" - - print ("resnet_block, in=%d out=%d, scale=%s, scope=%s normalizer=%s" % ( - in_channels, out_channels, scale, block_scope, - discriminator_normalization)) - print ("INPUTS: ", inputs.get_shape()) - with tf.variable_scope(block_scope, values=[inputs], reuse=reuse): - output = inputs - use_sn = discriminator_normalization == consts.SPECTRAL_NORM - - # Define the skip connection, ensure 'conv' is in the suffix, otherwise it - # will not be regularized. - - shortcut = get_conv( - output, in_channels, out_channels, scale, - suffix="conv_shortcut", use_sn=use_sn) - print ("SHORTCUT: ", shortcut.get_shape()) - - # Apply batch norm in discriminator only if enabled. - if is_gen_block or discriminator_normalization == consts.BATCH_NORM: - output = batch_norm_resnet(output, is_training=is_training, scope="bn1") - elif discriminator_normalization == consts.LAYER_NORM: - output = ops.layer_norm(output, is_training=is_training, scope="ln1") - - output = tf.nn.relu(output) - output = get_conv( - output, in_channels, out_channels, scale1, - suffix="conv1", use_sn=use_sn) - print ("OUTPUT CONV1: ", output.get_shape()) - - # Apply batch norm in discriminator only if enabled. 
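The unpool helper above doubles every spatial dimension by interleaving zeros rather than duplicating values. A quick shape check, assuming unpool as defined above is in scope:

import tensorflow as tf

x = tf.ones([2, 4, 4, 8])
y = unpool(x)
assert y.get_shape().as_list() == [2, 8, 8, 8]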
- if is_gen_block or discriminator_normalization == consts.BATCH_NORM: - output = batch_norm_resnet(output, is_training=is_training, scope="bn2") - elif discriminator_normalization == consts.LAYER_NORM: - output = ops.layer_norm(output, is_training=is_training, scope="ln2") - - output = tf.nn.relu(output) - output = get_conv( - output, out_channels, out_channels, scale2, - suffix="conv2", use_sn=use_sn) - print ("OUTPUT CONV2: ", output.get_shape()) - - # Combine skip-connection with the convolved part. - output += shortcut - - return output - - -def generator_block(inputs, in_channels, out_channels, scale, - block_scope, is_training, reuse): - assert scale in ["up", "none"] - return resnet_block(inputs, in_channels, out_channels, scale, - block_scope, is_training, reuse, - discriminator_normalization=consts.NO_NORMALIZATION, - is_gen_block=True) - - -def discriminator_block(inputs, in_channels, out_channels, scale, - block_scope, is_training, reuse, - discriminator_normalization): - assert scale in ["down", "none"] - return resnet_block(inputs, in_channels, out_channels, scale, - block_scope, is_training, reuse, - discriminator_normalization, is_gen_block=False) - - -# Generates resolution 128x128 -def resnet5_generator(noise, - is_training, - reuse=None, - colors=3, - output_shape=128, - unused_ablation_type=""): - # Input is a noise tensor of shape [bs, z_dim] - assert len(noise.get_shape().as_list()) == 2 - - # Calculate / define a few numbers. - batch_size = noise.get_shape().as_list()[0] - # Each block upscales by a factor of 2: - seed_size = 4 - # We want the last block to have 64 channels: - ch = 64 - - with tf.variable_scope("generator", reuse=reuse): - # Map noise to the actual seed. - output = ops.linear(noise, ch * 8 * seed_size * seed_size, scope="fc_noise") - - # Reshape the seed to be a rank-4 Tensor. - output = tf.reshape( - output, [batch_size, seed_size, seed_size, ch * 8], name="fc_reshaped") - - # Magic in/out channel numbers copied from SN paper. - magic = [(8, 8), (8, 4), (4, 4), (4, 2), (2, 1)] - up_layers = np.log2(float(output_shape) / seed_size) - assert up_layers.is_integer(), "log2(%d/%d) must be an integer" % ( - output_shape, seed_size) - assert up_layers <= 5 and up_layers >= 0, "Invalid output_shape %d" % ( - output_shape) - up_layers = int(up_layers) - for block_idx in range(5): - block_scope = "B%d" % (block_idx + 1) - in_channels = ch * magic[block_idx][0] - out_channels = ch * magic[block_idx][1] - print("Resnet5, block %d in=%d out=%d" % (block_idx, in_channels, - out_channels)) - if block_idx < up_layers: - scale = "up" - else: - scale = "none" - output = generator_block(output, in_channels=in_channels, - out_channels=out_channels, - scale=scale, block_scope=block_scope, - is_training=is_training, reuse=reuse) - - # Final processing of the output. - output = batch_norm_resnet(output, is_training=is_training, - scope="final_norm") - output = tf.nn.relu(output) - output = ops.conv2d( - output, output_dim=colors, k_h=3, k_w=3, d_h=1, d_w=1, - name="final_conv") - output = tf.nn.sigmoid(output) - - print("Generator output shape: ", output) - return output - - -def resnet5_discriminator(inputs, - is_training, - discriminator_normalization, - reuse=None, - unused_ablation_type=""): - """ResNet style discriminator. - - Construct discriminator network from inputs to the final endpoint. - - Args: - inputs: A tensor of size [batch_size, height, width, channels]. Must be - floating point. - is_training: Is the model currently being trained. 
-    discriminator_normalization: which type of normalization to apply.
-    reuse: Whether or not the network variables should be reused. `scope`
-      must be given to be reused.
-    unused_ablation_type: str, which type of ablation to apply.
-
-  Returns:
-    out: The prediction of the discriminator (in [0, 1]). Shape: [bs, 1]
-    out_logit: The pre-sigmoid activations for discriminating
-      real/generated, a tensor of size [batch_size, 1]
-
-  Raises:
-    ValueError: If the input image shape is not 4-dimensional, if the spatial
-      dimensions aren't defined at graph construction time, if the spatial
-      dimensions aren't square, or if the spatial dimensions aren't a power of
-      two.
-  """
-
-  _validate_image_inputs(inputs)
-  colors = inputs.get_shape().as_list()[-1]
-  assert colors in [1, 3]
-
-  ch = 64
-  with tf.variable_scope("discriminator", values=[inputs], reuse=reuse):
-    output = discriminator_block(
-        inputs, in_channels=colors, out_channels=ch,
-        scale="down", block_scope="B0", is_training=is_training, reuse=reuse,
-        discriminator_normalization=discriminator_normalization)
-
-    # Magic in/out channel numbers copied from SN paper.
-    magic = [(1, 2), (2, 4), (4, 4), (4, 8), (8, 8)]
-    for block_idx in range(5):
-      block_scope = "B%d" % (block_idx + 1)
-      in_channels = ch * magic[block_idx][0]
-      out_channels = ch * magic[block_idx][1]
-      print ("Resnet5 disc, block %d in=%d out=%d" % (
-          block_idx, in_channels, out_channels))
-      output = discriminator_block(
-          output, in_channels=in_channels, out_channels=out_channels,
-          scale="down", block_scope=block_scope, is_training=is_training,
-          reuse=reuse, discriminator_normalization=discriminator_normalization)
-
-    # Final part
-    output = tf.nn.relu(output)
-    pre_logits = tf.reduce_mean(output, axis=[1, 2])
-
-    use_sn = discriminator_normalization == consts.SPECTRAL_NORM
-    out_logit = ops.linear(pre_logits, 1, scope="disc_final_fc", use_sn=use_sn)
-    out = tf.nn.sigmoid(out_logit)
-    return out, out_logit, None
-
-
diff --git a/compare_gan/src/gans/abstract_gan.py b/compare_gan/src/gans/abstract_gan.py
deleted file mode 100644
index a482dd8..0000000
--- a/compare_gan/src/gans/abstract_gan.py
+++ /dev/null
@@ -1,453 +0,0 @@
-# coding=utf-8
-# Copyright 2018 Google LLC & Hwalsuk Lee.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
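# For orientation, a sketch of the schedule produced by the "magic"
# multipliers in the ResNet5 discriminator deleted above (assumptions:
# 128x128 inputs as produced by the matching generator, ch=64, and the B0
# block first downsampling to 64x64):
ch = 64
magic = [(1, 2), (2, 4), (4, 4), (4, 8), (8, 8)]
resolution = 64  # after B0: 128x128 -> 64x64 at 64 channels
for block_idx, (m_in, m_out) in enumerate(magic):
  resolution //= 2
  print("B%d: %d -> %d channels at %dx%d" %
        (block_idx + 1, ch * m_in, ch * m_out, resolution, resolution))
# B1: 64 -> 128 at 32x32, ..., B5: 512 -> 512 at 2x2, followed by global
# average pooling and a single (optionally spectrally normalized) logit.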
- -"""Base class for all GANs.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from contextlib import contextmanager -import os -import time - -from compare_gan.src.gans import consts -from compare_gan.src.gans import ops -from compare_gan.src.gans.ops import lrelu, batch_norm, linear, conv2d, deconv2d - -import numpy as np -from six.moves import range -import tensorflow as tf - -flags = tf.flags -FLAGS = flags.FLAGS - -PREFETCH_NUM_BATCHES = 32 - - -class AbstractGAN(object): - """Base class for all GANs.""" - - def __init__(self, model_name, dataset_content, parameters, runtime_info): - super(AbstractGAN, self).__init__() - self.model_name = model_name - self.parameters = parameters - self.use_tpu = parameters.get("use_tpu", False) - - # Initialize training-specific parameters. - self.training_steps = int(parameters["training_steps"]) - self.save_checkpoint_steps = int( - parameters["save_checkpoint_steps"]) - self.batch_size = parameters["batch_size"] - self.learning_rate = parameters["learning_rate"] - self.beta1 = parameters.get("beta1", 0.5) - self.z_dim = parameters["z_dim"] - self.discriminator_normalization = parameters["discriminator_normalization"] - if self.discriminator_normalization not in consts.NORMALIZERS: - raise ValueError( - "Normalization not recognized: %s" % self.discriminator_normalization) - - # Output folders and checkpoints. - self.checkpoint_dir = runtime_info.checkpoint_dir - self.result_dir = runtime_info.result_dir - self.log_dir = runtime_info.log_dir - self.max_checkpoints_to_keep = 1000 - - # Input and output shapes. - self.input_height = parameters["input_height"] - self.input_width = parameters["input_width"] - self.output_height = parameters["output_height"] - self.output_width = parameters["output_width"] - self.c_dim = parameters["c_dim"] - self.dataset_name = parameters["dataset_name"] - self.dataset_content = dataset_content - - # Number of discriminator iterations per one iteration of the generator. - self.disc_iters = parameters.get("disc_iters", 1) - self.unroll_disc_iters = parameters.get("unroll_disc_iters", self.use_tpu) - - @property - def num_sub_steps(self): - # To support disc_iters > 1 for TPU we unroll the graph to train - # multiple sub steps in one mini-batch. Each mini-batch will do - # self.disc_iters sub steps, each training the discriminator and increases - # global_step by one. After the last step we train the generator once. - if self.unroll_disc_iters: - return self.disc_iters - return 1 - - def discriminator(self, x, is_training, reuse=False): - """Discriminator architecture based on InfoGAN. - - Args: - x: input images, shape [bs, h, w, channels] - is_training: boolean, are we in train or eval model. - reuse: boolean, should params be re-used. 
- - Returns: - out: a float (in [0, 1]) with discriminator prediction - out_logit: the value "out" before sigmoid - net: the architecture - """ - sn = self.discriminator_normalization == consts.SPECTRAL_NORM - with tf.variable_scope("discriminator", reuse=reuse): - # Mapping x from [bs, h, w, c] to [bs, 1] - net = conv2d( - x, 64, 4, 4, 2, 2, name="d_conv1", use_sn=sn) # [bs, h/2, w/2, 64] - net = lrelu(net) - net = conv2d( - net, 128, 4, 4, 2, 2, name="d_conv2", - use_sn=sn) # [bs, h/4, w/4, 128] - if self.discriminator_normalization == consts.BATCH_NORM: - net = batch_norm(net, is_training=is_training, scope="d_bn2") - net = lrelu(net) - net = tf.reshape(net, [self.batch_size, -1]) # [bs, h * w * 8] - net = linear(net, 1024, scope="d_fc3", use_sn=sn) # [bs, 1024] - if self.discriminator_normalization == consts.BATCH_NORM: - net = batch_norm(net, is_training=is_training, scope="d_bn3") - net = lrelu(net) - out_logit = linear(net, 1, scope="d_fc4", use_sn=sn) # [bs, 1] - out = tf.nn.sigmoid(out_logit) - return out, out_logit, net - - def generator(self, z, is_training, reuse=False): - height = self.input_height - width = self.input_width - batch_size = self.batch_size - with tf.variable_scope("generator", reuse=reuse): - net = linear(z, 1024, scope="g_fc1") - net = batch_norm(net, is_training=is_training, scope="g_bn1") - net = lrelu(net) - net = linear(net, 128 * (height // 4) * (width // 4), scope="g_fc2") - net = batch_norm(net, is_training=is_training, scope="g_bn2") - net = lrelu(net) - net = tf.reshape(net, [batch_size, height // 4, width // 4, 128]) - net = deconv2d(net, [batch_size, height // 2, width // 2, 64], - 4, 4, 2, 2, name="g_dc3") - net = batch_norm(net, is_training=is_training, scope="g_bn3") - net = lrelu(net) - net = deconv2d(net, [batch_size, height, width, self.c_dim], - 4, 4, 2, 2, name="g_dc4") - out = tf.nn.sigmoid(net) - return out - - @property - def model_dir(self): - return "{}_{}_{}_{}".format(self.model_name, self.dataset_name, - self.batch_size, self.z_dim) - - def save(self, checkpoint_dir, step, sess): - if not tf.gfile.IsDirectory(checkpoint_dir): - tf.gfile.MakeDirs(checkpoint_dir) - - self.saver.save( - sess, - os.path.join(checkpoint_dir, self.model_name + ".model"), - global_step=step) - - def load(self, checkpoint_dir, sess): - print(" [*] Reading checkpoints...") - - ckpt = tf.train.get_checkpoint_state(checkpoint_dir) - if ckpt and ckpt.model_checkpoint_path: - ckpt_name = os.path.basename(ckpt.model_checkpoint_path) - self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name)) - self.saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths) - print(" [*] Successfully read {}".format(ckpt_name)) - return True - else: - print(" [*] Failed to find a checkpoint") - return False - - def print_progress(self, step, start_step, start_time, d_loss, g_loss, - progress_reporter, sess): - del sess # Unused by abstract_gan, used for gans_with_penalty. 
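# Shape walk-through of the InfoGAN-style generator above, with illustrative
# MNIST-sized numbers (batch_size=32, h=w=28, c_dim=1, z_dim=62; these values
# are assumptions, not fixed by the code):
shapes = [
    (32, 62),           # z
    (32, 1024),         # g_fc1
    (32, 128 * 7 * 7),  # g_fc2, where 7 = 28 // 4
    (32, 7, 7, 128),    # reshape
    (32, 14, 14, 64),   # g_dc3, stride-2 deconv
    (32, 28, 28, 1),    # g_dc4, stride-2 deconv -> sigmoid output in [0, 1]
]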
- if step % 100 == 0: - time_elapsed = time.time() - start_time - steps_per_sec = (step - start_step) / time_elapsed - eta_seconds = (self.training_steps - step) / (steps_per_sec + 0.0000001) - eta_minutes = eta_seconds / 60.0 - if progress_reporter: - progress_reporter(step=step, - steps_per_sec=steps_per_sec, - progress=step/self.training_steps, - eta_minutes=eta_minutes) - print("[%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f " - "steps_per_sec: %.4f ETA: %.2f minutes" % - (step, self.training_steps, time_elapsed, d_loss, g_loss, - steps_per_sec, eta_minutes)) - - def visualize_results(self, step, sess, z_distribution=None): - """Generates and stores a set of fake images.""" - suffix = "%s_step%03d_test_all_classes.png" % (self.model_name, step) - self._save_samples(step, sess, filename_suffix=suffix, - z_distribution=z_distribution) - - def check_variables(self, t_vars, d_vars, g_vars): - """Make sure that every variable belongs to generator or discriminator.""" - shared_vars = set(d_vars) & set(g_vars) - if shared_vars: - raise ValueError("Shared trainable variables: %s" % shared_vars) - unused_vars = set(t_vars) - set(d_vars) - set(g_vars) - if unused_vars: - raise ValueError("Unused trainable variables: %s" % unused_vars) - - def maybe_save_checkpoint(self, checkpoint_dir, step, sess): - if step % self.save_checkpoint_steps == 0: - self.save(checkpoint_dir, step, sess) - self.visualize_results(step, sess) - - def maybe_save_samples(self, step, sess): - """Saves training results every 5000 steps.""" - if np.mod(step, 5000) != 0: - return - suffix = "%s_train_%04d.png" % (self.model_name, step) - self._save_samples(step, sess, filename_suffix=suffix) - - def _image_grid_shape(self): - if self.batch_size & (self.batch_size - 1) != 0: - raise ValueError("Batch size must be a power of 2 to create a grid of " - "fake images but was {}.".format(self.batch_size)) - # Since b = 2^c we can use x = 2^(floor(c/2)) and y = 2^(ceil(c/2)). - x = 2 ** int(np.log2(self.batch_size) / 2) - return x, self.batch_size // x - - def _save_samples(self, step, sess, filename_suffix, z_distribution=None): - if z_distribution is None: - z_distribution = self.z_generator - z_sample = z_distribution(self.batch_size, self.z_dim) - grid_shape = self._image_grid_shape() - samples = sess.run(self.fake_images_merged, - feed_dict={self.z: z_sample}) - samples = samples.reshape((grid_shape[0] * self.input_height, - grid_shape[1] * self.input_width, -1)).squeeze() - out_folder = ops.check_folder(os.path.join(self.result_dir, self.model_dir)) - full_path = os.path.join(out_folder, filename_suffix) - ops.save_images(samples, full_path) - - def after_training_step_hook(self, sess, features, counter): - # Called after the training step. (used by BEGAN). - pass - - def resample_before_generator(self): - return False - - def discriminator_feed_dict(self, features, labels): - """Returns the feed_dict for discriminator training step.""" - del labels - return { - self.inputs: features["images"], - self.z: features["z_for_disc_step"], - } - - def generator_feed_dict(self, features, labels): - """Returns the feed_dict for generator training step.""" - # If self.resample_before_generator() returns True - # features["z_for_disc_step"] and features["z_for_gen_step"] will be the - # same. - del labels - return {self.z: features["z_for_gen_step"]} - - def z_generator(self, batch_size, z_dim): - """Returns the z-generator as numpy. 
    Used during training."""
-    return np.random.uniform(-1, 1, size=(batch_size, z_dim)).astype(np.float32)
-
-  def z_tf_generator(self, batch_size, z_dim, name=None):
-    """Returns the z-generator as a TF op.
-
-    Used in the exported evaluation subgraph.
-
-    Args:
-      batch_size: batch size used by the graph.
-      z_dim: dimensions of the z (latent) space.
-      name: name for the created Tensor.
-
-    Returns:
-      Tensor object.
-    """
-
-    return tf.random_uniform(
-        (batch_size, z_dim), minval=-1.0, maxval=1.0, name=name)
-
-  def run_single_train_step(self, features, counter, g_loss, sess):
-    """Runs a single training step."""
-
-    # Update the discriminator network.
-    _, summary_str, d_loss = sess.run(
-        [self.d_optim, self.d_sum, self.d_loss],
-        feed_dict=self.discriminator_feed_dict(features, labels=None))
-    self.writer.add_summary(summary_str, counter)
-
-    # Update the generator network.
-    if (counter - 1) % self.disc_iters == 0 or g_loss is None:
-      _, summary_str, g_loss = sess.run(
-          [self.g_optim, self.g_sum, self.g_loss],
-          feed_dict=self.generator_feed_dict(features, labels=None))
-      self.writer.add_summary(summary_str, counter)
-
-    self.after_training_step_hook(sess, features, counter)
-    return d_loss, g_loss
-
-  @contextmanager
-  def _different_batch_size(self, batch_size):
-    """Context to temporarily change the batch size in self.batch_size.
-
-    This is useful for TPUs where
-    self.batch_size = num_tpu_cores * batch_size_per_core.
-    The input pipeline will create batches of size self.batch_size but the
-    subgraph for the model needs to be constructed with batch_size_per_core.
-
-    Args:
-      batch_size: Batch size to set self.batch_size to within the context.
-    Yields:
-      Context with self.batch_size set to batch_size.
-    """
-    original_batch_size = self.batch_size
-    self.batch_size = batch_size
-    try:
-      yield
-    finally:
-      self.batch_size = original_batch_size
-
-  def _input_fn(self, params):
-    """Input_fn for Estimator training."""
-
-    del params
-    # If self.unroll_disc_iters is True we need to pack multiple mini-batches
-    # (one per disc_iters) into one batch. Unrolling the discriminator
-    # iterations is a workaround until tf.cond is fully supported on TPUs.
-    with self._different_batch_size(self.batch_size * self.num_sub_steps):
-      dataset = self.dataset_content.batch(self.batch_size)
-      dataset = dataset.prefetch(PREFETCH_NUM_BATCHES)
-      images, _ = dataset.make_one_shot_iterator().get_next()
-      # TPUs require known shapes everywhere.
-      images = tf.to_float(tf.reshape(
-          images,
-          [self.batch_size, self.input_height, self.input_width, self.c_dim]))
-      z = self.z_tf_generator(self.batch_size, self.z_dim,
-                              name="z_for_disc_step")
-      features = {
-          "images": images,
-          "z_for_disc_step": z,
-      }
-      # Some models resample the noise before training the generator. Therefore
-      # our mini-batch contains noise for both the discriminator and generator.
-      # For models that don't resample, both are the same.
-      if self.resample_before_generator():
-        features["z_for_gen_step"] = self.z_tf_generator(
-            self.batch_size, self.z_dim, name="z_for_gen_step")
-      else:
-        features["z_for_gen_step"] = tf.identity(z, name="z_for_gen_step")
-      return features, None
-
-  def _model_fn(self, features, labels, mode, params):
-    """Constructs the model for the given features and mode.
-
-    Args:
-      features: A dictionary with the feature tensors. See _input_fn()
-        for the set of features.
-      labels: Tensor with labels. None for the implementation of _input_fn()
-        above. Will be None if mode is PREDICT.
-      mode: `tf.estimator.ModeKeys` value (TRAIN, EVAL, PREDICT). The mode
-        should be passed to the TPUEstimatorSpec and your model should be
-        built in this mode.
-      params: Dictionary with hyper parameters. We currently do not use this,
-        but TPUEstimator will set the desired batch_size. You can use
-        _different_batch_size to temporarily override self.batch_size.
-
-    Returns:
-      A `tf.contrib.tpu.TPUEstimatorSpec`.
-    """
-    raise NotImplementedError(
-        "_model_fn() must be implemented in subclasses of AbstractGAN.")
-
-  def train_with_estimator(self, config, warm_start_from=None):
-    """Trains this model using the Estimator interface.
-
-    Args:
-      config: `tf.contrib.tpu.RunConfig` for the training.
-      warm_start_from: None or a `tf.estimator.WarmStartSettings` object. If
-        provided, variables can be initialized from a checkpoint.
-
-    Returns:
-      A `tf.contrib.tpu.TPUEstimator`.
-    """
-    # Noise for generating fake images.
-    self.sample_z = self.z_generator(self.batch_size, self.z_dim)
-
-    bs = self.batch_size * self.num_sub_steps
-    estimator = tf.contrib.tpu.TPUEstimator(
-        config=config,
-        use_tpu=self.use_tpu,
-        model_fn=self._model_fn,
-        train_batch_size=bs,
-        eval_batch_size=bs,
-        warm_start_from=warm_start_from)
-    estimator.train(input_fn=self._input_fn, max_steps=self.training_steps)
-
-  def train(self, sess, progress_reporter=None):
-    """Runs the training algorithm."""
-
-    self.fake_images_merged = tf.contrib.gan.eval.image_grid(
-        self.fake_images,
-        grid_shape=self._image_grid_shape(),
-        image_shape=(self.input_height, self.input_width),
-        num_channels=self.c_dim)
-
-    # Initialize the variables.
-    global_step = tf.train.get_or_create_global_step()
-    global_step_inc = global_step.assign_add(1)
-    tf.global_variables_initializer().run()
-    batch_size = self.batch_size
-
-    # Noise for generating fake images.
-    self.sample_z = self.z_generator(batch_size, self.z_dim)
-
-    # Model saver and summary writer.
-    self.saver = tf.train.Saver(max_to_keep=self.max_checkpoints_to_keep)
-    self.writer = tf.summary.FileWriter(
-        self.log_dir + "/" + self.model_name, sess.graph)
-
-    # Restore existing checkpoints.
-    could_load = self.load(self.checkpoint_dir, sess)
-    if could_load:
-      print(" [*] Model successfully loaded.")
-    else:
-      print(" [!] No checkpoint available, loading failed.")
-      self.save(self.checkpoint_dir, 0, sess)
-
-    # Start training.
-    counter = tf.train.global_step(sess, global_step)
-    batch_input, _ = self._input_fn(params=None)
-    start_time = time.time()
-
-    g_loss = None
-    start_step = int(counter) + 1
-    for step in range(start_step, self.training_steps + 1):
-      # Fetch the next batch and run a single training step.
-      features = sess.run(batch_input)
-      d_loss, g_loss = self.run_single_train_step(features, step, g_loss, sess)
-
-      sess.run(global_step_inc)
-      self.print_progress(step, start_step, start_time, d_loss, g_loss,
-                          progress_reporter, sess)
-      self.maybe_save_samples(step, sess)
-      # Save the model and visualize current results.
-      self.maybe_save_checkpoint(self.checkpoint_dir, step, sess)
-
-    if start_step < self.training_steps:
-      # Save the final model.
-      self.save(self.checkpoint_dir, self.training_steps, sess)
-      self.visualize_results(self.training_steps, sess)
diff --git a/compare_gan/src/gans/abstract_gan_estimator_test.py b/compare_gan/src/gans/abstract_gan_estimator_test.py
deleted file mode 100644
index eb47cd2..0000000
--- a/compare_gan/src/gans/abstract_gan_estimator_test.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# coding=utf-8
-# Copyright 2018 Google LLC & Hwalsuk Lee.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test cases to verify estimator training and matches our training loop.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import itertools -import os - -from absl import flags -from absl.testing import flagsaver -from absl.testing import parameterized -from compare_gan.src import gan_lib - -import numpy as np -import tensorflow as tf - -FLAGS = flags.FLAGS - - -class AbstractGANEstimatorTest(parameterized.TestCase, tf.test.TestCase): - options = { - 'dataset': 'cifar10', - 'architecture': 'resnet_cifar_arch', - 'batch_size': 4, - 'discriminator_normalization': 'none', - 'gan_type': 'GAN_PENALTY', - 'optimizer': 'sgd', - 'penalty_type': 'no_penalty', - 'save_checkpoint_steps': 1, - 'training_steps': 5, - 'disc_iters': 3, - 'learning_rate': 0.000199999994948, - 'tf_seed': 42, - 'z_dim': 128, - 'lambda': 1, - 'beta2': 0.999000012875, - 'beta1': 0.5, - } - - def load_checkpoint(self, checkpoint_dir, step): - checkpoint_path = os.path.join(checkpoint_dir, 'model.ckpt-' + str(step)) - if not tf.gfile.Exists(checkpoint_path + '.index'): - checkpoint_path = os.path.join(checkpoint_dir, - 'GAN_PENALTY.model-' + str(step)) - return tf.train.load_checkpoint(checkpoint_path) - - # The test case trains a model with the non-estimator training loop and the - # estimator training loop for a few steps and checks that the learned weights - # match. - @parameterized.named_parameters( - [('DiscIters{}Unroll{}TPU{}'.format(*p), p[0], p[1], p[2]) for p in - itertools.product([1, 3], [False, True], [False, True])] - ) - @flagsaver.flagsaver - def testEstimatorEqualsSession(self, disc_iters, unroll_disc_iters, use_tpu): - if use_tpu and not unroll_disc_iters: - # This is currently not supported. See b/111760885. - return - - FLAGS.master = None - FLAGS.iterations_per_loop = 1 - - options = self.options.copy() - options['disc_iters'] = disc_iters - - workdir = os.path.join(FLAGS.test_tmpdir, 'disc_iters{}/unroll{}'.format( - disc_iters, unroll_disc_iters)) - - options['use_estimator'] = False - gan_lib.run_with_options(options, os.path.join(workdir, 'no_estimator')) - - # We use the first checkpoint of the run without Estimator to warm start. - # Otherwise both models start with different values as the tf_seed is fixed - # but the graphs are still different. 
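# For reference, the warm start below relies on tf.estimator.WarmStartSettings,
# which initializes matching variables from an existing checkpoint before the
# first training step. Minimal sketch (hypothetical path):
import tensorflow as tf

ws = tf.estimator.WarmStartSettings(
    ckpt_to_initialize_from="/tmp/run/no_estimator/checkpoint/model.ckpt-0")
# Passing warm_start_from=ws to an Estimator restores the variable values
# before training begins.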
- warm_start = tf.estimator.WarmStartSettings( - os.path.join(workdir, 'no_estimator/checkpoint/GAN_PENALTY.model-0')) - options.update({ - 'use_estimator': True, - 'unroll_disc_iters': unroll_disc_iters, - 'use_tpu': use_tpu, - }) - gan_lib.run_with_options(options, os.path.join(workdir, 'estimator'), - warm_start_from=warm_start) - - for step in range(0, options['training_steps'], options['disc_iters']): - tf.logging.info('Comparing checkpoints for step %d', step) - estimator_ckpt = self.load_checkpoint( - os.path.join(workdir, 'estimator/checkpoint'), step) - baseline_ckpt = self.load_checkpoint( - os.path.join(workdir, 'no_estimator/checkpoint'), step) - - not_equal = 0 - for name in baseline_ckpt.get_variable_to_shape_map(): - assert estimator_ckpt.has_tensor(name), name - if name.endswith('moving_mean') or name.endswith('moving_variance'): - # Ignore batch norm values. - continue - if 'discriminator' in name and name.endswith('biases'): - - continue - t1 = estimator_ckpt.get_tensor(name) - t2 = baseline_ckpt.get_tensor(name) - if name == 'global_step': - self.assertEqual(t1, step) - self.assertEqual(t2, step) - continue - if not np.allclose(t1, t2, atol=1e-7): - not_equal += 1 - diff = np.abs(t1 - t2) - idx = np.argmax(diff) - tf.logging.info( - '%s is not close at step %d: maximum difference: %s (%s -> %s)', - name, step, diff.max(), t1.flatten()[idx], t2.flatten()[idx]) - - self.assertEqual(not_equal, 0) - - -if __name__ == '__main__': - tf.test.main() diff --git a/compare_gan/src/gans/dcgan_architecture.py b/compare_gan/src/gans/dcgan_architecture.py deleted file mode 100644 index d57e461..0000000 --- a/compare_gan/src/gans/dcgan_architecture.py +++ /dev/null @@ -1,241 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implementation of DCGAN generator and discriminator architectures.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src.gans import consts -from compare_gan.src.gans.ops import lrelu, linear, conv2d, deconv2d - -import numpy as np -import tensorflow as tf - - -def batch_norm_dcgan(input_, is_training, scope, decay=0.9, epsilon=1e-5): - return tf.contrib.layers.batch_norm( - input_, - decay=decay, - updates_collections=None, - epsilon=epsilon, - scale=True, - fused=False, # Interesting. - is_training=is_training, - scope=scope) - - -def conv_out_size_same(size, stride): - return int(np.ceil(float(size) / float(stride))) - - -def discriminator(x, - batch_size, - is_training, - discriminator_normalization, - reuse=False): - """Returns the outputs of the DCGAN discriminator. - - Details are available at https://arxiv.org/abs/1511.06434. Notable changes - include BatchNorm in the discriminator and LeakyReLU for all layers. - - Args: - x: input images, shape [bs, h, w, channels]. - batch_size: integer, number of samples in batch. - is_training: boolean, are we in train or eval model. 
- discriminator_normalization: which type of normalization to apply. - reuse: boolean, should params be re-used. - - Returns: - out: A float (in [0, 1]) with discriminator prediction. - out_logit: Logits (activations of the last linear layer). - net: Logits of the last ReLu layer. - """ - assert discriminator_normalization in [ - consts.NO_NORMALIZATION, consts.SPECTRAL_NORM, consts.BATCH_NORM] - bs = batch_size - df_dim = 64 # Dimension of filters in first convolutional layer. - use_sn = discriminator_normalization == consts.SPECTRAL_NORM - with tf.variable_scope("discriminator", reuse=reuse): - net = lrelu(conv2d(x, df_dim, 5, 5, 2, 2, name="d_conv1", use_sn=use_sn)) - net = conv2d(net, df_dim * 2, 5, 5, 2, 2, name="d_conv2", use_sn=use_sn) - - if discriminator_normalization == consts.BATCH_NORM: - net = batch_norm_dcgan(net, is_training, scope="d_bn1") - net = lrelu(net) - net = conv2d(net, df_dim * 4, 5, 5, 2, 2, name="d_conv3", use_sn=use_sn) - - if discriminator_normalization == consts.BATCH_NORM: - net = batch_norm_dcgan(net, is_training, scope="d_bn2") - net = lrelu(net) - net = conv2d(net, df_dim * 8, 5, 5, 2, 2, name="d_conv4", use_sn=use_sn) - - if discriminator_normalization == consts.BATCH_NORM: - net = batch_norm_dcgan(net, is_training, scope="d_bn3") - net = lrelu(net) - out_logit = linear( - tf.reshape(net, [bs, -1]), 1, scope="d_fc4", use_sn=use_sn) - out = tf.nn.sigmoid(out_logit) - return out, out_logit, net - - -def generator(z, batch_size, output_height, output_width, output_c_dim, - is_training, reuse=False): - """Returns the output tensor of the DCGAN generator. - - Details are available at https://arxiv.org/abs/1511.06434. Notable changes - include BatchNorm in the generator, ReLu instead of LeakyReLu and ReLu in - generator, except for output which uses TanH. - - Args: - z: latent code, shape [batch_size, latent_dimensionality] - batch_size: Batch size. - output_height: Output image height. - output_width: Output image width. - output_c_dim: Number of color channels. - is_training: boolean, are we in train or eval model. - reuse: boolean, should params be re-used. - - Returns: - net: The generated image Tensor with entries in [0, 1]. - """ - gf_dim = 64 # Dimension of filters in first convolutional layer. - bs = batch_size - with tf.variable_scope("generator", reuse=reuse): - s_h, s_w = output_height, output_width - s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2) - s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2) - s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2) - s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2) - - net = linear(z, gf_dim * 8 *s_h16 * s_w16, scope="g_fc1") - net = tf.reshape(net, [-1, s_h16, s_w16, gf_dim * 8]) - net = tf.nn.relu(batch_norm_dcgan(net, is_training, scope="g_bn1")) - net = deconv2d(net, [bs, s_h8, s_w8, gf_dim*4], 5, 5, 2, 2, name="g_dc1") - net = tf.nn.relu(batch_norm_dcgan(net, is_training, scope="g_bn2")) - net = deconv2d(net, [bs, s_h4, s_w4, gf_dim*2], 5, 5, 2, 2, name="g_dc2") - net = tf.nn.relu(batch_norm_dcgan(net, is_training, scope="g_bn3")) - net = deconv2d(net, [bs, s_h2, s_w2, gf_dim*1], 5, 5, 2, 2, name="g_dc3") - net = tf.nn.relu(batch_norm_dcgan(net, is_training, scope="g_bn4")) - net = deconv2d(net, [bs, s_h, s_w, output_c_dim], 5, 5, 2, 2, name="g_dc4") - net = 0.5 * tf.nn.tanh(net) + 0.5 - return net - - -def sn_discriminator(x, batch_size, reuse=False, use_sn=False): - """Returns the outputs of the SNDCGAN discriminator. 
-
-  Details are available at https://openreview.net/pdf?id=B1QRgziT-.
-
-  Args:
-    x: input images, shape [bs, h, w, channels].
-    batch_size: integer, number of samples in batch.
-    reuse: boolean, should params be re-used.
-    use_sn: boolean, whether to apply spectral normalization.
-
-  Returns:
-    out: A float (in [0, 1]) with discriminator prediction.
-    out_logit: Logits (activations of the last linear layer).
-    net: Logits of the last ReLu layer.
-  """
-
-  # In the compare_gan framework, image preprocessing normalizes pixels to the
-  # range [0, 1], while the authors used [-1, 1]. Apply this rescaling to the
-  # input image instead of changing our preprocessing function.
-  x = x * 2.0 - 1.0
-  with tf.variable_scope("discriminator", reuse=reuse):
-    # Mapping x from [bs, h, w, c] to [bs, 1]
-    normal = tf.random_normal_initializer
-    net = conv2d(
-        x, 64, 3, 3, 1, 1, name="d_conv1", initializer=normal, use_sn=use_sn)
-    net = lrelu(net, leak=0.1)
-    net = conv2d(
-        net, 128, 4, 4, 2, 2, name="d_conv2", initializer=normal, use_sn=use_sn)
-    net = lrelu(net, leak=0.1)
-    net = conv2d(
-        net, 128, 3, 3, 1, 1, name="d_conv3", initializer=normal, use_sn=use_sn)
-    net = lrelu(net, leak=0.1)
-    net = conv2d(
-        net, 256, 4, 4, 2, 2, name="d_conv4", initializer=normal, use_sn=use_sn)
-    net = lrelu(net, leak=0.1)
-    net = conv2d(
-        net, 256, 3, 3, 1, 1, name="d_conv5", initializer=normal, use_sn=use_sn)
-    net = lrelu(net, leak=0.1)
-    net = conv2d(
-        net, 512, 4, 4, 2, 2, name="d_conv6", initializer=normal, use_sn=use_sn)
-    net = lrelu(net, leak=0.1)
-    net = conv2d(
-        net, 512, 3, 3, 1, 1, name="d_conv7", initializer=normal, use_sn=use_sn)
-    net = lrelu(net, leak=0.1)
-
-    net = tf.reshape(net, [batch_size, -1])
-    out_logit = linear(net, 1, scope="d_fc1", use_sn=use_sn)
-    out_logit = tf.squeeze(out_logit)
-    out = tf.nn.sigmoid(out_logit)
-    return out, out_logit, net
-
-
-def sn_generator(z,
-                 batch_size,
-                 output_height,
-                 output_width,
-                 output_c_dim,
-                 is_training,
-                 reuse=False):
-  """Returns the output tensor of the SNDCGAN generator.
-
-  Details are available at https://openreview.net/pdf?id=B1QRgziT-.
-
-  Args:
-    z: latent code, shape [batch_size, latent_dimensionality]
-    batch_size: Batch size.
-    output_height: Output image height.
-    output_width: Output image width.
-    output_c_dim: Number of color channels.
-    is_training: boolean, are we in train or eval mode.
-    reuse: boolean, should params be re-used.
-
-  Returns:
-    net: The generated image Tensor with entries in [0, 1].
- """ - s_h, s_w = output_height, output_width - s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2) - s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2) - s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2) - - with tf.variable_scope("generator", reuse=reuse): - net = linear(z, s_h8 * s_w8 * 512, scope="g_fc1") - net = batch_norm_dcgan(net, is_training, scope="g_bn1", epsilon=2e-5) - net = tf.nn.relu(net) - net = tf.reshape(net, [batch_size, s_h8, s_w8, 512]) - net = deconv2d(net, [batch_size, s_h4, s_w4, 256], 4, 4, 2, 2, name="g_dc2") - net = batch_norm_dcgan(net, is_training, scope="g_bn2", epsilon=2e-5) - net = tf.nn.relu(net) - net = deconv2d(net, [batch_size, s_h2, s_w2, 128], 4, 4, 2, 2, name="g_dc3") - net = batch_norm_dcgan(net, is_training, scope="g_bn3", epsilon=2e-5) - net = tf.nn.relu(net) - net = deconv2d(net, [batch_size, s_h, s_w, 64], 4, 4, 2, 2, name="g_dc4") - net = batch_norm_dcgan(net, is_training, scope="g_bn4", epsilon=2e-5) - net = tf.nn.relu(net) - net = deconv2d( - net, [batch_size, s_h, s_w, output_c_dim], 3, 3, 1, 1, name="g_dc5") - out = tf.tanh(net) - - # NOTE: this normalization is introduced to match current image - # preprocessing, which normalize the real image to range [0, 1]. - # In author's implementation, they simply use the tanh activation function - # and normalize the image to range [-1, 1]. - out = tf.div(out + 1.0, 2.0) - - return out diff --git a/compare_gan/src/gans/gan_test.py b/compare_gan/src/gans/gan_test.py deleted file mode 100644 index 2c1378b..0000000 --- a/compare_gan/src/gans/gan_test.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Tests for GAN library.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections - -from compare_gan.src.gans import consts -from compare_gan.src.gans import GAN, WGAN, WGAN_GP, DRAGAN, LSGAN, BEGAN, VAE - -import numpy as np -import tensorflow as tf - -MODELS = { - "GAN": GAN.GAN, - "WGAN": WGAN.WGAN, - "WGAN_GP": WGAN_GP.WGAN_GP, - "DRAGAN": DRAGAN.DRAGAN, - "LSGAN": LSGAN.LSGAN, - "BEGAN": BEGAN.BEGAN, - "VAE": VAE.VAE -} - - -class GANTest(tf.test.TestCase): - - def testGANBuildsAndImageShapeIsOk(self): - features = np.random.randn(100, 2) - labels = np.zeros(100) - dataset_content = tf.data.Dataset.from_tensor_slices((features, labels)) - params = { - # dataset params - "input_height": 28, - "input_width": 28, - "output_height": 28, - "output_width": 28, - "c_dim": 1, - "dataset_name": "mnist_fake", - # training params - "learning_rate": 0.0002, - "beta1": 0.5, - "z_dim": 62, - "batch_size": 32, - "training_steps": 200, - "disc_iters": 5, - "gamma": 0.1, - "lambda": 0.1, - "y_dim": 10, - "pt_loss_weight": 1.0, - "len_discrete_code": 10, - "len_continuous_code": 2, - "SUPERVISED": True, - "discriminator_normalization": consts.NO_NORMALIZATION, - "weight_clipping": -1.0, - "save_checkpoint_steps": 5000 - } - - config = tf.ConfigProto(allow_soft_placement=True) - for gan_type in MODELS: - tf.reset_default_graph() - - runtime_info = collections.namedtuple( - 'RuntimeInfo', ['checkpoint_dir', 'result_dir', 'log_dir']) - - kwargs = dict( - runtime_info=runtime_info, - dataset_content=dataset_content, - parameters=params) - gan = MODELS[gan_type](**kwargs) - gan.build_model() - self.assertEqual(gan.fake_images.get_shape(), [32, 28, 28, 1]) - - -if __name__ == "__main__": - tf.test.main() diff --git a/compare_gan/src/gans/gans_with_penalty.py b/compare_gan/src/gans/gans_with_penalty.py deleted file mode 100644 index 7ff4f2a..0000000 --- a/compare_gan/src/gans/gans_with_penalty.py +++ /dev/null @@ -1,489 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Implementation of GANs (MMGAN, NSGAN, WGAN) with different regularizers.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src.gans import ablation_resnet_architecture -from compare_gan.src.gans import consts -from compare_gan.src.gans import dcgan_architecture -from compare_gan.src.gans import resnet_architecture -from compare_gan.src.gans.abstract_gan import AbstractGAN - -from six.moves import range -import tensorflow as tf - -flags = tf.flags -FLAGS = flags.FLAGS - -flags.DEFINE_boolean("print_penalty_loss", False, - "Whether to print penalty loss.") - - -class AbstractGANWithPenalty(AbstractGAN): - """GAN class for which we can modify loss/penalty/architecture via params.""" - - def __init__(self, model_name, **kwargs): - super(AbstractGANWithPenalty, self).__init__(model_name, **kwargs) - - self.architecture = self.parameters.get("architecture") - self.penalty_type = self.parameters.get("penalty_type") - self.discriminator_normalization = self.parameters.get( - "discriminator_normalization") - - if self.penalty_type not in consts.PENALTIES: - raise ValueError("Penalty '%s' not recognized." % self.penalty_type) - - if self.discriminator_normalization not in consts.NORMALIZERS: - raise ValueError("Normalization '%s' not recognized." % ( - self.discriminator_normalization)) - - if self.architecture not in consts.ARCHITECTURES: - raise ValueError("Architecture '%s' not recognized." % self.architecture) - - # Regularization strength. - self.lambd = self.parameters["lambda"] - self.beta2 = self.parameters.get("beta2", 0.999) - - # If the optimizer wasn't specified, use Adam to be consistent with - # other GANs. - self.optimizer = self.parameters.get("optimizer", "adam") - - self.resnet_ablation_type = self.parameters.get( - "resnet_ablation_type", "none") - - def get_optimizer(self, name_prefix): - if self.optimizer == "adam": - print("Using Adam optimizer.") - return tf.train.AdamOptimizer( - self.learning_rate, - beta1=self.beta1, - beta2=self.beta2, - name=name_prefix + self.optimizer) - elif self.optimizer == "rmsprop": - print("Using RMSProp optimizer.") - return tf.train.RMSPropOptimizer( - self.learning_rate, name=name_prefix + self.optimizer) - elif self.optimizer == "sgd": - print("Using GradientDescent optimizer.") - return tf.train.GradientDescentOptimizer(self.learning_rate) - else: - raise ValueError("Unknown optimizer: %s" % self.optimizer) - - def print_progress(self, step, start_step, start_time, d_loss, g_loss, - progress_reporter, sess): - super(AbstractGANWithPenalty, self).print_progress( - step, start_step, start_time, d_loss, g_loss, progress_reporter, sess) - if FLAGS.print_penalty_loss and step % 100 == 0: - penalty_loss = sess.run(self.penalty_loss) - print("\t\tlambda: %.4f penalty_loss: %.4f" % (self.lambd, penalty_loss)) - - def discriminator(self, x, is_training, reuse=False): - if self.architecture == consts.INFOGAN_ARCH: - return super(AbstractGANWithPenalty, self).discriminator( - x, is_training, reuse) - elif self.architecture == consts.DCGAN_ARCH: - return dcgan_architecture.discriminator( - x, self.batch_size, is_training, - self.discriminator_normalization, reuse) - elif self.architecture == consts.RESNET5_ARCH: - return resnet_architecture.resnet5_discriminator( - x, is_training, self.discriminator_normalization, reuse) - elif self.architecture == consts.RESNET107_ARCH: - return resnet_architecture.resnet107_discriminator( - x, is_training, 
self.discriminator_normalization, reuse) - elif self.architecture == consts.RESNET_CIFAR: - return resnet_architecture.resnet_cifar_discriminator( - x, is_training, self.discriminator_normalization, reuse) - elif self.architecture == consts.RESNET_STL: - return resnet_architecture.resnet_stl_discriminator( - x, is_training, self.discriminator_normalization, reuse) - elif self.architecture == consts.SNDCGAN_ARCH: - assert self.discriminator_normalization in [ - consts.SPECTRAL_NORM, consts.NO_NORMALIZATION] - return dcgan_architecture.sn_discriminator( - x, self.batch_size, reuse, - use_sn=self.discriminator_normalization == consts.SPECTRAL_NORM) - elif self.architecture == consts.RESNET5_ABLATION: - return ablation_resnet_architecture.resnet5_discriminator( - x, is_training, self.discriminator_normalization, reuse, - unused_ablation_type=self.resnet_ablation_type) - else: - raise NotImplementedError( - "Architecture %s not implemented." % self.architecture) - - def generator(self, z, is_training, reuse=False): - if self.architecture == consts.INFOGAN_ARCH: - return super(AbstractGANWithPenalty, self).generator( - z, is_training, reuse) - elif self.architecture == consts.DCGAN_ARCH: - return dcgan_architecture.generator(z, self.batch_size, - self.output_height, self.output_width, - self.c_dim, is_training, reuse) - elif self.architecture == consts.RESNET5_ARCH: - assert self.output_height == self.output_width - return resnet_architecture.resnet5_generator( - z, - is_training=is_training, - reuse=reuse, - colors=self.c_dim, - output_shape=self.output_height) - elif self.architecture == consts.RESNET_STL: - return resnet_architecture.resnet_stl_generator( - z, is_training=is_training, reuse=reuse, colors=self.c_dim) - elif self.architecture == consts.RESNET107_ARCH: - return resnet_architecture.resnet107_generator( - z, is_training=is_training, reuse=reuse, colors=self.c_dim) - elif self.architecture == consts.RESNET_CIFAR: - return resnet_architecture.resnet_cifar_generator( - z, is_training=is_training, reuse=reuse, colors=self.c_dim) - elif self.architecture == consts.SNDCGAN_ARCH: - return dcgan_architecture.sn_generator( - z, self.batch_size, self.output_height, self.output_width, self.c_dim, - is_training, reuse) - elif self.architecture == consts.RESNET5_ABLATION: - assert self.output_height == self.output_width - return ablation_resnet_architecture.resnet5_generator( - z, is_training=is_training, reuse=reuse, colors=self.c_dim, - output_shape=self.output_height, - unused_ablation_type=self.resnet_ablation_type) - else: - raise NotImplementedError( - "Architecture %s not implemented." 
% self.architecture) - - def dragan_penalty(self, x, discriminator, is_training): - """Returns the DRAGAN gradient penalty.""" - _, var = tf.nn.moments(x, axes=list(range(len(x.get_shape())))) - std = tf.sqrt(var) - x_noisy = x + std * (tf.random_uniform(x.shape) - 0.5) - x_noisy = tf.clip_by_value(x_noisy, 0.0, 1.0) - logits = discriminator(x_noisy, is_training=is_training, reuse=True)[1] - gradients = tf.gradients(logits, [x_noisy])[0] - slopes = tf.sqrt(0.0001 + tf.reduce_sum( - tf.square(gradients), reduction_indices=[1, 2, 3])) - gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0)) - return gradient_penalty - - def wgangp_penalty(self, x, x_fake, discriminator, is_training): - """Returns the WGAN gradient penalty.""" - alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1]) - interpolates = x + alpha * (x_fake - x) - logits = discriminator(interpolates, is_training=is_training, reuse=True)[1] - gradients = tf.gradients(logits, [interpolates])[0] - slopes = tf.sqrt(0.0001 + tf.reduce_sum( - tf.square(gradients), reduction_indices=[1, 2, 3])) - gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0)) - return gradient_penalty - - def l2_penalty(self): - """Returns the L2 penalty for each matrix/vector excluding biases.""" - t_vars = tf.trainable_variables() - d_vars = [var for var in t_vars if "discriminator" in var.name] - # Linear + conv2d and deconv2d layers. - print(d_vars) - d_weights = [ - v for v in d_vars - if ((v.name.endswith("/Matrix:0") and "fc" in v.name) or - (v.name.endswith("/w:0") and "conv" in v.name)) - ] - if len(d_weights) != consts.N_DISCRIMINATOR_LAYERS[self.architecture]: - raise RuntimeError( - "l2_penalty: got %d layers(%s), expected %d layers for %s." % - (len(d_weights), d_weights, - consts.N_DISCRIMINATOR_LAYERS[self.architecture], self.architecture)) - return tf.reduce_mean( - [tf.nn.l2_loss(i) for i in d_weights], name="l2_penalty") - - def _model_fn(self, features, labels, mode, params): - del labels # unused. - tf.logging.info("model_fn(): features=%s, mode=%s, params=%s)", features, - mode, params) - - if mode == tf.estimator.ModeKeys.PREDICT: - - noise = features["z"] - predictions = { - "generated_images": self.generator(noise, is_training=False) - } - return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions) - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - global_step = tf.train.get_or_create_global_step() - - def create_sub_step_loss(features, sub_step_idx=0, reuse=True): - """Creates the loss for a slice of the current batch. - - Args: - features: A dictionary with the feature tensors. - sub_step_idx: Index of the slice of the batch to use to construct the - loss. If self.unroll_disc_iters is True this must be 0 and the whole - batch will be used. - reuse: Bool, whether to reuse existing variables for the models. - Should be False for the first call and True on all other calls. - """ - - tf.logging.info("sub_step_idx: %s, params: %s, unroll_disc_iters: %s", - sub_step_idx, params, self.unroll_disc_iters) - assert (sub_step_idx == 0) or self.unroll_disc_iters - bs = params["batch_size"] // self.num_sub_steps - with self._different_batch_size(bs): - begin = sub_step_idx * self.batch_size - end = begin + self.batch_size - f = {k: v[begin:end] for k, v in features.iteritems()} - self.create_loss(f, is_training=is_training, reuse=reuse) - self.fake_images = self.generator(f["z"], is_training=False, reuse=True) - - if mode == tf.estimator.ModeKeys.TRAIN: - ######### - # TRAIN # - ######### - # Discriminator training. 
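# The wgangp_penalty above evaluates the discriminator gradient at random
# points between real and fake examples and pushes its norm toward 1. A NumPy
# sketch of just the interpolation step (illustrative, not part of the patch):
import numpy as np

def wgan_gp_interpolates(x_real, x_fake):
  # One uniform alpha per example, broadcast over height, width and channels.
  alpha = np.random.uniform(size=(x_real.shape[0], 1, 1, 1))
  return x_real + alpha * (x_fake - x_real)
# The penalty is then mean((||dD/d_interpolates||_2 - 1)^2), added to the
# discriminator loss scaled by lambda.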
- # Create the model and loss for z_for_disc_step features. - features["z"] = features["z_for_disc_step"] - create_sub_step_loss(features, reuse=False) - - # Divide trainable variables into a group for D and group for G. - t_vars = tf.trainable_variables() - d_vars = [var for var in t_vars if "discriminator" in var.name] - g_vars = [var for var in t_vars if "generator" in var.name] - self.check_variables(t_vars, d_vars, g_vars) - - # Discriminator training. - deps = tf.get_collection(tf.GraphKeys.UPDATE_OPS) - with tf.control_dependencies(deps): - d_optimizer = self.get_optimizer("d_") - g_optimizer = self.get_optimizer("g_") - if self.use_tpu: - d_optimizer = tf.contrib.tpu.CrossShardOptimizer(d_optimizer) - g_optimizer = tf.contrib.tpu.CrossShardOptimizer(g_optimizer) - - deps.append( - d_optimizer.minimize( - self.d_loss, var_list=d_vars, global_step=global_step)) - - if self.unroll_disc_iters: - for sub_step in range(1, self.disc_iters): - with tf.control_dependencies(deps): - create_sub_step_loss(features, sub_step) - deps.append( - d_optimizer.minimize( - self.d_loss, var_list=d_vars, global_step=global_step)) - - # Generator training. - # Create loss using the z_for_gen_step features. - features["z"] = features["z_for_gen_step"] - if self.num_sub_steps > 1: - create_sub_step_loss(features, self.num_sub_steps - 1) - else: - create_sub_step_loss(features) - - with tf.control_dependencies(deps): - if self.unroll_disc_iters or self.disc_iters == 1: - train_op = g_optimizer.minimize(self.g_loss, var_list=g_vars) - else: - # We should only train the generator every self.disc_iter steps. - # We can do this using `tf.cond`. Both paths must return a tensor. - # Our true_fn will return a tensor that depends on training the - # generator, while the tensor from false_fn depends on nothing. - def do_train_generator(): - actual_train_op = g_optimizer.minimize(self.g_loss, var_list=g_vars) - with tf.control_dependencies([actual_train_op]): - return tf.constant(0) - def do_not_train_generator(): - return tf.constant(0) - counter = tf.to_int32(global_step) - 1 - train_op = tf.cond( - tf.equal(counter % self.disc_iters, 0), - true_fn=do_train_generator, - false_fn=do_not_train_generator).op - else: - train_op = None - - return tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - # Estimator requires a loss which gets displayed on TensorBoard. - # The given Tensor is evaluated but not used to create gradients. - loss=self.d_loss, - train_op=train_op) - - def create_loss(self, features, is_training=True, reuse=False): - """Build the loss tensors for discriminator and generator. - - This method will set self.d_loss, self.g_loss and self.penalty_loss. - - Args: - features: Optional dictionary with inputs to the model ("images" should - contain the real images and "z" the noise for the generator). - is_training: If True build the model in training mode. If False build the - model for inference mode (e.g. use trained averages for batch norm). - reuse: Bool, whether to reuse existing variables for the models. - This is only used for unrolling discriminator iterations when training - on TPU. - - Raises: - ValueError: If set of meta/hyper parameters is not supported. - """ - images = features["images"] # Input images. - z = features["z"] # Noise vector. - - # Discriminator output for real images. - d_real, d_real_logits, _ = self.discriminator( - images, is_training=is_training, reuse=reuse) - - # Discriminator output for fake images. 
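# Worked schedule for the tf.cond gating above: with disc_iters = 3 the
# generator trains on steps 1, 4, 7, ... while the discriminator trains on
# every step (illustrative numbers):
disc_iters = 3
for global_step in range(1, 7):
  counter = global_step - 1
  train_generator = (counter % disc_iters == 0)
  # global_step 1 and 4 -> train_generator is True, otherwise False.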
- generated = self.generator(z, is_training=is_training, reuse=reuse) - d_fake, d_fake_logits, _ = self.discriminator( - generated, is_training=is_training, reuse=True) - - self.discriminator_output = d_real - - if self.model_name not in consts.MODELS_WITH_PENALTIES: - raise ValueError("Model %s not recognized" % self.model_name) - - # Define the loss functions - if self.model_name == consts.GAN_WITH_PENALTY: - d_loss_real = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( - logits=d_real_logits, labels=tf.ones_like(d_real))) - d_loss_fake = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( - logits=d_fake_logits, labels=tf.zeros_like(d_fake))) - self.d_loss = d_loss_real + d_loss_fake - self.g_loss = tf.reduce_mean( - tf.nn.sigmoid_cross_entropy_with_logits( - logits=d_fake_logits, labels=tf.ones_like(d_fake))) - elif self.model_name == consts.WGAN_WITH_PENALTY: - d_loss_real = -tf.reduce_mean(d_real_logits) - d_loss_fake = tf.reduce_mean(d_fake_logits) - self.d_loss = d_loss_real + d_loss_fake - self.g_loss = -d_loss_fake - elif self.model_name == consts.LSGAN_WITH_PENALTY: - d_loss_real = tf.reduce_mean(tf.square(d_real - 1.0)) - d_loss_fake = tf.reduce_mean(tf.square(d_fake)) - self.d_loss = 0.5 * (d_loss_real + d_loss_fake) - self.g_loss = 0.5 * tf.reduce_mean(tf.square(d_fake - 1.0)) - elif self.model_name == consts.SN_GAN_WITH_PENALTY: - d_loss_real = tf.reduce_mean(tf.nn.softplus(-d_real_logits)) - d_loss_fake = tf.reduce_mean(tf.nn.softplus(d_fake_logits)) - self.d_loss = d_loss_real + d_loss_fake - self.g_loss = tf.reduce_mean(tf.nn.softplus(-d_fake_logits)) - else: - raise ValueError("Unknown GAN model_name: %s" % self.model_name) - - # Define the penalty. - if self.penalty_type == consts.NO_PENALTY: - self.penalty_loss = 0.0 - elif self.penalty_type == consts.DRAGAN_PENALTY: - self.penalty_loss = self.dragan_penalty(images, self.discriminator, - is_training) - self.d_loss += self.lambd * self.penalty_loss - elif self.penalty_type == consts.WGANGP_PENALTY: - self.penalty_loss = self.wgangp_penalty(images, generated, - self.discriminator, is_training) - self.d_loss += self.lambd * self.penalty_loss - elif self.penalty_type == consts.L2_PENALTY: - self.penalty_loss = self.l2_penalty() - self.d_loss += self.lambd * self.penalty_loss - else: - raise NotImplementedError( - "The penalty %s was not implemented." % self.penalty_type) - - # Setup summaries. - - if not self.use_tpu: - d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real) - d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake) - d_loss_sum = tf.summary.scalar("d_loss", self.d_loss) - g_loss_sum = tf.summary.scalar("g_loss", self.g_loss) - self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum]) - self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum]) - - def build_model(self, is_training=True): - """Build the model (input placeholders, losses and optimizer ops). - - Args: - is_training: If True build the model in training mode. If False build the - model for inference mode (e.g. use trained averages for batch norm). - """ - image_dims = [self.input_height, self.input_width, self.c_dim] - batch_size = self.batch_size - # Input images. - self.inputs = tf.placeholder( - tf.float32, [batch_size] + image_dims, name="real_images") - # Noise vector. - self.z = tf.placeholder(tf.float32, [batch_size, self.z_dim], name="z") - - features = {"images": self.inputs, "z": self.z} - self.create_loss(features, is_training=is_training) - - # Store testing images. 
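# The sigmoid cross-entropy branch above is the non-saturating GAN loss; with
# logits l_r = D(x) and l_f = D(G(z)) it reduces to softplus terms. A NumPy
# sketch (standalone, with a numerically stable softplus):
import numpy as np

def softplus(x):
  return np.maximum(x, 0.0) + np.log1p(np.exp(-np.abs(x)))

def nsgan_losses(logits_real, logits_fake):
  d_loss = np.mean(softplus(-logits_real)) + np.mean(softplus(logits_fake))
  g_loss = np.mean(softplus(-logits_fake))
  return d_loss, g_loss
# The configured penalty (DRAGAN / WGAN-GP / L2) is then added to d_loss,
# scaled by lambda.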
-    self.fake_images = self.generator(self.z, is_training=False, reuse=True)
-
-    # This subgraph will be used to evaluate generators.
-    # It doesn't require any inputs and just keeps generating images.
-    with tf.name_scope("generator_evaluation"):
-      eval_input = self.z_tf_generator(
-          self.batch_size, self.z_dim, name="input")
-      result = self.generator(eval_input, is_training=False, reuse=True)
-      result = tf.identity(result, name="result")
-
-    # This subgraph can be used as a representation.
-    # It pins the current values of batch_norm, and has a separate input tensor.
-    with tf.name_scope("discriminator_representation"):
-      result, _, _ = self.discriminator(
-          tf.placeholder(tf.float32, self.inputs.get_shape(), name="input"),
-          is_training=False,
-          reuse=True)
-      result = tf.identity(result, name="result")
-
-    # Divide trainable variables into a group for D and a group for G.
-    t_vars = tf.trainable_variables()
-    d_vars = [var for var in t_vars if "discriminator" in var.name]
-    g_vars = [var for var in t_vars if "generator" in var.name]
-    self.check_variables(t_vars, d_vars, g_vars)
-
-    # Define optimization ops.
-    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
-      self.d_optim = self.get_optimizer("d_").minimize(
-          self.d_loss, var_list=d_vars)
-      self.g_optim = self.get_optimizer("g_").minimize(
-          self.g_loss, var_list=g_vars)
-
-
-class GAN_PENALTY(AbstractGANWithPenalty):
-  """Non-saturating Generative Adversarial Networks.
-
-  The loss for the generator is computed using the log trick. That is,
-  G_loss = -log(D(fake_images))  [maximizes log(D)]
-  """
-
-  def __init__(self, **kwargs):
-    super(GAN_PENALTY, self).__init__(consts.GAN_WITH_PENALTY, **kwargs)
-
-
-class WGAN_PENALTY(AbstractGANWithPenalty):
-  """Generative Adversarial Networks with the Wasserstein loss."""
-
-  def __init__(self, **kwargs):
-    super(WGAN_PENALTY, self).__init__(consts.WGAN_WITH_PENALTY, **kwargs)
-
-
-class LSGAN_PENALTY(AbstractGANWithPenalty):
-  """Generative Adversarial Networks with the Least Squares loss."""
-
-  def __init__(self, **kwargs):
-    super(LSGAN_PENALTY, self).__init__(consts.LSGAN_WITH_PENALTY, **kwargs)
diff --git a/compare_gan/src/gans/gans_with_penalty_estimator_test.py b/compare_gan/src/gans/gans_with_penalty_estimator_test.py
deleted file mode 100644
index 0f27a54..0000000
--- a/compare_gan/src/gans/gans_with_penalty_estimator_test.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# coding=utf-8
-# Copyright 2018 Google LLC & Hwalsuk Lee.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
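# The generator_evaluation scope in build_model above exposes named endpoints
# so an exported graph can be sampled without the Python class. A sketch of
# fetching them by name (assumption: the tensor names follow from the
# name_scope / tf.identity calls above; verify against the actual graph):
import tensorflow as tf

graph = tf.get_default_graph()
samples = graph.get_tensor_by_name("generator_evaluation/result:0")
# with tf.Session() as sess: images = sess.run(samples)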
- -"""Tests for GANs with different regularizers.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import itertools -import os - -from absl import flags -from absl.testing import parameterized - -from compare_gan.src import params -from compare_gan.src import test_utils -from compare_gan.src.gans import consts -from compare_gan.src.gans.gans_with_penalty import GAN_PENALTY -from compare_gan.src.gans.gans_with_penalty import LSGAN_PENALTY -from compare_gan.src.gans.gans_with_penalty import WGAN_PENALTY - -import numpy as np -import tensorflow as tf - -FLAGS = flags.FLAGS - -TEST_MODELS = [GAN_PENALTY, WGAN_PENALTY, LSGAN_PENALTY] -TEST_NORMALIZERS = consts.NORMALIZERS -TEST_PENALTIES = consts.PENALTIES -TEST_ARCHITECTURES = [ - consts.INFOGAN_ARCH, consts.DCGAN_ARCH, consts.SNDCGAN_ARCH, - consts.RESNET5_ARCH -] -GENERATOR_TRAINED_IN_STEPS = [ - # disc_iters=1. - 5 * [True], - # disc_iters=2. - (3 * [True, False])[:5], - # disc_iters=3. - (3 * [True, False, False])[:5], -] - - -class FakeRuntimeInfo(object): - - def __init__(self, model_dir=None): - self.checkpoint_dir = model_dir - self.result_dir = model_dir - self.log_dir = model_dir - - -class GANSWithPenaltyEstimatorTest(parameterized.TestCase, tf.test.TestCase): - parameters = { - "learning_rate": 0.0002, - "beta1": 0.5, - "z_dim": 64, - "batch_size": 2, - "gamma": 0.1, - "lambda": 0.1, - "y_dim": 10, - "pt_loss_weight": 1.0, - "len_discrete_code": 10, - "len_continuous_code": 2, - "save_checkpoint_steps": 1, - "weight_clipping": -1.0, - "tf_seed": 42, - "use_estimator": True, - } - - def isValidModel(self, model, normalization, architecture, penalty_type): - if architecture == consts.SNDCGAN_ARCH: - return normalization in [consts.SPECTRAL_NORM, consts.NO_NORMALIZATION] - if architecture == consts.DCGAN_ARCH: - return normalization in [ - consts.NO_NORMALIZATION, consts.SPECTRAL_NORM, consts.BATCH_NORM - ] - return True - - @parameterized.named_parameters( - [["m{}norm{}arch{}penalty{}".format(*p)] + list(p) - for p in itertools.product(TEST_MODELS, TEST_NORMALIZERS, - TEST_ARCHITECTURES, TEST_PENALTIES)]) - def testSingleTrainingStepOnTPU(self, model, normalization, architecture, - penalty_type): - if not self.isValidModel(model, normalization, architecture, penalty_type): - return - parameters = self.parameters.copy() - parameters.update(params.GetDatasetParameters("cifar10")) - parameters.update({ - "use_tpu": True, - "discriminator_normalization": normalization, - "architecture": architecture, - "penalty_type": penalty_type, - "disc_iters": 1, - "training_steps": 1, - }) - dataset_content = test_utils.load_fake_dataset(parameters).repeat() - - model_dir = os.path.join(FLAGS.test_tmpdir, model.__name__, normalization, - architecture, penalty_type) - - config = tf.contrib.tpu.RunConfig( - model_dir=model_dir, - tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1)) - print("Testing loss %s, regularizer %s, with architecture %s." 
% - (model, penalty_type, architecture)) - gan = model( - runtime_info=FakeRuntimeInfo(model_dir), - dataset_content=dataset_content, - parameters=parameters) - gan.train_with_estimator(config) - - @parameterized.named_parameters( - [["disc{}tpu{}".format(*p)] + list(p) - for p in itertools.product(range(1, 4), [False, True])]) - def testDiscItersIsUsedCorrectly(self, disc_iters, use_tpu): - if disc_iters > 1 and use_tpu: - - return - parameters = self.parameters.copy() - parameters.update(params.GetDatasetParameters("cifar10")) - parameters.update({ - "use_tpu": use_tpu, - "discriminator_normalization": consts.NO_NORMALIZATION, - - "architecture": consts.RESNET_CIFAR, - "penalty_type": consts.NO_PENALTY, - "disc_iters": disc_iters, - "training_steps": 5, - }) - dataset_content = test_utils.load_fake_dataset(parameters).repeat() - - model_dir = os.path.join(FLAGS.test_tmpdir, str(disc_iters)) - - config = tf.contrib.tpu.RunConfig( - model_dir=model_dir, - save_checkpoints_steps=1, - keep_checkpoint_max=1000, - tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1)) - gan = GAN_PENALTY( - runtime_info=FakeRuntimeInfo(model_dir), - dataset_content=dataset_content, - parameters=parameters) - gan.train_with_estimator(config) - - # Read checkpoints for each training step. If the weight in the generator - # changed we trained the generator during that step. - previous_values = {} - generator_trained = [] - for step in range(0, parameters["training_steps"] + 1): - basename = os.path.join(model_dir, "model.ckpt-{}".format(step)) - self.assertTrue(tf.gfile.Exists(basename + ".index")) - ckpt = tf.train.load_checkpoint(basename) - - if step == 0: - for name in ckpt.get_variable_to_shape_map(): - previous_values[name] = ckpt.get_tensor(name) - continue - - d_trained = False - g_trained = False - for name in ckpt.get_variable_to_shape_map(): - t = ckpt.get_tensor(name) - diff = np.abs(previous_values[name] - t).max() - previous_values[name] = t - if "discriminator" in name and diff > 1e-10: - d_trained = True - elif "generator" in name and diff > 1e-10: - if name.endswith("moving_mean") or name.endswith("moving_variance"): - # Note: Even when we don't train the generator the batch norm - # values still get updated. - continue - tf.logging.info("step %d: %s changed up to %f", step, name, diff) - g_trained = True - self.assertTrue(d_trained) # Discriminator is trained every step. - generator_trained.append(g_trained) - - self.assertEqual(generator_trained, - GENERATOR_TRAINED_IN_STEPS[disc_iters - 1]) - - -if __name__ == "__main__": - tf.test.main() diff --git a/compare_gan/src/gans/gans_with_penalty_test.py b/compare_gan/src/gans/gans_with_penalty_test.py deleted file mode 100644 index 44c560b..0000000 --- a/compare_gan/src/gans/gans_with_penalty_test.py +++ /dev/null @@ -1,214 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
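Aside (illustrative, not part of this patch): the hard-coded GENERATOR_TRAINED_IN_STEPS tables used by these tests encode "one generator update after every disc_iters discriminator updates, starting with the generator at step 0". The same tables can be derived programmatically:

def expected_generator_schedule(disc_iters, num_steps):
  # True at step i iff the generator is updated on that step.
  return [step % disc_iters == 0 for step in range(num_steps)]

# Matches the tables above, e.g.:
assert expected_generator_schedule(2, 5) == (3 * [True, False])[:5]
assert expected_generator_schedule(3, 10) == (4 * [True, False, False])[:10]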
- -"""Tests for GANs with different regularizers.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import itertools -import os - -from absl import flags -from absl.testing import parameterized - -from compare_gan.src import params -from compare_gan.src import test_utils -from compare_gan.src.gans import consts -from compare_gan.src.gans.gans_with_penalty import GAN_PENALTY -from compare_gan.src.gans.gans_with_penalty import LSGAN_PENALTY -from compare_gan.src.gans.gans_with_penalty import WGAN_PENALTY - -import numpy as np -from six.moves import range -import tensorflow as tf - -FLAGS = flags.FLAGS - -TEST_MODELS = [GAN_PENALTY, WGAN_PENALTY, LSGAN_PENALTY] -TEST_NORMALIZERS = consts.NORMALIZERS -TEST_PENALTIES = consts.PENALTIES -TEST_ARCHITECTURES = [ - consts.INFOGAN_ARCH, consts.DCGAN_ARCH, consts.SNDCGAN_ARCH, - consts.RESNET5_ARCH -] -GENERATOR_TRAINED_IN_STEPS = [ - # disc_iters=1. - 10 * [True], - # disc_iters=2. - 5 * [True, False], - # disc_iters=3. - (4 * [True, False, False])[:10], - # disc_iters=4. - (3 * [True, False, False, False])[:10], - # disc_iters=5. - (3 * [True, False, False, False, False])[:10], -] - - -class FakeRuntimeInfo(object): - - def __init__(self, task_workdir=None): - self.checkpoint_dir = task_workdir - self.result_dir = task_workdir - self.log_dir = task_workdir - - -class GANSWithPenaltyTest(parameterized.TestCase, tf.test.TestCase): - parameters = { - "learning_rate": 0.0002, - "beta1": 0.5, - "z_dim": 100, - "batch_size": 64, - "training_steps": 10, - "disc_iters": 1, - "gamma": 0.1, - "lambda": 0.1, - "y_dim": 10, - "pt_loss_weight": 1.0, - "len_discrete_code": 10, - "len_continuous_code": 2, - "save_checkpoint_steps": 5000, - "SUPERVISED": True, - "weight_clipping": -1.0, - } - - def isValidModel(self, model, normalization, architecture, penalty_type): - if architecture == consts.SNDCGAN_ARCH: - return normalization in [consts.SPECTRAL_NORM, consts.NO_NORMALIZATION] - if architecture == consts.DCGAN_ARCH: - return normalization in [consts.NO_NORMALIZATION, consts.SPECTRAL_NORM, - consts.BATCH_NORM] - return True - - @parameterized.named_parameters( - [(str(i), p[0], p[1], p[2], p[3]) for i, p in enumerate( - itertools.product(TEST_MODELS, TEST_NORMALIZERS, TEST_ARCHITECTURES, - TEST_PENALTIES))]) - def testGANBuildsAndImageShapeIsOk(self, model, normalization, architecture, - penalty_type): - if not self.isValidModel(model, normalization, architecture, penalty_type): - return - parameters = self.parameters.copy() - parameters.update(params.GetDatasetParameters("celeba")) - parameters.update({ - "architecture": architecture, - "penalty_type": penalty_type, - "discriminator_normalization": normalization, - }) - dataset_content = test_utils.load_fake_dataset(parameters).repeat() - - config = tf.ConfigProto(allow_soft_placement=True) - tf.reset_default_graph() - with tf.Session(config=config): - kwargs = dict( - runtime_info=FakeRuntimeInfo(), - dataset_content=dataset_content, - parameters=parameters) - print("Testing loss %s, regularizer %s, with architecture %s." 
% - (model, penalty_type, architecture)) - gan = model(**kwargs) - gan.build_model() - self.assertEqual(gan.fake_images.get_shape(), [64, 64, 64, 3]) - - @parameterized.named_parameters([ - ("DiscIters" + str(i), i) for i in range(1, 6) - ]) - def testDiscItersIsUsedCorrectly(self, disc_iters): - parameters = self.parameters.copy() - parameters.update(params.GetDatasetParameters("cifar10")) - parameters.update({ - "batch_size": 2, - "training_steps": 10, - "save_checkpoint_steps": 1, - "disc_iters": disc_iters, - "architecture": consts.RESNET_CIFAR, - "penalty_type": consts.NO_PENALTY, - "discriminator_normalization": consts.NO_NORMALIZATION, - }) - dataset_content = test_utils.load_fake_dataset(parameters).repeat() - - task_workdir = os.path.join(FLAGS.test_tmpdir, str(disc_iters)) - with tf.Graph().as_default(), tf.Session() as sess: - gan = GAN_PENALTY( - runtime_info=FakeRuntimeInfo(task_workdir), - dataset_content=dataset_content, - parameters=parameters) - gan.build_model() - gan.train(sess) - - # Read checkpoints for each training step. If the weight in the generator - # changed we trained the generator during that step. - previous_values = {} - generator_trained = [] - for step in range(0, parameters["training_steps"] + 1): - basename = os.path.join(task_workdir, "GAN_PENALTY.model-{}".format(step)) - self.assertTrue(tf.gfile.Exists(basename + ".index")) - ckpt = tf.train.load_checkpoint(basename) - - if step == 0: - for name in ckpt.get_variable_to_shape_map(): - previous_values[name] = ckpt.get_tensor(name) - continue - - d_trained = False - g_trained = False - for name in ckpt.get_variable_to_shape_map(): - t = ckpt.get_tensor(name) - diff = np.abs(previous_values[name] - t).max() - previous_values[name] = t - if "discriminator" in name and diff > 1e-10: - d_trained = True - elif "generator" in name and diff > 1e-10: - if name.endswith('moving_mean') or name.endswith('moving_variance'): - # Note: Even when we don't train the generator the batch norm - # values still get updated. - continue - tf.logging.info("step %d: %s changed up to %f", step, name, diff) - g_trained = True - self.assertTrue(d_trained) # Discriminator is trained every step. - generator_trained.append(g_trained) - - self.assertEqual(generator_trained, - GENERATOR_TRAINED_IN_STEPS[disc_iters - 1]) - - @parameterized.named_parameters([ - (str(i), p) for i, p in enumerate( - TEST_ARCHITECTURES + [consts.RESNET_CIFAR]) - ]) - def testL2Regularization(self, architecture): - parameters = self.parameters.copy() - parameters.update(params.GetDatasetParameters("celeba")) - parameters.update({ - "architecture": architecture, - "penalty_type": consts.L2_PENALTY, - "discriminator_normalization": consts.NO_NORMALIZATION, - }) - dataset_content = test_utils.load_fake_dataset(parameters).repeat() - - config = tf.ConfigProto(allow_soft_placement=True) - tf.reset_default_graph() - with tf.Session(config=config): - kwargs = dict( - runtime_info=FakeRuntimeInfo(), - dataset_content=dataset_content, - parameters=parameters) - gan = GAN_PENALTY(**kwargs) - gan.build_model() - - -if __name__ == "__main__": - tf.test.main() diff --git a/compare_gan/src/gans/ops.py b/compare_gan/src/gans/ops.py deleted file mode 100644 index b6e8be5..0000000 --- a/compare_gan/src/gans/ops.py +++ /dev/null @@ -1,313 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import scipy.misc -from six.moves import map -from six.moves import range -import tensorflow as tf - - -def check_folder(log_dir): - if not tf.gfile.IsDirectory(log_dir): - tf.gfile.MakeDirs(log_dir) - return log_dir - - -def save_images(images, image_path): - with tf.gfile.Open(image_path, "wb") as f: - scipy.misc.imsave(f, images * 255.0) - - -def gaussian(batch_size, n_dim, mean=0., var=1.): - return np.random.normal(mean, var, (batch_size, n_dim)).astype(np.float32) - - -def lrelu(input_, leak=0.2, name="lrelu"): - return tf.maximum(input_, leak * input_, name=name) - - -def batch_norm(input_, is_training, scope): - return tf.contrib.layers.batch_norm( - input_, - decay=0.999, - epsilon=0.001, - updates_collections=None, - scale=True, - fused=False, - is_training=is_training, - scope=scope) - - -def layer_norm(input_, is_training, scope): - return tf.contrib.layers.layer_norm( - input_, trainable=is_training, scope=scope) - - -def spectral_norm(input_): - """Performs Spectral Normalization on a weight tensor.""" - if len(input_.shape) < 2: - raise ValueError( - "Spectral norm can only be applied to multi-dimensional tensors") - - # The paper says to flatten convnet kernel weights from (C_out, C_in, KH, KW) - # to (C_out, C_in * KH * KW). But Sonnet's and Compare_gan's Conv2D kernel - # weight shape is (KH, KW, C_in, C_out), so it should be reshaped to - # (KH * KW * C_in, C_out), and similarly for other layers that put output - # channels as last dimension. - # n.b. this means that w here is equivalent to w.T in the paper. - w = tf.reshape(input_, (-1, input_.shape[-1])) - - # Persisted approximation of first left singular vector of matrix `w`. - - u_var = tf.get_variable( - input_.name.replace(":", "") + "/u_var", - shape=(w.shape[0], 1), - dtype=w.dtype, - initializer=tf.random_normal_initializer(), - trainable=False) - u = u_var - - # Use power iteration method to approximate spectral norm. - # The authors suggest that "one round of power iteration was sufficient in the - # actual experiment to achieve satisfactory performance". According to - # observation, the spectral norm become very accurate after ~20 steps. - - power_iteration_rounds = 1 - for _ in range(power_iteration_rounds): - # `v` approximates the first right singular vector of matrix `w`. - v = tf.nn.l2_normalize( - tf.matmul(tf.transpose(w), u), axis=None, epsilon=1e-12) - u = tf.nn.l2_normalize(tf.matmul(w, v), axis=None, epsilon=1e-12) - - # Update persisted approximation. - with tf.control_dependencies([tf.assign(u_var, u, name="update_u")]): - u = tf.identity(u) - - # The authors of SN-GAN chose to stop gradient propagating through u and v. - # In johnme@'s experiments it wasn't clear that this helps, but it doesn't - # seem to hinder either so it's kept in order to be a faithful implementation. - u = tf.stop_gradient(u) - v = tf.stop_gradient(v) - - # Largest singular value of `w`. 
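# NOTE (added for clarity, not in the original file): after the power
# iteration above, u and v approximate the top left/right singular vectors
# of w, so u^T w v below approximates sigma(w); dividing w by this value
# rescales the layer to spectral norm ~1, bounding its Lipschitz constant.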
- norm_value = tf.matmul(tf.matmul(tf.transpose(u), w), v) - norm_value.shape.assert_is_fully_defined() - norm_value.shape.assert_is_compatible_with([1, 1]) - - w_normalized = w / norm_value - - # Unflatten normalized weights to match the unnormalized tensor. - w_tensor_normalized = tf.reshape(w_normalized, input_.shape) - return w_tensor_normalized - - -def linear(input_, - output_size, - scope=None, - stddev=0.02, - bias_start=0.0, - use_sn=False): - - shape = input_.get_shape().as_list() - - with tf.variable_scope(scope or "Linear"): - matrix = tf.get_variable( - "Matrix", [shape[1], output_size], - tf.float32, - tf.random_normal_initializer(stddev=stddev)) - bias = tf.get_variable( - "bias", [output_size], initializer=tf.constant_initializer(bias_start)) - if use_sn: - return tf.matmul(input_, spectral_norm(matrix)) + bias - else: - return tf.matmul(input_, matrix) + bias - - -def spectral_norm_update_ops(var_list, weight_getter): - update_ops = [] - print(" [*] Spectral norm layers") - layer = 0 - for var in var_list: - if weight_getter.match(var.name): - layer += 1 - print(" %d. %s" % (layer, var.name)) - # Alternative solution here is keep spectral norm and original weight - # matrix separately, and only normalize the weight matrix if needed. - # But as spectral norm converges to 1.0 very quickly, it should be very - # minor accuracy diff caused by float value division. - update_ops.append(tf.assign(var, spectral_norm(var))) - return update_ops - - -def spectral_norm_svd(input_): - if len(input_.shape) < 2: - raise ValueError( - "Spectral norm can only be applied to multi-dimensional tensors") - - w = tf.reshape(input_, (-1, input_.shape[-1])) - s, _, _ = tf.svd(w) - return s[0] - - -def spectral_norm_value(var_list, weight_getter): - """Compute spectral norm value using svd, for debug purpose.""" - norms = {} - for var in var_list: - if weight_getter.match(var.name): - norms[var.name] = spectral_norm_svd(var) - return norms - - -def conv2d(input_, output_dim, k_h, k_w, d_h, d_w, stddev=0.02, name="conv2d", - initializer=tf.truncated_normal_initializer, use_sn=False): - with tf.variable_scope(name): - w = tf.get_variable( - "w", [k_h, k_w, input_.get_shape()[-1], output_dim], - initializer=initializer(stddev=stddev)) - if use_sn: - conv = tf.nn.conv2d( - input_, spectral_norm(w), strides=[1, d_h, d_w, 1], padding="SAME") - else: - conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding="SAME") - biases = tf.get_variable( - "biases", [output_dim], initializer=tf.constant_initializer(0.0)) - return tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) - - -def deconv2d(input_, output_shape, k_h, k_w, d_h, d_w, - stddev=0.02, name="deconv2d"): - with tf.variable_scope(name): - w = tf.get_variable( - "w", [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], - initializer=tf.random_normal_initializer(stddev=stddev)) - deconv = tf.nn.conv2d_transpose( - input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) - biases = tf.get_variable( - "biases", [output_shape[-1]], initializer=tf.constant_initializer(0.0)) - return tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape()) - - -def weight_norm_linear(input_, output_size, - init=False, init_scale=1.0, - name="wn_linear", - initializer=tf.truncated_normal_initializer, - stddev=0.02): - """Linear layer with Weight Normalization (Salimans, Kingma '16).""" - with tf.variable_scope(name): - if init: - v = tf.get_variable("V", [int(input_.get_shape()[1]), output_size], - tf.float32, initializer(0, stddev), 
trainable=True) - v_norm = tf.nn.l2_normalize(v.initialized_value(), [0]) - x_init = tf.matmul(input_, v_norm) - m_init, v_init = tf.nn.moments(x_init, [0]) - scale_init = init_scale / tf.sqrt(v_init + 1e-10) - g = tf.get_variable("g", dtype=tf.float32, - initializer=scale_init, trainable=True) - b = tf.get_variable("b", dtype=tf.float32, initializer= - -m_init*scale_init, trainable=True) - x_init = tf.reshape(scale_init, [1, output_size]) * ( - x_init - tf.reshape(m_init, [1, output_size])) - return x_init - else: - - v = tf.get_variable("V") - g = tf.get_variable("g") - b = tf.get_variable("b") - tf.assert_variables_initialized([v, g, b]) - x = tf.matmul(input_, v) - scaler = g / tf.sqrt(tf.reduce_sum(tf.square(v), [0])) - x = tf.reshape(scaler, [1, output_size]) * x + tf.reshape( - b, [1, output_size]) - return x - - -def weight_norm_conv2d(input_, output_dim, - k_h, k_w, d_h, d_w, - init, init_scale, - stddev=0.02, - name="wn_conv2d", - initializer=tf.truncated_normal_initializer): - """Convolution with Weight Normalization (Salimans, Kingma '16).""" - with tf.variable_scope(name): - if init: - v = tf.get_variable( - "V", [k_h, k_w] + [int(input_.get_shape()[-1]), output_dim], - tf.float32, initializer(0, stddev), trainable=True) - v_norm = tf.nn.l2_normalize(v.initialized_value(), [0, 1, 2]) - x_init = tf.nn.conv2d(input_, v_norm, strides=[1, d_h, d_w, 1], - padding="SAME") - m_init, v_init = tf.nn.moments(x_init, [0, 1, 2]) - scale_init = init_scale / tf.sqrt(v_init + 1e-8) - g = tf.get_variable( - "g", dtype=tf.float32, initializer=scale_init, trainable=True) - b = tf.get_variable( - "b", dtype=tf.float32, initializer=-m_init*scale_init, trainable=True) - x_init = tf.reshape(scale_init, [1, 1, 1, output_dim]) * ( - x_init - tf.reshape(m_init, [1, 1, 1, output_dim])) - return x_init - else: - v = tf.get_variable("V") - g = tf.get_variable("g") - b = tf.get_variable("b") - tf.assert_variables_initialized([v, g, b]) - w = tf.reshape(g, [1, 1, 1, output_dim]) * tf.nn.l2_normalize( - v, [0, 1, 2]) - x = tf.nn.bias_add( - tf.nn.conv2d(input_, w, [1, d_h, d_w, 1], padding="SAME"), b) - return x - - -def weight_norm_deconv2d(x, output_dim, - k_h, k_w, d_h, d_w, - init=False, init_scale=1.0, - stddev=0.02, - name="wn_deconv2d", - initializer=tf.truncated_normal_initializer): - """Transposed Convolution with Weight Normalization (Salimans, Kingma '16).""" - xs = list(map(int, x.get_shape())) - target_shape = [xs[0], xs[1] * d_h, xs[2] * d_w, output_dim] - with tf.variable_scope(name): - if init: - v = tf.get_variable( - "V", [k_h, k_w] + [output_dim, int(x.get_shape()[-1])], - tf.float32, initializer(0, stddev), trainable=True) - v_norm = tf.nn.l2_normalize(v.initialized_value(), [0, 1, 3]) - x_init = tf.nn.conv2d_transpose(x, v_norm, target_shape, - [1, d_h, d_w, 1], padding="SAME") - m_init, v_init = tf.nn.moments(x_init, [0, 1, 2]) - scale_init = init_scale/tf.sqrt(v_init + 1e-8) - g = tf.get_variable("g", dtype=tf.float32, - initializer=scale_init, trainable=True) - b = tf.get_variable("b", dtype=tf.float32, - initializer=-m_init*scale_init, trainable=True) - x_init = tf.reshape(scale_init, [1, 1, 1, output_dim]) * ( - x_init - tf.reshape(m_init, [1, 1, 1, output_dim])) - return x_init - else: - v = tf.get_variable("V") - g = tf.get_variable("g") - b = tf.get_variable("b") - tf.assert_variables_initialized([v, g, b]) - w = tf.reshape(g, [1, 1, output_dim, 1]) * tf.nn.l2_normalize( - v, [0, 1, 3]) - x = tf.nn.conv2d_transpose(x, w, target_shape, strides=[1, d_h, d_w, 1], - padding="SAME")
- x = tf.nn.bias_add(x, b) - return x diff --git a/compare_gan/src/gans/resnet_architecture.py b/compare_gan/src/gans/resnet_architecture.py deleted file mode 100644 index 991b720..0000000 --- a/compare_gan/src/gans/resnet_architecture.py +++ /dev/null @@ -1,570 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A GAN architecture with residual blocks and skip connections. - -This implementation is based on Spectral Norm implementation. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from math import log - -from compare_gan.src.gans import consts -from compare_gan.src.gans import ops - -import numpy as np -from six.moves import range -import tensorflow as tf - -slim = tf.contrib.slim -tfgan = tf.contrib.gan - - -def _validate_image_inputs(inputs, validate_power2=True): - inputs.get_shape().assert_has_rank(4) - inputs.get_shape()[1:3].assert_is_fully_defined() - if inputs.get_shape()[1] != inputs.get_shape()[2]: - raise ValueError("Input tensor does not have equal width and height: ", - inputs.get_shape()[1:3]) - width = inputs.get_shape().as_list()[1] - if validate_power2 and log(width, 2) != int(log(width, 2)): - raise ValueError("Input tensor `width` is not a power of 2: ", width) - - -def batch_norm_resnet(input_, is_training, scope, epsilon=1e-5): - return tf.contrib.layers.batch_norm( - input_, - decay=0.9, - updates_collections=None, - epsilon=epsilon, - scale=True, - fused=False, # Interesting. - is_training=is_training, - scope=scope) - - -# From https://github.com/tensorflow/tensorflow/issues/2169 -def unpool(value, name="unpool"): - """Unpooling operation. 
- - N-dimensional version of the unpooling operation from - https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf - Taken from: https://github.com/tensorflow/tensorflow/issues/2169 - - Args: - value: a Tensor of shape [b, d0, d1, ..., dn, ch] - name: name of the op - - Returns: - A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch] - """ - with tf.name_scope(name) as scope: - sh = value.get_shape().as_list() - dim = len(sh[1:-1]) - out = (tf.reshape(value, [-1] + sh[-dim:])) - for i in range(dim, 0, -1): - out = tf.concat([out, tf.zeros_like(out)], i) - out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]] - out = tf.reshape(out, out_size, name=scope) - return out - - -def get_conv(inputs, in_channels, out_channels, scale, suffix, use_sn): - """Return convolution for the resnet block.""" - if inputs.get_shape().as_list()[-1] != in_channels: - raise ValueError("Unexpected number of input channels.") - - if scale == "up": - output = unpool(inputs) - output = ops.conv2d( - output, output_dim=out_channels, k_h=3, k_w=3, - d_h=1, d_w=1, name="up_%s" % suffix, use_sn=use_sn) - return output - elif scale == "none": - return ops.conv2d( - inputs, output_dim=out_channels, k_h=3, k_w=3, - d_h=1, d_w=1, name="same_%s" % suffix, use_sn=use_sn) - elif scale == "down": - output = ops.conv2d( - inputs, output_dim=out_channels, k_h=3, k_w=3, - d_h=1, d_w=1, name="down_%s" % suffix, use_sn=use_sn) - output = tf.nn.pool( - output, [2, 2], "AVG", "SAME", strides=[2, 2], name="pool_%s" % suffix) - return output - return None # Should not happen! - - -def resnet_block(inputs, in_channels, out_channels, scale, - block_scope, is_training, reuse, discriminator_normalization, - is_gen_block): - assert scale in ["up", "down", "none"] - if inputs.get_shape().as_list()[-1] != in_channels: - raise ValueError("Unexpected number of input channels.") - - # In SN paper, if they upscale in generator they do this in the first conv. - # For discriminator downsampling happens after second conv. - if is_gen_block: - # Generator block - scale1 = scale # "up" or "none" - scale2 = "none" - else: - # Discriminator block. - scale1 = "none" - scale2 = scale # "down" or "none" - - print ("resnet_block, in=%d out=%d, scale=%s, scope=%s normalizer=%s" % ( - in_channels, out_channels, scale, block_scope, - discriminator_normalization)) - print ("INPUTS: ", inputs.get_shape()) - with tf.variable_scope(block_scope, values=[inputs], reuse=reuse): - output = inputs - use_sn = discriminator_normalization == consts.SPECTRAL_NORM - - # Define the skip connection, ensure 'conv' is in the suffix, otherwise it - # will not be regularized. - - shortcut = get_conv( - output, in_channels, out_channels, scale, - suffix="conv_shortcut", use_sn=use_sn) - print ("SHORTCUT: ", shortcut.get_shape()) - - # Apply batch norm in discriminator only if enabled. - if is_gen_block or discriminator_normalization == consts.BATCH_NORM: - output = batch_norm_resnet(output, is_training=is_training, scope="bn1") - elif discriminator_normalization == consts.LAYER_NORM: - output = ops.layer_norm(output, is_training=is_training, scope="ln1") - - output = tf.nn.relu(output) - output = get_conv( - output, in_channels, out_channels, scale1, - suffix="conv1", use_sn=use_sn) - print ("OUTPUT CONV1: ", output.get_shape()) - - # Apply batch norm in discriminator only if enabled. 
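# NOTE (added for clarity, not in the original file): generator blocks always
# batch-normalize, while discriminator blocks normalize activations only for
# BATCH_NORM / LAYER_NORM; SPECTRAL_NORM instead acts on the convolution
# weights through the use_sn flag passed to get_conv.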
- if is_gen_block or discriminator_normalization == consts.BATCH_NORM: - output = batch_norm_resnet(output, is_training=is_training, scope="bn2") - elif discriminator_normalization == consts.LAYER_NORM: - output = ops.layer_norm(output, is_training=is_training, scope="ln2") - - output = tf.nn.relu(output) - output = get_conv( - output, out_channels, out_channels, scale2, - suffix="conv2", use_sn=use_sn) - print ("OUTPUT CONV2: ", output.get_shape()) - - # Combine skip-connection with the convolved part. - output += shortcut - - return output - - -def generator_block(inputs, in_channels, out_channels, scale, - block_scope, is_training, reuse): - assert scale in ["up", "none"] - return resnet_block(inputs, in_channels, out_channels, scale, - block_scope, is_training, reuse, - discriminator_normalization=consts.NO_NORMALIZATION, - is_gen_block=True) - - -def discriminator_block(inputs, in_channels, out_channels, scale, - block_scope, is_training, reuse, - discriminator_normalization): - assert scale in ["down", "none"] - return resnet_block(inputs, in_channels, out_channels, scale, - block_scope, is_training, reuse, - discriminator_normalization, is_gen_block=False) - - -# Generates resolution 128x128 -def resnet5_generator(noise, - is_training, - reuse=None, - colors=3, - output_shape=128): - # Input is a noise tensor of shape [bs, z_dim] - assert len(noise.get_shape().as_list()) == 2 - - # Calculate / define a few numbers. - batch_size = noise.get_shape().as_list()[0] - # Each block upscales by a factor of 2: - seed_size = 4 - # We want the last block to have 64 channels: - ch = 64 - - with tf.variable_scope("generator", reuse=reuse): - # Map noise to the actual seed. - output = ops.linear(noise, ch * 8 * seed_size * seed_size, scope="fc_noise") - - # Reshape the seed to be a rank-4 Tensor. - output = tf.reshape( - output, [batch_size, seed_size, seed_size, ch * 8], name="fc_reshaped") - - # Magic in/out channel numbers copied from SN paper. - magic = [(8, 8), (8, 4), (4, 4), (4, 2), (2, 1)] - up_layers = np.log2(float(output_shape) / seed_size) - assert up_layers.is_integer(), "log2(%d/%d) must be an integer" % ( - output_shape, seed_size) - assert up_layers <= 5 and up_layers >= 0, "Invalid output_shape %d" % ( - output_shape) - up_layers = int(up_layers) - for block_idx in range(5): - block_scope = "B%d" % (block_idx + 1) - in_channels = ch * magic[block_idx][0] - out_channels = ch * magic[block_idx][1] - print("Resnet5, block %d in=%d out=%d" % (block_idx, in_channels, - out_channels)) - if block_idx < up_layers: - scale = "up" - else: - scale = "none" - output = generator_block(output, in_channels=in_channels, - out_channels=out_channels, - scale=scale, block_scope=block_scope, - is_training=is_training, reuse=reuse) - - # Final processing of the output. - output = batch_norm_resnet(output, is_training=is_training, - scope="final_norm") - output = tf.nn.relu(output) - output = ops.conv2d( - output, output_dim=colors, k_h=3, k_w=3, d_h=1, d_w=1, - name="final_conv") - output = tf.nn.sigmoid(output) - - print("Generator output shape: ", output) - return output - - -def resnet5_discriminator(inputs, - is_training, - discriminator_normalization, - reuse=None): - """ResNet style discriminator. - - Construct discriminator network from inputs to the final endpoint. - - Args: - inputs: A tensor of size [batch_size, height, width, channels]. Must be - floating point. - is_training: Is the model currently being trained. - discriminator_normalization: which type of normalization to apply. 
- reuse: Whether or not the network variables should be reused. `scope` - must be given to be reused. - - Returns: - out: The prediction of the discriminator (in [0, 1]). Shape: [bs, 1] - out_logit: The pre-sigmoid activations for discrimination - real/generated, a tensor of size [batch_size, 1] - - Raises: - ValueError: If the input image shape is not 4-dimensional, if the spatial - dimensions aren't defined at graph construction time, if the spatial - dimensions aren't square, or if the spatial dimensions aren't a power of - two. - """ - - _validate_image_inputs(inputs) - colors = inputs.get_shape().as_list()[-1] - assert colors in [1, 3] - - ch = 64 - with tf.variable_scope("discriminator", values=[inputs], reuse=reuse): - output = discriminator_block( - inputs, in_channels=colors, out_channels=ch, - scale="down", block_scope="B0", is_training=is_training, reuse=reuse, - discriminator_normalization=discriminator_normalization) - - # Magic in/out channel numbers copied from SN paper. - magic = [(1, 2), (2, 4), (4, 4), (4, 8), (8, 8)] - for block_idx in range(5): - block_scope = "B%d" % (block_idx + 1) - in_channels = ch * magic[block_idx][0] - out_channels = ch * magic[block_idx][1] - print("Resnet5 disc, block %d in=%d out=%d" % ( - block_idx, in_channels, out_channels)) - output = discriminator_block( - output, in_channels=in_channels, out_channels=out_channels, - scale="down", block_scope=block_scope, is_training=is_training, - reuse=reuse, discriminator_normalization=discriminator_normalization) - - # Final part - output = tf.nn.relu(output) - pre_logits = tf.reduce_mean(output, axis=[1, 2]) - - use_sn = discriminator_normalization == consts.SPECTRAL_NORM - out_logit = ops.linear(pre_logits, 1, scope="disc_final_fc", use_sn=use_sn) - out = tf.nn.sigmoid(out_logit) - return out, out_logit, None - - -# Generates resolution 32x32, same architecture as in SN-GAN paper, Table 4, -# page 17. -def resnet_cifar_generator(noise, - is_training, - reuse=None, - colors=3): - batch_size = noise.get_shape().as_list()[0] - with tf.variable_scope("generator", reuse=reuse): - # Map noise to the actual seed. - output = ops.linear( - noise, 4 * 4 * 256, scope="fc_noise") - - # Reshape the seed to be a rank-4 Tensor. - output = tf.reshape( - output, [batch_size, 4, 4, 256], name="fc_reshaped") - - for block_idx in range(3): - block_scope = "B%d" % (block_idx + 1) - output = generator_block(output, in_channels=256, - out_channels=256, - scale="up", block_scope=block_scope, - is_training=is_training, reuse=reuse) - - # Final processing of the output.
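# NOTE (added for clarity, not in the original file): the tail below is
# BN -> ReLU -> 3x3 conv to `colors` channels -> sigmoid, mapping the final
# feature map into [0, 1] image space.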
- output = batch_norm_resnet( - output, is_training=is_training, scope="final_norm") - output = tf.nn.relu(output) - output = ops.conv2d( - output, output_dim=colors, k_h=3, k_w=3, d_h=1, d_w=1, - name="final_conv") - output = tf.nn.sigmoid(output) - - print ("Generator output shape: ", output) - return output - - -def resnet_cifar_discriminator(inputs, - is_training, - discriminator_normalization, - reuse=None): - _validate_image_inputs(inputs) - colors = inputs.get_shape().as_list()[-1] - assert colors in [1, 3] - - with tf.variable_scope("discriminator", values=[inputs], reuse=reuse): - output = inputs - channels = colors - - for block_idx in range(4): - block_scope = "B%d" % block_idx - scale = "down" if block_idx <= 1 else "none" - output = discriminator_block( - output, in_channels=channels, out_channels=128, - scale=scale, block_scope=block_scope, - is_training=is_training, reuse=reuse, - discriminator_normalization=discriminator_normalization) - channels = 128 - - # Final part - ReLU - output = tf.nn.relu(output) - # Global sum pooling (it's actually "mean" here, as that's what they had in - # their implementation for resnet5). There was no implementation for Cifar. - pre_logits = tf.reduce_mean(output, axis=[1, 2]) - # dense -> 1 - use_sn = discriminator_normalization == consts.SPECTRAL_NORM - out_logit = ops.linear(pre_logits, 1, scope="disc_final_fc", use_sn=use_sn) - out = tf.nn.sigmoid(out_logit) - return out, out_logit, None - - -# Generates resolution 48*48, same architecture as in SN-GAN paper, Table 5, -# page 17. -def resnet_stl_generator(noise, is_training, reuse=None, colors=3): - batch_size = noise.get_shape().as_list()[0] - with tf.variable_scope("generator", reuse=reuse): - # Map noise to the actual seed. - output = ops.linear(noise, 6 * 6 * 512, scope="fc_noise") - - # Reshape the seed to be a rank-4 Tensor. - output = tf.reshape(output, [batch_size, 6, 6, 512], name="fc_reshaped") - - ch = 64 - # in/out channel numbers copied from SN paper. - magic = [(8, 4), (4, 2), (2, 1)] - for block_idx in range(3): - block_scope = "B%d" % (block_idx + 1) - in_channels = ch * magic[block_idx][0] - out_channels = ch * magic[block_idx][1] - output = generator_block( - output, - in_channels=in_channels, - out_channels=out_channels, - scale="up", - block_scope=block_scope, - is_training=is_training, - reuse=reuse) - - # Final processing of the output. - output = batch_norm_resnet( - output, is_training=is_training, scope="final_norm") - output = tf.nn.relu(output) - output = ops.conv2d( - output, - output_dim=colors, - k_h=3, - k_w=3, - d_h=1, - d_w=1, - name="final_conv") - output = tf.nn.sigmoid(output) - - print("Generator output shape: ", output) - return output - - -def resnet_stl_discriminator(inputs, - is_training, - discriminator_normalization, - reuse=None): - _validate_image_inputs(inputs, validate_power2=False) - colors = inputs.get_shape().as_list()[-1] - assert colors in [1, 3] - - ch = 64 - with tf.variable_scope("discriminator", values=[inputs], reuse=reuse): - output = discriminator_block( - inputs, - in_channels=colors, - out_channels=ch, - scale="down", - block_scope="B0", - is_training=is_training, - reuse=reuse, - discriminator_normalization=discriminator_normalization) - - # in/out channel numbers copied from SN paper. 
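# NOTE (added for clarity, not in the original file): with ch = 64 these
# multipliers widen the STL discriminator as 64 -> 128 -> 256 -> 512 -> 1024
# across the four blocks below (the initial B0 block already mapped the
# input colors to 64 channels).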
- magic = [(1, 2), (2, 4), (4, 8), (8, 16)] - for block_idx in range(4): - block_scope = "B%d" % (block_idx + 1) - in_channels = ch * magic[block_idx][0] - out_channels = ch * magic[block_idx][1] - print("Resnet5 disc, block %d in=%d out=%d" % (block_idx, in_channels, - out_channels)) - - if block_idx < 3: - scale = "down" - else: - scale = "none" - output = discriminator_block( - output, - in_channels=in_channels, - out_channels=out_channels, - scale=scale, - block_scope=block_scope, - is_training=is_training, - reuse=reuse, - discriminator_normalization=discriminator_normalization) - - # Final part - output = tf.nn.relu(output) - pre_logits = tf.reduce_mean(output, axis=[1, 2]) - - use_sn = discriminator_normalization == consts.SPECTRAL_NORM - out_logit = ops.linear(pre_logits, 1, scope="disc_final_fc", use_sn=use_sn) - out = tf.nn.sigmoid(out_logit) - return out, out_logit, None - - -# Resnet-107, trying to be as similar as possible to Resnet-101 in -# https://github.com/igul222/improved_wgan_training/blob/master/gan_64x64.py -# (the difference comes from the fact that the Resnet-101 generates 64x64 -# and we need 128x128, while keeping similar number of resnet blocks). -def resnet107_generator(noise, is_training, reuse=None, colors=3): - # Input is a noise tensor of shape [bs, z_dim] - assert len(noise.get_shape().as_list()) == 2 - - # Calculate / define a few numbers. - batch_size = noise.get_shape().as_list()[0] - ch = 64 - - with tf.variable_scope("generator", reuse=reuse): - # Map noise to the actual seed. - output = ops.linear(noise, 4 * 4 * 8 * ch, scope="fc_noise") - - # Reshape the seed to be a rank-4 Tensor. - output = tf.reshape(output, [batch_size, 4, 4, 8 * ch], name="fc_reshaped") - - in_channels = 8 * ch - out_channels = 4 * ch - for superblock in range(6): - for i in range(5): - block_scope = "B_%d_%d" % (superblock, i) - output = generator_block( - output, in_channels=in_channels, out_channels=in_channels, - scale="none", block_scope=block_scope, is_training=is_training, - reuse=reuse) - # We want to upscale 5 times. - if superblock < 5: - output = generator_block( - output, in_channels=in_channels, out_channels=out_channels, - scale="up", block_scope="B_%d_up" % superblock, - is_training=is_training, reuse=reuse) - in_channels /= 2 - out_channels /= 2 - - output = ops.conv2d( - output, output_dim=colors, k_h=3, k_w=3, d_h=1, d_w=1, - name="final_conv") - output = tf.nn.sigmoid(output) - - print ("Generator output shape: ", output) - return output - - -def resnet107_discriminator(inputs, - is_training, - discriminator_normalization, - reuse=None): - _validate_image_inputs(inputs) - colors = inputs.get_shape().as_list()[-1] - assert colors in [1, 3] - - ch = 64 - - with tf.variable_scope("discriminator", values=[inputs], reuse=reuse): - output = ops.conv2d( - inputs, output_dim=ch // 4, k_h=3, k_w=3, d_h=1, d_w=1, - name="color_conv") - in_channels = ch // 4 - out_channels = ch // 2 - for superblock in range(6): - for i in range(5): - block_scope = "B_%d_%d" % (superblock, i) - output = discriminator_block( - output, in_channels=in_channels, out_channels=in_channels, - scale="none", block_scope=block_scope, is_training=is_training, - reuse=reuse, - discriminator_normalization=discriminator_normalization) - # We want to downscale 5 times. 
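# NOTE (added for clarity, not in the original file): five halvings reduce
# the 128x128 input to 4x4, which is what makes the discriminator's final
# reshape to [-1, 4 * 4 * 8 * ch] below line up.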
- if superblock < 5: - output = discriminator_block( - output, in_channels=in_channels, out_channels=out_channels, - scale="down", block_scope="B_%d_up" % superblock, - is_training=is_training, reuse=reuse, - discriminator_normalization=discriminator_normalization) - in_channels *= 2 - out_channels *= 2 - - # Final part - output = tf.reshape(output, [-1, 4 * 4 * 8 * ch]) - use_sn = discriminator_normalization == consts.SPECTRAL_NORM - out_logit = ops.linear(output, 1, scope="disc_final_fc", use_sn=use_sn) - out = tf.nn.sigmoid(out_logit) - - return out, out_logit, None diff --git a/compare_gan/src/gans/resnet_architecture_test.py b/compare_gan/src/gans/resnet_architecture_test.py deleted file mode 100644 index 5bb2aca..0000000 --- a/compare_gan/src/gans/resnet_architecture_test.py +++ /dev/null @@ -1,90 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for Resnet architectures.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from compare_gan.src.gans import resnet_architecture as resnet_arch - -import tensorflow as tf - - -def TestResnet5GeneratorShape(output_shape): - config = tf.ConfigProto(allow_soft_placement=True) - tf.reset_default_graph() - batch_size = 8 - z_dim = 64 - os = output_shape - with tf.Session(config=config) as sess: - z = tf.random_normal([batch_size, z_dim]) - g = resnet_arch.resnet5_generator( - noise=z, is_training=True, reuse=False, colors=3, output_shape=os) - tf.global_variables_initializer().run() - output = sess.run([g]) - return [output[0].shape, (batch_size, os, os, 3)] - - -class ResnetArchitectureTest(tf.test.TestCase): - - def testResnet5GeneratorRuns(self): - generator_128 = TestResnet5GeneratorShape(128) - generator_64 = TestResnet5GeneratorShape(64) - self.assertEquals(generator_128[0], generator_128[1]) - self.assertEquals(generator_64[0], generator_64[1]) - - def testResnet5DiscriminatorRuns(self): - config = tf.ConfigProto(allow_soft_placement=True) - tf.reset_default_graph() - batch_size = 8 - with tf.Session(config=config) as sess: - images = tf.random_normal([batch_size, 128, 128, 3]) - out, _, _ = resnet_arch.resnet5_discriminator( - images, is_training=True, discriminator_normalization="spectral_norm", - reuse=False) - tf.global_variables_initializer().run() - output = sess.run([out]) - self.assertEquals(output[0].shape, (batch_size, 1)) - - def testResnet107GeneratorRuns(self): - config = tf.ConfigProto(allow_soft_placement=True) - tf.reset_default_graph() - batch_size = 8 - z_dim = 64 - with tf.Session(config=config) as sess: - z = tf.random_normal([batch_size, z_dim]) - g = resnet_arch.resnet107_generator( - noise=z, is_training=True, reuse=False, colors=3) - tf.global_variables_initializer().run() - output = sess.run([g]) - self.assertEquals(output[0].shape, (batch_size, 128, 128, 3)) - - def testResnet107DiscriminatorRuns(self): - config = tf.ConfigProto(allow_soft_placement=True) - 
tf.reset_default_graph() - batch_size = 8 - with tf.Session(config=config) as sess: - images = tf.random_normal([batch_size, 128, 128, 3]) - out, _, _ = resnet_arch.resnet107_discriminator( - images, is_training=True, - discriminator_normalization="spectral_norm", reuse=False) - tf.global_variables_initializer().run() - output = sess.run([out]) - self.assertEquals(output[0].shape, (batch_size, 1)) - -if __name__ == "__main__": - tf.test.main() diff --git a/compare_gan/src/generate_tasks_lib.py b/compare_gan/src/generate_tasks_lib.py deleted file mode 100644 index d9eda31..0000000 --- a/compare_gan/src/generate_tasks_lib.py +++ /dev/null @@ -1,731 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generate tasks for comparing GANs.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import copy -import csv -import math -import os -import random - -from compare_gan.src import params -from compare_gan.src import simple_task_pb2 -from compare_gan.src import task_utils - -from compare_gan.src.gans import consts -import six -import tensorflow as tf -from google.protobuf import text_format - -flags = tf.flags -FLAGS = flags.FLAGS -logging = tf.logging - -BATCH_SIZE = 64 - -ALL_GANS = [ - "GAN", "GAN_MINMAX", "WGAN", "WGAN_GP", - "DRAGAN", "VAE", "LSGAN", "BEGAN"] - - -def CreateCrossProductAndAddDefaultParams(config): - tasks = task_utils.CrossProduct(config) - # Add GAN and dataset specific hyperparams. - for task in tasks: - defaults = GetDefaultParams( - params.GetParameters(task["gan_type"], "wide")) - defaults.update(task) - task.update(defaults) - return tasks - - -def TestExp(): - """Run for one epoch over all tested GANs.""" - config = { - "gan_type": ["GAN", "GAN_MINMAX", "WGAN", "WGAN_GP", - "DRAGAN", "VAE", "LSGAN", "BEGAN", - "GAN_PENALTY", "WGAN_PENALTY"], - "dataset": ["fake"], - "architecture": [consts.INFOGAN_ARCH], - "training_steps": [100], - # Don't save any checkpoints during the training - # (one is always saved at the end). - "save_checkpoint_steps": [10000], - "batch_size": [BATCH_SIZE], - "tf_seed": [42], - "z_dim": [64], - # For test reasons - use a small sample. 
- "eval_test_samples": [50], - } - return CreateCrossProductAndAddDefaultParams(config) - - -def TestGILBOExp(): - config = TestExp()[0] - config["compute_gilbo"] = True - config["gilbo_max_train_cycles"] = 2 - config["gilbo_train_steps_per_cycle"] = 100 - config["gilbo_eval_steps"] = 100 - return [config] - - -def TestGansWithPenalty(): - """Run for one epoch over all tested GANs.""" - config = { - "gan_type": ["GAN_PENALTY", "WGAN_PENALTY"], - "penalty_type": [consts.NO_PENALTY, consts.WGANGP_PENALTY], - "dataset": ["mnist"], - "training_steps": [60000 // BATCH_SIZE], - "save_checkpoint_steps": [10000], - "architecture": [consts.INFOGAN_ARCH], - "batch_size": [BATCH_SIZE], - "tf_seed": [42], - "z_dim": [64], - } - return CreateCrossProductAndAddDefaultParams(config) - - -def TestNormalization(): - """Run for one epoch over different normalizations.""" - config = { - "gan_type": ["GAN_PENALTY", "WGAN_PENALTY"], - "penalty_type": [consts.NO_PENALTY], - "discriminator_normalization": [consts.BATCH_NORM, - consts.SPECTRAL_NORM], - "dataset": ["mnist"], - "training_steps": [60000 // BATCH_SIZE], - "save_checkpoint_steps": [10000], - "architecture": [consts.INFOGAN_ARCH], - "batch_size": [BATCH_SIZE], - "tf_seed": [42], - "z_dim": [64], - } - return CreateCrossProductAndAddDefaultParams(config) - - -def TestNewDatasets(): - """Run for one epoch over all tested GANs.""" - config = { - "gan_type": ["GAN_PENALTY"], - "penalty_type": [consts.NO_PENALTY], - "dataset": ["celebahq128", "imagenet64", "imagenet128", "lsun-bedroom"], - "training_steps": [100], - "save_checkpoint_steps": [50], - "architecture": [consts.DCGAN_ARCH], - "batch_size": [BATCH_SIZE], - "tf_seed": [42], - "z_dim": [100], - } - return CreateCrossProductAndAddDefaultParams(config) - - -def TestGansWithPenaltyNewDatasets(architecture): - """Run for one epoch over all tested GANs.""" - config = { - "gan_type": ["GAN_PENALTY", "WGAN_PENALTY"], - "penalty_type": [consts.NO_PENALTY, consts.WGANGP_PENALTY], - "dataset": ["celebahq128", "imagenet128", "lsun-bedroom"], - "training_steps": [162000 * 80 // BATCH_SIZE], - "save_checkpoint_steps": [10000], - "architecture": [architecture], - "batch_size": [BATCH_SIZE], - "tf_seed": [42], - "z_dim": [128], - } - return CreateCrossProductAndAddDefaultParams(config) - - -def GetDefaultParams(gan_params): - """Return the default params for a GAN (=the ones used in the paper).""" - ret = {} - for param_name, param_info in six.iteritems(gan_params): - ret[param_name] = param_info.default - return ret - - -def BestModelSNDCGan(): - # These models are matching table 5 (SNDCGan) from the paper. - best_models = [ - ## Without normalization and without penalty. - # Line 1: FID score: 28.66 - { - "dataset": "cifar10", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0001, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.NO_NORMALIZATION, - "tf_seed": 2, - "lambda": 10, # this should not be needed for this one. - }, - # Line 2: FID score: 33.23 - { - "dataset": "cifar10", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.NO_NORMALIZATION, - "tf_seed": 2, - "lambda": 10, # this should not be needed for this one. 
- }, - # Line 3: FID score: 61.15 - { - "dataset": "celebahq128", - "disc_iters": 1, - "discriminator_normalization": consts.NO_NORMALIZATION, - "penalty_type": consts.NO_PENALTY, - "tf_seed": 23, - "training_steps": 100000, - "learning_rate": 0.000412, - "beta1": 0.246, - "beta2": 0.341, - "lambda": 10, # this should not be needed for this one. - }, - # Line 4: FID score: 62.96 - { - "dataset": "celebahq128", - "training_steps": 100000, - "penalty_type": consts.NO_PENALTY, - "discriminator_normalization": consts.NO_NORMALIZATION, - "learning_rate": 0.000254, - "beta1": 0.222, - "beta2": 0.599, - "disc_iters": 1, - "tf_seed": 23, - "lambda": 10, # this should not be needed for this one. - }, - # Line 5: FID score: 163.51 - { - "dataset": "lsun-bedroom", - "training_steps": 100000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.NO_NORMALIZATION, - "tf_seed": 23, - "lambda": 10, # this should not be needed for this one. - }, - # Line 6: FID score: 167.15 - { - "dataset": "lsun-bedroom", - "training_steps": 100000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.000062, - "beta1": 0.608, - "beta2": 0.620, - "disc_iters": 1, - "discriminator_normalization": consts.NO_NORMALIZATION, - "tf_seed": 23, - "lambda": 10, # this should not be needed for this one. - }, - - ## With normalization - - # Line 7: FID score: 25.27 - { - "dataset": "cifar10", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 1, - }, - # Line 8: FID score: 26.16 - { - "dataset": "cifar10", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0001, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 10, - }, - # Line 9: FID score: 28.21 - { - "dataset": "celebahq128", - "training_steps": 100000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.9, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 10, - }, - # Line 10: FID score: 29.92 - { - "dataset": "celebahq128", - "training_steps": 100000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.000154, - "beta1": 0.246, - "beta2": 0.734, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 0.144, - }, - # Line 11: FID score: 53.59 - { - "dataset": "lsun-bedroom", - "training_steps": 100000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.000264, - "beta1": 0.011, - "beta2": 0.966, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 0.300, - }, - # Line 12: FID score: 56.71 - { - "dataset": "lsun-bedroom", - "training_steps": 100000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.00192, - "beta1": 0.097, - "beta2": 0.938, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 0.580, - }, - - ## With normalization and penalty - - # Line 13: FID score: 25.27 - { - "dataset": "cifar10", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 
1, - }, - # Line 14: FID score: 26.00 - { - "dataset": "cifar10", - "training_steps": 200000, - "penalty_type": consts.WGANGP_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 10, - }, - # Line 15: FID score: 24.67 - { - "dataset": "celebahq128", - "training_steps": 100000, - "penalty_type": consts.WGANGP_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 1, - }, - # Line 16: FID score: 25.22 - { - "dataset": "celebahq128", - "training_steps": 100000, - "penalty_type": consts.WGANGP_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.900, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 1, - }, - # Line 17: FID score: 53.59 - { - "dataset": "lsun-bedroom", - "training_steps": 100000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.000264, - "beta1": 0.011, - "beta2": 0.966, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 0.300, - }, - # Line 18: FID score: 56.71 - { - "dataset": "lsun-bedroom", - "training_steps": 100000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.00192, - "beta1": 0.097, - "beta2": 0.938, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 0.580, - }, - - ] - - for model in best_models: - model.update({ - "architecture": consts.SNDCGAN_ARCH, - "batch_size": 64, - "gan_type": consts.GAN_WITH_PENALTY, - "optimizer": "adam", - "save_checkpoint_steps": 20000, - "z_dim": 128, - }) - - return best_models - - -def BestModelResnet19(): - # These models are matching table 7 (ResNet19) from the paper. - best_models = [ - ## Without normalization and without penalty. - # Line 1: FID score: 34.29 - { - "dataset": "celebahq128", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0000338, - "beta1": 0.3753, - "beta2": 0.9982, - "disc_iters": 1, - "discriminator_normalization": consts.NO_NORMALIZATION, - "tf_seed": 2, - "lambda": 10, # this should not be needed for this one. - }, - # Line 2: FID score: 35.85 - { - "dataset": "celebahq128", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0001, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.NO_NORMALIZATION, - "tf_seed": 2, - "lambda": 10, # this should not be needed for this one. - }, - # Line 3: FID score: 102.74 - { - "dataset": "lsun-bedroom", - "gan_type": consts.LSGAN_WITH_PENALTY, - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0000322, - "beta1": 0.5850, - "beta2": 0.9904, - "disc_iters": 1, - "discriminator_normalization": consts.NO_NORMALIZATION, - "tf_seed": 2, - "lambda": 10, # this should not be needed for this one. - }, - # Line 4: FID score: 112.92 - { - "dataset": "lsun-bedroom", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0000193, - "beta1": 0.1947, - "beta2": 0.8819, - "disc_iters": 1, - "discriminator_normalization": consts.NO_NORMALIZATION, - "tf_seed": 2, - "lambda": 10, # this should not be needed for this one. 
- }, - - ## With normalization - - # Line 5: FID score: 30.02 - { - "dataset": "celebahq128", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0001, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.LAYER_NORM, - "tf_seed": 2, - "lambda": 0.001, - }, - # Line 6: FID score: 32.05 - { - "dataset": "celebahq128", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0001, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.LAYER_NORM, - "tf_seed": 2, - "lambda": 0.01, - }, - # Line 7: FID score: 41.6 - { - "dataset": "lsun-bedroom", - "gan_type": consts.LSGAN_WITH_PENALTY, - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 1, - }, - # Line 8: FID score: 42.51 - { - "dataset": "lsun-bedroom", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0002851, - "beta1": 0.1019, - "beta2": 0.998, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 5.6496, - }, - - ## With normalization and penalty - - # Line 9: FID score: 29.04 - { - "dataset": "celebahq128", - "training_steps": 200000, - "penalty_type": consts.WGANGP_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 1, - }, - # Line 10: FID score: 29.13 - { - "dataset": "celebahq128", - "training_steps": 200000, - "penalty_type": consts.DRAGAN_PENALTY, - "learning_rate": 0.0001, - "beta1": 0.5, - "beta2": 0.9, - "disc_iters": 5, - "discriminator_normalization": consts.LAYER_NORM, - "tf_seed": 2, - "lambda": 1, - }, - # Line 11: FID score: 40.36 - { - "dataset": "lsun-bedroom", - "training_steps": 200000, - "penalty_type": consts.WGANGP_PENALTY, - "learning_rate": 0.0001281, - "beta1": 0.7108, - "beta2": 0.9792, - "disc_iters": 1, - "discriminator_normalization": consts.LAYER_NORM, - "tf_seed": 2, - "lambda": 0.1451, - }, - # Line 12: FID score: 41.60 - { - "dataset": "lsun-bedroom", - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 1, - "discriminator_normalization": consts.SPECTRAL_NORM, - "tf_seed": 2, - "lambda": 1, - }] - - for model in best_models: - model.update({ - "architecture": consts.RESNET5_ARCH, - "batch_size": 64, - "optimizer": "adam", - "save_checkpoint_steps": 20000, - "z_dim": 128, - }) - if "gan_type" not in model: - model.update({"gan_type": consts.GAN_WITH_PENALTY}) - - return best_models - -def BestModelResnetCifar(): - # These models are matching table 7 (ResNet19) from the paper. - best_models = [ - ## Without normalization and without penalty. - # Line 1: FID score: 28.12 - { - "training_steps": 200000, - "penalty_type": consts.NO_PENALTY, - "learning_rate": 0.0002, - "beta1": 0.5, - "beta2": 0.999, - "disc_iters": 5, - "discriminator_normalization": consts.NO_NORMALIZATION, - "tf_seed": 2, - "lambda": 10, # this should not be needed for this one. 
- },
- # Line 2: FID score: 30.08
- {
- "training_steps": 200000,
- "penalty_type": consts.NO_PENALTY,
- "learning_rate": 0.0001,
- "beta1": 0.5,
- "beta2": 0.999,
- "disc_iters": 5,
- "discriminator_normalization": consts.NO_NORMALIZATION,
- "tf_seed": 2,
- "lambda": 10, # this should not be needed for this one.
- },
-
- ## With normalization.
-
- # Line 3: FID score: 22.91
- {
- "training_steps": 200000,
- "penalty_type": consts.NO_PENALTY,
- "learning_rate": 0.0002,
- "beta1": 0.5,
- "beta2": 0.999,
- "disc_iters": 5,
- "discriminator_normalization": consts.SPECTRAL_NORM,
- "tf_seed": 2,
- "lambda": 10,
- },
-
- # Line 4: FID score: 23.22
- {
- "training_steps": 200000,
- "penalty_type": consts.NO_PENALTY,
- "learning_rate": 0.0002,
- "beta1": 0.5,
- "beta2": 0.999,
- "disc_iters": 5,
- "discriminator_normalization": consts.SPECTRAL_NORM,
- "tf_seed": 2,
- "lambda": 1,
- },
-
- ## With normalization and penalty.
-
- # Line 5: FID score: 22.73
- {
- "training_steps": 200000,
- "penalty_type": consts.WGANGP_PENALTY,
- "learning_rate": 0.0002,
- "beta1": 0.5,
- "beta2": 0.999,
- "disc_iters": 5,
- "discriminator_normalization": consts.SPECTRAL_NORM,
- "tf_seed": 2,
- "lambda": 1,
- },
-
- # Line 6: FID score: 22.91
- {
- "training_steps": 200000,
- "penalty_type": consts.NO_PENALTY,
- "learning_rate": 0.0002,
- "beta1": 0.5,
- "beta2": 0.999,
- "disc_iters": 5,
- "discriminator_normalization": consts.SPECTRAL_NORM,
- "tf_seed": 2,
- "lambda": 10,
- },
- ]
-
- for model in best_models:
- model.update({
- "architecture": consts.RESNET_CIFAR,
- "dataset": "cifar10",
- "batch_size": 64,
- "gan_type": consts.GAN_WITH_PENALTY,
- "optimizer": "adam",
- "save_checkpoint_steps": 20000,
- "z_dim": 128,
- })
-
- return best_models
-
-
-def GetTasks(experiment):
- """Get a list of tasks to run and eval for the given experiment name."""
- random.seed(123)
-
- if experiment == "test":
- return TestExp()
- elif experiment == "test_gilbo":
- return TestGILBOExp()
- elif experiment == "test_penalty":
- return TestGansWithPenalty()
- elif experiment == "test_new_datasets":
- return TestNewDatasets()
- elif experiment == "test_penalty_new_datasets":
- return TestGansWithPenaltyNewDatasets(consts.DCGAN_ARCH)
- elif experiment == "test_penalty_resnet5":
- return TestGansWithPenaltyNewDatasets(consts.RESNET5_ARCH)
- elif experiment == "test_normalization":
- return TestNormalization()
- elif experiment == "best_models_sndcgan":
- return BestModelSNDCGan()
- elif experiment == "best_models_resnet19":
- return BestModelResnet19()
- elif experiment == "best_models_resnet_cifar":
- return BestModelResnetCifar()
- else:
- raise ValueError("Unknown experiment %s" % experiment)
diff --git a/compare_gan/src/kid_score.py b/compare_gan/src/kid_score.py
deleted file mode 100644
index d25687a..0000000
--- a/compare_gan/src/kid_score.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# coding=utf-8
-# Copyright 2018 Google LLC & Hwalsuk Lee.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -"""Library for evaluating GANs using KID score.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -from compare_gan.src import fid_score -import numpy as np -import tensorflow as tf - -logging = tf.logging - - -def get_kid_function(real_image_tensor, - gen_image_tensor, - num_gen_images, - num_eval_images, - image_range, - inception_graph, - kid_max_batch_size=1024): - """Returns the function that computes KID score.""" - assert image_range == "0_255" - # Set up graph for generating features to pass to FID eval. - batch_size_gen = gen_image_tensor.get_shape().as_list()[0] - batch_size_real = real_image_tensor.get_shape().as_list()[0] - - # We want to cover only the case that the real data is bigger than - # generated (50k vs 10k for CIFAR to be comparable with SN GAN) - assert batch_size_real >= batch_size_gen - assert batch_size_real % batch_size_gen == 0 - - # We preprocess images and extract inception features as soon as they're - # generated. This is to maintain memory efficiency if the images are large. - # For example, for ImageNet, the inception features are much smaller than - # the images. - eval_features_tensor = fid_score.get_inception_features( - real_image_tensor, inception_graph) - gen_features_tensor = fid_score.get_inception_features( - gen_image_tensor, inception_graph) - - num_gen_images -= num_gen_images % batch_size_gen - num_eval_images -= num_eval_images % batch_size_real - logging.info("Evaluating %d real images to match batch size %d", - num_eval_images, batch_size_real) - logging.info("Evaluating %d generated images to match batch size %d", - num_gen_images, batch_size_gen) - # Make sure we run the same number of batches, as this is what TFGAN code - # assumes. - assert num_eval_images // batch_size_real == num_gen_images // batch_size_gen - num_batches = num_eval_images // batch_size_real - - feed_gen_features = tf.placeholder( - dtype=tf.float32, - shape=[num_gen_images] + gen_features_tensor.get_shape().as_list()[1:]) - feed_eval_features = tf.placeholder( - dtype=tf.float32, - shape=[num_eval_images] + eval_features_tensor.get_shape().as_list()[1:]) - - # Create the tensor which stores the computed KID. - kid_tensor = kid( - feed_eval_features, feed_gen_features, max_batch_size=kid_max_batch_size) - - # Define a function which wraps some session.run calls to generate a large - # number of images and compute FID on them. - def eval_fn(session): - """Function which wraps session.run calls to evaluate kid.""" - - logging.info("Evaluating.....") - logging.info("Generating images to feed") - eval_features_np = [] - gen_features_np = [] - for _ in range(num_batches): - e, g = session.run([eval_features_tensor, gen_features_tensor]) - eval_features_np.append(e) - gen_features_np.append(g) - - logging.info("Generated images successfully.") - eval_features_np = np.concatenate(eval_features_np) - gen_features_np = np.concatenate(gen_features_np) - - logging.info("Computing KID with generated images...") - kid_result = session.run( - kid_tensor, - feed_dict={ - feed_eval_features: eval_features_np, - feed_gen_features: gen_features_np - }) - logging.info("Computed KID: %f", kid_result) - - return kid_result - - return eval_fn - - -def kid(real_activations, - generated_activations, - max_batch_size=1024, - dtype=None, - return_stderr=False): - """Unbiased estimator of the Kernel Inception Distance. - - As defined by https://arxiv.org/abs/1801.01401. 
-
- If return_stderr, also returns an estimate of the standard error, i.e. the
- standard deviation of the KID estimator. Returns nan if the number
- of batches is too small (< 5); for more reliable estimates, one could use
- the asymptotic variance estimate given in https://arxiv.org/abs/1611.04488.
-
- Uses a block estimator, as in https://arxiv.org/abs/1307.1954, with blocks
- no larger than max_batch_size. This is slightly different than the authors'
- provided code, but is also unbiased (and provides a more valid variance
- estimate).
-
- NOTE: the blocking code assumes that real_activations and
- generated_activations are in random order. If real_activations is sorted
- in a meaningful order, the estimator will be biased.
-
- Args:
- real_activations: [batch, num_features] tensor with inception features.
- generated_activations: [batch, num_features] tensor with inception features.
- max_batch_size: Maximum size of the blocks used to compute the KID.
- dtype: Type used by the computations.
- return_stderr: If true, also returns the std_error from the KID computation.
-
- Returns:
- KID score (and optionally its standard error).
- """
- real_activations.get_shape().assert_has_rank(2)
- generated_activations.get_shape().assert_has_rank(2)
-
- # need to know dimension for the kernel, and batch size to split things
- real_activations.get_shape().assert_is_fully_defined()
- generated_activations.get_shape().assert_is_fully_defined()
-
- n_real, dim = real_activations.get_shape().as_list()
- n_gen, dim2 = generated_activations.get_shape().as_list()
- assert dim2 == dim
-
- # tfgan forces doubles for FID, but I don't think we need that here
- if dtype is None:
- dtype = real_activations.dtype
- assert generated_activations.dtype == dtype
- else:
- real_activations = tf.cast(real_activations, dtype)
- generated_activations = tf.cast(generated_activations, dtype)
-
- # split into largest approximately-equally-sized blocks
- n_bins = int(math.ceil(max(n_real, n_gen) / max_batch_size))
- bins_r = np.full(n_bins, int(math.ceil(n_real / n_bins)))
- bins_g = np.full(n_bins, int(math.ceil(n_gen / n_bins)))
- # Shrink the first few blocks by one element so that the block sizes sum
- # to exactly n_real and n_gen, respectively.
- bins_r[:(n_bins * bins_r[0]) - n_real] -= 1
- bins_g[:(n_bins * bins_g[0]) - n_gen] -= 1
- assert bins_r.min() >= 2
- assert bins_g.min() >= 2
-
- inds_r = tf.constant(np.r_[0, np.cumsum(bins_r)])
- inds_g = tf.constant(np.r_[0, np.cumsum(bins_g)])
-
- dim_ = tf.cast(dim, dtype)
-
- def get_kid_batch(i):
- """Computes KID on a given batch of features.
-
- Takes real_activations[ind_r[i] : ind_r[i+1]] and
- generated_activations[ind_g[i] : ind_g[i+1]].
-
- Args:
- i: Index of the batch.
-
- Returns:
- KID for the given batch.
- """
- r_s = inds_r[i]
- r_e = inds_r[i + 1]
- r = real_activations[r_s:r_e]
- m = tf.cast(r_e - r_s, dtype)
-
- g_s = inds_g[i]
- g_e = inds_g[i + 1]
- g = generated_activations[g_s:g_e]
- n = tf.cast(g_e - g_s, dtype)
-
- # Could probably do this a bit faster...
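- # k_rr, k_rg and k_gg hold the pairwise evaluations of the cubic
- # polynomial kernel k(x, y) = (x . y / dim + 1)^3 within and between the
- # two blocks; the returned value is the unbiased block estimate of MMD^2:
- #   sum_{i != j} k_rr[i, j] / (m * (m - 1))
- #     + sum_{i != j} k_gg[i, j] / (n * (n - 1)) - 2 * mean(k_rg).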
- k_rr = (tf.matmul(r, r, transpose_b=True) / dim_ + 1)**3
- k_rg = (tf.matmul(r, g, transpose_b=True) / dim_ + 1)**3
- k_gg = (tf.matmul(g, g, transpose_b=True) / dim_ + 1)**3
- return (
- -2 * tf.reduce_mean(k_rg) + (tf.reduce_sum(k_rr) - tf.trace(k_rr)) /
- (m * (m - 1)) + (tf.reduce_sum(k_gg) - tf.trace(k_gg)) / (n * (n - 1)))
-
- ests = tf.map_fn(
- get_kid_batch, np.arange(n_bins), dtype=dtype, back_prop=False)
-
- if return_stderr:
- if n_bins < 5:
- return tf.reduce_mean(ests), np.nan
- mn, var = tf.nn.moments(ests, [0])
- return mn, tf.sqrt(var / n_bins)
- else:
- return tf.reduce_mean(ests)
diff --git a/compare_gan/src/kid_score_test.py b/compare_gan/src/kid_score_test.py
deleted file mode 100644
index bade9d6..0000000
--- a/compare_gan/src/kid_score_test.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# coding=utf-8
-# Copyright 2018 Google LLC & Hwalsuk Lee.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from compare_gan.src import kid_score
-import tensorflow as tf
-
-
-class KidScoreTest(tf.test.TestCase):
-
- def _create_fake_inception_graph(self):
- """Creates a graph that mocks inception.
-
- It takes the input, multiplies it through a matrix full of 0.00001 values
- and returns the result as logits. It makes sure to match the tensor names
- of the real inception model.
-
- Returns:
- tf.Graph object with a simple mock inception inside.
- """
- fake_inception = tf.Graph()
- with fake_inception.as_default():
- graph_input = tf.placeholder(
- tf.float32, shape=[None, 299, 299, 3], name="Mul")
- matrix = tf.ones(shape=[299 * 299 * 3, 10]) * 0.00001
- output = tf.matmul(tf.layers.flatten(graph_input), matrix)
- output = tf.identity(output, name="pool_3")
- output = tf.identity(output, name="logits")
- return fake_inception
-
- def test_kid_function(self):
- with tf.Graph().as_default():
- real_data = tf.zeros((10, 4, 4, 3))
- gen_data = tf.ones((10, 4, 4, 3))
- inception_graph = self._create_fake_inception_graph()
-
- eval_fn = kid_score.get_kid_function(real_data, gen_data, 18, 19, "0_255",
- inception_graph.as_graph_def())
- with tf.Session() as sess:
- sess.run(tf.global_variables_initializer())
- result = eval_fn(sess)
- self.assertNear(result, 0.24, 0.1)
-
-
-if __name__ == "__main__":
- tf.test.main()
diff --git a/compare_gan/src/multi_gan/README.md b/compare_gan/src/multi_gan/README.md
deleted file mode 100644
index 67d0792..0000000
--- a/compare_gan/src/multi_gan/README.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# A Case for Object Compositionality in Deep Generative Models of Images
-
-![clevr-generated](illustrations/clevr_generated.png)
-
-This is the code repository complementing the paper
-["A Case for Object Compositionality in Deep Generative Models of Images"](https://arxiv.org/pdf/1810.10340.pdf).
-
-## Datasets
-
-The following provides an overview of the datasets that were used. Corresponding
-.tfrecords files for all custom datasets are available [here](https://goo.gl/Eub81x).
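-
-For reference, here is a minimal sketch of how one of the Multi-MNIST files
-below can be read with TF 1.x; it mirrors `dataset.py` in this directory, and
-the exact file name and the use of the "train" split are assumptions:
-
-```python
-import tensorflow as tf
-
-def parse_example(serialized):
-  # Each record stores a flattened float32 image and k=3 digit labels,
-  # both keyed by the split name.
-  features = tf.parse_single_example(serialized, {
-      "train/image": tf.FixedLenFeature([], tf.string),
-      "train/label": tf.FixedLenFeature([3], tf.int64),
-  })
-  image = tf.reshape(tf.decode_raw(features["train/image"], tf.float32),
-                     [64, 64, 1])  # c_dim is 3 for the RGB variants.
-  return image, tf.cast(features["train/label"], tf.int32)
-
-dataset = tf.data.TFRecordDataset("multi-mnist-3-uniform-train.tfrecords")
-dataset = dataset.map(parse_example)
-```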
-
-### Multi-MNIST
-
-In these datasets each image consists of 3 (potentially overlapping) MNIST digits.
-Digits are obtained from the original dataset (using the train/test/valid splits
-respectively), re-scaled and randomly placed in the image. A fixed offset from
-the border ensures that digits appear entirely in the image.
-
-#### Independent
-
-All digits 0-9 have an equal chance of appearing in the image. This roughly
-results in a uniform distribution over all 3-tuples of digits in the images. In
-practice we generate multi-mnist images in batches, meaning that we repeatedly
-select a batch of 100 MNIST digits that are then used to generate 100
-multi-MNIST images. Each digit in the batch has an equal chance of appearing in
-the multi-MNIST image, such that we roughly achieve uniformity. Digits are drawn
-on top of each other and clipped to the 0.0-1.0 range.
-
-#### Triplet
-
-All 3-tuples of digits that are triplets, e.g. (2, 2, 2), have an equal chance
-of appearing in the image. As before we use batching, i.e. from a batch of
-images a digit id is selected, after which 3 digits of that type are randomly
-sampled from the batch. Digits are drawn on top of each other and clipped to
-the 0.0-1.0 range.
-
-#### RGB Occluded
-
-Digits are sampled [uniformly](#independent) and colored either red, green or
-blue, such that exactly one of each color appears in a Multi-MNIST image.
-Digits are drawn one by one into the canvas without blending colors, such that
-overlapping digits occlude one another.
-
-### CIFAR10 + RGB MM
-
-Draws digits from [rgb occluded](#rgb-occluded) images on top of a randomly
-sampled CIFAR10 image (resized to 64 x 64 using bilinear interpolation).
-
-### CLEVR
-
-The CLEVR dataset can be downloaded from
-[here](https://cs.stanford.edu/people/jcjohns/clevr/). We use the "main" dataset
-containing 70,000 train images to train our model, and 10,000 of the test images
-to compute FID scores.
-
-During data loading we resize the raw images (320 x 480) to (160 x 240) using
-bilinear interpolation, and take a center crop to obtain (128 x 128) images.
-These are then normalized to 0.0-1.0 before being fed to the network.
diff --git a/compare_gan/src/multi_gan/dataset.py b/compare_gan/src/multi_gan/dataset.py
deleted file mode 100644
index 0962a10..0000000
--- a/compare_gan/src/multi_gan/dataset.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# coding=utf-8
-# Copyright 2018 Google LLC & Hwalsuk Lee.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Provides access to Datasets and their parameters."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import functools
-import os
-import tensorflow as tf
-
-flags = tf.flags
-FLAGS = flags.FLAGS
-
-flags.DEFINE_string("multigan_dataset_root",
- "/tmp/datasets/multi_gan",
- "Folder which contains all datasets.")
-
-# Multi-MNIST configs.
-MULTI_MNIST_CONFIGS = [
- "multi-mnist-3-uniform", "multi-mnist-3-triplet",
- "multi-mnist-3-uniform-rgb-occluded",
- "multi-mnist-3-uniform-rgb-occluded-cifar10"]
-
-
-def unpack_clevr_image(image_data):
- """Returns an image in the [0, 1] range and a dummy label."""
- value = tf.parse_single_example(
- image_data, features={"image": tf.FixedLenFeature([], tf.string)})
- image = tf.decode_raw(value["image"], tf.uint8)
- image = tf.reshape(image, [1, 320, 480, 3])
- image = tf.image.resize_bilinear(image, size=(160, 240))
- image = tf.squeeze(tf.image.resize_image_with_crop_or_pad(image, 128, 128))
- image = tf.cast(image, tf.float32) / 255.0
- dummy_label = tf.constant(value=0, dtype=tf.int32)
- return image, dummy_label
-
-
-def load_clevr(dataset_name, split_name, num_threads, buffer_size):
- del dataset_name
- filenames = tf.data.Dataset.list_files(
- os.path.join(FLAGS.multigan_dataset_root, "clevr/%s*" % split_name))
-
- return tf.data.TFRecordDataset(
- filenames,
- buffer_size=buffer_size,
- num_parallel_reads=num_threads).map(
- unpack_clevr_image, num_parallel_calls=num_threads)
-
-
-def unpack_multi_mnist_image(split_name, k, rgb, image_data):
- """Returns an image in the [0, 1] range and its digit labels."""
- c_dim = 3 if rgb else 1
-
- value = tf.parse_single_example(
- image_data,
- features={"%s/image" % split_name: tf.FixedLenFeature([], tf.string),
- "%s/label" % split_name: tf.FixedLenFeature([k], tf.int64)})
-
- image = tf.decode_raw(value["%s/image" % split_name], tf.float32)
- image = tf.reshape(image, [64, 64, c_dim])
- image = image / 255.0 if rgb else image
- label = tf.cast(value["%s/label" % split_name], tf.int32)
- return image, label
-
-
-def load_multi_mnist(dataset_name, split_name, num_threads, buffer_size):
- k = int(dataset_name.split("-")[2])
- rgb = "rgb" in dataset_name
- unpack = functools.partial(unpack_multi_mnist_image, split_name, k, rgb)
- filename = os.path.join(FLAGS.multigan_dataset_root,
- "%s-%s.tfrecords" % (dataset_name, split_name))
-
- return tf.data.TFRecordDataset(
- filename,
- buffer_size=buffer_size,
- num_parallel_reads=num_threads).map(
- unpack, num_parallel_calls=num_threads)
-
-
-def get_dataset_params():
- """Returns a dictionary containing dicts with hyper params for datasets."""
-
- params = {
- "clevr": {
- "input_height": 128,
- "input_width": 128,
- "output_height": 128,
- "output_width": 128,
- "c_dim": 3,
- "dataset_name": "clevr",
- "eval_test_samples": 10000
- },
- }
-
- # Add multi-mnist configs.
- for dataset_name in MULTI_MNIST_CONFIGS:
- c_dim = 3 if "rgb" in dataset_name else 1
- params.update({
- dataset_name: {
- "input_height": 64,
- "input_width": 64,
- "output_height": 64,
- "output_width": 64,
- "c_dim": c_dim,
- "dataset_name": dataset_name,
- "eval_test_samples": 10000
- }
- })
-
- return params
-
-
-def get_datasets():
- """Returns a dict containing methods to load specific datasets."""
-
- datasets = {
- "clevr": load_clevr,
- }
-
- # Add multi-mnist configs.
- for dataset_name in MULTI_MNIST_CONFIGS:
- datasets[dataset_name] = load_multi_mnist
-
- return datasets
diff --git a/compare_gan/src/multi_gan/illustrations/clevr_generated.png b/compare_gan/src/multi_gan/illustrations/clevr_generated.png
deleted file mode 100644
index e4d82bf33516b1e5b9d8406c908eb36671e56cba..0000000000000000000000000000000000000000
Binary files a/compare_gan/src/multi_gan/illustrations/clevr_generated.png and /dev/null differ
zl{ef{MK$^MVc5eK1b^#+4iIdI!-Ud7w=S$4ADz)A^| z-N*sp(^djhPLsUZP$0AZ_=Xi-^VG=t7I9xW0v7J@w(NP==h02e8<@rXirFyRz7P8Q zTyu6{jz1@GBa;^jGC5Q>0AS-1iPQ+#m6^>6co{y<;DFvAL zh`^D#xwnsNVf2ox3E^y$FkR|e>a$wC1TF%SmP`)U2nv82`wr!)6^+g<_=jV%N*`vO z%^FMEiQS$st?cE^zZdRD-yl>(_2Af2XEvSQ^OKB2zF!dj&$kP2QL zm-gIMu`8%DYQRUW?rG`6epJsotByX4lDQz=9GwV>waQbSzkWXT$g(bYWc}oSfFoHt zT!`nNhL~)6diEc{6<|TMwFLame^`1(vj6Zt|KPJBh7TK{8IKPq{yFczfidb|Ft(;H z_x}WAPBiG*w8U}wB>*96g20oKb#hG_;UgN_>bLPQM_Sfl^l6nj zUidvw{b>rRq$}{u_g7twsfKioba=nI6ovH6l0M4_HRZ=80T>E+J6{(uE4PAcupwgI#I*qSL_> z{k?&7{{#KuayKp-Q~_XyKEnKykP-Qkn)9qsIU`4}3k-G9#WUO&BJK%19g02P*@u*3 zfvK?|xM=2p^8Sv6fPK*>gSyDmh?JVrU>{hIkK-B8{NW6%ZM^8ANgp~|d2Vy$8y-2x z6aWe3VAb=)V@|M>k45X{DD}XGp9dWNnlfq3n%F*x9DIJB38&!?h_4rGz}4w&&g86V_wgIEU^0MJ*`IM5j?^67c>*B>l0R= z2I>*U5}sf@t@4C$VjpKyJ3FmYTBiMv)!0)4o&uU(ZT$)wI#4#8q0)uk@}Rw)W?mh5=u&V%+)}Uj0W}(B2&67KLyqs5;%vq!}jcq za*sj>~eKWVw1+5>4kV$)-WI9X;aG#kEVedH1iB1fbBtt6*R!U z3_7UoCUu=F!S@6M8zJ8=mKU+T4Gj6_)9Okd^DJ}p@qp!dkG?61{G<2 z3p9eIr!)IrTGuSKk@K^!JU3SRP_CA%x440eNMNgXaeLhY0|FbnVR00s>aIMCe_FrL zB;+uw$-j`*)nJ+Q1#|lq09JXN37u{YT3ZL^91FD>NUkbZAR%<{anY5d z%;#2aC%SHm1}$-cES%VS*a0=_p^OzI4t+e{-tL(f)r(Q?-K>r0>JR;iNntLF^ZE<9}3pfWbkibJW1fRXfNY)hZrVna|yb z5ohSw^$%g(>KU4Ne{(4~Rzun%P}tRT2i*8+LT2fLI2(LZ z)ZVD-i2M}bkBKlFZ`BD8+(S}kZe7kQ=ll7Jh9w;xwJ z-5R*mdMy^#cFjA^?h3Z;qB`Hiq4F0y&v|hjIyYdw@I0G$8A1Dv#)@kQleW+PC6jH( zH3yEKpCpwBocsEmXOZ`GxpSQUj0WtQnwQ`7GJ{mO;975P6B?*ieb=|i?%LX$;0RDLSu{4ge%odvPhen~%h^2iVZps+ zmVI5>1J!}nGCA|=&YfcLzxPr`>$=FEM;TQ?SEaB2F zafDg8|Mb_fvT2?7rYDdIF@~h5vFpb`=4^Xu&6t_9Dm9B4dnH9eG)S5rsP;}l&X>LF zV-4o!V&>YJ;7sVd(ePWZImzzk+L1Yd<;55Lm#{1AYB`l^#T9o1a(G*kcf1EKWr;m9 z`;}ItBKCoK%2(^RtE;H$Xl8}*343-vzoD6b$%0ti1ElYdv;}jCL1cLw5>?UwI7RXZ z#`tnLNB&-ury4sMZldXeWCXYbXhlL@RI7g_sp5_(v^cGg6tdL10{#N#E(IJ5Gd3c6 zzaQso5WDYZQY)}|8SN_B4x#LGEH?vpg9=EB|6r{JFXoMC)B{_!nQE&hW}axEl^A4t z+2{^aE8z+G+)8~3E8hyY%4E; zd+I|@X>1%o-1Jk*MJ-YtSDnBFvnK=WmH@dV@M$zLP7E-KVtK*T=++f=;;__qC3Vik z4GKIH>uXfISlH*e#Ef|V;=rYMJK?S5WUQX``$OYtQD8MOgxfM{ce^*DEiY(SjS07b z_jh6eAQo)T0w9g(g5uP zem$gsMG|0f@{!^$zfj*V!X$8Bq;ocf;FIn-nzy89E|eO$U-FR1PGVDw_1U=xBw)m@ z_QWqfDru|QRJJgdhF_NgD`YEzBut=s!icAelaMG7c~wx%uCK4Qm%DoHP=(DgTJm0< zbtBmD!`e}Q^(M@n6vaQR*k}46ein7Wy2pSeumlDZJ2siM`ftbPN{{h)?mQ{Kq|8JO zIxW5^ItjVL$rottHkTqPFiY#XCTPnnfJ~cVA6vJ-`5LNHsC(Y5R{sgqcddZ!nWnz1 zmtFVh<0d_4LA_72f=E&=9{grxB-l$`^P~cDS~1~a)s#VCwWi_~*jflc1MH8{pl}QA z<&kKZ!4_CC0p{rc+6UcbGn}#H>Bn&N@Q@71iG!Xb`dhHAORS|+16J!*zfHzd08{3f zcDX|884Bbz8yPSI-@sv<#TAVhdu|++#7PD8!O|Z`C`d=_g!i|oiFn@+d}LqJz|)b* zY(U@;A5{F&BmTLobt+Jio9qfK1%qGuCXV1g*R8t1Y6VDfAC;vq-H_*+gjC-+l`q=g z^K|uzgFp@w8O;}8LgWZ7esa>36fdho)m#_7=;bat6xtFEL&x3nuXt(pJNa{$M10+I zqTCf9c3hlnY4NL1XEPDP)n2*-*{kv25VBf>PwwNaz-tUI=0m=k)2m)=GT}leUNe^@ z4ZqVFH$({W5et)Jj3XG9`=)Fv90@6Fg?b=8RmD#zPDJ`-&Fed~Y=kiS4|FHD(p<>k|6mmpUQB>KAOaqq&4N zXTeHN$`Hks+rC;ioTgrSLR}pAcpX1X@cI9M=Uxk;_gD*vlY=s0e=aYw4E8V zjcyF=o*o~W8G!e1HHH>oD_@_7v7LJ}*~YBSf>Wclm5W%F<+kI+-(rBgz-?68?B#p2 z!O7Sc#*03gfg{JCaVFs{(6_`LtrC16ygAX%x?cN=ub;lvB!2if>hwDZS*GDdJxLlb@LJ zUjHQFIdO}oJO|VXC@i709@FT2MYHXA%T+g!?wCYuj5(8%T{=tsJzE>g)4JiEjqIH) z{vV4E0gNls6<014FPHDOB#rwN`P$o{d`r0Ru$rBWmQckj$%YQK!Dqc5{ZX(0?j~~B z+p|K|D;{!$)7f`5=MLZ}ksP^v2&5UVHsOHn>Hua$9D(d_;Af2a3;r;jB;|BH~z-UF`NP_R%M56y#W4fMjO4_Tok#qavI9Yy$zO5^=oG5RN}zUniT zJh0?DzdWA!fZley5p%W1${h66tJNo%6WG0XqvW`J6L?T90a~NwV)#H*z?0Ml+DjRn zZ3(;j#xT>Y4%2tuu zf@5e-m=7^~&b{5QEk}OuQhTxt6Y+CG1bFU>7Y24*y3U0L9bkF1R!NY%lBIs`|C*>D zY7(dYfjA#@{(qrRZ_S-OOGk9AwD=mXBvM#7&=!*J3Rh#J{#G1qAY;qRZ8H?RMdWPg+tbGSOC5=0A zb1=ytnlUJzhAY8H!3MUCqaIzYTHm-|L5+U_;GndI5D7}$giTS 
zzpv2DSG1nmh68~r(dsn5ptWe1p{5GkO2+thc-J*NxQvqAie6zQ|NfF9Dfv6@4epO5 zo)_<6V?wYvjqp+7GV<{f@KNNwU?NslBm-i(Youf+D-wMlY9LbV22yMh>JxvfTtC@% z5wgNwACf{AzyqlXdyw-PPS4ywk%UXhzT$b3J#2AdHrg(^T>Y_ADO|s%+y$P#yMg0BA(zsNj$!}yc`M&2L_tB{Anl3 z08|&uG7xH2yIJ@rpbh65F%B88Jn4n6s+4+8Z*r4&Cu_7#IxuU|J%_h+vmQw! z3>87-qww_EX!2}}`$uTw^)UrQsxcm;Er+P05OWfi9}hWxrbFRF!%(Ze(A)bZk_9TK z5keyDljd$b4Yq4xML%igHZ5+(uSAthU=tK%MeYk8T*Vh$+t|qMc0QwO44V%Mut|C9 zy)?3fZD>#!$a|BuVqx6qBPl&02?%_Ujqj6s7tas?=;MDk&k@u#h&|N|3k34(8-aIB z04;A+oQtRolr}x5cJGa*@P#FYsE}n=BPJGl^1g}(sg2ox$%m!ziiAH9e|k6|@)(av zd|r!M3A|kAS8<;?i2Gu#io3|*lX*3|rcLu=f4G!#vQbH;B5PgKHWAMB2KwV%pIGyK zl8o^Q?n-Su;Fq5WfBF+CCe5i5j^7fQ{K`W^+vfY!jIjC#muoaN^KVLVIwN;#NTmE! zHM*6^NSqoSzO&2a+e#sQJSi4rhE6^Nq`XmFbP+xiHcEzlm!e>LP@hZWSti^U)O-0D zSFTfjw|dd3GffelbB8<^pi7n)`#--|L~56s1aFIN>5X{^mW=R~cnEX6Pzlz)x?ah@ zU35O^vM^a1?dY&xGWM|d04^DL@_~S}^dm<+ z@-S|j@Bp7ku#t1EI;rJ1%8QjFrDpAstW(n6zL-LKSRAKsA0H@gHa}@f^k_OGe6}bW zDl{z=vMM~4gNGu#N}q|vA61P+Zb7E&TRR$9NGyea6T@oIB@1zce5t%?1(kwDa}%T zv`HW*g@|sAz9GTyLbADpMP1Sf>K`o)t9Q^%jVs2T$eV_?4bIvGOEh?$z^P9n*`_r4 zXYqwm_9Y*kidFeZk`ssSHZv-IePIfsU0QB+44DTn`Ze{09Q&^(tk+VQGl;qj2>0s{LIGt#aUbVCjE;$9Ob!)DJMy?{J~tAIWW`%EDLIY%bI z!;{p77f8o5kmW|~fJr%OO5&nu^?Gyt-B)RX`9y{BYCVJMP78+SV`ow^U1e z3hjcgu#vU=%whq|F+qXcVdwzU*#huj+!RS(X|!BSB{$KPKa6G+w9@M2T87(svJErc zh=|%liXM!2raZ;&=R0vLS`{85TTO5IP6MM%4(cW*NqP7KUX<>E*eLvfyP8_Z>(*}J z5TegOece*0wE=$snoCqnt}yY7TQjh^JNm3B=6Uz+&F5i(wO`&Rs<=pN^2o=yKiRHs zdMh*)u66SENw#Y$6sXF^odPp#Zh$c}ui%Rxs=m9wD<@xNY@gPa#!WRA4b)LE(|YnUw9C~g!XY( zzhk`RvS(69S>H~#VMeGQhh)>BkM8`qXR747{P8e=8-|9c2Io zN_p^qrkDTm2@+~`hq)26aWE?Q2W1tj{Wb8Gdz1g;vatV6ds3u}BwdbU;Rrvj7WXOh z#m_m#n>j27<{!$Hpv-C6hgbba=Dh|tfX2%yrq{L7mXxk$YR3_sfP{xr28a1KZ!!6u zLCDulc=cZG`Re=GUzzFIJV&7(_bou>u{aldZB|r-u|FRTm}7{orzSP zWSr+~YL$!5I7*Nn>$=a^k_n>N+uJDn6N{TiHoP`wT^a9m-^}|f!yhdjNBnws5Vg1YxWVCn2=9OOLJgx9I~JIu z9g5M;MRpf@2XwA3E%HB#7{?kmy9-QtM8W36Yx@a2^GDm+$f75B{rujlC}hHc)Kjv01}kYtd^`yTsCUM1sm z_(2fNvCj^GGVY|%IB&=2NOHTucbR47%cQN-ez>5V%xjTi2L zgeJu;q)NNm@6eLns+p@;%;Sjum!d{))T5L0ID0SCHD@AW%^@<8;+X{+K2FR*{kjFY z25-SiIVnYhYw>YYGL^;s=s_VEl5;(NWUEy4ag#Pv>+4!eCi2v1Sd0I0ZcCv)(sM^_ z=?z^J=TlAJsPFxki0y`}0@&3~X|>JeX8Plzod*1VZfICTA9t*}6vv%|1|WO&Nvik9 zcJA2nGQqX&0Je#Bva|bII!VIyk&W8(BBFZ24h}iU0l6#Ka7uCz&??BX9zs;R%T-o% zM>o^8-O`SQliXQRWioYE!ywAs(!!S8{-kqxaFwm8I$F>9i>)eQbUzP~N5u~2h$Gam zt*oj9x8Coh1}M$Wj1|A7}&*i*`zmRXeP27 zD>0FB64av@vKG7S{uIs`KpLM7s_Wj`JhbKEiPG+z08DStKp{2A{Q4^=U!C24!vsVSlI=-~Z&@3Exm9X*kacoX;nMPm~rf!L^1Y-a{|ap^)?M z+h=vyf&^2@=wH8*-{?A)j?C`cQ#DsKw|+LJ?92F)oz(`~y2@!4j|dzxw+ZB z(-V(2(`5@c6T~Gu;+z*ZYnaHc@1o*D%vW@hmEj~ zKID!N2Q~`aQ2c(I${{K$v1i;T*HXu4eK|OZ-8tL{vw@Y} zbIk6LD|8QP*N(yLk2u)p7-d=O-OSx|wt;*rH6&}p2VBVbHKtm3Mf6I(`3_B4@JBG4 z|8Lu@d{dV8j_*iLav^{dHGs+`sm4ifJ@6<%ePXx6QW z5Z|}~csW2&TnBde$KOVz?)phw8qFLr-N{8Z*ch>NZEsJ(qf3vA_|{2AOWI<4d^rO> zT(%u-ZxB(#>AZ3EKxIoVPeRqUf$YzVqT85>=t+SisWWb*mDUsbo31Zj?_GI2Wy48| z4@z0~^wD!ZQJYxPDbehQWS|Z)DueY$sq=)^KaEw3L5lG3-+Xjpk>QiQeg203E$R-$ z7-}xG5E3WGZghAzVy;gDp^Bl!MH5GX7P|!4>!78t!W5+UVeLjtH!j7)V!(K&&>F|I z(^VBDJYpvaRuV&m&dwK6q8?Agk4u5n@ z#`rO%CHXchgt#{`wiStPni*jsdKzx4#okrggmus{B|8nro4_k%N1cYXFGj=vA7Squ z&UWAa|Hr1WN{yOD(b}WL)}>3)t7_HWEh1Fy*jv$|sK)Bl)(T3@*i;EFT0)SZHWfSe z@8!Dg>;B%~`}6tzet-37kAtH}lJ}YOe4dZz^Za>V&{LdbsWF%AT!`HsYB=;AYr(u| z#$596Y;fx^#9S)*V|ck)@m_k-xRTHwB;v3vnHT)`m6n#?Fy2nFY5yo8u|zfiPXaxw z549Rq3ho95eLk9 zO}}z=8k#{cXHGfd1tnQq?trTE2S6eEsUM6AH}Y-8XHbTjWU_=3^LBqHPAElnS`q17`;vGSS{}mw{HI?V9ruzv?$^ft-}*HGU%}QF zum9bhbuBGE0N7Q1#~*(~^8aiHE#3aNZcU!To!s)(@62<2*6_=-T1NCFj%TmV(mADC z+pzXJ;Uo|v?z_vYK?h3lt}U+RLVc;T2vYVIU-@PZpdt4Wy1csZEqytd;vizpTd9a^wwD5zTd?f#9#)QdO 
z%Iyh^6Yi{R2<2n*~Lf6-Rn(uOuK>g~m_>Qh@YFKsDX z3k4+(@>q%}a&kbpBju@JY*Ds|3_eJccBC9_#}I|kq9?A^s@lK&-# zKQ>w61!mw0)0y%Mr=aj5+YNj8(`gjf^@_!(w577o!|6svt6e;f^k~$z{OXy<=SA8F zq~>~J4aR`uwds~xz(YpM8Uz(~gUnNEW)tH4zw8EL5CtrdK@L@OC}jECWmU!5R?Au1 z8}f~3jkk*^dk>}NlIfEzSAE51-LKDwW5DH4+~U;F`xF}x^CJH$nnd{CkTPidI;OO9 zq~0S^74^y5J{;r0Yxk{4w4<;DDb|h^DrC0%a{vo zoI`PEif3u9B_QrHXg79_2^0huK+j$75P*BdsPn6WzTkN~dwU}xejJ@LnG{m6B^HIa zlITG;I9V%S*kDPPZU2H%B3Ux`%99c(MWu|fuPfK?`!#R0IpSxJ_KYc5p(h8Hiig?- zhw|}9#iOD5>puHsd}!Rt)Pu4kROr)7&JD_vi^s-LH0E%P`{m1*h3*tn0G!Ur+LM<3 z=FU%TrdyJV8ng&W_AqrMfsuirU#iT|qTs#5(N01sO5VL{Eoy_xwpKv-IOV`*ziIVG z^f<+P4B86R&tonz5wkF^HbIu%J7w!~nFQIh2J}DJKjUP z1G&!LbK`4V+r@j(iBFhyrRx@~M7)))P2sS?P-FFIDK3+k`;wtPLjH3d;=ZQdGS4oYj^w|Emos`H@Mm(sMIUdI z6HDC2q{>(3*=gQ;lfZ{7a}K=h(|T^yeI*CC&BxNz@0fKE2C``AviXZ@%8F_NklnocFZa<`DDGVTQ>jJtro(MnmRQDXM(P0;YUAHjZ3IzeE)=aXYt3% zHO^`HLQjl#lx68c0Mlaux8_(Qy@}B_O$ALq&e=F(6$mgN;BVFPWW0>l1drPQt{k|Mc)2hbb73!;~nhuncOu)RMZHu(Yr0 zLwlXRX)xla+gqQRGw~gDQ^xeIQQbQg48#}IRbXFxqH!k{AqB}0!0yNjQf62hiv%g@ z=v!IL^??wt0v6)E&x$BCM^-47vrpVo*05e_R1btmfZ6)*=`qc|4iqJkDdf9$DAE`d zD3tlP@rxnTX=do$qy1U(pGKjyntnU%9uL{*fi?Ke^aOdHc+7Y$+x$Ld(NAtk^G5jDAejfiC2- zwuMVTa1G_lW07T59$$viEk^^d&_m`e(v_76$(&H?;HhNT|8(U5=0OKp`zIuYWa%5T7hO}!y8^Y-hWpH_Px3n{{sPN{K-P^k zcq{#PpNau$^Y=;+$BuXSINgm<3UEut)NhTr15kL*ap8U>V!qhx7sJ~?J)cm8Y~qUZ zho>u}p*oZEN|f~9A)kWAO|<3CdEHEg!j!l?7iXe3`Nu9`}du5q1GU)6+G z7baqeA>?%)MOGrW0mL=#;gjO(cdEnAzXIUi-hYfmrr9#)Ukjk$f1S}&VtCbOa^cau z{EXs?z0g-o^#s@!%*-HhX{Nmvem&Jjkuw6)9p@(qcwWDSR{Qu(j03!4MDdSl)4~sh z?5}TKHg7pZe$v}8g{V*?-5)n%w(i=PWp3@+(t#K;FB2iQRm*TmYBm1J6K=hmbyWcZ zj*&1ZVwIYek^6!Vb(IJcZ@Fo9mrt9|73wVhyOHWSbU&jHH1Am6J12pw3480i^>dY3 z;aks>A0sRJ*bsKn7iytXdra^Qp9j*<})<6^9Jq3hgu`$M1 z6I%Po-Zp??Q0*?uK}QX`#q}C#|3kUCXn-*Z_E1i`X&w(p?w4(aEpnuX6M^3TcCT4A463tmZ@!JZK>Ui|qOTRK2y`M0^bt^C&QP%Z=<;RPQ%CKM4 zbXalOG@5+oCHHaq{$Kh>5}IBpv$rrub?3m)O8*#yPkE=MI7L;{U4HnIO|$O}&)%e7 z=U8Cp%u%$uY`mZqp>{Jh6}xr<!#I>;&~!Q5M6_E`v>)A^c``)7k&+SRGR_oRHozS34|?jfzt{_0V4 zij4zVRv3B#5q`12rq+}}9$(!QjS!*2PAdnvwA#FypDyL`NDSHxQgsWPA@4r2_X&d# zG}y!8QJ0bqLW~hY?Ul+lUQ&$*&Z}gNfJ@7Mw6&hIzT@;wDTLCC!Q7{O=}NH!W9Y|3 zt*!aquoF396Y5LyVLM&_!$Jh!YDDV#z$N$jx8b*GG=SmM@(U*S07QGR@GZP~O<8na zJHQVVP;G9T#m%M^gKPc%msS zY3)yit=yx-Gy7{i4UIM-(_BcFbiJvWFaaAD-dTpg_m(we+3XcpP%@pi8`ft(D|3bJyy zPd(M&YI=r8GmXnEahqL;CSGBG0%OFq0D0s^);C!RCz^;LMKg+NCvU@ZP< zpwN%y*TfssK~rZ-;~Mt$)`_R~{9i2{B{OP`9cDjys@!?FlD`&AxTO>OF^X7+yphC- zO*q}VBD19<_T->ptRd{ECKkaJ6UIg(j&bj}%%4}Q1zo(nQF+pl3`b`T*?!D6ZPpJ* z6z!okuhH8rHPd-KLIV2t$>D3xwKA4;+NXi7{6h7||jYG-AjXzNa^%cD+V zWhM0BTj>IURo#;umFM0!BLu2tJIkA3E7fl1^M57qT-(FT^$SSuTZ*~w(;VkK;|Nj1rVJkvuR3Zw| zyvbW-bP>*6s~7D7F|6{q`fcQ?0CpJqI>y3RYjscXM{z;z>!B|PNmA}UXh9tR>Xoum zLy@hC;jua-tt-8bqi|(*6qnmXmc%<7X~*&Yk5~cxXl|FIFlnq;dI8tgwf+@;x_H1F zrHElFY3RD~%D+t~x=xH=dhT1)3$kb7@*S~OBNm5_9>G{>Mz8(N*+}ip`YHt1i@VJ)n6tGpxSahk?66f$ZB0LpP5 zlc$$z< z$W6>23`qVo58&DxM5aFb3*~$dt1F~P>_y*^Pd#HoP7ZlAckJ?5V$j{ELe8}aH?!Jm z*)?PA^wU|ebee^ivxC~dAYM^KlQ$<>EJKRWn*7oWJ=`{DzefPi4nF-beE6eI?D!)^ z0`&4&vyOKIzk(k>^Ah(`6-(qYX3@FnBhb@y1{j8jzFd2gEw+s-nVWyFri&ROoN)EfHJU`&)Yeov^12fLSzaP;^Ib5znw-dqhpik-~jc{c632XYMqEfl3v z$vCKAYRD%M!nj&?z3pl{Kf=yPXSIrnXvipyZL9f`*AOT_IELmQSq!Y2w4CNHEGmR( z+={9zgFH@~wbve*!UJvoiD-px$bj=c2o+46JEP+h%3#v5Q8UNL_JcY zV5dX%{jjCURK#oe1w_!5#Mqd$88swlaY57l9o$MHqWwTYrWiSE--dY*f~}rJ0rRrz z1YrW}W}btM*Iwfa1H=EUzwJRLxCb)@Z>EPPs2ZJeb<}BhOpPVTMP%|Wfr}g#L%;heuKw;6@>zaO( zz8SL*8aQVhdbo(p6qhSjtX14#U2!M7Vg!i4S0UVHjVjQ(X_5Q<{bs(Y2r=T*fMF3r z=u$#H{~6XdnUL{TF5*>MS4~^Gjii-|!Ingj^V8DJMWk*{L@?J+I~1ic*TY-=F1T8f zaSK%}sROZ(;;p;Sasm90P< 
z3MxfRKUt`7$_vVzn_@57BN+1}qt_9@1A^F2vuK~U7?7=|ezHC=7LoCH!}D*=>$n*j z>;A`0=*4=hU@58p7jX1%Q2yN|N7;Y`~Tqai})$3XD!Np zGUc_db8m8cFv>j~N&R|KH;*0tLx!!yJ$9DXO9Bmo8(dObU?&@fw{-I_J}gfJVL_Pp zc5oLPp||oAja^oLW}&);s>pI;+#~c1B4O~9r_l>5wo`WjT^U*~D_x*gwHg}YIkc=* z<^8tODS^Qj3RbfMA94BKxk2iXa-t_8 zE|B3;iU{%T8R~kZAHAC!rknR#)Exz?L6)54k+_{Q%=&xoU9vuLkpZCd_th z{Kn>o$qNc5$MD1UF)%sG&3&4x82lYQ_(k?F zk0e2@(~=jMa1B!2ep*u<549UX^j7Qlxd=ytczV!og<>fx+-I9|A%k z{QZ1?9(oQo@_U|gL%W_)+qa1hVTH0J@b*w__||X$GBm$_j0_+|ngZ(UcpaLNx3BuO z7+8|*I&796gsE6%)<=hIPRIj!A@uMlFwpA0!A7Mi3B@>iZwxT5NASMdZQD1N1wKT$ z)-vyxrxP$)`f-O5MGomNf>-o5nwMH=d6NeCWN?ws)2;Cd4>YaRd};Y>1A4L+VHorS z)s=6}x2eN@8ZorIl5}3c$Qn6>^y}$a zL2R41)_VwNdRbS&6JfX{amCX(5x8k|vvK7rGsVY7!^s0#^Xtp34_rI0ELDA~gWSJG zWA1p9apBgQVk|)A8Gzfny01o;VewpNM6r?$0_>mfk^kN$;#Z8uBl5{h-K zs)#QGibIJ%J)OHdq?W!#tbMa7T=dcuP}5Gz2IgrwYixLO@GGci3u0>I;THk+o@j)l zgAfTNMHg<#NtWdnjI5@m%zV3QI!M(aBaxF4pX)N?L^9CLdnPU_$w0YFEq%V`?uGno zLENpDH?YEzWt=bInzJ`5gnw>Tp4~=85KZO6<_ubswg@>vp@_KrgC3}Q4(WrrOu~#& z-4e3jh?|_!R!20Gs@Ki!S#V!pKOKh%Wjn(|%a}2ow(eq00#rx6441tc}*M?L*ffnLDrAk))$Rh51$m1NZtP_oava5DCQpQKu zo=*IeS!lVMyzYd!T3d8D17edT@CxM3MdCi+-j+X>W@YawLn+N{oSpu4Mrei0CA5JN zQjziKD+%?gdhPxm+>bJti32-)?3tHhFAFl>!72_1BCE~v`u&UA~EH%u}q;(#In!dJtN|v?0P&wFSSwm@M z0Wn?P()AL*d?QP2zoK(UmJ-6jjMyDv;L_dvO$Zl34lK%53(|itFMZ(4+UmX*;IU?% z{6~&TX{{)O;dK1AmZF%BAk%6K*zXBy(bRM7tOG=Nn@ZiL6~`?0_f5m&7DN*$woz2Kpz_C~~POAk;6 zvq#_Dv;m|$_1^5|!ZfcjXcUAgTeG@HfnW*P-$%%EeacGQsM#XYA2?fp0HDN%J48jzaT6@kXesGY-jwv@z(~a9 z9uxc;?(zQ2C$w_cRul0WopnN<;Z@)(bI$-%apE_f!tEQ90@$fD01rK1r4@8?%BSV} z2MB9#s_H^XX&2LQ_-Iwe59y|Vws8N^nA9@;lRq+n}d9Y1KwRke>4_2c1b&F4;D`Gw=QjS14j<^>d^tNY?4m9`gU zoquM9QB-r!+LFL)DBBqdz8r3PmRL%IG&R~r@n%dk&DB!%@hoM>J?18iMK2k5-vuv^ zG+s7lBJKkm#rZ^|K@A27p{G@qgamug!u#x;DF;4XvN+wA{*f-ffq zagVP@Y}<*=PNFN6g+c04U1Ps}9VwO4Z1ky13LmIhGUd0tVquQ;3$iy-HT}_|VCkTz zt?sh4LdRUg1ZpONKHPW$2F1d|WyeI88rwtyh=3-=9*iNH^yLre9T8UlGs|=I7BFcx zJs}ZBBKUi9^~-Jsw723nsfn!#12&Rs19_!3HZ*HW=flJ$CP8|qFd42QEI*`)T$Ju? z4$`mqURHYSz%A@+FHsv(#1>>yCX&|>dk$O(5x1&b-a-Z?Z#@&KQRSI_z)2bVA5<(pqOZhmS zFtzC|&0MsR0;J8|CRGFozBf4C5=b8~;3@BppRRm`$7NDu+hRQCaB4(4>}kC07r1bn zF3Wy9qW4CZs#?EUM7GfH@ZjPe2V%g6dnFrd=LMwY=Z>nhfUMh_A@?vuOuZd!>(c9< ztCX!76rwozcqH5It6lM%8*88nx)liAI11*aCmn?u*@sdx%z?F~*<@rVZMbfYMd8Q2 zB*EV$aUj+n-=-|tt?suD?P5Z^>3|~#YPVY*i{ssIzKHA6=tIRuc?oc*AzJ3YdFmgtEK3`k`|BhESy8yQGcKKsq`+r7_?Ffs9P{m_uk z)huly8GeZse6C#@JWI$172jeZ#H8xRn`FXQT5NK1ihX{HaBM)B^q@^G=Lm1mfEyRd z(l~*RxUe0U_b?1{oSnMWZ&g?>!&B20SkGKk)39{iAKR^hBaz4v8SeF7E8?>4=s*!OyW%L3DM z6@Egi!w^EwV3LP)pzfnWi8Rb}NnLxm0|Y7F~Ih`1@g zE`+gYsB+15Z+;n%4^=GXLZ+vEteXsEcD7N>5UG=ne4h+LP%~yl(S-L$g-b>Ynb8O- z3xsv<=8CgT=^Ba;_Zec<-1K}3(h3XmIX}PsTMMn!i{ga5Ccs}Qrxsad^dutIJ;CBZ z_f2EVt;e46IE!XyPM>1F2em?u&M-2 zf;IyKDKg>Z&-d{}4lly{_gvu!&Z++8q%`7w5hR0&GDFp7UFmNT|F77Zs-NQuMm$(? 
z)DI~`v^^q^uOLTzbHkYYd%J<3Uxz2ED4WjwlPrIvAb)^|3Pw(-#e?F{5CrwLAdmvb zEX+fVxKC|4FT$gnZEP+fQg(cwZ~2}){G3Z{k!~#G{JzqoY8!CE?>-6r1;r%`)Jmxdj0-l`f7-$kCtd=(FT zgdr`klGHI}l|fA;|2f=D+U%=TJ_hd>enPRS7y}Bof}~n(!LT2Hsh6{=z;a3JM|r`x z?E}IAeyCwrVSfuFZ+~HMXo_;YdAmH%U%!i5DD8y~-t^1;_$ZeW5_YZPJ4p9FCz+*a z2qel5;XV4%u$63lDa`>mH<6^LA+Oe%Nad6WHk>Wrrh#1v577@0Gs||tUu{-OzMbit!ia}%M~FM-e9LT|V6ed@0S84Bn`7IycMM8UbDSjUzS{La92 z`$rA}mgaSDr!dYcNY2;W(bf3et~$L^ah3Q~QUx7dBPy3?6*Z~aT^p|A%n?vISNR&% za5dazRCgVExGhG=J%y=m!nAca!MxKnrWV`ur0dv**pcNWs&jQcfjKM{IO(R>D%VHF zf;fxy>z;aSXYAC=pPPM5nDt-u$#ix7LB`RO3acB9jn@hT8&rz|*YO!hxfH+@pRCH7m_sK!7BM70sxvBD?&SeOEO8avHy`iXz)<1 zV)x)|xN4~F+?+u}%8epEY)r-8w|JuDz6OI^MXn`F$r}xZ?+Y-8Hrbx)tETO7{*q3O zF#4ZhH>YjUUww;iH!hU_G&vb(|9z!@v4?|0_XaXbouMm%6uE3+Ztp5|uj4zKoDixO zc&O(0d(DDZupuegD2XtE;;3_G#3mS!+p~a;Pa{D27-RnrZRfZ@`acO(E$~UsV|;!7 z|EsHlmK^<^vkr&X{nb@TJ)Qa=ZPfq$4;U2CHm9ysN_x2_@Os`H&w3xdeD&qWE3&Ou zM8Bq{#rNNSmJbOo<~2$Wv|!*vZN`$dGQ!8tlI29Eg71@=2Dmv&bE&_57(8(XW)uLw zL`CGbdX}*lvgE7lMoFjbJ)iN`*W!e~O~tRmNr==u93y-Kcm7soVX>eOPQN5WugX{LC%_Ne+J&;rh zsz-kNA6;7>Q6p2MXQAeX^u*f=lKN%I2#Skl{Ar%GCljaCJbzf;h5VoNnB9+sW*alk zcb|C=W5$D^p}Xa|4o|fYc}I_O{Ozu|ilK`NAYBOm2*k|L8d-}5yXkW2kE3hl%|NNl z8``Y_pvUPJ?@FE&_Ri8JvTQu2g_U@jxz5*Vb)?+R(n!P)Imw9JR~GI#?()bZ?j`n1 zI@QC6$!GsNn~wnyo#7EqoQGym!GPhX8AMb+9d9T4m0M)?P%BgU{O>OJ%*(#ud!5=V zb5HOtG9=>nY{jO}9|k1csasVRB*HD-!O=P3!7O1k+zJa+X%C7+L{0N6W_>fyj*D?$ zJM)Fd)r0}fh$4MLL~&LZs4h4+P2}8NTKlXv8XQ8k*F393)3E!qM1gN{(HmLR0&D1H z`%JP17r{P9YH@v856(>-dVZeyQ;-eU)1B=2_D01p0LGJmT~fLotJ>q5p@?+>F$~8? zj)eBw-+8T!nU%h~qOCAWjMvlnE(@Z2X*Aq0j>{&5ymxNOC2t`$QtPFjvfu?ZQQF{c z1**qjw1N@YAlizt?y|WX*`N{5qP1ExS*!*YaV)UdLc)OEav1)(w| zP4*SAw`6}Zd}98b9Pui`km8pIXUK}bV3%RoIx)6V#TmT&SrRD>EflBBOs{s!BiGlD zludx#IG&VrGz1un-8cjn>hL7}cU~LE4qbI~b4p{lc?BnS+D-uLJP!|lFeTwpRlv-8 zeWbKplC}94g|`y@xGnmjXy{bclU!`o9IXepQ#s8!v7cMqlr`o$Co;V0wOB5?DEC0N z=8cj`LZ1xR`}BY?avg~6CZNCmDrpnXIGEab;RKK`U19{0TO6%m!Y(OV5fgX>`n1^b z7aeHAi>(keH~b=^DR$~tr#Isv-VaW%2?rciEyJCzzIHW^A@ z#@oklRoE5%)Xb}R1TCp+j7H$ZFFhDV{JhK2$c{~$cF3M~qi`30Z$SdZ_g!@(tn}0S zS5UVn>OEk+y-CGGNzZRlQ%5?2gyUom?CDv{09ml<+KvO+LKY7|OwwP;dno9#N?p)$ zVE$Jf25HaXb8!gPrSuY4&E$y+A)y|l7tA>*gS>~EzNW6i70<~a546$>^Q6g(AwWOp|#2*s-sfnu)jr%|HmAXQ~MjTH5#9WFiRh9U(o981mxJV(P=LdCrFA?Fg zwafz#%gy*jhpiU<8n{hJ8`iog&q@0My*q+bg3La$-Hk@L3Fa2N4e(rco#>J<80D;_Z%QfEz5pmDq@_ zo`7=wn~-XP^_b+YWiR3GVG^EC(%zh6$vMs=Hq&v`-v1dl@!NkartLRx^yKf$g=j~y zXpJ(3y#-GT)nDoIPMTvBS}}Se?GKmOf?8*N2#g%^TFYRy7V)}XBIRIsVzmrmBa5la7Xe#vc@IH6;!Ll7H*`CdD+s( z8w1hAi-4aO3u@^}o7p%3(E5e5VEJ{eN-B$H{`B^nY7VPxlTj+7;WZTc&w&pV%VnqD>SN#iFMF$NuQC8NcW|IJ{2fu2bku-2m@cxU#zQIPb#A$6!d>!+=IR2p^v#yG7rI19)8_&!r(*_y8?I4*o6K8C6s5AB zKPN@yEgpmD0b85Xrsek1l5#%8Q2U1#Yud!DY7vZc{f7(2RBG~M9Cnug*2B<7P&P_J z%K;N@3*g4+PVjytd&&<_~shnZAIS`c&T8i`m1b}ySVF>EtrS&BOgEd zE=gDhG-J(UyfskU8bN`lGy^xzTn&1SPXAuich@GmB0VOr;zBqW(4Dx;T3{h<7`YCx z-TXolMLIUHUBj=$sc6i-fL!3Z1`c+)-;^u4>~3BJbt^EEDK7cVJ)}suBg9W1dJKHw#KnU<;;Klg6SH#JO>)UL?7%mn0;4Bxu-7L$ zcz%OBT+TU#P38pc)c@$O{&jl*a@<*cHu~>}=3oB7piqMdKnJd_AD=@6eBg+hCur3i zKiCxflLGe_@jf_pytVtU9pK_Atm1}W48~3+U;9m_EaT}LtmWw;CT}o-r8~bwkj9Z3 zREpFa7&GxfV%EYI!hYQLnH|BR(!9#i5NMMu;qw+kjpERBwyrYyXB zYPW=QoC+oH(ofvazF}WC>_5>=_b>1EaWxfcFQO&lS2zloi2ZGl%!_;=NC>3vG&Fh? 
zr7!J$JBw5fo!wL#ejeK}#F@00X;9|UKvRw%5{*(O<+N zyCYfFl(Tt~5go}(U8teW)9P}jL`0H%9YT!#0NzB=6h}P#DhHw0OB>M|G2)c0rX+{* z+N0ffDG(`r6C}xZifotdbu_rTH%?CkAyJ>rIaU2S7_}(H!KDbD^_M$gxW)A_36iDM zQ~k^Uq_OolNPjzQ*xd;kllJ2bi&Gtsaz2XF*Vi9XK^B0G343vO?2Vt-q$hY2-(duj zT40AFR1;r3cz>j1(6dlJ9@M(Fp`O^kFJRUTdB84lP2TQ_zS)a>YOj=Kk18AtnR=#* z_Qr?rDP);Eafq6CE_2;{XQf%?OO4CxzS^gw5nUT>C^MGnr2DL_QR3!E2r?dQn%5%=ka~q0_tXOeQZit z2%QrIWGu;Y#H$CPLq-WwGf598+>o_qSDh8>a6SMWld67MQQiU1*W4%O{mWdqg0&h4 zs~1tQJvcin=`mRX*K+BN?WdX9;O)oRDPbAV98+|c!q;``)w>p}@E$iq8J2S}$C zansP$qM45)HrO@(jz`8!2AZfB8LS|i)XQ_nYE_ntGSl!!H)kk6zsbewuF7jMo(%_n z|70?x=X{dbJ9Y18e*6~_YW4HmNy#T=hvYcj#$&B~5A5NY)`w=%hn(6VeScD^%?;s6 zF3Riu&Ppl^n1t$93gjj_MIm;V6`H`I6I>)>M{pQjAExZ*Jt2UcTD`95m&=O?8oN!# zEgM|?k5U4dNX$`(vYI(lADVk>p2wI%1-B`+l*$#&y(P^=>eus{{59oNfIZnvO}J__ zwY^I`_`OYjCwxR3{JVO^y@43U6TC3*T0soSZN~0yc1%oM7itsQ8F3_UUe9gB2ZGu1 z!d7c)FUi@Qh}_a7FRvik_72%XX?2g<#lbg*ZmnXlAd)#$YVFSs{$?BQ48qi6c zC0Z;b&Fi<9@D|pe$UqF62uy0n9aPnuK0~dmU#WaOk{pf*^{!k}v-G_!RcyXEt*DeX z+wZg;MOa}dGZ}CFImYYf_$ql5984rA8S-F&gJe3xwq5A(dxqUZPimF*JTx&Fn%ar*2u6gnjlX#{$%cV_$-CM<7luH)PrgypUt*mHe8w3*(jN}KQa`dFCepm;ZZ$v^bx%CcvJ^b_fZB7cf$#dPkO zX47DhI>DJZCtWWR)H0yz5Cx8Y6^=mcmlfl<;3FrlEbo;XGwj*#uhDsU?|dox@ZqfN z$dOzOVp7Vja=knjvD{#r&MLsj%4wLo1B$8nMdet(=nO5Qt1Cz& zl#?p!QJUuZUg>XW%kA5}a!r&UN~#*A%o**0JRRv1`*R#X9}>-~|N0Ey7WhWJ$YN>q zgN0g&|3p8tRh?F{wNQ5KpQYzLJdn+b`6f4X;g|8TvQ=I?{o+gbH}<>_N}Bk#rmqYf zo*69DWgEHm2;lOOLDWd;RR-{d86(=hR(2P4s<66nH=Vifc*+^Mu5DyB4L9GfWgKu5 z{7kZhJN(WtDZDQU@Ig?(6mCZ&s(Ec!W!c>Oq};okK+?Fw2bgA1*GzR2taDTX!(KN( zJYx(R(~$<^bG{|ApxDS!DilA57dIl|Qi#{4LG23Cnc$zqc#FX(ewzOoarJj2hN%G5 z@ZV=|2)v!MBP>Ne!0*FT=xAl2hiw5=e`!z}_q#5N`*FDmiu^*VfdB~1IzwzzqBMBI1PJgLE3|-uH!UQIS6Xs z`1qL8 zID30FnS07$@L0?mN3I(z4K?hjXl+p(@mGXH86|im(K+t~(G{A)(sTF+&6ta3+>B&Z)-bsCOY3+H1Y_l22VXOVkK=0oD3Z8qnGh00hv>Y;gFc=5z@i{#3JlJn zzg;VnnX#WG5{U~XoV?pywL}oQMYOXG-e96M#_2@VPesI$7PN&MT{4K!fhR|?2Hm<4 z$MTit_Sr)Gyz!H|%n$x8`)S*DUH&?ZL%#!cw{o^aXmz#{dhKlwb{)y1KJs?VO{ay? zEuK%k35*SaM}oF05kvBZUevVmUK1b~qLWl<_R9NRBg&TXEw$$tU|N`|0wI&fP$}OJ z{!b2#$sLj~dW9{2$H^(XG}Fy%GFGW{r878u({Cj;mHy=V+3f8`Xs>SEpHfw6yHFtE z<|urd@?E;bj=8pUJ!$<|PIIuO?1#0C{t_?5R5J(hZQ0fP65f>fNe{mJX@LW0dZ-1t zQ0SgDe-LEpO5xksH=js`9nD&eB8f&GQh*TBCRfOI)D^? 
zLq=CP=hq0BW%AA|&IXG+J&`Vkzg$EvKtGDx!B#8rSX=018{b-KfMbN;eH~R9Bo6Dk zu5*>_a(m5%l+07aa_MUdU;(ZO){?q!FsS;>pGA^GR@$)#MXn)Xl&fDlpziaEy}eU; zJhT~0uZEVdc)qZX)ncZh z177yhS1q^gCE4Q1%dd;X%L}&Bi|xC1bL$G20%WLCWuM&m{e`Cb=KZyF7UZy3di>x< z)~lXwD6+O}l5D^-Ji0v#M>$W!?~KQNPv_*^o(F5LJoFFn+m{bCVpII-$zhQ^mV#|v3c3R*;dvwx>o zL|3oATxS7L5#8^aUP$_soSb&Uoa{*DS;^8T84`An9}CoKY0ziL+p6-{Yb7Q>%;&eY zt$EBOXHyj}sLmcYo-S4B;?K^v^~+usT>RnfbfX|7ZMYAv7G>=n$ zF}Ck)LDxj0_oSPbX=W5+@|MpDaF42rE)WdyKlX0266^AmR9>$8)t|oP_0SQfa_4dV zSfxp4v=JxZ?qp@oc|P^goxGH_kjng$l#Ds+w0D)(2owAV6Va5Twt50X#|BU;Tnfo) z4sMu268JzvoY8Wcz*uDb9ChigYr>o#>W@UE%8_L`3mQI?2=AZKG-PI++-g}x{UWtL zmtUHReoP+@y3hP*#5X$+E-LN$bb5lYB2vhy@yONIL%gyrKBW9t)g%*62Z~(hp zk5!`vYen;5F|sgEjNx+jFt{8L;c0^ojw(7nufQY^IZZOSUvlSZ-80U5CV6HH5H`PB zq|D&A0dHzgmv0#$BKbIV1-D#YTm2t~^peVIh^V$s}ZgYSY~u|x8Nbe}?1 zp+DWiULec;)wmZM#}86!N!2SMJny`H8Vo^Cc+jD>s(wpr z#&GKr8!#F4%yPpIUUR9FY(XFAK1Ltx4mGGo@M|7yHymlZWOeK= zx$4lRX7o5Ls|4p21W67W*R$-p!5qhaqTakq95O9EJ<rv!Jr(DIC&B~Php|T z0w*VL68;RL#|FfAZ8!vko)e+<0#(t)TNzp}?t!q$*i?xuzm^k0jOAOFZfWBKL5JT$ zg99qL7Ecjc;d{M2Qhmilsh&Q5&FnFTrNaV@ksquHW%^!KSkzaGkcbehofvKM^st{4 z(AsEXC|yDBSymo}wL3Q758bz1{z=?NzKXV`o%Orz?$IkGHmHpxZ1l39A4-Y9-?{;8 zNQf9ekM}$-^B0}+`l-bZ()cSJ^F5f&LIw`y?KUmXjF$0e>WnftX2{vOQCupLtc`9& zdjr#Qdez-5A8gIz@jVmg1(QxE@^T^{mWkx~VFGw}#XCGdpRFC2PT^XOUI`vHN}#|y zm64XWpj8R+-K^hRtB2-WV6+|@ZpI*s^8o#|$|+{Av{JFIZB)y0Z25%W(vk_IaCM6B zQ>(a2wUYBlLB5uujlv<+0z>+nVxtoOn|nSHL%Ix`2j%8djWQQl)-)WI6#~3Y@aw2D zbr>4ix%L<}e$b!7w>^31HPNhB52aLnuX()sVmM} zoPlHy1%pPV40|B@U35gOsvO_->Ws4bQ2iITyBn46Y0uNmh6nX7pc zEBpP;XdOP%QePSco(zsgLY*eE9Fpazi+yzF0vB?iM8DN&+%6OL)dggT89qz<6(smf z2Hq_Z;W~J@tV=V$e6JC6m9g^EIl(^hVO#nMK-AdhAlBcO?8~uvXnLKx?|pTarsIQ; zg^C}_|0Q!vU??evgMtUnX6XT+U8p(kgW_P2FiOG*3)Y zmi!^FJmV%4Q@;KLiY)pEHFA+u$`f-!qpY;KM*#n`fyI%$oavg|WV&n6losUk9I!M& zRvW^9k0KKSfWkLEzmr@Dw1sZUQ*syKQ)fy&5*D^VnxpIV-5NN89;y}b-B z>nQC57^XcZXxAd(SHV;J+=EdwQUrr;QgRO#>d;Z*I3_lfC;q))vBAxh5Ju;=mtMLs zE`8^x50|6u(1x62#ex1E-9~pltH?n{1gTF+K;1{M_JXgl3~}=eDCJ?ie>K3lyWKn$ zgbn8{dL6sV60r6e;UJGZiX($-MD)R%6+_)&Etv1CH4EFj_ax_w0ox`U zjePe8CH($5>jwG6y?w(C2}*h<*y_}Q3(aR9|37TKbyySZ|MpKKjWkk<0!oSqqd`$j z#G+-CfJzKRdUQ#PAc&(ZP{~oFHX1~v1%?B*(J^53=-aq zi*M|polB2;pz>+Va0?by(@pJ25_ZJ*?vGFyR5Tl{sUx{}pj#I}5{NtlIlFbp5*t81 zO$%f2+Z)I}3`cH1g$C}}$9=5iw}PMC18D!Oa>IbN$&2o*tV0jVgw*pZ?X(6ha*F-9 zhYANDv^o`!vWoCVycfaHe>6#KhCr+Sez^O4l1koO=59$5KWE14##dkLd6aU5Xss#| z0{)D+Ke@{Ydp*txHxt}@tx;m8N!_qo&oR#ip{vmGb-z(M;4Zei-P(Y26M?-B(AjmF zbZ3nk4(m+_t2MQw=7W`#;4bdfx|zA#qI|88dtX(+E+yBl+1v@ImkI6yBGmc$0?qz# zP@ULb(bg03FqmuNt?!}=1$1x=Hq|uNBR}7}Wbn>T)Io8<^T+qSUK>y8R*Ca7Z0veY zspNz~K3~z4V+0e5Z|~wG@*xe@2BO79oK5i>C6jt%z#gwz86Io^%HPSn)%9Y(n^^qY zKBMd6<24KJM=zh`g*&s8XCmwQ(yqToIfvPG1Y=9zc$drsW}hSI68%?8kLbv`RL^YWF7sz$ItS)f&A5FUm`zy<4-M+PW&DB+nn$Sc8^YapH%PY1{lPqf5 zc`z=LR@Bg^WP-#ZolcCiZVU+%m03%R<}D+3Lu5=vFk4^Rp^gide-KeM7_+inj9`JU z+bQ+W$)@;Tx@+tSc6vA32n6H}qyGD`Z^T@d<$fqPPmTxfHR_UUpV6HHs#xlX7SeMK1!IPE|wa2TQaRP5k6I_;Q zYgjq%138*~(VsU4L%92Xyi}HgN*stao+{@GTG8R1v z_}^+bv}jDhcGM#QU>9%AXx}0VfzwhF?g9UXI)P{Y3$eARx!SAwFwvdjpsQp9IPV}i z%{vDY`1gy8`#WTLscYg{pmj$J5)m|wa?mn~PY~tQNyFC{LQt#?DEi-iT&A}GF)C(| zqB0JPWi{`x;7l%HR)GPqKu&tvCUY+k!hE^eB?i9}4%LJ;$f?!c)^$+o zMZDoKmTp{5_VT*ll1yo|Pxs`~!_eAuCEX#UxIc3)?w&+Mp|7(90pWsELm6{o)J!5f zdEo0zBy z+rs2vNkuIH9a;e4xgWLu6MF-nq{txKq)podHq0&PX=ze&Gm3_TEN4ZHzq%xplLL%* z3qK>Y4qCA-gUA#jF8;%_`Aq}ut(9PcQ2eImj>*>hS$kWed3)i%Sl>2~tLYT|6|^wO~5OExi-dV^9Alq;J|{}B`Jx) zO}VRjI>pL$RAYLvSi(mM0rfz7JI>`lt+mL3w3SeejqZ4Syl#TBioed2+SveX)ZyYI zas`+T2a=n`8pW)9>1&OQUq3pyx8muU^|%)>N)arWucb{)Kp-RxIRnq{ZvI4iw6_@@ 
z7iKi{rR_wjBi~T$pJ#c)&VM>1Zw7G2s<=cX6+I+n8q9e@4c{mP6^6Tie){Q$wrmBs$KROA$Zx=NuC_D-fJ#; zOcc4-XI>}~%z+egv=nTm7otPP@9rBbL=$V;;J+JSreRy&Xq&CPlF*E*A2B;x%;fk< zhzL_D6Jht<`dLeVLQ|4937B%>Q#v}Yk%M~4z;SRe{wlu#qLW`=EfRGqudF@6lLKE_ zCt~|fjkE9M&m3yphWJ&Y^qKvVU|B5&{7av|N3@j-LW@3iv~3ER&DS<6;Srv4NP9)8 zgmotTTg&d~ic75cl#wkDi9%T(FQasRD#xzrdU4o;EDx&=s_{}oN-70CpBF9Db%88o zMQ;Ec6&++aOQS?Vj%4f0*tOIaI$2Z`*~QgWuvXKZ{+?x3j0k4(6l?a>^#o`XCakWT zxY&DYQjt8wgNYO*bU724BPm_qg3O$fh9MlD=rU86mAD}uW4s1ad;qtp$B}9|dz_6+ z*ymntA3-OfDyqT&M_+W_$BNmMai%%aEZa51tMDlvm0k_1!ojbmMyvYYa7ppXD;<$r z2X(-r4~!m_Ag=|$By_z6i&2`4yD~mrl+;*4mo7|_s?0{wMnc)<-DN3g*_%48MNR6j z^J$59axz?Wb`?+1s1i3mnxEBh;}+@%3O(@``F)SzEA9|e6U2LuuhpjU?<&?1uP0v` zP*fU#gGbajUnB_V)U|t<>CuAodS-z^H~$@)s>JZeVm-^+4w<)(!#X{ywJF9|qX zSs_CwcXktt^oTlH7jK9?g9{G`R5r;g6Wfoo^lbR*%zUV-JSnW}N-}kD4Lj7uaPy<7uC_Y~%DpzR# z?PkdA^xM;_E1dFbDR(~bQ$=;1V48E)Pi2Wgq~H6XLkE$gch4f?9@~2Dt9*17_Z5Fp zrcBf_Y+&ol7=N!M7%5btEDc_?fA|5T1u*C0nJ*XILo-%$3#i9UNo>O=Rsp|&MG|L{-c>!5-vmf4S1F0i92~zc=ZT=C*Y4vrE5r6;MzPl z)8bkhDuU80IZ%P(T<&AD;$0Sgh6NnpkRADU>@^ zlFfdwg$ot8G(yL_+V#wR;v*$eQQ1Xb1-$+%AB#du2CfZo}PCcNkCI-bI?Yt)a9d))h5B={+b9)@z?YER`fS(19 zy_^WElGN|UG?>CIJA6$POu5=L&H!4_-G@65&!l}2d6n*TlijTS`is)vx4;!i^BT3> zQlt>q2+8(61^hH<9XEjPP)Mwp*M=6itZ;k>%)Iu^?%{K-aobP&-D4NuJsh~#5FDtykIp;r|NEUaz>nV5z}v04;FH@j^NUBZkGy;z3B3-`vE$M z79&{I)mQRnH6VSi3!4UFmMhLEIZf0mU)jI8En4*o&RL7ONG-NU*2Av%1oUjJjCHzS z%T3^S1*JLoi~>_^ZOIxf2dqFeRxeKW_A{v@RfoD>M93X z))(!D1=LFvrxF!04N<3%)mX=i31d%sEok5#RFyJsQ_&L88^6dd9Z4ML(t`VnURx&3 zdSsgvnSQ6rsM77)4;{?F?w6ipC@5%ATVvVX-)n8Pr}yH1IWCX$E#=8EtjdVSor~V9 zc`2btTQ4}GP@ht6ucd&?rNyTT%gL|34|xB9IOe`8&6-`8uqj%{{Wr=do>-d`NWS-R zQJ~$`kLc8^>Z5!nlCaPv5Nt8ADQ8M9bUgLKPZGIM#Pn@Jpj{f*{_&Fwj|>dW`c;k} z`8L<1q@Kv`Y&FJ&HSuS~S@;Y567n_3Y|n8d(+!P`Gsg2um=0DtgHW#S$jSpv3Ajv0 z#MWD3Rw5z`5d1l=DC(v%W4o4wQBQj^lv1_|crV>-jXTdqzbez< z*%uh8ho|K%AI-2~BxtJWD3Sw{EaS$n5fF2?Mp)_KM24!I;PIWzm!byie0#qXm(yhV zH#skht8wQV44iDC+~|%sCgM<~Mntu3x(;P#WlQ!J5wW*f-F#U?{Lv&tXYZATngK)O z!%*;a%Ng7CfkLB6@2l!1BxLWh1MenSR*AywK9IX2@iWJ&h|ou_lMLhV2GsAz3-Vra5=RupWGCksSCqzWC8(%w`<_cTkzYajS7U9xHt4V+pe zQ@9On0CUqDD5CaHZcW72U()eRw_NeFPz?X)2Js0pbkbteA-jAvW-@2uBs%IGQ%xKw zx#r6EJoik5fWg|`kNOPc#U)2|&cpz#w(agfQJuXVg_8CxABMw4yQ6ryvCTkLFQRtA z5!td=<&~7Njt}JVHa@~=bBt-wVOZF5u-4W%LI!QwEWK*Tj{o78T9Nu&dXuv%U6EWF zZset4L9ApE&~sA`h&6S=bG>RwLlPei6Po@6VEZ#UnrK%J6iA=5DVmzAQ%cz4z)h!h zyy%`c!SUTz#o>pCxh7vsyNom+`93)o_$@i8yV|l1(v`fw=&KmVUG9NA@6d=UF)PCO zMH?INu@245MV#(`9A}wX%i5#4c~0Lh0CR(|T?$0Ab((2UcXx>TAumwvJ5OcQeASBh z8-@1VXfD8;jmDwvhZ|B1Xe$D?Yw;1_+CpHt z4K+hn#!A4+4FeMJ=8Im-Jd#C}A)^XGzaPxVqF>_d<8k4(m-g1qOI-5)+po^n! 
zge0~Pfk<&=dzu9U#%PwX`u4$>7=GTE$I&=I+c^X8Y}ze%+5Mr1?Ymh9!~jNyA%sgz z_`4@Syz)sS%j(pVl$jc49ItPu?ZndE(x8)Qu#pLXt*>`tf0M-H3}mfm<-3}rmFgxP zX9!r4y3YjzN_CG0+@_|^c~M&Egtb*7MbxyJ+wYO@Wt@u8=uWVkRk+tQhrO(#);g0S zZ+i!{gDEnd9eudcr9<}yh|MNMfy&kFtqH}UyV|P(_0>L$3xgaS+juYZ{(W_*r9dr% zRJT>T{=Kp2VkM+{^pB%`+jWmuvj2kVjnE>k5 zrh;`vfXDp-jW8o`!z;YaF!OgJllRB9W8PIjBj3)%Vh`IqN)kUp3elcYHik>?&fc33 zuvYE)&T8yznNrA_w!m-F7g~Q+PU&`S#hXqp$M=HR*a>F54=cuVelx}XSu2DQo?N)$ zpa-P$ zgGt|3Ylf+r23g9NKf}x9Kh#he#R)f5`a)ObXvqdQl~K`MUzt4xJu>JhLttPIZ$-Ie z?)0kn2aD)y1+qp>0exc2{!#_mi^ya`k&(2HS|AguAz3s-+yP_7CXn{ zjsGUL+f@Lt+`nUR`w35~3y=&9|H&~zv+2!ff zX`R;%Qb&tSmhKj|#<0EzO(DTR?n`Wp7=J6$_`MNlbcVWNCm4Wi0Ha9;Ue>4#Av=ID zV6}&+YgB2h7`2m0+qlvxcKq+h4Lur|+dws|vCg)-A5fpgnNH{O9kp47zkcgp6-e>8 zo(&K}-F$Czv^3-&g_KKhNZ`kpwC@5crB*0P2sDc1KKpuU&Ah9w>-yp;>m~89^RvJs zi}MeLfx0{S>cD&MMgzmxugv&@T#=3vT*_IEU_^>kFzUjHHYaBFF3;@GgfU6d{4@EJ z6-@DW9yUSp6_Klo|B1p2L0gjXs}4&RFtq(u_AQ4Wt21-ShFVNkK&yo` z4A^}{{&)mGQF|1!XXEgkLBn&EZbNLo^#?sO#{@7VucoH4tg}v|#Y8Lo zy-5(7fK1Y7Yj0ic)tA}wmhJqtGw~+fD`2e7dU)xtODVJ0sDm*!J>F&cMAPA6Cic%D zlg}2e#}-ONtm?ma7SCO6o49>gfWGT?(G_3qF$)$KRTsoO;%`X;joT?ufz|_oTm`Iw zcE$IEhK?71Mm1cdcCz1dX?5Y0*V_K249(i`Ow}@L%&ONKNi(CiJlvpXjLm!efNBe~ zt@A~TA&}jFLnsk(VtLTSs=|QlU+KwW&?tzV<=`}x0i>_sJM<1;!p2=yK<+q7h#n7! z+YCl6cFKc{Z@=vNr>8Nc1WFnEHC`b3J8*w9JC@#2c0U{1uK2Co0JlOdlGb~aQ7dY> z?SrkiUI0b3z=5p&Z2?kqW6>3@ak430py){5creIdQMx!W1hsh8WV1*&;z1&|bcoA% zr0m;KfSJz9k8$3`4QS=qB|{v{;E1>2LMdHlb6v<;6Kl$pG6`EW?Dp?zUMPY@{wsN*{kXAUJpK&iDK6l2|}0HZ_fMEfC0;4!&N_ z@MFzqu==7Y`lKGkI5}+dIc{SZK<0B58U_rtL;9kY3VODg_n|{8N8a^re>**Hs3)$L zPR`vW_4KUYRd^_k?S$}`lT*?@0>dYJ;CEC@;f7UOuR z)gnQb8@eCdT5%18OJt8(QHyG~za|(274@ya_*_35p3oUGVNRaqu;b8n6NB&Ev1$n1 z9muArx0yPDNCYHXBdTIg{ZtcbL3rOf81x|_)JW<4*F(WcUg?bs&R~@k@(2K?o70tyLBrZ}7Wu-a`pX?t4C$U|^fscWdtTUF#*H`!gYdR`Wgit9Mp>3##{6 zFMDP+eb6w}_2($AJ)#S@%R|yebvLd);zzRZOIg40W9PQ{AVracuLNHF$lap0{f-Ay zU3ubbuC0q*QJKW2XNJ}+7 zI4;ep2_-26?^GgM+<4AD0WtOlFkQ=InC_LO)!#o=BW*c5o2>ZsV_#n^9D0yRJ>0^< zP|}Qgx~JH!)Qt-9r^bx9W!%5}a5Qx@;QF=qlE=Z5*Xb$keF#r2l4|?Us)M!_LYuaD zMYUh{cTc3R*Y!DURhYiX@29BB8b*hC*V@fN%Q8&k{!7cS+Z*(4lm1UaL#X$gC;pd!MvzYr>Wie z9BMyZb9HFq?%4lC+t+nRS_Msd`pkb#D-k=GyFvalU}$4Sncychi|t(8 z!f@1ajIVa@A-_7^ar0f^@*<5snA7+ZZ^WL`I8zCitp{5fOCjO01jXP~yi55NAL<)E z52T0#2`&8L=g0;=?mvTFHJ5hdHWoSNsN}J}LL5XK5Hsb`<8zA*-pgQ+5Y*d7$mF4Y zG>i}nd*Ms{N=EVdJ$PBd7`s<3xRy-6cW8E0Zr8vj@d|r7PvPoo*`NmZ;QHy&Qy+Xf zeiO!_O_!%2YqvSZ|1o!+#(2z~`p4gHn|E@KId!W%#^ynH7tb3Td(*x6kZ3Cskas$e zK;yO5BrI!*=I~$HJJj=c1}xt?FErRxOSc*SzAXC4WcLX2&(z9P=os~MlL5b9*?+M~ zA;FaGH$cZ($mDbx{x|^Bf=jEg2BwLp+!~pu<+)`Td>Suw<69qp(~mHVTYSp>HRxzh z$69B%&yWzt5x}gWvqN=H*S3OvjMkGhP+9SV(QMnsC~RAAut8&Ed9Vi2ZX7(&JV+1- zZ!StVQYTnkUr|Qq8*6o6qw{VJ8Q-8&H5vC{lUc+Rc^a%Rr+6vYPFu#_@9WB6Fbf|@ zTW&p^)X|~WIb1bFrdb7!>~H%h9W19TJbl6ouTAnXfZy$EQGvS77P8{^KUEDLUM%vg zpAfQm_f7t%6fRw$5ro4m1T*2APT=E6WS1OCp~ZoFEBSIwe3Ctl+=)T77V{jvpOOwI zw_lV4GomGB+N%Juyjq&Jv@fc(yK~Umxx)6t&JWX%vnr|IpO)ipB+duzupGlz2vNRB zEF)K`oxspB)pVT}b9}0Q5PL{LuLN*lHc4gJ0YfeNM6E40l9Sb*wB}Yk%(~~rtk%)~ zI=GD5Vqlho{$Por0a~ZlaM+kA!2--HYh=pXzugwS_qXWRB)Fyip626SH4!<|ka@@- zdQ(f`LkV+6NQc1T1X0?hDdxG?tK4SDs*^}%Yn+QG$)R8aq`VZoBSZ?d7!kv9r;F%j zgJBO^p6@byZ4SkF49z$FdQ~|%v9`L`vSJhr)1!ggr6vAZ{Y2vzyU?R_4@gvqI9;{h z30W=L8zI|G5X_-6snzPRpPRI{=HLH9E6`yVJ@~Z!FEpXcc&%9iYxfI`4RPb_$PdDF zOl`k3CfvD?8mOSA;V>gWk+5oAywGz=ESsG}r3o9MaWTWcNXk|rBPejGhNnI{4Jn^r z@!O&lV6ZK{uQ;|XJt)>h)q&XinYy}ksy?olwHc}$c=rR>{Ganxaa|_a>FJHU{Zv%J zRJX2Od2G=nnL9&lnieE&R5J;vUy7`VQqc3oT`wq@rb)S+-R`Dbw5w5qt^l3QrlPaS zv<=oQkP2t1O|zFDiCp`8<{L~c*mFZP!fl_&i-M(B^|qlw^t(2bNaAg$vjQS2Wrkwh zB$*#KL%18%mJLS&ik4$U>TgZV${{4e++PpJ`Thav^`UGSIdhzXM!%sWOYeJ~fq4nl 
zz{682J}HGHFR>w>T8>kn>UQTe%9q<0KCSfoNh!R{9@28ytX!Tr^Rm|A6Dsm&ro(2^ zqN#{@Rla5mj74AqmG!)!)Su>;Mq5e$&;xGFl8Yv+31#V-XYJT^%lXD26SfHT*?iBx z>$$Vz$Z68JS;Eom*1@p_yQ z{u39)A?$f&ft{=lxhvg@!pChKRYuNaZa2f5(Di+&5JxzFRl)sm69+Sa2-;okj1w16 zVy<%$gn+a9Q-zwTp{4xgKeIM_LZKy})c^yYU|}K-a4i7oiq?dYUG!d3hU~gLm`{k2 zU4FxCr@g`au&NWB_VjlprP?g}5GIs1A)q9Ky~e{QEBn60v;2?H{=bbFn7RL#-c_YG zPWcx)ii;NmZr}Z>Y!HAq2KnzKi%;rb8s>kSGgs2ey?@)h+MRzCOHNk*#{=myEvmzW zDXbsv`>$B(fA%Ruf^dy5VydIhJvZ+G&NK)6HBzN)he-YEwb4}a)K>-m+V`9{UW&Vw zQL78Zt-qx$kTZ+U(>N(&i=tP~=ma8`C!Azl-J9TFqYVg=`ke?ES6!+Ke4V}b+J36r zc-^~K2;LAFJNc{Lk3USg3$MDx5#WE&D2Ky^ZCN%ZXCl=lat;Tm)Qd_vsYY9Kxp*aw#4%(4D0P_{(i<2y(LX5eff&)pRpTr;ZiB zJLFwr7_`MgCNLgy*Y07q8-6#9VUZfS9|Op;m8iq>q$0Jrr61~awbJai8l6!#j?aa0 z&{Gj2Ulq}3u$GN7`63}7W@t@s;F(^>Wi%&boQmp5U&LyR@0^Bj*k!kg2JK&<0J%>H zdS?bPbB%xNDB+*kVL|q>GIo$r@+ouO7-;X9#$9>*I_ZExR&~|i!hn*iZWUqtwvn?w zxgCA?^~M^kUro_MWn>Lf@5?0WzyF-1qqFw>J$eo5JeA;|Bhl>?H_o<6^hf@^lK-J` z4}4nLMrj9Wy%*idZMED#_KNI`0*1?r*8GaojU-czTlHTlXUGy4gVSV6tjxT-aQ{YqfVsOE;jk^3R_UakT@rRhCMo{UEh8Ge!ksN zLG_Gf!<&V5+T%}Hy0+_8im_0`L`8Me7xa_YgHtvq@IS&#ukHEu%JMfU3vHI3e$pjW z@GHd4MO#HD5U|JROcWK0Z;X?QFI2mfWkDHhljaN}HTH!&{eA1aMHd|=d`4V^bX2<+ z!9}@vvfg{u6soDBqFvHS1uFn{kj{N`%}jLQPi<^$D*@rYzlOsf@ z_A0WulM`o%uN?JM%5(GV)ErfWqX=?hLVc@3jBYzB-#||GWO)lt;ZCss?zS~B;1LV1c=Yw(apsBh0AoFh+J=ao)6wx=kUY zP4xvANJ*{GApXD_K_@0;m%)3kLl&$tEKq0FHN28@>7L2Ex$+VbYW#sVOu2ddCF^Mp z1nfMMGiC1lHTEYByS9!Wzph+19`T~_8Wd^vG8T&vc^Bn79cAr5a>Dq@sVEl-#qmQ` z=P{jdWO3~F4W!5(qmAKpI+=W-&=0A!rcT=r<`OfSA~`a`lJoSDooOQzbS&8Y{kQb^ zRQv6uEV`DF62zLLlr;p0M>hd?znp@?E%VB=kP-NT#2(yH3dAo$7(=-~WSND=+QnkO zd&FLRH6aaJKUX@Ke*A-vSWpo{>h%Tqt8dJ?7M!f*cCyCG!P%UiUXK$9t!-;wFJ5Cv zt4J-`J@oyyl-c^CCNHPdPFMye6w#j=E71JTU$EdQrEu#%EnokkoUzF4U`DuQMILet z-0`*fG&s}+Dqh@6WKE`8Z=DHf?hdG-w>Zs@p-s2s%NKpaVAV8Dr44DXJOAYTW+$`^ zL~TqY9JKJqj7=xZ78ly2-8oN*6K z)^5M>r(5P(Pw1xvVbIf8CNYpGBh8Ys03dZcdb|yJ>$)^)2C##uAoBoGqG63FcOS>9 zhQUx{J`^wOsf$n0FVnV82c~T`$a_f{YLr|ww|$-`D2RKq8iy~s^LA48x2C1P_^^d( zX)(#6P}uQ?M<5yTdYp@DG*n!op_2-3@vs5Hk<+#4bx6QXFoUm$7vGRE#*gllGMnEf z8-#qTBehE&@Tb9>kx%h?BEK)V=CkN}h^AD(Q}=m<#Sc87v0GZzsrOps5sk}-y zpdaVso9TMn4B@{EnJpG#z&pHs_j=r!V!-G&P&s;>`Jo#Y_5oenGGtQw@qs2*$hr-IC&jx=#>2MS-ccBA)nTW0eQ6DqKZc;w+r~3= zJZ4xyS#qIXF;kvw079aU5?1Hi@kfUoM7^U;cT)e9@@p~C=9(tQj~~|sjQ{#jc|C3` z2@N=SQ=iW-gB_=($JHSBT{7QTjVACquLOsRXqghbubltOJG2<}DN_qOgbpbA{E8Fz zh&N)lve`6fiZw%3D*_GJoz0&UwG)sXKVfyN3~l3ex&NrS7acj_dqGMneT&9nutm1I z?_`8?X{qW3wPdovKI-(Ipt?zrhlQow;N#@BN9h7tB+MmD+KSdT&3?OHc0-%vN>sDs za#PKXhJ&J~2mj0CmcR=G7I z10}B=olnNfW!623!p|}vkKr%*j<%S=WXvV1|57UNx_b_U*M<+Hnp%8c-Zx=(!J=tw1521o%i4VCgO3YT^G0Uhvqb}z$3s- zNVwYw8F6=22SCcVCF_1IE^o&e2A90ETuFMG-Jav@tJuE{uFls1L=Lir2`CS9c zvo=WU%c?>5KC0zjEF577s6ZgIKFC$k2=|1-EP?7>c!6Lm=LGsYn>#V5}lKSO~4QZT=MmDev$#hhH|M;o5MAq{zko3@q`-@F(| zJQvx}5$?cRJtmQI0-s^o=h3Z|W?Pd#JV@xO9IKd$7BJ9-J@ol%>M6@haj2A?s*-Ed z^knFekMFi-OiaA$I3;Y1_hkNoOR%wzkFtR+Q9Jujb2>}Mu5Crx3@lR@JaZJ|Std0y zd%e2a*eze+W+KM3ltA{nUcSDIyzid4qYdp_4#bwiIC0;XP+!y&22~OobSdY&CZ;3= z#IhT)E-D1*L_iXJx-$RoLA+(HYmchsdGL-`PaCOpdyN~NrXMCzb$`&%QmH;6)gy1c zdUn0no6J0<@{0d52up58&3uEBv^^wLjmsX8pwD8)KYOEwEP=u4-m{n|vk}Et1_(w_ zQyFm{l^RKS0R4xvou&f~;+HqNZ9K=6?hmMi7*!@-^nfgC^2oki2qdDXMu|5^V^0RY#dfYR9@y%Z#^`&!azr(}w%$ayf;KK?t}FC~ zUBY9|do=jRzE2Lisxldzl}@EyRtZf1Z!7kHdMsSm#eaR(ZGQ#-vxCEhfm}O8%B^EX z+wnN6VEBLZaL3;<_8O1;kHl^3xQ+Y&wTLCQiJ2g^1^>U{Z2&=x(naS+(w+FhjTw91 ziDlr>Y?*e@nu@_c;d&qhMlRgvK0&y9-1j9{Vz&9TX7(;=-_KwhmHSTpx9P3I=``uk zGE0XJ8QDjpxlhwP8&KD`w0^z~&=GYv2T+oSPo}RyMT6h32X_qgG&8CL0rAAcy4bq% zRg1Mn!vqrF!R+Mh#LlcVeWGXy!iQ>+zw0X%zQAP~DBYhY1o+Un2O9@nxIF(B2}3JQ&zO!zjYe=X_pMnPftPP#95 
z@7}v*e<`La-lsMW@&GGT#sK%FizD0!#-Mu-YoN1(Jz8LYUa@>8%ET(O``g zG&jO5BT~uE+qyX~GRN)3$eg#94M~ z+WALRo!pXXjt|vZ3?p*XCF~8l+N#(jAA$PL&Ae(*t&KH174acg58I)nL~f`DAZYSg ztEky5f6=|Ml|Yy_LrJuvlgotMKhw~g2U>-jmyJ&WdiH|?x(~O#4x}^Ah24g_55!-E z%{M@tuCCyLp>uv;6aB;(^yZ=bI;XEkj6LQq&fI>t;P{q=f$vA>5sUW56WK(pLYGfM z?WRsW-o(cbJ{x2sFEAD^gmYrp##@D~$D#*WpG1AYIGK7D8}}GNSCqM1j5aM%3l*0d zGu+K+8`U0IWM}!9C%Owxs>itincE1O^_}G%)9b^ZD)ICMMKJc7NFS4BKLz+A4&RFH zX{C(W6$*##9f;@%)@-O#dskdUq_zKkC1!FEtX6zq-G~$QH#h-cqSTN3K138U+8cyG z#5t4W+ldxg&;&3T9W>)Cnz;U>w4#$9ITCw=z>>Yky7f1|_(QRBY0$C3S_Yh;o2FG@ z!D{s zZ-PPT2SKsaW15Fl1Wf0k!;ruT;6+a>RsUI=3XOL`Yz|`*=d*@Wy|ny{Yd=Z z`oVZGtj!X}7`gN&q=73^w_KW0L(!DQ&N0!(x#(;&tl%1DMuL6v+MhNeA^|7x)Na`jJJ!d%gvTJa9A8s=QEM2wzHExv8>@+qzR+oj?Y(RpRntK7-!*lLzXZWPM-A#N>3 zBbU4qv?nQ`M$goJ{-X1^SS#}uyRMrFgbzE;6jHcU*CqD`Rh^FddUCB&#qQ(M*!>cv z(CL*pJvV=z&N04_jKVO#gEwVg`B`|kuk;mBJMSHy_@gY=#PB(fNQu6K^<7diyentz zvEB(s3biDeyxT5gtpJKL-e5b^ZW41Yja%hFTH06O+~`nbG*{=aj30Ggf9*0W(QS!Q zt)XJP_uIx2Y4$8u-X=b2@xE9!qf_sTey4;Ntv+(A{$1B7L&9i_y**1m?-xP!-2+=A z!kD($XV)XdZB4q&Kbe%|URhGvl(XI(E4z7Pwpx#pHZ&`S9vRmJIqM~C|0NVw z!JgqTLuVJ*fO!2=c(2zTK|LM`QXQ(`nL_3sjqcx@P=9|npya8+e3kWzJGc@5WHmP; zK%TOxPZuvHeHQnq3FYd`;MHkJxer5o6{=)$V=5Akt(T(V$kjX8mX`D4kn!-HiWB3K zrhmOOS-t}3_Elzz!xS?<4tQjc6+lNQI>X3eveKWW8@GI!gQo~uhJmK$pr-TR(#FUb zcgBE->B$^wlc0g5bYws76KK?o6UvN3n`vXdw)9832?D6v4`UHbHLa=?TuGsJ&Q1a% z=)6wT4+7q^PNvMJYBrbB5vGMOW;pB$$L0^0adNtbu}3p3(4^pw-_iNoi;gK5D7o0c zX`o2t`Z2Objh}f3^4&!0PQn4B061^%1iLX^?5K)kx*Hsj@*Umvg0Xz@Rp z0k_d3E3?TD2QmvLylrC^;)N&GM-bAbkY^)^f_mS12sSZr|14z&lHCZzcs7(4DV+o@ zN(duyS8EscVbJGT08$GS+}Yp|F%JOm24v+p1u(f5i@9tCuaw-}+#WNGfJ#C0N%^n&&$Ww67T zW5jL)V~J8)YEIA`mvP@qX0OL0t(7F5U|1;fC2CV|DA$W*#d3;Z6eU28*G(YlME@eVMrSktOAwq*zMvorEI3i z=T+1^r0TWVtSw+zDd#eD#635fT!86&e=yp_fAw^{I%r!}=;_PSId#Q{g3**;kuC3-N%_iiCYeR^@X~Jjf^s|Po(FqADJwGfBFwnfD$8e0FmBAdt!F{M z=uHY~yXaLQ<9aBcF*91i-}#mQUDI1UX}@lNTm(eTGI3^f-txoA?)*-W z_9aIfZJJyK5%&=)_&sDNApbf(TQD*)Xi5(Yu$oFfn}a47w^sJ`wH&_snbjvor0=8B zzX#!*s4-tZh^a1Z3gA~fZb)W_UD~W8@~=ksz=l@I_;91~L-AHuhF2c8s9~so7dW~1 zKQrO6zn&Zeg+bmk$q@ju4mghiS?Mb{%`L2*q}Sfc(V;Ay$YRxioyYHugW3a5(Xk;l zT`0)65qmYf)Z*t9`w#l$QD*~ArJyXP5oI9Igj(YqvOF1{6+<42~V7 zNBW@O`?aE~zR=yzrA7LLo$9TIjx&*k^D*gr?YL(J+Q3!ZW5jSCS{O-#d$=zokzOK|CGO1~~9MHW|w;3V-74WrkLTL~6cE zJ?|6Emk%;%cfE7RsidSNCf6Y~C*PZSP*^0iZcQ}BxR-6!%<{Z+A+7jB^9Rqaa?`c_ zIg|bKCHaJ$?s8)_LG(@h=>DazVUKDUzlJ&9R9RzEpg)>pfI72KT0O<_6m4?!vM$sn zo^bMULfuth!YbvXZ&SV+x1)Ny%QZ5zpZ&4-D=KwA$zC#6_^yWz$CW^@s3u7B(B;DZ zIkR13V|tm<6_<1=23Y5U`|2;OL3s&*U`mh}33Utj6yziN!ouOcaVyVL<(O9aZZ@yHeMD-^Iwb0Id{Noo`uHHPDTbkwUV4(-9tFy=~ zLiW<4kkn^5wd~U&NJ6y_^F!wL2M@FMSV!I2i# zq6GHoessz!hlJ0xkm>0PY_=zR&v1>_*dF04@KqBthYmi;gSxnmpWpQSW0LWm&yMg; za`-D(jri9cTM;Fiu&1){WIad_FM6)4_--BB5}D1>Tjrx#&swg4x6!$Eg_mKv%YocP ziNiPEva}T&B4K^2XD%zMN%vqQ?JB;XHh4WQDY-N9=QWdbppq~~(As+jGDw2`Rcb_% z<0D|Qa66(93*MB{)&$|hAjwNV013N|ud*aU&Ll9H>)*dY<=fd1am(JiWycS3wqdcr zHt`(&V5Ac!c=MJw?UZT+WQ^x^0ZU zf;@NK&Gv3y*@Mzfki_Q9O^PMEm;nPQ6j@nrdp=`;4bMvFc8>`!%#2HA3%6b9e4brj zSH2Fui5GJrNHiX2Dk0giiUV%an(A4ggsoyFXoJ9YAx=V zpRoTqy)6z`;*#vs`13Pher3gxC}e?JqNrWhDgLu4h^CG-r#^2sC$K%XEuUi%binxR znc$68!7fgF*DD`8TsQ^rG5enojZ*spFFAgMHe+_?^b;>vHIBseM$tvniakN^Q;4%=2b*et(9OEy^;8Euen?NGu=Q%HqKL4 ziT$M(IfYKAGF5kEd7(kLB@9zhQ>|WaNf4Ylt8iSx$@)O2d+GARtPc%OK>F@Hvr$+7-om8Mf zkqmp*;a=ELF!8%fiuon>+_%qD9^SoL!VB$);fej4A{SN-=S}Lp%RKNy1;ny#@)f_Z zU?%kU<^}`1kI+}+OoH=z)#B^^A3t}b=x|M&J2--!+Lcxnj<9OBr$s!Wkc97Xba1Eh z)lUWA-kfW@C2JlW-sH<;rTd#X_jDngE50i;99SMsCAHt#KZsdT+rs%P6mHwmcNDI4;CEf22G%;R|VG0J-!Z^B|wMG7H>N4v-+bC$5)8Bd!dle z589`-FGeT{9Audn!!{}vMKFChC)+8>{+O|!7y 
zkC~2R%f&-*-yh}mqPNFcw(&taerp|4#LZzVJ@5T_j*IHp_?R-ToO4PH?;Nk`+UpBQ z-{DAZLVfzfdHxdP8Fjdf;*Re34xuD+;X_@Z!@L~Q$~Xy84F24V@{2D1az)BGzw?bk zF(&TSfvmdlBWS($Bi{pt{FRT)-36f#tLcL;pHK`JeAXws*B^};s=>Jpiyw`O!CC{} zU>K_{*HrDvS46cgW@z$xo+2VByANX%D>%ey@j{o-_YL*iG0(ff2u{j+xK_r2kCU#Gow!@oy6x=z0O==O*|T(a^rp^A^3Sa2ij z%vZuhc6 zZ+_$1PbYobR6<~P&oaL*3i}!Emy$HhV8llI4ed`8Bx;+qC%7#MFB5C)=mP;Da-Vt2 zzcAyc+;{H4zv;KWFprC4Xg_Rm)nt!-_@ZiiPKj`8Ge1o2RmOe zUOKgXV0>SHTKvnqp0geuvgh~>KDO*Xc-u-oSm&}<< z()8CE_jIp*YnbEm0pfaDIhW1Uzp0 z`l8p*gExT*uaL{IG{7-XX%&L|yi^t$ZSvxlML+E$GU?yGY zVY_P}PmuJRb(8C`OE2KboamjG_&YP=Vt){)oTd#v8g9$H3=6($$YU%_rFLWB!K0N2 z0-|ENcFAWS5^@vzB*O`DboQS|7tfz*gZ+POy>(oZ|NAd4-CZKxB^{%?yF>v&N% z2lQGk138Dqys9D>wZw!Ut2N-QCw_FS0H7E6cC@w{N z4+=U+-W&6nCPi9G4w&UGKYzduS4ipuB|f#YX&zbB#PgJZQN7rcDac;{(IOU81`d+I z27tHX8NHN;wn*meA-)&BaFZ-8LhfFoFjHBt5g){YCCmXXS!b=~EHBwRYE|$0$+q=O zRyMpl?`rfisL;Djupfl8U@dmH$9mYib^JJGV%mFB`H;}Zs+=wJ$&IG@O(p3qSq zkAjWL4Vp82`&$__=+t8Wbn!I~%0J1b((hGIgW_ol2n>jpngS9q((uj3lYHA3pUZtVA=`LQS8x-mYF$6!-elKK+ytg^Ls3 zviCmkCW1SNtV-lIpsQX!;V0=B=}#u-q@LRS_o!}VQ*(>MTPeEGnTXE$-E;5f`)QJi zH;2b>Dx$q`cz#f*IsKN^=YNSBhYfHVi;LE}u$t=^QmW{BWIlY4?~%XX`Ibz;*JAvp zT2;N3CeNFdXjL<9tfmii^dterH1+uXs5L`ldA2;?^Pp_u`-eb1%@V z5yzY#jGf2wZJ!)pO;5}?*nnAnWK285_no<#oVD#qO5We5@N7a=m6m>I+wzmJA8?F(R?4lE9bhk@tDDK_sdiW9B6^laC`EI2 zJs?$4^y4Mt-t9{6MUQ3LG6mSw99EYxqrw>3=?ooz4rk_h(ArK8d5TVHdlU zB~J0dEq|xKNuNWJ-1%i%%|wzee4)-QGEaUA1{%61B-X3=0 z5Hm$*J^$d=qteM3_F@s)=b7snO4pWr(kY8&_K}~XfyiIKowDeCV+WXcVq8S;#c)o! zb#dZ^b_=ilko#sU0uehu&q9JM`1+!UaO0GLzEI54qB#vo{}K4OG@p^wG)Lp>?+u%& z11OT-uwt{yT35hYz50umSpLcQ4}%%S zL}zf88TY}5$nt~+z;}CQP_|#6Gb8f-ubSP7dA73EAd~EA69=(1Ei$xa{-k}+vro;l z0&GH0o>Ed3WpKtrIF#p@inuxh`#@nsa85yp&`i-ugN^jx8)tU{GcOWq`K zP&*2mF@6qw#lu*Z^Vu6pg<6(zG;IVC^PlHnO93~S=tuI^xfv*M;JvO{c;A7#j^4CwZePFu36KaA(nsou2N#zi5p}}d^;}D^73u9toi_JZ8 zl#&szYP-pE!LF{xlj&>qAd}IMeQ;C#63e&`S$;Aq?v_CYK$HK%tp9^1m-I&d0j>+b z$-#fIEb#M(!+m}5(qmfJzfbrFFeD0JUU}FVko?#44?lNaIsN+=gD-qQ00_Rmux9+P z2mJF7mR7di|4%Fn{9?fhGe0-fuw!C6JLY4d#`Ip;*23(u&)72@=dVDg=c(0dm_wHI zV8aYriJmBXdMi7r`>oY*h8$@njRTU`ZVoCtJLHAfS^*!Z#rDXb{_-fO@wuxII@*+<+;W7gIQQWUM$vkt+C zpZAon4N)iAkmhu8*S8*6D$mNMU9=`$Q09N_xIhcZVQd*FRM zU(>$0<3S~t&FZC2Q1Qm3Tq|Vd_RjsrTWps7M=|H?X0*BU6l$s7BMKi_75fBpZG8pW z@aZe#SFL@!phUN5)B88NfwK5f2YnqgT;${te4NKgAo6dY3h04y%G&vnA>O{}0zh}%PTa(c-m*^IzRe+a;Y&yQ!0mxA z;z`Eea=|?JJt~Mh)FK(Y=~qdhOZ5XFJO&pu}dvcD6H}9CvlV0gS55#ce`h5Fb(0hyd?$I zoMm7-!~=6dnH(UQ>8#ji*@`XjdJMp=M9lyn#MCt6t?FhR6*J0(vwPk2Q9c zwMmj06cauPJr$$x_M}0;3;ckG&tsQeO{AtD6RRk^Ph-6v?)SYM_t3Pb`R4y)e&L}f zW@{&kREGjmc_rU6yEZZ3x@?SeJ8)gHph6J!GMxaHucj@Tio4yZSS8Ge+_giJ^Fan~ z{|;^{idLOz)KbSWBHg%Lne4AyEBqvpVeS1es|GNHJJJ^!*_q6yjAx?!ll#{SIEu_! 
zL#aklh3<0zwW+l$ZQh&YDsuzEyjLe_7V(wem}B(KA#%z!_Xn9i8!1x%u&O>4TmKje zv!TAOCYiBq&t?v&<&c$jI~NR4`DBH@2wf@&;JHctz$s6XT0v&WFs z->jPjF0C~6el}rMyEyDl;Ge}M&hWsQn)VMIP@!3lC&>v3#lsz#gN@`l=e%4@8~NSt z?CtCx=)}6URY=9C02T#KP0+pW+;3zsP^>mk^uTpi0~l{>Qe=Yo!6XTC^JFLxGd}|& z8w`T+D|fI_pK6;r^>Pn^JLPl*wumapWv&r`~*r$Jl?KhIRyB%3h_ZCczg+s2t3Z*1~3r@rGY~*pOtMu~nREUWd z6*y;AGD9aPTAuql)Q z=DZoLQf0@1ygr=Ad`YI)ha?I zcZF+3xF#~TH+LqqG1V+xwHt!B?#7}JN!p%WJTQJ-^VmJaVwcI>xR(OJ1OF_$2B_X| zVVKs?qJE%WJ`{mK#U0oZ!1&m*5vZk>T=azYeZC~{thSS!U=%=-|A%kXKaibS68ON- z3vKQLHT2e^06!7VEJUA0=WVMdF`yUG6~}Zs;2lQ5bLtTZS86p2cAO3tH2ED>rz0 zRQ7`!tnDuFJ>{GWl8z%V!E%Q#mpm8a;PV6b4l-o0!amggn-h3TtqVD290gzB;2;mS z(gB|bi?MXhWOH<_7$$?yuLI>F9amrqwPQ4K+SFcbTRoQjCE>tU2CR*lI>D$4(fv7t>TP%jeepO(!^}QxEG(ax0_qTRoNTD3i95 zhNtgv=~ZRGHgeD!NqE|UY&!1E&?M;~qbZS&pUi)>EqC@Y=C4c>uoX;WW`gfQf`Tfp zP2&79k$M#d1QXa*By^`(zFK1z4KR#`BdR{2x#&(pANz70irZV|(CY>e711-GZYoPU z0}767-0JAB5Yg3u11(JTDl08Tj7~Dx<v^B-H zg`QNkOvN&nB(4b^1x2(-T6%@ zw4W9@@Ke3US4$nuE?mcXJ-S+>ILIVus3fX4z z^S7~`8J^>4dS{L%71m_!4O`nQj2$`f zSzt@m0<{hGX8LsEu%7r1@KJrjxRrHviLh8~5OaBpS; z#|k%{p_(~|=qGo4L!M^-j~N4Q>HIJ!B8j>Jv)~Eh&G`n7sLNKPJd;5%H$+I0_j_C) z461l^nn<=^Q{hN<%4B8cvqBuwF$UDi-5+QC4T1N@2RHcTqH(D|5SV&}Q)m|1nm#he zN#LG4UH9aW&0?T_aHp~|m0wZp&bm4zH#Gq)&dRSaN#jq`NmaDlAT~onXMtiOTK$ac zQVkcR$tFiWJ7MQnKX;v>n9XoE)pED$eYqOsjq7Ox=fVeD9X#%iZAVm-Zk+eQ!=Wf> zM~~4^*U=n_e%sZN08kOtmStW($>KYGkO;<#>oErru^~>>nAy6Bu%ORKUoazf`SV8! z&gOOcKu_euX@=zRHUq9ir^ph2L}*x*GD-)|s~-D;?lH6UST7TntQe)NBzsw@C( z4Ms@5{O2kEsIaZykZ*#7h5rkf!iBC_GyeUXOJ6^&4D`W|x7}R-`vG+!T@!t9p=-H& z|0l);&IoCs^#pzNW1``Eg~_B?Yn5&=D@1togx>Pmf8`_rpo<*trHge^8Iz^i?wlW-PV>)(9F{5hu9qN(Y4#Tw))(W$=DrlH)hhc zU>bz*w@-+kT(ps&(mz`3b|_Hf`wt~zW4gcC>f5U;plQ*pD(5P}cc7=^}0&QAEX_7CBc_}1x8d&NJJXK_?dTbi} zh=yg@QR*S5X);(#Cx?w_HO($et8wkgPT!acc5m1bztjQ=by~mK{+c*ux5-B>TlqId z=;6fzWtibW=`@2#Cjo#`wYMir<0+IecOowr-TX0^7Z1^ga3tpkoX3#y-B{ama|27-v3KeO@SgZs~y!go-^ zGdr6^?`TOYHkYY%M@qH*c$a$(Wv(jY-!tE?oJMWc8!fP(j^`Co~= z5rL(!@MS#%eds^nHkq>Uugb}I0&RwAbH!JdUUcM>v%@5(7u_+uJ{onRFg8R3CpBWw zm8|0BxMRxby%z(nB>qDj$|1x&n^U?}kIhklm^4wDw~i;4FTxS9NR03C{4^*}-_lEW zV3uwqEFYQz@os#iQ|<4R`B1yQD)0GI-Jvz_zX+H8y|4XqQ?c~*v%$5@2$h~&Qm1K# zLWSO22#lXMfbjWgXeo{m@G5@Ob)8KzGAkPAAQz(|*n&R+R8s=hwKQa@?%UzBzdh#T z+(d9h_euiF_ALQt{2m4?qf_3I(pjSzKEB>?_mvL*^2u_Ae138~eB!9)${ou~)TrwE z_b<25+piC&yMO;QX*G*V1R$>luuYm2zJP+l_K~HmF8%ZSOFr>(4Yz!M-Vh@-!5AB9Gf*MwAKUA%C+m)4(m3a>At6 z2Tx`VC%iqKf6{~TRe38kmHZi7^6f}kq6Bpp;Lu#3Q{@-GhMB(*&3hdR^QRd;NTfwU z#T=Ljyq#-)#EV7IJoWvv@QU>74zSa_*AH$W_3BLgZYANe``s5m*@L9su$1naCW3XE zAz^_vIv%e!J{dj`(S2Jo$alM<&yMQ3hD_JD&0$1))8RAsCD zeiWXqRyMWXo^q3!KIPYSD00G{c^|U05)9Avo;ZGhq4&p`^IMYrSbJpJ@2!yfY&4)a z!DqQ$OTEe_ol^XfVvQ-VYfaEd=O2_yH>ieu?x-2eKdt#X1U8+=OPJE2Wsl-nyO)zl9?snxl-xISk<5j{FLV!_ z=E73cXf`u7r@LD1W}^F8uQoPbK7BsLn)u}Ag6{n44Kn`8?aweN*;~F50b6yy+R`;9 zs|50J-njH=-cv#WtmBktn>5yVh}?XBiPx?kjj87LD{pY#mfSE__K_d-? 
zKfX4d>{-bP@fWJc!N=cf<=MaQiR`v^5h;t?>=Vy>X|UcEMeDsAM`%GpZ1gN)EkWKM zB8)ruwPLLXn~=yzy)ssfl4(8JoU@aIYY68A1!eeba+=h~Eoxb^NyIbCT9aqN%WrbB zHgD0nXZZnfmdD=*qN3{(3KQfMjle2dJ-nNRtXZlAm;W7j=oze`6n~}m$1Mj2x4{Bx z_wm%euYz9agqlxHL4xmH%LrBR5EgL=$!06ot-n&3md~2)QQVCj*3(A*cZOMOH-poe z8|fIFz>V0R2OEWuL~--)02z~z{e1+(o2xja$GV*`W6Lqsv_^;_!S=Uf7h3{qe`r8- zfIv-S7jZkjg6SV$QPOPks@xflyZ2aCapEJzWV!bWjoDBlAbI7CAwC@kCx6=9yWY=i z#9mi1^m*X=NXh#^OTLKLwy5;Mw`6?!*KutayuCd8Gsy&>FI9M-&C%T!8xpUzktNg3 z+!kE*Z1!}dK@w;XL->fCDt?bM++umRV_t5tR}t-qy8_{Y;=>g7YAx_;p6WoGF* zydM>CATAboJ$4#tW1TB-^ya~VRx%j>D4AZ1u+=F5U-lv>?`k#-1?}=3*R62lR-OIB zcqJIxoP}(xMjQPuqrLdSz?_OK0cHGnRh@Z6LxIB_Y5!0D`bBpKSqjit%xt4!g{=5Z&oxFm zFz*K5U?_v6xETaKTO@$-k~#PnfQ#`KEL=vi{fR3AttW_Z;w zcfCEH^}JGgjLG_83XbAHEDNsOhxf8Q8%y9n0#twK2fC9Zjw1+zE`>E(KCx)MzEpD= zl1&4Ocw3xx-XkfT8GwUjuf@MCDF`C{ryaWu3DZTj@-Mo?I^Ja-b?z%Orx@f67yXA~z99QcG3x+LpLl;?6e%)e z;PJJQ3b7a-IlCHsElP*D)d|3N@S6vnoSB7Pf+U0v z3w0HJ%TkyPvoSlc>{mDkwf*onT+E5(b2VMkGnkTi7j2?%m;WRk3__nJDoBG)VFw2Q8clAn}j8%i+Q+1gR&sU4oE0 z*mUnhr4LyfV&pW~?cu24lh(xjI@tSPhJ8BfQzN`l4V{stMH?eQ%@90b=F7{`+b5T1=h;?<9=C!^;o3qxY^R$NFG;y z9tV77v8S}4zfj}m0KGOd zyrkvMxjHG?{DLR;5HJ)}i4&Uat0HDTuF=0t2Fhz+HU)0gK6z3PSuz);oDw_Y9#39| z`uvHo<=iitnuB&aY4TNa@7WL-;;Y59cqGBIgYQYF#T+i5pT))OmPHZ%pmA}S5##&aiSMp}p%nRTtxN%`n}K|F z$0w6`t)&nCReA!QnedFns+Y;6tVIjVEGq32T4cEPA)&q>PDKk{BoMdO=sXZl;VAF* zUH2dD%^xyVU%gA$Yoy_iBGnft!wMi0{pe*}OeE6fU(}o+;r_tDocT@5D*>E%cvwQO z?WmN#NJ)aGf4QvRVmxK(OgiUi!0cRx`tvMxUv(wow5Ir;wtKX+5ALWr3h>K{(N<~T zm;!_8s+3F%>@fxPq)5%Xh@voVNZr=_)2|+V*V^fw!Vh#NfKJi=W1f6n(L@VOMx4GG zfe4bVW*C&N@zL48WTHbA!?%JNSrEG`9;70y@kQl##6=g8a5jG& zeZS<$4CAf@*1Kdn%5#?IG^e;G+4I3u<7ZLwsc!r*^^#1ekf0^IG zlmE-`d~@9YKgfLLui^RnH^k>ZKI;GPUj_g9f8%Thgf}q?IzB&yn?2yOkF9kF-bmG5 zTkp?Ig!C=8$=r4QinT1UIBnWY*9o)k|9M3EF|7#H1)c<>L7O$$H5&atQ5UR;U>L3u29ru@qoI3BOo zS+XEb#2`ZSv{cEIL`aBO%*K(ydm#*61p6P(BV~G4wOBj?4(^>nZTW~3ROfO0)BnjE~H$n6h1=p#xv}y z!PXbn@92o9z(SHdlqfRD!@n@00Xv(F@9?~;!LKmxMj&FG_B`%NC%u@Jg$xOk-SLSsDu3EsLe7GmB3Ldm%i+*S5<&FyH}DQr|838 zCBg&O+vg8ChL>`af^Zey2TCvBun-2P^Rgnyv5Pzkd>SZy)P4zB$4Rk|MNQY1E1I)7pEivhX|bQ6l&fego*F&a!TJ0TC=f5@I53$w<>qP^BVElTf;LwMKq| z;2GWpcX%^;Pga_fdE}78H(pS(sc)0~hpXtESs`Bfi>{qZiUTb%U_Mt`>3^WPZ5){0=l$-Kk57oPN-(thEuWGhITy zA>Tp~JN3*U=v69!FwGPD%qscf;&6{t)|%ruRSrFdt{3*&0}4| z$%pm%i-wt=jTr44d?M@$?ynU2uYdXPbFf*u7WBjYoB1qXhjTp6b{rYg0ZYf(2txWD z06OxSK+qh!2Wgk3BQw6^xi=x)7AleyV0ou-v~KL&Y&v6u_M|kMxpT&k`IAq9sCj}X z5J*Y)D-FWTtCrfEB>D$ph6uolC+|&FSU1J<@dw_Rp_r0<%4*Vn*{y9n-_6Fi{k)b9 zza`E!wYj*?ZqxtfrnpK|5D8!*i1nOO^Sy--@jL{rbf zvP7|(P)do2ef9e=QgaFUoRA$eLc<+x6ypZv!0r@XT8G`SCB@!(R_){`JfKSS^Ky0b zpZ7jDF~q-mr0+vwDzdE zSry^T#V3RO-tjtG;C=alX`@VXBnAU~`$Q)LIiHaXE-uJBA667Kpf3zf1D1ji6Mq^m zOc~8l0jGUtj<2aF0*c_b5-Qqqh5K5*C!c6?azLrnB)lB8IfdqmWYy2*Ttw_*=DvjW z*4KNHn~!aq*$iU4**n$J3j5NBsiU=D6&P#i&v0{M**~7-Pa9>9(KP@OgWziJ`jW)P z^7v{*Wc)eANNgA}yV&>u32}1Gv}ztL3TJnQm0zgf?>=X-qKD7diCzn^T75SBjg==i!0)(OX!N*$AbG)w|KUI+GSAT7 zw-tn82u17L^>**Q1EBfCcC_H*o~9eZQ{fl=(#+-Dxx*>~IkN`jXZnMb#-@E6!!jqj zm@T_N%99f)C4#OUSm7sE6QcSXi}q#Y-MB9+rY{pCRttDwds`Cj`=08hw};aT-!gyU zhgZC5A_=+*R5bxJ>>VG+_Vz|eDJtIKg2lUgLGBI>>r4ZoWJBXGLpcv_w1<6c9G6~^ znWJSYHA|xho&w6U@Pzlfk6|?M8DAQBtl9=hS%#L0eb0Ke1u&(wrU6zJYoyhJw5y-( z*9r!NKx231%UdJKXOd#gP@kLDs`{(fO&-9xS|S#|Jn5GDVnZ&X@FQL4VzIR`Z)c8z z0HBdSk?8}K-tpP~OKS<>Na?uA!!$lTD6Ppr+&q_fG|Q9x5HPAwUYz%VW<>hnbaWas z-#?5CV3o!PtJE)S050s?K1R&q`%jCm&J#6<_FX1hd!yoE5bND)Qd~rLAjlgv-cY znB|b4)NpZx(XnD3$MugCZaiFz8zHd5`f1PK(nG*Wd}|t*s={5URdt@`!tXJzfkUH& zeor?ZbpnE317&%>H}S6W5&d$}Q&LKvR&3Wqa7O}D9vy!EHLBTzLpJ(e}<$5|r>o=%v{ z{{0bhX_eJjhqf7E^1&a$>R)%jX8XVTO=lvQKG!TK40MLk=1Y?G7SZs(cE9^JE3>LH 
zuRGYb?c7bH+n+mbo#l|FLQ)i)1369yTTreEBj>CUlhO@+P41AJZ$Kv2c@>;#KsjBV z)Z)RnSOTy^qCAOqdnMt;Z7OzOIZibRTDfMN6W5JXd81bdPz2&P+v&JzNd5<^^ z>2~Uoi@LQE9gy}+X=7q-z zSiTnMMC^?8m?gKuZ`1EvXr8uF7HHQ>hF#3gCc@<}DH9v_;$xREx;Z2L<2OOOCCR#0 zZ{*Kdnf~O4B?ff%zT&@k8I3*ce?e~`%=qc2_RBx>^_Rz^AQ(tWL`e3==;t1aC@MJikDQQ)vY6N!j10 zmWaK|9~F<;+T&vHaW4YswJgc^Y1%pyJ%Y$)g{=u||?OZ7Vfi-g=a!%k>^lNAxJ!7dR)pa^l8)0LfU~^lbZS!PU zk@hy*tTc{{0ez`^H1d8$Tm0~Q+0QQQv`RHyR0L_&1o#XOVkF`Rutg-vS8}Ee4cExG z1tQ+^o8Pe%aV^ZpNH|R_LF#gNu6Pn?K(Xh|K+sHLQGl?_XKi+RBAYaZYd~DXEL;t# zqHwF+BeTXmUg6C9pz5?2$O4WNY2k$aWLJC$Om@|HW2411)GVg{MxE9 z3lOXylBXTeB%YPpzJd1Qs;mHJaTG!LqE5T2vi9=GD+mabhvI!6D*rEYK$M&`;^e>D zBff#{1zN56RfSghHb4E}#I^O%|2!0T21@_-0KcOZ|7X#A7UdEjg6@|AfBip~jhBE& zPvbwRWP2#l5UrKo6?RR?AHgl*TvNj+D3BA`ymiYk>mPn6LlKVkMh?8ME^ftd((pL- zEbO{S)$&1O`4i0Ms;N_VzT*Fkp?#$TyBkJ_fnaKrC z<;u7T7CG@$x&JQ?`PeE&5(dWJZIM`gRsDTcw#Hcq3H z7_FCKZSKl(8Hw;J=0EbZ23xRW`JiG!%Hu@Y;tXPym`^mR2Ud{cRh=j*%7tMeKc`jV3KNq? z@8H?g+rP!u*s164N8i6HPJ{<$h%mxLnMbcrE$7c?%pp~#t!IvYvHGra0T5)$Y&vSC zVw>aY4F~6+^OtgUTSwk$XEA9-zrs+zI4scXA5V*?5gUaq@L;ejfE~x?*SE|#65$b( z)$1cT194e293>W$hHx>097_qEEu#L5<04OYqrqm8-Vh)7=BVI=+@#y8&cQ_f-HhkD zFW0#^=Kx;a5`Gzz*mN_j8HiWQ*fCLEz2#a@nN{Q917W*|e zJY;>)AV6pqEt;KnNApetU~dDgt_krLCi~B}pF>ezFet9Y0}=omiSjbWwJZcp;HRp% z<;>^f+Vw_?VAI17FWDPfRaIl}YM^Zi@Ocj}Rs$qDy5e5=TO{2`%soUWET&w`#`p-=*Xkg{le zR(bumgj;>*y#iy%(p1mLjTcN&D01I+dEKu?k%d@>IjUlnUTGvha*LDcSyxqz)BaPD zj!`Go4%Gw;$V+)&6XBhfN0aI(d)SnVuAIzv_!{o+I&K&=@Enqx>4bFvj3oGZ|2d1% zk;B_l##bhHge=3B6XD8vEC*-?puc;x15dw&$8MlqfeEfwR7K~^0e*PB6F_1;=P%3X zKtU7|?80)q3$xj{`80gx_Er#qx)&mQwi4YEoz-b;U*oRRp*^V~=d}QTsy)O)KtuGKXg4;=9iA#2yAh73|-q}<)$Y)yst+tyt;lc%+u8(rE;|}SV+CS z#)^8i_|Enj;Mb(%W6mq}xvJ+LF*dzRVjomt@)N#T(RcslK-u;DlwYq?I=?i`bgi`q zP*xy1aW-;@V*Tt|kUhR-W&F6mNM&H9V}FTm;W32p2^$vQtB&sdeH#|DgfyWoh9LO? zx<1%llCkMx0|5i4E}BTjkdQ5Rh3b+v8nB*$7L)ME^`+{lFAZ%s;pS(80fpDLq7_#d ziT7y|V~7JRkn0sbb7DdpJm?>=-y!cT90;$$Xc6bI4Bd3PxBifgYELo^Ve1GM?pEWRqi@Y#5pax2=i&IY}DEUGuzm{6oQu zNbERP8qR&gYHAw((2apHZvvOO;lh^B#KjFEmQfXM*f|(eI#hQ^5Z9Ir&&Q_ixuEHE zHoH3akjf2$MV80c$m~ZVEYqSJmkdlCn?9n6!l)5PvcFsgr^FH?lac2C4`=Tk)YRH` z4~JetQE5uGf~cUV5ITs8jUpmNLQzpcLI}NvE}{nk6$K?ADoQmJDWL`e3Q84$1PQQ1 zFQEi!fe8F=Jm46ON#>R`Qe)m$2S-97PTn1dIETPxP{_qlJeD=-=tL1U5( zgPNYJ9s4BM&2Os6XlTUz`rZjG)3bj>n#kY7boILbua53_m-icUdEE0mB?2^cY}?j9 z8YyP~ul#xcU78#R;P0h2&2{{5jg9B8xRtB_=HmI*F=kHp!s)WLyS~ks;nyh?L2Ts! 
zg>B%f9pHjIPj6VJlH+HC&}5j4OB&@vo*+y@-^-Pmp`58G0YjB?dnV1JpG|%J=URIsNOtU`19fF8G#m;l0iU8mgJL66eh{(Uy2#X@7dFn0 z+Mpk`24eSVO@U0i{CS8}NS5U!Y|d45#TRbQ^*?MYxb0aI3Y&6vi|;t7uW%nDG$Nie zeFl(UYh)>#4oW(XKB)>CTc9}5%<%r!3jLWW_w2iNosG%OxG!zmCP;kpr5V6AB;+A% zQO7mjs8}CcjXabq%#WX;Bwlp>20vJEzGB)kyxNNJLyK4Cp8eVSHt4HCNU%r9f{%nO zO?lZWDF|ddt!eTv!dgY}`g&bRAc+`=Ra>!e_RyACx*RihI_8tdtOs-9)>6?oiL>-C z0R7k0yhOr8-@}x{q-7PNip#bq!KuYZfzm2iEIDT^5L1HG~<{nsJue$}eUb zibXT+C$4s~1?aV0>3W}2Xy`&z zJ9sjZh|H*2(88`!)Y^3yjJB)+OaumOnXUIRiasQSRE-(Kn4dNV_Nv2xS`PjOLD97x zuTJs0v|jRTVNIS?A-xEQ1hNdJ<&~S+HTBI)EcLA;_RF|A0<%d<>+4&o$58v0UJ=kC zYyCjNhk(|BW@Uf{?qexwvt^7_v)WcF$NFZ9mfa{x!&FwSW*e8=6r)c)W(!)zTS@w| zx=Mvh@V&aAbK^Jw#r99#u_#c&#G;;ZPrMn`%oVL$LHj zR+ql?dQFhNl?DB^8BLu{lGr`c%h=esU5l8L^q~ZXw!eO=Z!Db|{_&6KR_h+)TsUDy zIX|@BL;C^)e+(E(^*oFw^w4X-2x6YvbjF>vnYT>CpHwb7PElpia0Y1EX+yTMjgN5j zCS2h)67S6{-Mz<}|BBZQ!a-)K=ZEz>c)=AQ2#G_s2oOsWx=$9DrZr^54EIym%?I*i zRTWB}_>fMbq@=1e2B{@n?VKV#kI-m_uS~5oPjassJJ##XG4a4$m+_M-I|@gWnThba z+xIXZzj|~P!jI0qWR7v%Nig!t9bp)I>pW`iG5_j9;k#sUJWe&!U&06*qh0P^hd@R2 z=1rQ9-4_}B<5#!I)2=SUaRuMQ{P4>1Z8)doz7Up0OqaA>K*h+(pPYA|JMHbeK85tV zc{c3}8*$I?hw00o&=g2~m-eS!POS)?+$(P6c6!^jHVz_Jn1q=!Hi1;eoWzuCHuzjR zdbryDYZu|g1yp7jbWot~l=~h5g|qQ$VdiiM_j$gnE`VZy4OXef;I0#rC~T=h{I)Th zOJChI-P2_ih-qi}L4wfA^KzsZxCA{ZM%i35Uk6lW&HYgr6&Wumg`zygTJvfRAwCL+ zOk{|`Bbfs~mYoJRU$wUPFQlVG=OaVC7QP(~DL@ARh;?%{)U$uNX0vhAd7-jq;bo}< zt5-xuy)!eI*ikoXMOAx&e>qEDjB#N0jS1H*MwL2j{7kBNh%wb&jrIMGqyk0gLc7m% zlfceFDGgGr#!_q0HQ7NJ)vKnHgnl| z=JGbTN5V&%Rx96|vt+Kc3BS{9U?D~$i##KZC+CisDgh~2$Vhw5?S@$XAt^4@{wROK0wt z4&9*!E?uVN`1eF$VoKMp5=uu4N;01pfhj`T8kwW)J(rp&*1V)pVqS+F7WSO*gNFPb zcu|D71ap&SsHOZGLsFZ=FhqVD6_jRq50-gnlRrEhSWdDtub%BSC=0Y|I*>KD(tqki zueJbBUKvhTG)lOSoq^BON$BNm5#QNzMZpN!Id7w6D@>*R=L7T=Zd;y5mL!qPyw(HI9`VEb z9&jw&oY(99l^Z(WP7PTW4hdc8N-!-&U+$p`_Vktp7+Xx_uwnRL>#TQ~%eb?el4^=q zr>uy5fZ4Q_uX=Ttx)Qjtow*$967EHXxIC-SV}9u1 zqM9l;0eo{tjocXGinDT{mgvQ1z6cl;2jrUOwiGbDmToWf(st1EVJ;_yP5}Mrnv`DI z7IUe5^=0MeMQV-Drm0)VWco_+Iui)=0+_>csGuKVgzmTKA~*NyJZk9+C*e?=MN0`Z zWz@Hl8%R}V2GMeND^Ft()KUiiha0sd(+5_qnv`Dw06dAXeU}YkS%N|8%*4>gFfOQJ zCS{Onp~k%95VEpFQVZfFL}E~Wm5#*Y$5MlXdXGOMxz+Gz0g3=8<84Ty8AIvO=eL7W z^ZrO=SnQIuj12L@tcWaj7(Z?wRT(<-ep{cLOeuGFV&xRCTS?hOE~&(fCdCo{#QBjv z%8^8{h9P=*hJAra5T1UHAlzk_J7p%4WS6b$Le{E|h4=Y0KJfdtx`^wO_kJXnbnIyv z+H)M0)<%CnXq{T_I$=3Fcd(NT9_gFUz0x{a`{$ADvX1=<=0=$2D~kLJjvD+f9wyi# z*-FmERb`0)2f>Z-+EjIlo7Ia{fzlrex@?>hT#)4r^~`$2Ie1%F=~ZUwjxaSfY=Utj z2hDxTFiZWfWmw7wopubL6)?}^Ar&*s`-rDo6p5nTxF1liGMl`>CD~_ zD#%9{yuQ;DZz6(JPN;r%HDSaCRcnCuvd$yyV$*u|K8=;KkeW2^Sag++`{Gh<~|MCvdBA9FrJyh)eQz<0me_Y^Bbq0HNMxAP{clk*MPH( z#14)3ZZyU#BmTk^7W7z|xOVIs0nh|uG2UNt!>#!BCts8Y2x8=`RnZ04fd9nZ6Rk6J zhoHCUnEvI>^;n<~;KCTE1*}a95p{CGnq=?kJCO((z}i;g2}zf){`?lf1=&81g+o{x z+J)(Re!AghZ__DE$_hi8Mzg)9r85KcPqt9 zGRPZqvQ=UdEPG$OBwtmgUOaeere&xnXD3U5|~m_=RIi(lT{~8=qgq z>iCm_UuOA~mhkmYmk_To4as2`3hBFZTgm!!4?ECE%QWXzD#r2`lpf&zQnn|R-JNr% zln+|n49(H6N_F-=PRU)e{;N@OxC>cGz+}-rSYex##KxsZ5ec=O8 z;w%`SmSQ88$K5W2U|3|?yj18^-w3Myzt#PJHZKiAf6~BqJ1X~(ekTHxqoB%v3jX-d z^~HZYbL+Rp<$pwZg2pd5z#$3k)V%+1bTCyE8^j<1MKe>9c_+MB#~O*K?(MF{*fJ9N zh*4~PgESl;?Hx@?7~9N6i)iFyii65)SbG~bcdX|)d(HDO%n@crn)hYLH+)P>*|)?( zLOuH8j9l(lhOv+Ez*R-i;qmv`(&9MXIWM0$iW4U8QqXvlfIINl?VNAwZ& zgsvw%qaW!rT`(xK4yChr4i`hV|`2^XSAG(g}4@J;sCb!6Ru9>d^-6R{V zKW%W6xzhY-08}8|gGm06bayElG6dAEn7)~Qg$zanv_>rIeL&_Y5@#Wj2FzEYB{4AJ@ZB_2r9$b7Ch zBs5+-=lT6^Nv3Z{>1m7-wsIn;o={O-*;8^1`TWv33C^nvH$Liour5bkN8H5iiHK=* z@zTev*?h^8vVZ>d$ZurPr zmj(u28wlzvjO>|rZWfC~W18Xkq~40jU%=mCpW1eRsX79qclsDsDsNa;w%)~+S(?I! 
z4Wf~RXKy6)wUFs{`8Ic}P%#ks30|k)KE6oPKbJi=4AX|ry5J_j%!n+R(r3iRLYlw0 zn291pi|OD4ed3B(_$&QKVLej-><>+$4~3~HexT{&rOWnl5u@CZSx>WX0g=Vz(vPI)efBJRUgQ0_ z@#zY@+BY~!M_F{l$bI+-lYE%XT{kQ8b4g>+tH2xpOsQOQ(3_#$`?m1RrmC$qyPmMQ zu*rmaH)fr5t3er1^S$(xQ-1Zb3zb{5e1Iy&e3wzBy3^=J@z_UxqG|!2gvrtL^|ENh z@K^unp2rD74cr(zhmarlFufyytUeqL3W1HCj0P@;@dx*XG;X8fRt9(0)F%&Nw=)DM zW^6Yz-fzmzaD*->s0nXQwPTcZR}bSxTTM%pk|b~ywp(*k_~UNRG=JuotV~2&z zHBMp=>r5yqVAGHFKB*&&7IfVzR-_@LJFvXKDdkac-UAoPxi>|Tb%d(_|1olL9IUtt z!+5R#Lv~4(4ngoR@7LUwssA@#WCADZFkW-F_}03tyqyTc*nSzCeqJfPNUkIJlr3q> z)OxHy!NEsr=g`K;%| zBl6oDRZI;1cPlUd9DR4dR9)a#hU$*WmkLJ%3T(fU+-&R=&MfP>C0wi}4C?LXfW^T_ z!F(!i4u}Het{2hz4;AZhG)^k$|LYiIkAL19pACpH9(%sndI~XOi>%cYriO;f3g5gW zW>E-oQ-MoSq`YB1ySN}N&ppCIJBMikLYmE7r>ChVcKL9}&#*q9#*K56I8gd|jZ zW4i1J(Lte;D-Su}MqQV*P<-H}l-wB6+o_>ImT=5X3s@-gu9hFk5$%u_!I~$%x2I=~ zKdXL_ngDXQ!|G4DQZ(%cp4$ygr5#uzqBlQuQKUHE*IGq(faF|ELb>%UU!^#u1ys;! zbdfs3*m2+YjMiEVCl*yJ$x{{OPTkXi=RwQcvfdS+Sm_x0F-(6nKVIq+byW&@!+-PZ|Gas4^1OWhSS=@g0`(mVaJapkg? z@Gcuya%8cL{|V~#ijS*MaOGUdi%;p_g?yio^@L~rly&;dLC>?oR)AA=W^Pb`PWj8;RB)1L^po)w8>UxuRn-9Vr$mT^Zq z==7G@W01M>*Aa3dUf7!NTI|?dP)F*x^sA%xGc9+t3etBltW*sa>Im8(jgN&p`mpQf z3iCng3a6tyuAG#|o`6Z4k6#+uc*cm;);MH1@eJ?{+}BD0b?1PiCTNWQu0XbIcVGY& z@oa(1WY|ozfxP_@MqKw;zI*5U=ZZeAN)-_$Tm7)0>J+})faqaM!-+;eQrY#@XPtQA zR$&5kbDJG6(c{J*pR`M%ZC&09Id3kw71E49T1znm-?RyKw_h_3j!9iUuOsI*?RW&1 z6r`6jt{$bN-kx>`w{@lfdVx+0oh=t4s%BkCt(lmkwr*NYWWQX@D6obPyqd`@DM_IA zy41$HaDse0kBg-oWCEo%v!nnuswjK6R6Q+d=69C+3%Tp=~5vsxQ&bHYr`-O zgyKAq4MaOfy%3>*P?IPrHi&`1+t!?`+Rb z(R2g1Po+LH%BW4wpNAf{NfJS&7>0S77Yd}=9HZJafW*GHP{ba7DF1338xa_yO*HkI zlJ4qCD@0**wD-Yq^YOiBw-NC$$WZX_gZFpoDGH09Q{yN-%kl>&D5%pDOU4z!apO9EXhk+JvGq`~=eq1nc+lLB zuIA>}3Tt%UlnZ64A{qmBndeTG(z&J4M?1ZF`fRJOFOFr{Oj>MfGjw}%=+~XS>T=kF zc6Q1Y;#b=RiDHey5LqoSSNcV3I79}hjM^w|8TgMOvN;0V z#;hfD?_u`Tj$Z4u({yooWs937>H=OhR!V5cp#C`R+jgNaFYmKH0e6Q8@8yW)*KOiE zm7`y{K~Y?gc(Yv+w+n73@h-)K>IVcfc23b4v)y^UB{_Ni-|#~~QSJjsdeexcLWQPs z8uBiJZT3CT`f1L4Bt|@C4W_h2g})F5#w`QIuiL#MMn8xW1CnK`M>*UrygY9Zw6DXv zEg)W|Ji9kXk6|rA4D%M*tEWHt5VId!WLh_ED(1dh4eTd9hU6`sr{V1XMx9pw=jj0D z$mwnO+9vA>er^NUh4_jqa3{NhBR)@*OO7gH6HJhosR%K`1K;}};>tD_gbW&w zak+gJyya;eIMjD;&AyfIji4ap%{Us9 zlb4M#VU{+!w05bPphw2eVDhWUZxAlYICz1FLW$Xf*koD*2{ol{F^C8XU0W9Qmm22S ze=}1i+TP0`!LeM#!Rva8V+s?T$V=s>8cCzl3M(9ZS28#oLwF7HSPJ8-cP~1|-)Kwt z34_|kF*d-&P_}Arpf#|>br03Tjxf_f&69b17iN5|1dme(k z^I~H|H|erOzP3sCCP8TN!revAGHWJz*5Rm^0nYC&((YW+eGE}`pl@k>qd8U(`c!ZB zXsKVvW>{eTTL(_%`lo0}=-Z2M`TF3p8#czzB70j(2DOf!=kJg?_CJRL@Npima{sq*^krlDagZEA z1^F+hJy1{1|NB+nJB*n?*4qjcYZDdXsA6j+7wA<>_$W!PY7yguZu!UYK>PJuWEC|P zn3n8Bu06Pl_A37wUK*}=55w1;Io6lgLUBHs#1HLMkO!;Bb8qaUNj;<(N{N}o3&_nX z9NU``hT#gQJW{S_v<`&$yk`H;1cMfT52S#Fuc9&91qGa^Zn0rmLhxK;KIkA!J{+YH z*uqef&;*>Vg*|cK3XLC*sncRBQWaOCFdl(%p9zj{2`{!zuy^~DZUbzafxpnB=6NhA z>K&`@n9m(v-Z(($;2B!XnZeQ30ptj1rN!uDRDz^8XqrzH$^1XFt#aVcu3{#|RiOanhiJ>o*E)l!O zTp*vKG51PzwFAXUZ2~_d^eB57)uXI6Q7XBZQ6%%I=m~C^E^S*Ijxpq6-YCx(K0Sfj z^tLtZ;sZ1diC?=nfu}P6j{I`6b}|DGGI&O~B8X1U>kvGya32|GJ66s3S@iP{1FHQ8 z(z|*Y+YC}}=uF_1yi%8*F^pv*`Q}~kB{~|fTqNK83d1z-XBrX{CE|`du60T?gvN0FpCELDWF%ljNl|?+{Zk1gkff-cAb^Ju^)FTs9+Vj`GBai*PD|vH9ug01_EwA4zf*vG+{}6#1fjZDhyKZ z1SJ}<5iz?Sn^Rq#8_kL7TJ=7HP1u`i-r$zW3*9WRbk4~UCBCoKXj@M}u_H~U#o_CK zyota&uIltgu0VkCaMQ~osh=|!j5L5`YG4G+{Z1qd>3m#UKxz*I1_BBer#ouMfXXXR zeB!V1Mv;Nv2`6&hkW61*)?oautNuZVr+WNFQ{5uN|M|R*ivcgQZP)&r#_%|R6T~32 zP|dl2hAfV)I>7(muWAJ_E$_OctBF%K801|%S^%N%VYMG?{sP6@omas_SA+y82)$WClRTXZD%$jdTTZQhe z=fG$t9!|KJa**qoBp1XVYl7)vOVZimvm7 z?mzQeZS%&C;DS-Y{@;rSEEYDqek7 zhiKcoq$1H4S9lCp(0RxiXd)J57Bo z96Fz*2tu}72DvOcx+&aMg$B@W(ZDU8nQuBxiYMEwM`>|TSS=!5S!;B4pHFa#m(cpJ 
z2a%6FPf1cx8|C_h7hZ3(X*kZOtf|wkg<)|qw3{k6O{3-n;TgJUt(0fQpKu_FH3!*R zjgD1V0?W+Y+?h=>D-oAl;f%Br_dTY|u=lymUIt{_-3OxjWq%wwTI>RI6FVfb>oR@J z(tfd&@u7McR6`tD0mp1r44dtiR9l4l#<($~1s(cIU9hly_tE}59>wQB2o0+#DCd?m z3!kDoGh%&QcT5}RcO=#$R$z&RgGi?TheHqUSn=F80be*KcLcb3`^B!f*bBZj~cxW?-qb6t>f} ziyK33OE|LdQQklOiL@N?`7Xfp{`t85To<@ zJJ9Cxqk%6R2~6h;F5bu;)4l9WkLC7JEkjWeC;Q%GI$s|xY3F{T_$;Fym;vd4nIUL3 z$0IPhi;3(%-fdA_s$$t7PzHZnc%&KM%4h1B$=zs_{GNtv3aS*Xfe}B0dD;F0XK$$K z!$}C`2S9_@w!eIv;ic5NL=Nb^(Q7TTBIe8FhVdn^7ByPd|N4Tn9Y)qJ9}#bFHxpD{ zPPCb$8inWf=22CjjpkXDUn3+G=g8Fk*!1iFfVhfc_LkgDsN2*A%{g$29KM)`q8hFk1uIo>}6OpEhX!%;&u?9yf`96+@;S1 zfP+ZwB)vQ-fQHk#(bY5eb}rVH5r8 z$b+$am>Z8&?_q)-S>A^;#f2eT1(!%fSM3Y?wOdEUh~*|i3-yFU64-^lb78Qk@r^N?5t&{+@vc`PH_akMiK7Np(VyHL_o@e_2_AFl)qf8TuprrGtrbHoTwNyA zbeERTtgS}CxzwOeD874H@Y@v{os7ya*^R#&Nd}Td;6>yb1l)Yo!Q8tfm;X-gko!fAJ<7v>Y70XccA z;QKr-PUdGF;W$4rd+Dhy)6lj35F2#DS1MEQNsKI~Pvri_hnIB4A^v!0VKi|W!(27^@)aYF}X0zza4#@DO zA}^V0M`C-(hAavHq2mHyENyyP?mM08GqfcO(!dS}J%4&$&B@HdC%j6Mjz1c=3qxm| zW3|lUoEberypVe1u(nwdWOPlzBM+U+SpI-;UC9)8Z%Sp}QQLSI6|8smmG)>h7i~Y) zs=LD|gl_t3F+ry?>Vw6guFAf`zKxN4?C5>9Kns^O@~a-0eb{x-5KiDw?!CN&sG^Lr zPYQT?$K<(G$j`8O;gHj%^kkyrSjAZ0I+gez%?PRl>AF zwTU6=yrX zPt-juKp;aS?!9$24B zibUHI@Z8m#pCR$EE?_7WmTmtKEt@)H?;s>!%Q#FD-2Py+07tB`Nv(nF_o?v$U_ z-pQf)rI~B+c~JEUzabo|^`}tRq>oIEQ&79+^l7qny4@mg98l4;X__JTRsQ7hc!K!zpX((cEy19S|Ig?mu)^e#o&J_A z^%2kai;O`^Cc*0oPe1VFk7fyxTt&Ns)Qu}zKv08YL%Fn>V{{uw+iGYKW;Hefk)@yNWP=kGz-hmfD{v>~` zMTaZp5*tR5q2#BrBDMmif;wHnw~w-uB{;^dpIuT)ER|pzxArzxGIAq=t~Ki(0~h$n zEQa|W%z%5<#Qovb<5Q3ALQRvV>sN@b4oU3iAZ-q?jt;Tp{|0tXI3}wxu@1vutJvj3 zC*xaDf;*_CY$$$`>I-~0+E~o9DEAu@4%r_AqQ_m|kjhMQUjuiI8O$VN$Q$OAj$lD_DT6LYD}EFp4gp3V?8S(OMC38Yc4|3q9LJDBLA;lVOlOL_ z3NS=jwS6_D{n$%t9)X_+b7>Nf+%y|9x?j6`D(JR1@Dbax@vb=7YuT$==8L~ZIB$Q> z-R7v9Bm+Np)*+ZgWOuPlYKdCQeKv&}y-8Wyu0&W#Ejq%yLsF$k-ca;j_<%9x#&9q# zzjjEKLpBc}?}D+p=iR6434xa>>o%<%#6sCh0j4+~sfzw&-yjZy{^+G8Lwv5yB;%W3 z@nOC4I@FrNF_&bqG1L1Qt!#||Y>EMre?U?GwS!su0uHLpa2}pcDp(Q7fmXAj!&0P( z&-XCIlY7U`7}+jb@*V0QNRc#TJ1%aYe*KysbeVGP`|J21nTX+pKE%n3UPd;DFYQQw zekS;J%gPDG=07$ca&ulxHxU9$uzHA zSD{vH5D=u!tt*5&rufgiu|0(KO8K((M0$LGlSjD)G^pps64EWHwHQ9r%GE1=lXJYU z-E({Y|F9&w{?=N6)LrhC$3Qn-*xD~s_&E8U?ZHHN-vx1xOW%$Mt3>9rU5or$mboviHU zjt5=X*_gUR)4=XJ`VA%Qu7qKd-7Z*4eTr!l0>0)q7Aw1ZH998@i^)2%xI_C+sfs-|C($%0ieK2%%bl&WrtZQ|JGAVnM?` zUdObJVVVP+6e#onf#RMh(L=&3bN2_}^MrcxfwT6#y{`AB8Rdxx(L)wUL1Jyfq$;lm z@-s7z_N6Y|w7t~}C^Gj3Y~ibpgVZi2sOvHuoFZdIyEJ{68M0cM;t$xAMI$=pyw=*0 zH}b|pEWb2c$=u6Mi|>t&t(%^rUdWL{di86%pdRCLg!)};#gV|VR#%i1BKLmav<6Ly z>kZ%`?(nV;Ki0?zjR_5S0+Lb4C5`c??1fjE4LV@o5D4h4Jm*XWfzd71wtd}z01xey z++1WC2` z8d4HFY;vHPYeGGt&C{eW`7jnP!{byqDLsgc`-%Yoq4$khFsdurZT-<5A(rtT98c!` zNz4OT@C9a6zN;gsu%)ca2J$k=izzf@T-1VX?NFf7qtsck$lNlPMZ1^`eRB6lV3^mX z#y=CKpH>}Pp$x3&3#7OjvnlYlpd&st;|7@Qx#}ytUIL66>u=O812T=IMW0&ks>1P~ zh2v%AIC{#!*9#ztZKmWDp(88Hn7KR&*$e$+a5E#pIc z8ymFfSQtcPpby-~v@NwQ7i{;mTl5C1Ko@PMJk0&?X!75G|NiKAEB@(P^Y{1gUs$AP z)z)aZdHo+e!rvk6|K^Z)MqpO-z73s_%HD-n$M^fxBG&JvmmdKTss-gZU(M}0kC%f| zQm-`fPjKgh5?`_RmFAm2Dz)Wadu+FPSuHjkc-T;w3);yN+k2Ai6I8m3BkXe>0^;qT zq~Uyrc0?^Lnh|EnZvsh3e^+;L7(%_=T%qg+bF2}w_P`whhzlw7ktCQ~WKOU|v-{ai z=4d@)ffGIXq421a1aNEmUjmL(KX0|gSZ}*El>%JS)d$I$GDJ3QNJbc{g=wdDE|i zEk%T=<6;*=IY&q`s~+Ic=ob18!1Btmk20FfA9mGQWcJJBg#aWb4A+B!c-0?zrX$_X`i~WEy`Xd9+J_ z!Okbd_CRFkW1}?A@f2nQ)oCZOTcjRgA@jy!*wh{e!m0@93JaeX-NWQ528%9hdNM7W zGV=@bFa}H@SprPCSK>)vBh$B++lUtXY6*V;7nhU23{%5Q_@FM)Qt+{o-+|zq&h{Vr zi*fj7Xd#iC_6EZb8>>ESU9o;ATdC<%L=SvKpLbEz2q)%6 zbH7lb*-%kMR#qFyNx_c{J^Q^iTs%RUy1-FZ@fnkY;fSMMA;_$yJ4*u8dDU8>lF zEOUY^O6{an09)^#*0S~_QgT@AiMwv9Wsib2B>Y7H^sj4u%YZ3f>mm&pqte8sM%aHL 
zZ-1^b``Z^`ge08>gN4B^oH=!M8z=Z-q#|=ZYR)o z4F@*f!>EOTMt1LZ;^gJl;2rXx1E=YPLEK8|JyY=j$H*0*)T?VISvy(Mwtj@Uz5YKh zq$8%}-p_`*2Z9|99Yze})GtpM#*ufI^FXZ;$3~{0DNBGY@ z)&(Bwo=5(Ozm_EjhdfW^f<0VU7^(-b!_)gH>)R@nS%rtnkCyc)Etl`EuF+VLSkZlc z{;PQNI)WqTt-^LW!0`x)zf(uB6H`M4;+Jw*_QG8iIr;e|heqzG-W%~| zkbP$&F|V2f2Bl)53UV+?aP_JxTcbApi5<#O>(^KoAN5`2sr?hJL>bL9p4UsbIW(Ku zEToy13p5-Lb;f3wjQzQtGpilARpvJmu1Me@qEDBNIJqLl@|l{Xn+fJpN(Fw$_{T3&PKhN`Z>!eHL_4qpEZ(I& zbiw14TL{UxyOeR5>&E)+7fj0o>}oD)pjhaGGA(KZa{1qrjswErZ| z0Xg#mcI@vNi01&@_pi~&*Hl0c%H#U1kwJ{QY$~LW;J$(zssPSAKH9prPuNL9ODEiA zPB#VPJku`8rS&yhI@cLbd7TLit%!?_QOf?@^yY%B=pPAn8o~30n88rzJx4r~J0uLE zX*kTh&XVNIpRx(ZC!d`-5R-iW0GkJKj!s#?V}f&)Xi{%qd9|XBsYX5Ic$=MV+Qllp z^%2?hPK~8w$lorcvh*`n`q7( zLq~at7f89mDS6HBi1O~QG969)v@*UrKOL9w*@8GGL*vQygu}AfO7c|l$5OHji6GlJ z=?`oZ$Zm@nbHF}$|FcfzqxJ4|s53#V0YZsXC&#{Z<9 zSiV;vTZ!MrK!bBDT{ilGi>!GJxc%p?e>mIOXB!{CtTI$?dWIRRvL0I(qRO%M87vq0 z1d{zm*)8*REO2CK?{(!MW?K%bQtkks6|)r^hDE_iEV7XNA|_0V4`PZ8ccHuIPD>Is zZh@cl&5PEhIeI*sQc6_m6?|JUndV++cMFxuQ^X%TPvQ!aAOZUaYJS99~$k}mNL^3 za5C8QauPOPi|VXLphZ3SvGC&MM`+*SGJx&C03{3%Ug?lGJ|XwJU>z{@Dkog~I*1F2 z816N!oc(CaHz+0YQLxn9zeObwd)uJo2m=p{cP;|L`LvV!6|(qWo={uKWBhmW;05lo z<1pw-c<)smAEug>dm>A{?AWF6DsBoqA?M>7c`Vg;N5x#dxjmNNgC(4eIIICP`D0`; zA1I-TJv$jQziu84BTFnt1K}0QTOcq&8}q)$`1=dW?)9J0uQ3FbxUohL!X5g_B5PO?Q2o$1;AlpZ?fx4{a z;nV(aC~PKPHM%3r08gm}f34;5of{cGWl*{*W5y-8r15$@h*GYs>cic<%#7#vT&*^O z3ki^4bq0U@rYg5_sHuQG?Wn@359aurA>`YQ(Pvr#D2Cqdx;;;`97557D zb#FeuYJ~kj1eN^pX2LmTTE?QRfre`=0V_4bc+Ch}X<}0RRy4(DWn>G(x{En6^#` z<9|-bR)qjER(64v5JO%gPV|B5Sn&49iwC_8Wvr$*G#*EB3|`4N_uj}}nxn=n<_#|Y zQY|};IvLH*F3mK&L9iBW3C=TXFSnjvp237te^~Eys+sMfbQ^*75OlIqEn+`)fy{;V zewS{~3jT6d^YctkA#$R6_Qm67MbH^J+69)|@?QK|HgvPwdg9_6RVjiQDvMm-#Ed zmhym4^q-g3)4zyz5nNj1tSw>^7#Iotlb2k!c8#STWMxuL3=VBbf}N+V?I4>W&~!5Y~u+$+`Ql0 zN!DTH;r54J{Nf+e+3chPFGXRV1~(1j#y>TxXU|hYHHPjvho;`xd9I$|`MGSm$!}*GO9+enGKllt@qgu2nXLkKy~6go(!KrappKdD0(_|K_p?WN;+g}H8nw3sY}Mq! 
za`b@4VBPz#j;45?0w2X&0}?ElqziJI!(sJ&uJ07_HdK^wWiTBHOtej&IFME#dj ztHwH+=CA+YEPBWO_Nu=q$88g_l9XPSm|tZdBQ{X_qFlP7bJx;F3zv6Sh&pe)ra0xc zm##u4Cu)4{7bR_cSHyERf&0LxB5!* z#5AL|&Ka;DGDCwsR3=~*<=1B`K~vJw4KxC#~tHdlY3A+rGiC+T^KB{x_oehYcG z3{}t`>~qiRDo>6;OnFB#>ZTM2@p+|ZW~tbj*kQ4H!duK6Ro=Xjr zRcPg^HE`tQ?p(Z|FzkB9ax{mtv%BsBMiiT?&A@+08R$CEgWsn~)^gtdIOz)52LW=Q zus6d6K7BReeXkJl(bW7wB<8g3ofIQM+Afvn#sHE#1Gi0#>GC8In3EnUrkcR(^f$$c z6p^~Hx}_2Z*i5t_jjG*26@@*DA*+i-l<|IvI`WS7lKM$&a^p%NZjnfXsY)0r~Hn5oq!2h zz}eZsOrkwRtOC2S?&S{+iQc+VvJUgP_DR|y_!9FKm8bGqb6HV>vu_1eRfG(uVj#8 zY@qXuj{sK6k{`;M36(NVHj*&(Nhujz4=#}`0aTJtf$k#2ACads!Z9;v5UQkBo*@H< zaSt~!`%V<5H{{=1+PK|+VUO2IZL1qwP6~hsS^rN?@9!xkBdB;G@YQb@p&^#|?@|Z( z3Px-l|97ZS;D%=X_97&S+kNT^HLK8(eTVR%ef9MAW@fxO0nZl;7aBy04V)8V8pvKM+5Qg*AyTV8uUiOls zD!L!#B9?0+o*fF7HD-gi*xoGu z0Kl%+p!D)VDj@4RNP61AFI_L>F1JHf2YOoxV{=h{!uR_lK=U|Y3v&O(|3}-KKtuhu z|Knrdmm=Gc>LW{{D8{Z-O14t83@xOxjIoD?WNWci3K^k9mXM0Ej zp)09SRTnWJ=Te(a!wXOQLm3lDD0*P9y)F#C$$u?1We(<-b#*91 zs^^DG>MswZDwbdsd>-v$(V6ixy5*Ym#p2dGKn&{kV`H2X=joT<=P4e#HO|NWGrHySHH5q4$4Z}6WFXX(YD})`bxyk%q~|M;4u1aXQ^5eTe(Lt@;#>-xRLCC9QkTCwcKDI3#?we=eVBf@4*wuS-X-6f^b)=O_v z?6*&24662%s|gxH{)J~G98QwYe8g|L@Q&PfwR&Q4d;iH!hR=y$6>;1jv5sXt4$}#Q z+a0g6T$;!BiclI7CAux#bA4yicYMFAPf@$}gg9MrnVXz7+Xr^(c4(Cc%uE4^vrh#s z71W>4zb6JkBNMf-ukgKP#*I&yU`^MU$Ys>;`ysUzI$8m{4?a8fbf zH4KbEx%#-$+rD)0{(rzwt6iJZ4KN^|nxFRP=DL-N<(dow`AWaBOx@qpT0Qp~A8x!BEuo?tlde z^?liJG+6;hpS~}9JDhQGY5>?T-B8aD}1T({pv5_^}l^>C3>PBjE2s_jk)g_j!C z2^q_i--YL@e80{i+E)s=hD$bq3-sFBSCJ9tKl=2t+M)e8o2KFA zi7!sI*3R-MWxsvyl z!zJG*w?v))Hh-0h)@Hp9l2FJ)%gsnn--XKK_#0-({PXc`;UV_&O~E|Ivo_@@GoBY5 zLlqnit@s;C4HaGQu{L`K7xj>Lh&eaipu}*LN9)(Cgnbb8J93YN;x%Jod5cCl!*6 z<`XVfcsAlfd;f4+6ALaqK+WGtk?TLU1Yhr89H_t0=v6@B@8I&Qv%jF|jqnMu;`X%6 z7OpysM-2UdSAHQXKZkMTHPr+p>hqNQeI81HH-PBlOR5i6=P)B|lsAVV>qj{e0?nsK zgeaa5wFPk^_B)}zohrXhR8__8SYylW4`;SFV)WWEYYS#WjoShZKgJFgeEW9dn*3`} z;Qx1-o2hwFi=N9!aKKM{;kpVFu#iJG!TTOv|AUpKsXt{+Wa2wQ!;$G zIHR2uUxzf*YcFm_vj9~T*U_}kJwLqWRmGzq)#ld7nHR;?_?(!5uiY&Gx$_ntW;PS_ zIVhsWE=jQZIN&><*5CmnV?v{~D$^JPlg=3yHKk4IZncT8T8Qf_BBZfE#n)$$LN6c6 zNPkH;&nw%Hb^Qk873a&)aR}g9Dt)rH>f)M0KWH&0pMmFd|C$=qXi(RQYP7yQ?#M7g zi$vJUFz0W`48d9lF^juCRapx1Q9Apax)(3<5~sTiO=;u|wo1EzV8}oAVPXKFE#B~{ z-xtCiiICx|h0J{~NV1Xapx88YLdT_?Tb$8V1E+G(c^>%ukr7zOwK%WcI|o+#>{8`W z?3}MQ;9MJVNa8|R!8}O*^*pvt61PtsvwC+)LCjH@?H~<5^!RC2vzT9`mUf4w=q&x9 z;j|a&4Nwda?)J>g&sFzHD4D0@;01M#4S?`pBlCZPa^UWo9W>fJ<+?PI&%?Lk^|7?y zPeHshi4`(Y_1JMpl9Oa{h}!b!Uo3w3EQ(HVn2GTdG}FGP+C5{>%RFxxiI$^uJ>MuX^i?R!N4VdHqiEG`hE|^mz{*PH%#P!sLqUcRkSG4&dIJ@ z$F*E%6LW-eA%5K3?$SKIv>AlvNk%9RT)O-$IP?dU6qqFztmh%llC3|-a8Mp)zL62Z zz39dYb#FyBJ}JPDJQXOARCImTWhXedxC}6lV1-(DgekBDifDPI8@kNPrC!LFrSc{S z5q%Q=a7RAT^xaVvOJ&qtA@GVP)y7!**tbF$SB#!> zqUb50<`R!z+9Mo$`FR%Ba%Dn7!E)o*WAU2(NLB;qml*I_JpH6<=V}Yu# z4-<_!6udG#>>JtLO|3Co3w*x(50qiRer3;|lI%l=EcnGk+dZ!qOR%rIENi*ob-Q1X z_i^cZm*mEcqf4DLN?{j04O;dr-8$6`E^N^hTj6|`BS-Bb?-VjFojPooFmmDbLg|tB z+mIpH7ZXA<}|s`kt2(%y_nj@$S{zxL8f5Gw?>+Tb|7<(l$rlYKPdeco&#J|&TlHhq*y9cah5i zFJn5`+w$BCV(0SiX?7XyXp`cCkz`OWu%Q;eRG-vDTr%klaR&f&{==mzmR9eV#m~2k z@s;Pfz7k7ezu-zSOGV$08?XU{Q}cLJ>^P7t0isr@g_)xaF&w-9Eze==(k7kKc8i?j zM+Veu2_J66ES_U`el9)z!c@C-;~ELDmt^1#SXB2r(9k|cjJz}|`Q5j#Y`^u&;M4pA zGT7~>eTkOV}Pftz><@9-;(+ z+JnW?ms03o3aVBD$Pgv64vvjPIP?S;w36r!TZbiJ+9Vn`((vs>o%A24PBUY(3)z^( z%ZmZ0We;^!i3eln!n8b-k}uTso~5A!!9?i18QL`Yas0Z9@LlNUezzb%m-&Fx(SN%Z z6}ZVxr98;XyhNCI+od*~p~8|qJC^s#CIG{9uPhQLl|nURei8k=``8kq|BRjYixq8F z;<}Vc9|r8!rVYRd)MoyB(jmZ@Z`@Hc?+ey z&E8h%`m!Yfw?LrQ zQE9fHg{jezQ4J0D4z++CD~r3EhNt9@iV>3O@p^Nc&unY-dK2FFxRniA625CSB`pKy z8HXgbU5x-1p^y0GjWRR`cfg1F+|$Np 
zZM|Yo+P0R!O)T!6K~Og^`qT@Og>W5e3E?fGIP~duYqE`v?kk^tkG9MNsCe&xgmPq&XzSTK8^cH1fJM^ zH^&d!n3mZxMp%4no!s*KNSKNn4%%qlPnA*h<^cW!D8Ip()1YquC{m63DDa`eZaq^z zEJ%*&P>q^@9X;dOZ^@-Nh~0B#IT!3$P>IozU{0V}VJXQ3HxihG4TWGAZ`fW@AH}PV z8u{A;E3hwV?t*gg6tu;LC9rgTNKXwRijekV{%|1tZ*)$Bpi; z3>D7e15#`hxef`34W~ctEN^Sw+_-ZOg2pevEB_ z%-qaovE0Qre2B|NexQi+mer>fcT@Z-Cy`IBmi%SlEg18<^WIsS_IeYWp`BK+r@tG4 zZF~@S4J)K4oRU^7NF(@25Ep`vuXB!d$?1E_U2W~N=@DPr!?3`7%WQ)~nxAHkN%|gp z;@+Mko5~`;`tHPeg_IvW$r&;ugC^u%_)tV=`dk-_?C)`Zt3TW zcY%bm*TJ)wwO2a%7ApUvs9rtV$+tm2`vdsv|6Q-sejM&UMhmcI@P!Kfm_K}5FQDCx z$lg#Y3(ao$NoMO{rNhEhxnnSy*gW10=-b??G1#W{AE7?iPovY6Uq8O02g;^ z>Lv**s!4XpFC?KgnOz%;qZoO$F2Ldb=t@J|4PV2)kD`%*)}ghLH(8=&f0a(meXYae z7H%*5x-F-+xf{KzlnIGLmiruQZOze*SSR#U+w=oL4{^c%#YW>9?6z=hd3^sWX^DB} z*THZr>u-D8T&4tf{cb6#is4<@Y>6mVu&@#f5zlWrSdAC03Re|pOR~Ksr(ke*MURE@ zjo{%5WRFviV<+2Y>banQxDbj$)&Vu52K(ylT0TliLFJ+g=Pb^|za?S=1gGMSusG)2 zBBCOjIM?TbIUTg%2=C#w#D;a}q8i(^^-pI;oOVu~^B@VK#OK)?tPfH@;sD0s>8{N(`CQg6d+Ryt^XOfolF~Z1SRJT7|1S3 zb!QJ2-2^ZVNXg02mqFrbn{B?sSa-t_mv~7=G@R$hj~F~DqR}+^97}hi&t(Y_om|q7 z*?l~OU_ah*sl zLCYH^+X~F{4<2`(+{4HNPl1-D$9JnypV+T)QPgo;SeJ&*UkOQfPVOn0LXNK7S1@{> z?Ww%*6~z^to}bD`;oT2@R@W$=BoxJD!|#53+n;MWSs6F@Q9=plb@u*I+a2Ox<9R6) zOucDN$iRzBcLd0SLX-&7+ltTNWvmta_8Q%uhG)ylrhnR=bc#!U z#+D%dRFa{>$h3Hbg2sCpm&j)5Ba) zdh@WUxk=>v8qp9R%c?=K)ka zUvu*sVn$j~M*0OV%A;agNn+vLZk9>}E|TOMjNJhT+|JxD)u{A)Tz+aa^zf0dK>Ped zqZN-k^t>@QhzFOuBc=aP0*^)GU`oH}k|*_{XsQ~Oj7ZAPBoRLLQ;}ZIFj)ODg}bix z=aNMbD_?VJctMJtLUQ;9vKon?l-UBp^2{)-R~=SczIZs1AqK?Cs86w#YE*m;)|sZR zZvHiVP!tzO%(pr+U|YbWoeIw6^4XAARE-t{;yOT^E_^?$Ieh4u@#_qjXn@Uy5%{@8WwG^+C4`|yFN-RqSY7;Oz)`E|FgzGzQV z+iicXu3xXnYyOXOl7RsR?2jH93ei`Esf}>zJp9k)C$k1uK1D>DP=Vl42Z_cQDHx8W z5jMHMy+dCuu6MM70P_@xyI5WLnKlWmjE3FV>SY-uPWcOo5TfX@Y~X6cxg`ReByq|$ z%>tA+un+BZ`pePeAC>&v$i)NlT)3w20Sn0YbBi1D10xerb})FlrzWshdcaaz2;yht)jM%TV7FC%1$xu!&3Dy&5sqEd7Ne4BY|Ms< z_1V^YSt$LCJVUWnK)(aHDVW$0?B(mekb135F|ZrUySW#;E_?tuy`|A~ze$azuReSc z&!2VmjlzSjEg*|iZptX{D|0wj&%mr>p~QPYM2s)FpGsEGFuk(eV{Ro=7Tvx*d2n~} zy=OQ*F^&%h9{WOU6W2wqQ0(~&H~Il$wiax*a;_ot9HhKz+K!v4DP2iyjuSGPqdKJjP$;0d-ac2fc3Jf024>YN77`IZ`Rs$X~^> zagVz8zGrvZ^K~QwyZOdwvPR|`4te6fr%TxH7NU&fZ+7JGNvkll=zqT& zz`J&pM)#w48%(x@zXMRzzpof!VtW=QL&Ff~QdKop{?7C&Aw3vdUqM3?3ZQo6zC;B3 zT?6L%V2u&qqdP)SY?U&s#L|H@WhB~vmAmz6V@_lU_H7&+8&MmP>>pU4l_$|SuJ6Xn z*jIzvc3^OqhJDrN)b|6T18iH`$!&~dPrl4Pu&brOF4lueVmW>%%Dx8(Q8&o)($982ZiB~OrDpyHf`eV3M zOB2^IFB1ZE=eRVj@O<0t7LD%5CEf~MI9ptX#V?&$-kOxEHmX)%{j$^H)S}+KyGE`7 zUXKW9f}0O4^NB6Yxi3xI*nUVjUR`a?1^7fE7+ch(@!lD`HC>t30jtWw`Y(T;v z#89aa@Zp!ciDao5j=&aiP?DF@x-P~A$Lzwg-)bTh+x6Uep_x){xKbEXteMO~dC8*K zS2kM1LVcWO&O2Pej6QxE;+FoFCXe#>IPO#=O(jcbVyyx#-mCAdBDG&-GW<{;EDRCWa9_I%PGj!x9_ zH_xj_denTZ1xC4th7FDd#toD(R2<)cbzy_z&t38DjIA;!;8^AfUQjI)*F9OoXf90! 
z2A%D2HR?p=bplB-Prw2ByaaQrEvMBTJjHKGs z_A#2@4_oC5kR`N$@}LzAYWe1~;;8?^_t^OBBLl73FOEFa-+#Y;RE@wbTNMhu@*X`2 zgIV~}e_dp-Bo4Qc42nZ&BLQb9qmrFR7|rbNwiBABO!>-`n$wVv2x%65cc|p!)Jb8` zkl~AbCO*f(X{O@snf_p??jsOH@&qBDeH(CCZ`Gn+n;hfE(UC$FTrhC6ey_G)@fWab zG;HH=KFR(BQWLQ9XBBzwq^W|_r)l-x>=H#s*|m%Y7G<^ACu-ng+2nZ!#arK#)-dbS zGV30*V;yNY;Hl^Oru@d6D-s!qWPyJBTgiOY;}YNpEKaGRgTdOCuU-8Fu6Qf z8IX4$E@0CJ?a~-@zpHO$B(SA^w>!!0+G}t?TtZn}!;D;-g?M}Ez5p(_&-1X55RvE1 z8Y-Hp)ty7fEqNE)gA#AcZa)n+O7DCJlZxqI;$mQ=nVKzqjO_us!Z>A+5GqQ9?uAUC zs9>zAVK7kT0!64~`#Hwsz@*H}{`GC1FOEMg^A&E6?0XzXlP0TX1(3+lALBqaaM>T6 zkbSG0q_siRL&Y-}CH*cwLQOlt5d1@VocOrM!6!F(9(K9X@B;6o31A+%=9cR@rkD;7 z^i7AM>WHtk+#pOMR1g2It@r+|@yAFp#Ei7EyX&u>gLGmg;cXnGXpS+t?%Ov%cs^`W z%Pb9-H};q|m^i{1j{QSaxrpWNs#K3c)AZPvm?vI~z?*=1RlMJVHVLJ)%WG4<23KQ--DyFl7(uA_Ybj$^UQb@quG`CnlOe`JeV%W6 zAuqiJmZlxDWkwiKv!|Ei^JBY;X-|u~x(~>*6W728pjZKH&qx{Xe^M`3f2UjXYb&_} z=;n_R|0>4x_i4%SkIw7Yk=2oM$)ly|R_&?-**`B(e2|T*QPzA9Rchbw?(=+<|JV`n zGGp{P=dLFHX@1-D8+Wcx_PTqOc87*$FOJNfnVW=%f6QiSaqa;dG2?T`to$*LByLJW z*ilZucQK_cT}r_z3{6XR%7jQ7P66dcdY_tgLd*QCgyO}$@`#JI4r9pZKA_3evjdCl z4od4jI0*h3ti9!L-%NCD%wup)q~S#?WTc2DYeZ&W=yJd`S^`;N(vBCv4mBFFK`?k6*($6)bN6^tRd#T9ZuB`_=3E2a?tQoI`?E>d z^6i=B?+N9nU$ij~PEj-L4?|sr^~08%_O< z+bD+1GaEemcZ0c6Tjq6%4VzA<8g%tBf+To2DA#@3>dGe0F&i#k%=1~pLP>s4CEvVQ z_>#a!LJ1z6Ig7%Szb6Lui*dU1)>Yv%AXYI)>l_rY(cHRG-1q=b4>OiZ_DNoB1d?A`#dIjG-p8Vew_iy>ct2O~XTi zFpkdh@huInvoM%U0-FIJ`$n?iuU+@R!QV^Ij|GS4Bpn{hB&)IzqpyCzi9GbAqGZQz z8@-S|gL&(OwW?UlchZW1R%I7YLTGe7CL9UF&do;Y7j5t4bX3ZEe@4};W&0j;|8y#z z6`KP(V1)w*Pb=eE=Oq~O{={pU_2a-2!(Ll0JIa0NHsoWc{8!!m&ofR&&Tjy=-{7lK zaNP91iW1s7L&`Y}dL1EPD8&0d5Ht3gJiP38hFF#!`=#m{5@p9|A-JbT{A}1TIgL2+rKp1Ay%A z#%^}#IRKUq<~X$4G+GS8u&YsXM+u1F|FRarO8bXi^?$C#j2}295Q=U&opzaCAGU$p zQ+idc`_D^PlUZzg(vkGy-U*p7=%XC$8tWM5IRCZ8(&{7;Vj+Wk8_YJt;%Z93EB^B3 z2w-pIh;BlY6*{GvVp+>0C)fB?sx6TQz)8JOckXp`;Paj<ezO1p1 z8@XF}NDKV%^Ul>04gyksAex7AAWG~iU9P(q(_QdDrN|T1jL#*zho2(*7ut~mSxiI= z7a=0c{d)7a4Uqzr0}?hnSO)aK!AZaLBwGt08e-QwrXq{{Q^y(~3z?~P87y5LpqAb6 z0p^)lMV_@h(5DlO{SJz;typgOWtm9~lKN*N< zDbn}otsibCo?>=gYG6e3&~|?9+E}7kCtopOcE|!J3I0}_AX6w_-CFgE#<%m;I&63H zUdDjsFap3)u>n;M;;cyjRzI<%q09$qV+w4GJOcjT)`?U(30Ovh(s0^B@M~P&S(9uF zi^}x*giAO3a8#tvri%kuDclRviLaAW>rby?nA5Z3K5UxXlM)588J~B+eO||8-y!JeP*Dg9?oWMn~QfL~+@Zxg0AIm4-l{XG- z1mmvG1}#k_cMK0D!lh-|rR4SC*E2fX{OCdJ9Rs-N*$XZm*Ia7-$$2}3j-A*aUxjV# zW5E2*E>xd0YDS3j1b>&A7t#m|vVr1eUuDWpR3O^^Q8Ew3?LQmE2fw$&*Z-?g9QwJe zR~I^#Wp&CM^RK#fwT9i#NfC>zFK)^=eV$7cM2v=Fm1TX?^SAB5po@V&^3Ak>f|!6#6t<|Zg+TiC;w_2@0H4>J0z6A0ZH?EEU5HF79YrhHtO;{&Vf5L zd|;_{+P)y^1Xjw^Nkuu+=m`NDfdxn3h4=Mr(352UMOR-Po&2m`6TE`4I++i5HT6Kv zTWKH{2Y0UfN$pJsaAczbqfmt_cu-Fa7i-Fs6t_uIJVXfvd^` zCh%;OuLGLfn8lOB&y~a5#UibbfuKWMA#cI`g9ix}SO%p1HDHI*E?G_3vWDo9*)Yy% zUKnS|STgV9#5T;cP$Cd+TbMR2Ld5qDC?psHCSS&{PTc2gnZ~RT!0}==4E_f~aDez% zqi4ZrPLb2t9852un|IwM93LBkQ4@W*hH#Vti2{>7@>TUDFu_x}zoNPSYy66SVi=uRAixzsE|`x*u){k47sKrv|JUKy z{(DLIR|>KfgCl39CUy2~(0@7qSeV7fC(8PRDzV?e#P!BQNv=w8keZCCb&aIhl=m?( zMdx;|&2C;x)K)uLO<$k7Kt*r$hI!AO!ws`X(}T4r2OL?OIcWIH-#Kk_v*MWQLbw;g zwkp!=bYb2!Uk&yvI2yWA|HO_rC6=F*wxi_{jcz`$)1N-3y7gF2U+Y4?8?!4YZ-TD# z+TkH8_xHgFh4oK63I?1-%|&Aa!B{rY9qi5vRPv={;l^vyB~+D{LcGNB#7!K5!vJ|N zyWeU_OfckRHn7-2y0%pzn%78o9kWZXYh9levpGMSYIRWMT#Mc3>9ID;_rzt@iH6;E zb1p_>7h3&gv_*l6gOBOcf|eY04w%f|s}${3&HyyHWRD@(x}%Y(Il4m=CO4m#bf11m zLpc6sDWncmVNC8`f)RE@#ma|b&`!(mDz;+pwf{ULoFgUBA$c)pz4v4tR%}AO&05A) z9oOWEo*|*WtYn{KG~4DT`=WuQH!?imsJF{0f2rQsC5n@V*4~2KF_)KT4ULNPh(QgR z*=1RfEMSicT=rzeMLG#G<-^@WCm%2--zoA{WuHW>I$Ac_p%H{v9ShYcw&F?R`C0-| z38BJ1Fd{&SU;IiCBWb7ghA}W)ZN9;gm92uQ!@zAknsH>{c{QqaC!x27a5Dm%emAkA zEATO@1k%es0So88Kd%o(Cb_;SEsN|Us>%aG5mY8*8&DVMX 
zi^#gFbauixPvDZ#4!+yq8T-*gnk`MB%Saiy%g)!S7@e!f5sbd$l4-eysG{F7z&SSg zZp*)rksnCOk15Fk#lJo0e{Udvzg~Uz8^uV3g@2d={%9j7$7tk7Q*$k+a-gb-AFLr* z8#B)%v{e0&d_Xify;?Ab4{xkoh#GuTTXmoExDIg6SzMOHOT(?6%EZr<;{UnL!W}T2O|IX zT|WCK=(!4N=>)wHpS27hc-*bQf$Q)v=k~AP6?44bV=Gf-xOkXdb#2ckIMzt`XiFgh ziv5O9ontB^0bXZO&Q+cCtAbUo+~=>fa@8vy-%ryKKXiohgt#9Rny!nOxk}YThJ!ub zVRyTO1nnOHZ4;t$8IxUk-F=X=G4585{`o1Yrz0wx4W9^6J`>vTy`G$myf^HWPe)e? zjnJ~(4{Nb+?;B&}eXJ%3cj*{1Z6YCr+&R1rUpd!DR52p~VMLArUR&C8wIzn#{cBxn zFEh{Q$qp3Uvk1ppN$;;kd0mfqG1NCq8a}@Cie+Hy<4cf!$>wg9cAiAYV5@il@|I3# zGaQ|Pk06TmXJck^__8xdD2GL|Itx*9sa@fX+O2*BQRRUQ7qN6ZUML@c?W+^^RPsZY zJ==J*p2VjwT%7|7n=7Q9?cWN6_rZg>BPTFRLvu-W)+rgRE+o~@PICKdpZolg< zExjAnxS*3VbwR36zA>z!vr%u2G4Xp8`VmMPXt%Uf{VSre97o;3GadrysG^Tc3V1ASy%Rd12<` z8*RuJ?^#anH%rLn{$^7;jVAiEFRoZ?i%{(4i`Fpmyfr%NP$SdNy7Xi>;s)VHJ!yEr zsQdRI9Y$2Yy{!LdrTKgK@PkRX`c?nB2o7NI)L(wWR<-Yx^CS%&`z(Tip^Z&f6CmBS z_$`=)nzBB3VwmSb2Fq?4D)}1~eP5P%1AP%wHDaKKqT)Hq`k$HdwgCbK+;hH^I&BM3 z#>HdwFcA3$7`YyA0>XV!ElMM)yG-CiAZ-s8Ttns*yxhJv=q@O;P(prn8*-O^_aA=R zT#z}9>SSFJWpAwB-?6C#RbG~#5Q(;;nsYlIIJ`z&ZkTk?YOBINSKEq@i|fVn4-`IO zuRqX^e6q3sJR6Xp45at#&u5LE=fyQ$mGqL?vnA)_hC{re4>sXU>#IQRtkmk*;=(Fk z5xz=r0e*Ku#TVY+`0);)*?q3^*n^G(-LQJU7|fb?xyh|;+2c6S0)xR$u7hKAH>Emb zJ1+4z?Fp%Z#Dz6o{xt-|r<_3=o+)9Pml6p6gM8sK{6xrT90yuJa@yy=4J^>1TKJra#TMGE0vLw_ZxV6JUV@ zAgm~TM03&W^-92yfh4K?8r!R{zX+yU`oVD&n>C1YwJ7Jw{4?K) z7>PrzMVUp<+icecr*F6AK6`@HtS?}`|CpMD3fILZLz8Bq%IhXhf*0?3OLOA*EMxuv zVVqnMp#JCVN^ej~6fK^oXwP}(7xm`47tPk3W!h+UD|OIgBefh?sGv%!7sohwH`PlhWOQ2maJO(;$8m z8GfejK;FV>YgPr1xyEX9HEMH_?6TjUTMA;X%rs7}**cQ?YqbMcPxGI-&stTw9*9T0 zl?(G~oVdtk$;HAPX&{6EY>o;t<#1O5O$hF7tNRw1bHtPvA07i7ykQHK><_fQ)d%BN z?MMX`Ei~{rEBEf*=&GasScPLv626;#Gz_~=;LXcP zE~<*;(|aGA=0n9ARAh-x2uLfHM+B)cFzyS-zs2(rdtEZ=p6aA{8R8mxbSNXbP%!Bf zA5hOSBHH`b($i%KwYIW(c_+H&)k@4Hh?3C}*vOsCV(<6HuHy{MJ`&-c1ktXAo%rGM z)cL~WpUWjW4TmxJpVVYyl)87IAw8}~4AA^noJ`+COk<-2NQ@X=obQQFl2Xtw?(;DN zdflQ4f~A^Mr1otSRFfkLY(o*)Z#S??3dcV^u5eGN_qDJn}d~$(1s}LIjq{j~nv${T2mIobO`quk1uz(Ao3{%#`@;6&k?uK8r zkB3nbQ4Q4rR_^K?_9hSd1lvZ>C`PnSOlPCno8ip`>z*KhQg~@AOcQm^ zK^SO2{t=2UyB{A52!?+h>|cg)R6H{K1`T~!hsEyQ-IvdK6O6C}P8ah7)vzu!V%`D` z?GJfx0|f3{0MAZ7*9_2Q$KhAo{GmYW^I+_-9K}Ilb*Tln(Ti-mzS7U+D+|`3XhC}! 
z%~?g7CwM7kv+WRjeaGc#JtOaUozFQiL5BpLfNJp4Sq%KWc>elTX7{gS=l}Zwnhuaf zu-7HRvZwpP*=z2qYa3Md8~div9l>$hAhI%9{B5tW#ndOQzTKLa!1>_rO!w8^gCHS4 zf3Gb6Vnt?m<=W|CJ1#)o!n@Hr}~!BkTY63ddJM$)NbAX8em6{RaQIK z#7BG6PQ_o&+bQx?o7Ah*jmc{YE24?wltY1lXZLQluMiPqm=+p>Ro5F0bBfYvzsHqx zn9L>byrOBY}qs zVaB_Ua(9Vc3&n0lp!k@+atN5Aa#3*H`JRy9!1s9@0{TUHHimp1*8h`ksfRf(&eBR( z20|$gM1xh~l4%6CUW=ikbzhZ}I8IRVX60pScK_%84H@w1+oQbuPJa5DDJ_zwl~tc& zwtr)i5)FCD9{pC$On{QLY_d&PK1B*yB|wd2BbsbCYcatlV}y7H$0|apeq&s35*9gv9^DsRB(rE_X6r?_jAk%IYcpF zcf%5lJOmC2(chUp5A<+ePZhxFC+~(5#xt>@s&af)64`jQhz7V1WK)VfB9xtMZ+vw; zR9cAAf23>C31cecj1Heh-{z!@jbkAvi>)M_0OyJv!AG{A*%1zq51t#=M0FVZ8y zbU;jX{Ng}1ndR|JimMDH_j6P2Kk9RBSCGekxI_tB#yl*V$f?b*$+_BRRh=wOyf+gH zGKU0y3M=CCIU0J)UN6@v?R;0LG3MWQdsr<=@PL5g<|rqnYpIJ^)Z?b8;Pkf7y(@sW zLFYv`Ey1pv+_>*pmy{PryNxGPjAa1VT_1rBsg?(t(NQk9Ax$2aBo1go0dWR%MAfnu zR#+&l`nnK=3^QNClEnic5h<`M3bK+*s zobW>j@&hx?Qgr2TeCBmc9EtMRu&kn97r)MBj{2*KGknJ!UJ&lHNxeTT6W(o~Yp9S- z*6j-17Wgz~+7{?YiW6|U4?~^%tI}QYGyvD#Fl@!{wT~9515@FSEuMzf(^ONDvYNe6*j?IM(j426=}yu6XC*py|hZe5V3yx&yH zk(=NRdTpSlVV3~9{nth4{e^1W`X5?{zoz&ukYRFu@!`|eRZ1DkYo;PYq$&jXZL1l^ML_hIx`LCC?FjJ;EMBFk#;UKz`^ zF&(OV>{24zwa8(rO}lqGbYJ3s!cO_x*nRhvd5HZ8f|j}WyjlU$!KgHG*`49^Ml(5P z8TVE|tn|SB8R@svwlv$6rDTD#kRs%@=f(O*O70`MLeD*lX(U4Tsmpm_Ecj!e;Wktd zqPX>sb$|7pyox??h>5Xh`Wj_)%N-fR##SWyeRgMH{F1@2pju4{H>k0OcsMWjEZE}& zfS9d#+vvbR$#TQ|mi=HNXMd=FWl8m`<|f>Z?%iHp0#lWU(V&iQDKb4|awAIYDI5+t z`^-9^ocYX%4j&u=7n#aSUZrW(WGk1YBK?aFwI~)C66+4$!14gXmu@+c$?iTkR0I&C z$D>ar$@z0!3X&gq5|_fxyMguFWn9kJp(3y$$gDR#;0ZI0 zs|$pAk(Z@Jxk%Ah2Qwiia5Vd?Rt9y;I+yxQN0j#ZRGxB=-N-@lIDAr-z0rT_P;8(~ zboTs?*Ipr^=3c=tb3jW}zNR5L$a1>?1pZo}%$$3pT<{wa-wweB%^c7A3Vh&*RoRA% z``~Ma0}o%29SQ|GR1=c2gtAr}=5G7)uFD-g%_kLF>`u$nVaG!cjTO=A0_a=dghk{Z zsKdq-)q$c4&UJlZhwhEIrk&F&JVXvp@qQ^1o?8Ny&wJBoBh8PxFMPf^l?6zA+>^O8a z;JbQBdp*m|$U9nO$vuDi!bE6n{TC9d8yO{A0Y1|7W@Szo)!z!Nc|6xDb1%oj`>lx7 zJdgC~W4Ukg%{nGuQu^;=%-ibAal+_*)UDP&w#4VRWjNk=@*qRsx|%y+|1JJu^|}mq?tT1L>26-J z2=kXCMi)~8*TPmE(M{_)h3Kh3w9I&2zlQaum)@(xZmc+;ciH!0ezjxmtEM7O9Zq69 z-M5wzUGczxA$I)21L(;j(DRmW`WOi5T(mHOw^pkq>GLa%dbuFJr?N4!L^XXdr(~lT zl}y&$_)x-75ql%B&8v`(gIkYpYHBJLKe^5B$cGYUGjm0-%+9PVc0#11=GxL-!ETmd-8jZb#3FM0PByAdAsLDEA^Mm;n2QM?G z6qT@D(34`kU^0{iF??2zsR!8QGuO zq~%+B2=gXW@0ypz*E^(a=|(*puifw4W{}Ut)9(D>^jM6=w;@bKCmN@Jf_&1 z^0DFm*AaY|*pcw}w#;ZFFHzj(+4slDzu{4a*CJ!m08@z(q8xRi?i(aHmt(9GDcfvD z7AYQ(=kkFrY_{<1n8f1v{B8dUXk*HwX-jo>U2vSsM_hv)fH_<5Q2f!%4Jkl$>gcj% zfau~?QIW=lFG#NU3kvT@4RqG)hcg>#0=kqE{6PJdpcK~zP7uI85e zoPek5o?`kwoK<&;gE{^GPY8Rs~Co?rprW}X2iBibMZG#cQ2Dfrem z00w~3%dE8^{!?U_WA%au)6~}E(3@*e0+hS;X(LEyN#i|4@`RQJ$Qomrj;Z~A`@f3S zWl=U^P7j$FOE`lx*5gY&f}(f`L{BU@^x6lIRr+6UJ81S#UYtVLrLwg#5g|f62VxY& zYM8si1gXAE6``0L`{eF4Eq#tXUa>ZmYDF^_WcqSX03Q-qh^2`Bo*oqr<(75CXrJ#x zyWC4-LuUd+`)s~Q#P~l*K%ks_+D7(Z9zy)Ire7RF>ysAT^YILT>wlE0yAxL(H9&n) z1Mb*cV7^~!#y2nl{2>Xto85+N%zIx%b4lYT)?i60K1g=~l2Q!!9#RF>hkO|JTTxxy z8PrFEHMImCah&p68opQ)rG*IPqkMBAaCKjSk#^og)lFT8OuHkF)T;Ko1er695haB* zq=SkFn0@f>DYk+{DDZF>2f0RB9RBiD=>^nna~B)t`N>=|Wz+s`p)&R_5#Et3z8jeh ztiCrGn7fcnXbi`3>GOjhcPtMb4#d31!%m~MRM*1MrN|U>(_|W(<2qb`LX^u#ya${n zi#J=k@f)KlS|VMXA{0G>_0A%51hq~9r4fFs;`N`N2sE5h*WAvhOvPTS)X0BZ*$=$v-i|1{-8Uc8zn6wD792%`f&r z@=_gv+DUl#jR>+E>vo$|rda5!S4}q+(z1OD&X(K_8NdM(`_9$QD9(ZxU~(pLKym#X zt*muWb=3H%o0<6DQ=8NIN&7yeTJ73zfphpn-Ckv9Y3c#Mu~szoR!HY`oO6<CC*-ZTpl?Ibd`{kLxL zKXI%7EM`}W+dt8*-;WSWx@IEw)>dOXXycoAau)D!TLeWB99 zKnmqRW_En-b1L~DC*>wI>7&orGmWom@a9{`to_4u++n+=+XID7M71o@3D4yIF@?Oj z8wVL-&6m~h$U@#2+cHw$pY^c#+chO2?WIwe1I*&$H#P3^esR?=21Bu{{I`nNR3uw% zGb?d6J?!_2{;MhmQXQU1N(sR_glv%YZMy1K!l(;Z4`9q?iU*V;Zirs*UE<#T;>!Hg 
z%7d9NYV-THmJGhsZaLK0CqAk<61-3CP~>6?f7@m~u2@l`WBnD$!+&s4aIL$$Co?|_ zwK!b!GK}(}j0QI)FW1>#qdyltK4=e-lP1sbg0H|7zQpY2fMRFnmF0WKPC2>jEZf8@-~^g9lJx8eFPY#O(1&S3`xi= z{n9kNSKEGm9ABQvJ)npckv~sv$*U=PCl{hCiW|)1@Wt+_-r;5E4*%qLz3ko6Pk10{ z%B_n5*+md1k1L-aV6iRlEITl{K3m1%3|?@Ae^=u+zUL<8cdf(vc)dA!Lb4qztJTsfbFOAz8v$qq2>$ zM3%38t3*mthJ;dL?2L5`LXslP$Y2K9qmga0^?Y=WbME^-_w#@L*YkU>xvn~=>zppe zd_SM}`?Yc@eOYzQZhAUXh#hfVJ7g$zzPtHJiYT*E^AW`A3xgR7lsM9_JaxLo=f};m z{Hhe;4zmbF*ZiO=I}v`otgd zcRpJ6-VJxLDr~BEPi^zcZZ~StS7XbP&P=6(w*_*48r zcz!P#J$X}yUntSwN9ppOwD+PsUoO{UbEUprCzqx1)Pic2E87FShY)1-_Fv@tr?i91 z*8g&Uf-s2>K=ZkZ>G#OPw%;KyCDLjtE+XUSBk=oR2%^mn&^7z8afy0;W8Vl_yj=V7bP;EW=1NT*9B#Zk=wK6XHb5I9$xw zDFB2DPaLB#-K`)7>1Uyp$O0JhTUu?^8D=u<1nX;&7z;~e2jD9{I$ho3wLQ0P&rOnZ z%#xT-M|Bmc-ihC*;U@>Gr0op2p{}ANkr*R!P%ypFc6oU_04q8v{ooi{I{!e(huS== zzFYO{f^{y;5znDtHt_6VqtI5rZF;+PP z=(R=gX7CTnncua+pI~(W(*TI%_5o}LEl4&B({v7Oba}bJ@0bLY#bFpV`Z7SuJC3WK@%PZ`1AZ#QsTpVD49HNfm2sJdzb@^trnK7ZMPh=0`Ts<=-- z#`3abbC``5Q8cj90Z6kbM|(!}!-+tcUnbDH>}5ynUv7Q$96lxvXl~U@n+_l`eM#3} zQg*<#Gol~jouuob#n}>ukrIFPD`Y*D=}*u~4*thB3gFi>z6`T8HKEq$;|KE+%iP|choRWXpaOAN4b&7NP)_gJOy;dpFfPGZrp>_`lJKZ*~#tzi*p zfKC9h8?YJyOWN!|g{RV>N&feA_J7Z2?Aeq9c;Am2jJ9U2wsSiSI)R7!bj?K}9=F5X z5NZ@71$md6$Mqe$0g9PbGD~7GILw=@G$Vwyn4sLO<}z-*xd0z@94)Q};MIdthjb{Hmyp_Cm{Gb{N1Vm{B4 zy)vgjWI{I&wK%J6*n^Hl6}Zj5@_NJt#^LLG>1ZTBOGdQML7HSG{}Y>Z97(o>9G%#k zV3}n@kGbhVPCDCwrv=cQ7}=7c=hnt_L2=)hO`Dsop`AAWS2JLEd;r&_!7~pC1W$Wn ztIu*#MYA^T9mBjs6xA@$YudH(E2j~$?${|nT`Qx8j?&Q~#oMFq0N^!YN~KA`_c%i| zWkHXKPYHKo?t z7{ac^oEi#y4bI2)J0NQ1&--j^AgBRm7+2AuyOtmABEQAUo$Ioa7ovIO zC+nh)o)lv%u2|I)fxM*?7rY#PQw4a?CLmHL%}r=$tM(M^19y_@rt~ZOLOmYLgn!J$ zKVSsp?QR&A+58)bR1-`yeZ@=ttWXb%{Ho2+&W!5?ao4TR!U#q!7YeQ{6|^rFj4$#L z&zVbpOeq$FMW!-IZ)fTF!s2KSe(vfO3CZTas5m2e^0tBo@%CbgDa~s@A~$oqB@sql z`ECf_PRn^vPQ+2W_+NypZ~n>X{JVo{i%k5>+wq?WR4C>ULvQKb5QT7+QLYI9?A@UU z6x@$PSq2|K2t|cL?ZeI*FF>dqn~8&PrW|K{>YL9sCNe8R^KlR3POx(moE2QA-{-Ln zuB;fcBBja)Y?5@*ydF1pV*T^T(^ak@D)yVk4uyh^w!S^_JfAjp?1_wOX*`=!Nu%U9 zCSG2CGST>VeIw`Lt+#kn!qFvGnyVh$*OR$=J&eG=Z6D_lvzSzZPLK*W8H9?T)=*4{ ziLH7ec!2FW_;HO+bkotiM4t};9q~Fpi$4m(pUSB0r6e;^jJB#oEBRzNm?Q8fN=xjhk2Fec*JjEH!&N~(1! 
zAB#H62WzWu``K@Taz0q!wbRxK@EW9GrRsAOC!w;MlcW`II;QA(CjX;U*;AF{A~zFh zYW$8PXqntsT_7u@>rASfr$Qs20k<3I@P1$^tRN_-1?mt)4IXAFb#i5fwXR6ek*jZ` z>RFG7Sss>^jvL8msofobB;s?cfQ(ZGnm0%(dDZ_VO_0!YX5)gP700KtidM|vbeE0T z0{O^P4NU{Z>V^Ey7NM5GrAB744W zy{6c*gJ<~i$z5WE(PvIT19J6b%CzT4HVefT!K%8T)ntRaF9@H$oPDpJ?~pJkN`0ih zuJ&+VGvyEt&S9Q4r~*;W!BW2l63`W|+oLG^I>6uHw)XhYC>XM~cwRp*fcb1Nt=tL6 zM1)CvkeH5Cr0L=2kWcpAItZ!rKlirDE=-x!SjZ4{za-jx>!EsomF4_|oO(1#(=KbbYg$C0-`XN~vlK5h0k0!4x+|6I8|4DEFj6M8+{0N_Aqt4DI zP@>+1yHb>uBk9QMZ=+Fc!_3kGggH+u0|qIptcZqn{j~%4eCrRlJZHGaR-;ct8<;PH!j6Gzh(Z>QMk9&`%)sRo z!!guv3EXEvFAnbap*r+map)hE@{8vgL56i?`#B;v!w&InYqgKvYwDa^=#}1>8@POCzF8{uYSniRUKspUpdK zqmXwZJ_fUN?P>>@MCTV^fI$?5=FKRZvupxFLKlCqw>e_^XhV#(sHVX93`S;&JlTdMqnpQsK*A z2jv!xK?fGJ88rbY@e#9!RmUzMwM{rx|45M7)hR4{gX+E` z`rcXP4>AFbRZ@=?AR5VTxyR*BY+fs!z!=6)EnRGm%H093#iw&LiX$vvDGSq<1Jr-@ zrp=zw+0U$j)_Y17ba6&vev+bNLaxuQ`iD~ATl>@7l`8o)8Uwy6O#qiCH2p|Gbq+2N z+QvmSQ3%^iBHYYt`S;hcs{bd?&g4PhOtiK+6!3tHaFmWh@#PvZ+*fP&8gUamnXsG$ zwv!S*Ka;G1K=RH7*|L{Cr+`$dhl?pyyl_(CUVPir=StU2Jf54%OWjC!Dk>juL+QHk%;~E4-tYyVRMk-{=TfS&-7TQsrChqz;Aq z<1Cls!b9qXZH2oQ#Se}Ktlx)|J}Gt39i9{gsnu24qP8}M%YQ{vfkzBac3V*224&G( zz;aZZ&i~5d`FZAkiPD!YEIA@zQan?%?>u+C2KfPZy9Q`KNKg~C!@e1CpcpI9zV}fY z?>p`{gKZdN^2nL^P_ri4u%gnU6^{{;W~zyjMR7Lf%qYH-M{^Q9Po@qe+Xh$8@Tk~{hY-Mivx1z4*9>5 zaJ4;kwna;m93J^)#4~uaW31G7KJ{PwJU*t_qNM{~TAPI^-2o_mZ*2Kk8dGno>+(^- zIO45RXIecTWKbWH&R33TXB#eNuH61AIYM;DF%^byJ?Ma%y$sYzUUE=u9tXh)=6(Uh zJJ!e$=3X^O1Fx##$x*P^eLf}{nFwk<5usQ%NZ7Ys$6E_H`&GG9o!(hCCxv2WOb)M~ zJ{`D6;F;UcgeR;7a1<9_~h`9U9P`8?mc z3Es=&5Ku>wRL=oc^5b3dZine1W8e+aB(~*`t7n+|QR3CFJ1g)S`nh*s>=_)59!1j&!$>Ucubn_QF*_Y+nDN$Q_m7v$ z=@Z)6sL>uBZc?EXoU7E%!w#0H+Jdmj{^_zCOw>%00qBb!5zjAHui1%WpIm7BG7Y*q zXWyTA{X+CH3w5f2yt+c9&;wiy6=4bym^M&*W5rjXy{!(Uz)V-$ zneqc$+dsSIz&P-mx0Bl{eg4mQf&b%h43-(?mP6tCdcml(+dd(GHsB(q^lG#5-!2De|Wc|7}RxN zr|up>hPxoDoCoyJGMZ}RfF4rriKo&d-+RCzc>LI95b62KsNkB^J<&G{KctSIvCkKG z%Wf>{@QI3VjH)(qpqVQ@?^1D;f22}dOe+W|!Px#EKeJ?{0_^3%ikasaUa#r_vYoXxQrVBrit&?XC^ zWI)o#JGE{DfcR6*UHw4(VP*lMu=$Clq*jg9wkyieG_Cjj!0fouQ+#qHrVSQQLcgT2E{65oX>{<)w>pOYqdL-e}{# zyZN2;xF>7ZLjuBsFHbdNB-FbxVk2@EF__N|j}1o?(<5UxcGVxVh>y8-<$RaOfcKi zPLAUn%C5;5iELrI;T`S=Bymzi_!S2b-SY8jk-3W^L!QY|Y6947YgQcr7U*fTjN}AA zjrPT$Nk@cq?I^29#vRNj?O(^Y*6YKYwjfFVoZDfT4EaWUg9W+tP!<&z=Bx>0VPbNFg{%L&1m^E!pX;euk-1 z^qC9ODN&CaKsa8q&%<$DM|oD`5x1Umlzet*db;5nyXYR61YdS3r9Kox=GU00l1Xim zjdGg4hXW}2%F=igNCTx|OI}*6NB$RV33L>!(*!nXV@+bVZn-MA?}uTyAHI107?|~$ zi*G&>?&IJn0C(AWY?z4PEhZL z=A>F(`oKXoNd%^mSKQPoFqC`_$27sBfgL%?jG53%ik>IU=oR>rUGk^ZWYR@!K=2;U z1vf_($fdJXykx;S6T+CNBsu<))JT@sKb4{ch&?v|)x{Bk`P?RR6si_c+d+U|rDUlq zLTzei;`zFd+!T#=d)h&{JL^b~^Yr?lgBhGLreTZH38v2@#5`N>q^(~)0n8E^D<-e3SZgd!gH!dmayYvuC2$n8HR!=5_uYSUTdx%F?k+8 z;%aKSg~t60YX+0N$3l+iH?6I{W{7BV=s(&>HH7=IRLDR=-?g`+|3?roRlehEqv9n|S zF|5ULmjd5>F`=WUH0LB(T6)0qj9AAlShWOyPW8}UvHTDqR4Q-ojSfm@A!tk!_PX8x znAYqf{uHCUg!pD~k=DA?G@<4tJli)J(d*obg#1O9^6r)?=B(vQLq5 zeFQ)BLR}j7h2e@UZLwe5rK2vN%aOEOgRjwtvYo4=onXBOG>TzoZ@gvo@TXB;b_Q2p z#F4a0ev5wXJ$rF?62r1Z?#XBtwcv<3tsKN*X_Tzmcv zeTE(EhOw;!o$Gl6w%zVoEc~Ha&$&5@FTvt@ zMns51nFpf0tQhXMpA2?O#C&+s3s=xgq}AmV4T;kZAfUvZE#}J{Gzy~v&i-b!S4QlL z5$A=wpR86Nom3_lc`ONQJ`Yh2YB4Zv z;y-<1AOTsk)W;6ucgG~}4qJT6BSlv;f#CO64m;ji74Eso=>UuA#Q~UsOhaQdW?<COPN{#2L9R9W}z#HK)#7P!M;bU9d+a|rX^x^QQQZ$_cwyBQt zv=tH!j}A(V*tei#FU+r-k&x<`DHY`b(V`>1TfJ5k(?6_Cu}Xe3u72+R4YzM<@=(cr z1w_I{9h=Hfn|ue_aPgDx_bs?QvTP793gZI!5)B;N%4B9u)WkwR*B6VRCk5MXeGwe~ zA$^K-^yRPyS+)7OI@j<>ruw>;C(eAzntdp(_v*j~;DA1g5##ols^`fc3Q+jlTAVVKw#|D=$|Y9jNEvTx-`;=^ikC@J(FdQP?dRU+2>kkvf&rH*!emmW6`h ziiW*{I|}O%8pwKffUwIi_KoM7d?=4Seqd&_5#bOB2!z$$H`6)824RRdiO$RCMno9p 
zAG>oU=(Sd4}C)dW@7}Q!=(0t37?91wMux$=Z;7G z4f>LWUZguTh#ptW)a-kJ!*br*42=Or52`>{kR1>sb+o>JwwL2cJJay{OrR$jJL7J^ zdG>kv@WYLrEDh2vZ}GlUSXWIfFNpto>#xwfqpM%}%yy(lEX>3CB)b0&K*Dx0NXf^K zYJu<(a71`bp_=j>S45WO&T88O3YFa}1y=Lr-3PwjiMCmq*#PMo?<*FKM1wrGk2Li?v1N$4dNQp` zM19uY|KnQy3fy;W(FLD-6_MT8S+He1wlrj{^t|JX5k9K z)!X0G+m^eeotMA|OBJb&iviIUeav)#Sw40IN&?4fv2yP)%!=gRkdPJ08qceOEL5wG zXrDtb)B5uiLNQjX+uwM)?OHH#&tJ?(E&%c=BL&~a)W=RShAHz>H9w5)<6AZXjNh!d zFmUkOu8jNy{*2h5;BBil4{4@bmxG%Frh8E2|2Cf?&fNoAINe zCM{$n?sr;E-wtBHd7wSyn#T)$Lldhdb^{nMh8|z0XK2~OaPOg@UeIY3SgR7%3*p=O z-g7G`duIVov>cQ(lsf2xw;SzUj}UG_a#HWY0%G4{4sUkXBzK4wvdOny!?M20u7^wY z!n=X1u5|)9ID6){rS`}MB$5|1jW55HZ%SGzMI@J6KP*lbsY_xhkJ4m@942qsJ8Vj# zTNTVJ4LAA~+-4JTMKp@f^RL0|Euzf^+_fjF0NKewy|4|E9E^vF?s>4|73deS{?#K+ zN2c$-p{-Zkh(F%Nd^FvoJ_2;N&D^>BcH@7u9c+N@AaU^7Eh3`q-MVQM_1huA)Duh} z*-q>ew2ycwG9ko#L|r!{yKXf6RCjmzqqdL!D_Dq1sZK2L-p8e4T4m=5zjvrEpu8i~8pTj5h*CK&2% z{Y=-zN>BlyylSTZqMH7(SyjVwT20Ea-q~qD<(8zysl;xL2_?~wW!AU`%0Wvt22u@R zOUvg62q1DPUT4=eUg+Y!BC|m02=beg7Q&5==F0BYifV$p_+LD5EjrwQSm_1Kt+eGZ zLEDdI?^=rkOciK1BFb{F$ITJI2hC-_6xT|maZfa87o=oIVQTam5P`d`w!PVeXyd~% z@d3+{6>i4=VKn-G=IC>0xAZmv4l{sfvWI#%;F|!5Cl-#nCos5=p;vluc0V6sGC7Ne zTFw+^qP|YdHfLPNOdsc3{Z+D@-ucjKJZK_Fo09%*RG@%bmcLzG|MF3V8-Z|@i%Vs- zP|)H9)`*+eEW%DdfxJE*hOz8^a3^(MR*;4FbLj1eBj;Ei!uB!0aJ(a%bF#H~dZsov zed?9(%#H6iu9z-QcCG&^ShRg|^i|iS_t=6Sd84N*1T==#rmHcv<|%WCng|77L`|58 zB6eDtg&Vl$+S5tu5Xz9lWH#t2rEmXxnqN~sS}YQKdQx0SQ_sT-}@KNY@EFm>p*Aqs*p z$5-CEJ|M@)X5!xUO|anxIs?H~w=HsL`C@XCYzn*ZPg)9Ua)}^Z(SOhZ|A=3pVwyV7 z!XPO_t}L~DJP^;imi`$F3KNwvbB`ICMtH^6Bid*9BnWe>4@3VtR05iJ65t?HTXdC~ zx&||dB})+Tdktr79M~?$*5gN+02$*FBSi37J|@6hyBE}*w2;k`v-Y`Ji}z7oJz)rcQ46{61r;2_>EcO(OI5s?LmiJfz;8frdZlMH=j;&oj`9 zlfI-BBA&Sp&UT4X%uU^^{Y;&P%`Dc*5uk=M;7o;atkCM)7VXFI4~WyQr(o1ECaQ~% zI1Ob@H&f!BZa^^QO%7q{vFE}uMN!=s;@Sc=1wp!?(@Qa@rQT8+uIk)5+pm6N2v?^5 zp0sD8Z!dG-Be4?h7!O6ZJ*H9g20=DpTnF=WxArbGkh?CKTh(%9rRs{?%)C)bR{M(0CFV{9+N@3DyCks)g`&bF49H_S{BQcmZW{pHyQZX;}_(Z`@$oTk9 zfi+7^ybh<&Smnh@47 zSD?Iy)jI-Q`{e>dX0O|jRoxf5KmwIV8pv5{ZMb;S8##o%rT_Z+w#U3wvz2~gP|q3j zU7Thtl}$Ti#(uDU^Qy<{SV;19TCj0&({VUT-YP?#nT~E0U_P_9kBFE%6gf#UYy0HA ztm+iPZW)sp_RBekhpHPJ^5+wsu&~7#PXMrV28aLQJ15Dc!B3Sl-=)LJ=Qkh5F@Gam z(Px`vi;E&GrcomJ*tPCQS9H-S_hT+S5;%9BHWNr)wb}DEDZHeAb>^%wSdQ>H{_ZI! 
zkI#^MF!#vpmtFBx<^4L{Rdo3FPnF_0s+02epqCAJalM_a&`5I5yk-Y`ra@E0+8myV zR0@Opq9nSrD|uVKf$x0b@RoYu;WMKcOxwEBmAMS;kM;_!6)Vj@ZlIy z53ZF8gCpC$K4yeK#NOZ9cWnI8&=@6zdHZkOe)3d0iANWMv;zNSbJNy1dKA?JE*cR zvyqV)d!p#%!@3NL6hLtg&>~(a)C;?M%sNbIvT_lG-77M{hP@0 zbhIKL;m%#NNX*94_DX+IyB{~zfsbpTfSbT3>iOWz6Pw9t6q9bzba%65KSWz zN#$|?Ujpeo#KV+bmDqZ@^UD4=Zw{;BY-#8YwCf@3I+;^l&fZADep;xtIPs^T6@`xO z=o(HjIMx-OuZcGn`*i;PjRZRGqjwLfwA{RG^JDq7x1Ai^h$so(GD~+}I}TD`fXPr= z4nip*G=mIkulU^_Lb19q5u#MIoj`InG!k3ncbAH5(-9;G%jit zQpc-o2z!HRecf`J20)>Z54lO}~p)PR@Y1 zBI1rNhQfKME}3sk7<#Ys`g#Zr_?=D%BC6kiIu$lz#b~DRLkD(#8nZ0m{jvxQ(8>Y~ z(pHG8^Iy^FRrfTZ)g;@WN#Am9*Wb*L2}@B3dsX>FTkh6v*gxdXVa1obf13_aYp(YOZuOIg3HD%$31 zG@Xgx6^#M^C3wbTzgUK0a=|jNbdnr{InYi+UG&J4X2@rh{22TeEO2ISwh|tM`=PZQ z%$wlEN`=n3Hsy?~ThU2|jPm+9=>un-rhy4Mto?1h*0$TEK!ZlxD?5$xAJ<`o zy*D(5DB-V67sm84u|$N{c6a-*KJbneHUdM$E-yiEG*m%&uDP6h#I(x!xPT~^1hu$iSGDu?Fb#MX7l|mV#pI+c-J;P z^3#!t_J~?;7+&IgKtWDd_Y2CsdOT8$;L}z#IHtG%-MnG!fg!w>K6DWsNQ4IiIn*E0 z@1Vc#<{bbEOe%nyNHj#s#?lfBb1h`yY=|Q$v=w^vI+l8Q2!%bTz((jvOJN7%!;p>f z5SvMjlQr@axJi-9dK!Y+qbDSmkOLf4>YNNSF`{#)8MmOM9>^iMgYhAdf*tVL?{Eft z`}GQjx=k4JVMqC?ev<)P7xT)!k+*nDa8LzRq1cM%YCn7oO3qKdH(<^uo29J6XLw-s zVUYQO()<4Y`*L@D>r`QkRYU$Eix)ZvgRk40pW`Gf=5n!|DfbzsXkwQ@rr}fs1}Rf_ zRg(gq)iKvjBH|bN0391yXCe%&3NLjq--F_w&M*IUA*AZM9i%kb#ErM)^V zR0zjHy*i;+bzcz5jlF-_{&F1+J2tmjGmRDle?kVmZ0m!QAkiVpMlsoE$Z~(^2zm6m zqVKU;hs2$yzPTp2zfv4dd=uub#rl}LU+$jhYUT?!Up)ygACm{W$2QFq50?q=?;GZ@xDq&_(f0gpsj3VTZ##~>rT zFz8)&gL@mY`UNY-{|cXzckYw1>%rUJr{|V?9o2Pbjz+oWO#2i9cYKt5IhSz%Zn95L zFvxn=(Qox@iRcR#E*E*H#5(x8)eg><@wC-(x`vyf=ToPr!s;#m{HMFZ!lLS0(miry zn_E+U7tKFOz9old;i{p!y&njFn_e7U|8<%Z|49@i=KdiK-%_N7PaFaYh~$u;$t8&j zhJ8Z8hs%tqgTWMaPU^%+T5v27VO(-zrs^iru8J$%#0o+4;aQh>WW`RNE22yY=Ow~K zXcRigRCZLiq|elv(NMyrBmqYDQxDOUhmSRz+>X((&H#XX4c`vFv3~HgOS8E#72&Gv zG_P~8{hHMg(Z=huU>{YA?BjfeRj<^aa!7#5S~urO{(bVCWyt%Scbf3}(b7nV2Wb}z z)h=!;z^PZRZx^eJ8bU4pXk?*(c3(BVwBC;WhQKXJIy$S_5drt(X~3WMQ25?@D@1uaJ#OSEH9e{& zGX}U+#&qkHb$M;bA;;$Ib%Pn#4;a)rT!OO^OtZVrYkXuGjhqI_y(sIsM|;${YPV?k zzkiQ2`1AYtm;3LZwEgCP|6BO}+4t|uf9s!``3Ogo<1+9y^X0gDyioXjBR&cSU1ay* z3Pl&vab$D^rlxHjv)`{PLxT-MaoJUf{xNn~@Q`|r8JW*rU-D94TLReFco#2lH(g*n4*#kC*)v6glRce0NCp}6=wU~XLV?Lc2*S@aatt4IuAW0jL+d<~OXM}( z1bGv&9y-Oa0YkmU7q*uzV8q<{QibBpYN76j307$oPU~hi4DuW%5W zp1&4NTw_~(hflCGMmp;eTt&`nx)M+H-XPfc`1^7o$W7I9@@*`CfO&A*_3+aA3WGjU4YT zNYd@Z{kTRvpZyPPYz@E|4$&`!Vy0)1)fdAtlH2HJPy5O*6VRU=`uh?nQ{F}_1ZO&B zEXAW%s~#_~b%?*-T3#~SD(dkCK{Kx9nA*LKU;r1pb+`tjo63Gdd_w2-=>{i>n#bY& z(DP`jfi>%+y8{Ht2INjb#Je0DOY@}gnyFHmUZw3XUTQT5hooPY36T{3vSKtzbwYK( zx92$@v@&MM4}nsKY@HiIO7j_of;RD{f!6SVi?!bcKP(}yCuWyLz0q-2eV(o3igeGN zoq1)DJ#PpF{uN94baS;@Z#C>aQPWCC7xiB=BjN+-@OTo8+6TkDStlK1CzP?iC%Mix z`VOJEUY8ZtRd){3HyROhs)%rmwTHZo9^+@iaiYs%z|ML!vQbHA6o$^Cm7xQ$e#;$% zxmL{Z$efkJ4g$_i0Yc?5Ylsny0Fl8*L<}x(-c|qp;tTm7Kh1ia1dCF2ZCf-~Q9CIV zvsOH>maF68jIXRH?09`Kwz2CP{^GL~w!LLCoXXsTrF5i?xO|3sY|rCrzIyyrrUD2! 
zR<7#aqiO8!jjO$@vNp)p>Gj-4%rM6fckj0P+vbAlh;xh2Qw31Y#7D3+D~6d#ReLtb z^`z~m=c+=e*vH!OW}TEO{vUjz1hu!GuDZrL1C|wAw?g|Cah};@c5Pi>6=A?hG&i@ea~*rBp2vUl6IG;>`%6m4#uT7;hD&<~r?1m->d`2} zCQR<=E}m!WokNr#LlwL7mX9VBS{FO`IOnSYRppS)N+$CvMU}^Z+4Fu;{3y@Sr;0>BABHQ+Yx7d_5ag`a44lMZLgL+3@;W5w*bY)AMb zF=b3vw4e8Osc(mb6=u;a!LHRHTW+&(eAD@J8VrDc4C>Y}D#0C5`kj?AhV3;IBmB&6 zQH#P9wYu-OQUgW{UAsnD`^ryq@YtNiDE1;Fx&dmK&A_Ekv?IW7eZ)HizsyqSyb~qa ziwr70QD(%GZ_Wty(?irX;48?N3d0W_vFY-*g(F95ZJ{^Q$Mm zzph6}JW_o`M@z!2r~qMvU3rdouU##>@|lK0E~qci;T}QMjn13;h-NyvR+=HYc2v@c zVYVC$WRpj(86FP%7%@mX14siau9~D*4P{K!-3%OmDC(a91(a=nB3>_Mwhpw3&0gbw zzWxx(ff@YYRXHr6lapaq`;4^%Mvdkp_*A3mXw|!e4VMf`?7rfZMcR03UotXteq4M# z9cA_h1qR8_jay(%;l#qX?_YU&p^pG zp&yPh0ZQzGTvo3;boh=M@XMOiBSN;%s|K>^N)4lAm(}31nmU_@q^_yw zh0|hx{Y;y85IV2>H@GP~NwPwT61uJn{_!qWc3Dy0YXL~mhmZt@(ZSeP(c*-;mg7#2 z2q${^0D!WHYW2=OyRfLqLxhkJ%S(EkdHiMa7}-&-taX>?B$Agy*^3QiLR&?_d7;=ab&ocYpy9>58i(}P zS?Xt8d3b7-eI@nWQAmLBrAGQQK}L~qaT#=y%u*>?P5dliA1hTB!V36EELCK6sX zFzL2Pql_JFMDVAkG~hE2=yr7`%M|0-6rQ5*BvP+hBi-sSA!;kBXA0_`_0pErHjcl@ z(i$4CBD!-6?no0AuLt1FAM1VPkh|K#3k!+B8%i2z=h9!9{2y zZsS1CUehoKl3gkv5wB@HqK9h?9Zb=G?#k0R11O81%gM~$Rh_cg;5aA{p0U|6`LSy@h*c+FzGI>8$rRqLl)5ykec&YEZMwB9_)$fiS zhbiCtHhkM~6-s(mJv)D$VDQ%mO;4H-^g}rP_V>f?Yy`L-=2+18xyv~-M7($;$_#+p z$kYu$a-H|UsnC%peTATlvzJ}zxSCH#*{RIJ0}mByCxOq3ub0FhiMkdSQ9NNYE*=?+ zJn70ywPg2b2m%SbOP+emZ5)Lb?Pf;TycyZQY}}ULYB+~uNse-%<$82<v&k zlR+q^DB3R71M6DzPn_%67J{>hbp3VkZ4Ju*`ulHR2wmOu4!KGadi+xd(}%VJlxJjwnJRZvbX7Vx41=Uw z?1vI9#cF|x#$^tb!$n6L^Fdv^tAf>8hpoiNSv(eMb{rSjI-uM905j+nL`U{zDSZ>m ze_oln4%}RypN)^o)7!t;Dup5{(YmEN2+$5Y6^`eYuy6;+iRT%)9=}{X>I^~ z&tUasF-|%Dag@m?fsVYx0DfX*6ozL8VEDdk_*SO=-gAKx8ink_c9_u^-3^Y=OP8nE=CS9*e8a!{ZiL4}cE-GPlz^jFez{lLe*XRI-d_vbD zkRvDwGFEmK8HHgiImZLx!*t}~bsTH8Us~G>7$PS^*KM8E^G9h7WSM4h>aUCEkPWyy zGJ=)F`n12bVAf+WCd?SXC?KB|KE6G0feuGM>?;JIejg2eG+eJQ48!Jv+LjVJ-oQZn z7t)cvr*=|DYTpns5uum{f)};B60CDxKZCy<(b$X_BJBiyffej@WYmBN9lhfp)DHUW zR{c1jcYa@XoSVDZe|!BVN74VR6+yQ&;*S=2r+xb5pCRQcQrRwQy{~Ks6dP6^6{SgU zrL<4?ed5@82Hy+o?Y9^b)O8HD3K$A@63BD8*x}RJ5M;YsLk_0tIMy;p% zS$9Mn@yqie6vZ#U)_wo;6#hCc>au+VW=(VJP>uV0Z2{vch@@;^ibH_)^G8xauX;F! zi6O`$p$uhZ596!o&9&`w! 
zpC~|euyKYBThW^j%#dw5@wP-H6YcGZvB7n!P(ZFWUG9kAi!z=4t_LVFlqX za)b`|t31tke3^)de;|I_()J>=2VlPU+qspXwrfZNCG}(uNNd@ERXmzTtXN#>zwaBq z`!DK!>jzbJkL7o?9eSo4Y!QWOl4NIx8XacUg=SBwAv`8P(2Do0U~sICC|`JPEJ$3I z-}U^6_{#{4)PCoK#uQ_s?o5lrxuARNC!MP(fd!NeyNd4d7wJ^jjqh?k;i&gVn#2Px z>e&0xExs+4p+{8DorE75jXQPl*H`4gBjk#BjKi)q`5_IiQy1@7azQ+`{O5%SAzC}f zzz1p&$OgkiwgS?j!2iM1pBk8Nhm`fZh-!TK^C#FR)u#+Xv(Ec306OMR0K8d@(8S^Q zHYQj}4`bYStEM!Lj{ah-c3U`wf4W?f0N<Xv>;; zbaWI*anOJ+TE75d2BA(1Y1laGIIj#e?~v0T2RMl_2J)nJ-19G-4R~ZFHmDghM=_6W zERDNPLoUyA{&@;Ll-PO)t-)}y{#j-H@@AB2FJYYBj3n&1Sp;%@L9;u|pFTyi>S#trzsy?l049HCxyBu$NcF2CFeK1u}U zI~Q7}u4jF)jEnjrWicfakSklUH)H>T{7lqNP-JRS z!V-${S+*^i9$U@4a$>aN!$9r{Ryur$@OcQ!?8LTwbv&@lAwsAD=5AW_De%b<csR<7G-mJ`q)D#Q%7KP=UHkQW@&Woz2ONOY%?73)byD6KQUgMa z(Av;}UkyGq?DcRI9w|?7?y%sjb1pP@rNg6RO~}9%$lgU@SD(Y3VfcC+?zim7<70>t zU317PgvZ>W)1SKWxbitGw+A}|f}5P9x1Rr;f1Q3MLF79PTMv_}nDf#<88Sw%udQKK zfG+8V@5?yvA(bB`UsahT|K78Ak9~T%>=V(n9wz4uiU+18=E;lI3qeXbvbp@~`eDvF zRBXorg6fdy0TBBa<8N@|hZjzC5fNO)FLxU|zo)zgD)$i@z6+FH=|dySd+QM!$1K9H zYYCF&0h`Zq)(*5U6nDWdywnV`9e55wf~OW=hH_w#34L5)(9=TPO#6IfTo4t zPk+L~JuwJ%2^rujLrB^ja|zkkI=wh^!Yg$&Muz8 zm>v-ecGD~^A2xw`ylf+58O=kIF}BJON^Hq_S+eq{>;sUt>m##Vk(_&iW~)-zx6 z`}9l5sCR816mFFu0%~An(t2b4`>OpvC1>^me;&`9m;WEI0a!8r+w1o?y-79y$BV+* zh_D{-P7*{3GtACh^DbX2MrQOq?2u-n3KgM@0TZQ_{63*Qrp@dMGf%wa@gDnxL z;yNjJ8u5#IVA#eyYh$Hhmy>;mGhJBda1gyqcPTSOuP#}$s=LwQOBH>Qm~BpD!6(jt zqAZyRP^$*aJ?*S_{6^IOK*qO#be)@<7*Jc?TAOIiQ^Ll|^cP9MTcpHImv*K#GTF>~ z^0Q8S`aH_j6!l&;is#5>&pVOBFIMuWnXANCxWbA}zLpL21jnT;u}rjb&Yn0ElpGgv zIyeL!)?!jYDoy!*%LNY(i3ugPR6b!SF$I{JF6Gr@q#pgGp|Cp>uTG( znHkRD!v*#L=Yg}ozkHw`fmQ<%V5*TGQosCoe^2>aPs4lA0p=L1n{4^I^uFH&ScA_; z^ohm*w;8hc1}+?PaX&-8{*eFog$pl@z6}qEwF8s5Kxs9;Y9~Z-;v%5;{V&$uJRIu3 z?HeE2vm}a&k`_`SVMc_alx0#Wm9a(H$B><|Z%L>`F_t!@8cUe5jzOq2_I;QcWF5=c z$Ns#h@AbXz>$&dh_dAZ~xu0YHs2m-b`Mj6&e4VdFL4@31$;K4jTq6ggy!PpPXrXyR zGeJS96VY=M96i|Z?ygfSl`p+JsW=+uz<17SaT*5F`ymJE4$3jB#A-4cAGm>g^X2i+ zUt|4G7^j2VMHlqPkq!m>(d}t$Qot1GMqdpP^rtMNw#%+6Eo4CTrIayZx3wL2P`i#o zTr_y7JvD|*9}l;Bi^n&`Zo$P2JqO9Q0-!$yZ#G(_uS=!9>m>uOQGJ>-tvGff<)V@R z8nnA&Nvns9CwvW~P=Qay((|vx+*x&UlEfZVUg@q`Id_n-1d-qd4pF#=8L##t{=PE) zx2dx&5i`MRp9Sby5`^8h?oOKW@Ng+rV&19ys(4`<&;3`|bq>pO88a7QlrkX~^*K{< zdxz^h@=MT-XH%8maPcjXoaGT&s{)@2^kNhlSrLvo3@mRTpLyO7o5vuKTgZu>0HCe6 zKhg^vT!cLmq|5cjvHVnuEtU|uWbwWR@QbbBkW`PfvT;&n;7(xTYPJ*oo=%vn*ZceT z1!dp{BzN&#Si32>Xf|-?j{(HZad8gNLy;;DJyB;m;i;ot5|x?Nus)+SK`N7CBk6a_ zT=$#HjKu_lFgi(bwEJlaXJ+-jIyjJBNjN#vZ6b9>QuR@O_q8b00U@yn=bH3;a_Hgp zXEy6C$N95D1L(EE)Rb!ceD}Wcc!0r~q(2tJ^qG`B<>=C9AamRg2EAuuxgH>9Hx8Lw z-gD%lnzRjy-)5)I$IJEncZJIIvww6mtnt&I&iNwX;{7{h#`^akfh7xd`}eXXRss+d zVfaH&N3CEAk1?n`MG)dm#9OzDvZK-?85l{T=WKSdBJuvSQ71L81-ZjEc&rX;)!Oxk z@t~$jQk$5y>|Qt1akVM2bgwTi1EN(gRqU~KVxLluS}%w#d>^`W*Dz6x<1P6vmMS1f z;gIMMMUxkcX2EcF_g8GaV#+(k#nC!zWDytIz&DWD%oddU$ueepeSp6KyCk6QxD}9? 
zk=IToi{r630;9~+9Xyq*&u1w4or-nLxT7#7q#d{h^%*vl$z(*WxZ97$N|*m;AC|wa zLELce)8XrW8YJRLh+c-$eT5y)sumitUe{tAGWEqqg45zKZW-^)q31PD30<2I)|FzW zs){UzgpqDwVR><}|BOgjo8RBsd#F3!#Unm*}{<~iF;b_^~# z57I{CZ%us~6MfRf{H|u+b&7om>VrHUg)jjn*6hfj!1GLZ^hC;FmO4qsUO4pZ=BoPc zJ}F4!O1lO8tAvz6z^+MAWPwfjSadC}8C94wkc-xwko%dl5ctpBN(`DrL_IQbM@NnX zPy^05cp`{*0!S2X>lCR8A5Xsn0|%y=F1v!f2WgEvb$m327_eTA>pMjcF}XeV9VWwb z9}~Z)ORN;yG%a{sefp^G05h;oTkK3D&)ZLuKKB5syVY^yPlP+VNzM+XKFE95Hy?+8aL4Ud!J64{N+aylj(BM5 z}_bTj;?(q%Z z2=6t3Z`HIxt){RG%!Fp6bR($Oq}>}es1!4Bo+|{dV zuGS0gwNorX0?4W2;?oR-g3})4)GOs4lym?dO6pw+OmcvSk-rIs_&h_!+Gd6357sTRNn){CDQ6Z4PHt-SfA@ zp4rvBAA8dazVMSe>h)77Ub`}}t%NY^;d^~)*&3UCWph9DWle5k7~rSrQ*-dF9k0b@ z{mHz0Tl(}qN1w5J)a|)y7Zn8`+4ZM_TJGw+v!$E(1Qp-j{2;01n%j|{;kEW;?bzce zPPxwpjztHCUb2CE#kxulRnnJZJ^$~7h~XrL{Yhl=ZUn)*?6c%GNkPlR4NsUSl`f=~ zTfz~${+#xChyV*@qfvTY!Ewj}BQ!ou1RSz5-H+X=;_fdn6J3&u6cN9atW+(gB*gYX ziqs(LO#{AieIW8;l#9M!TOqVraTelqqz#gQ%V2k^$@-D|e2h=>7t4}^j>thPw2a#3 z!UHuqZ#L+YLUIl}li)kv!!GO~`ZsFeuJ!R)t<4F%_ z9n9a{m&6e2J{cXO#+E1YDvt^*sAf{h?_*!gq7buQYUSxIXH3fO!cCmt>=Ck#ic#}9 z2vA-B`vHkMTo>J^zTA6F?RG#^qSZ?N?KK`Z5x>8R;0C#B-Mai-WfIsE;8t}BhNhjP zuirA>r&UybF&Sq$AR2|pQ5zjaB`#sU0i!8+Ky-f^bZ2O({#u~`*CWPkcfJ}Ksjz(~ z@u}LDG8q|e`qff~kgP~6MWycHdu=}QO)e+ZNovRsLUIzn9JDWd!;#?AiVlw0_T@8D z^GbCjw_{H0!($qhOjID%6!6Ej$HSHrQ=Zx%o+3gMul-|8`G?*1+nK?-3!d<{AqyTS zA4jIB+^T;MG=|0;-0WT;fGnOq#&J^S4xb>_VAY)5s0pu-%FES;lIOo z=ZzdB8+jF&PD?|Fi2~CO>1ysA5c*)-q}p+oi4hKHle&juk3Q> zwe3~9ZV;!wNQX|b+Hc@73mgQ%Bo}JHvbrp*J_>oCQE!KcS&X*oGpAI&B&ZoIOJg4Xr!xH?qO+EUXq0_{@jPuo&?-z6$C{=RMS zp(km|N+d;xo08PP#DRKEqKew^i#cxMRS?a?4n*FEkn~PMoc!xfS4u?gGH;z7J2~0p zQ{u;OnA(&%D=Ji;orWnC+W{?4e>JO+k56hiX96N4?1#J23Y38S)bwDF3=%NX5n_gU zxfps)5N9LY2qDxr;QsNZBvwuLD%4Lg_!d0yl1=Ux_H9^o+Z>|6OTlWQVK~!W=Jaqp z|&h{nd2=~WMk2zJe!gt=Io=iRe?+U-a)!bPvudDnt`*7dIL8&S> z3_^T$d&>V$OwsHD&7oT+3UPce7`Em%6mX_BF`E$2>*|@E)b<~#%~`5=gS>s5(Z(%3 z9-3<73ektd4r}LVD2!hJ`ggughQd8Mgm^yf#?gwDi^P*5ccW*_8wb<;swXG-zFj z=GeVTyP=Sn{lBJM(7HQ(hkWiciUxxs4RE0+gpOOKw z>^~UUq-7rCp-|gz`1JR-@-g?j*yI?nf3ME42rhw)cs8(-K$Y|{MNj_@vJcl4f9T|2 zf={1SOl7~35;}hpQj=3D>);=d`}Xp9LD`B`irtiD)#`KngXZUT4zeSsD2Dd%##Fv{ zmX{m5ZVxGPM2qIfUZfSc-<0qdPRZI$ zMotw`4&xmoRgBvbw0MlA!|O%pTgtH=Kh zf$y%lgcQC`@8f(GCY=wS{JTYe>gC4Ee*I^$A2r6jkyd6E`0c@YpKP2zM4BIKc%E96 zQ*J{Klo|F{CmnziV8w91(Jc>=7ZrRI?1BL?y!)Hh$sfI7YMC?`&3;AXS)JDT=@U5Q zzOk5d=vpBhR`;>zSo=<3qF3Vj?gcZF`pfb3@+UkRvfwzusns+$&sy*LG%|%4{-)PR zdpL)_TZ6P=CN@sRTaMaU5`!DbL{MvX^(2`$T3>X8qy+7%*RJioR5BbdKArO_j}0 zI#81~QHZ(=K9mBcAai#+zGb4E>ylf zBr(iKsLkh9UeAx44G)g*(7xu~)8#PjhDP0lpQ*v-j2LLT91MsaM3#8NCeFX(9O+Q^ z*GSYcJ&{CS755aL0c&3UV8SfXa8pWDovwsuNRg7Jsj_51^c>zYhNcrUq=FLjAM)`m zX{Tgqt6m14lD|}T$$TLho;ZQPJOvmVQs+yL(m#*>xkUf3(L}dQf`M63hdBZ{qRmwvm0Cs_2bc0r$mp z#xJR9_j{C&{m0gaTIE|+-eMZdwVGZZkK{}@p04wmY+Mm)J2cYbZ7NAHuu1pkNO!y2 z47Z^D{o!9@u>OY)$VO=uBD(rirv5IW{JJcyVQI-n!_$j&hi(_+hCgBVP4A&S(j1Eq zjAX~#MdLdrEmKUq9pso@btUPPSPLnQB&q9a{q@kt{#!hP)|?^oT;FvEV}sCt{;^IZ zzC2qo`@d!!muIH_fDzbtMMyEkT>Np&KWx^Wf8Vz6|Jn1|1lR{c0_!qr9t)+M-GBNGxL5h_ z{(paWOjWb2-~bi_NFW^82nyTy@$IWHyXPMiuvq)=GtkzrK->2J``BMUC+_Lmnt7+6 z@BSla({s!m$sh&>Iagq!M+W_iP#ne3;C}H38?u4}i!*ANkp&(Eq6Q1Y2MtswF;s}4 zx}U)Y=v8DR=;Vwk_W#*$OxVAiz0dC<(0bJp*NBpo#FA929LeQ+{b) zN@iZVQ+@@5p&9U4U6TqUOCvoKBV%m?Z3C&?HMf9TL||INQj3Z+^Ya)2T- 0) - block_name = "generator_att_block" - block_name += str(block) if not self.share_block_weights else "" - - z_s = [] - for head in range(self.n_heads): - head_name = "%s-head%d" % (block_name, head) - z_s.append(self.attention_block(z, share_params, name=head_name)) - - # [B, K, Z_dim * n_heads] -> [B, K, Z_dim] - if self.n_heads > 1: - z = 
-          z = self.aggregate_heads(
-              z_s, share_params, block_name + "-aggregate_heads")
-        else:
-          z = z_s[0]
-
-    return z
-
-  def aggregate_images(self, generated):
-    """Returns the aggregated image from the generated images by each comp."""
-    # Estimate local k / c_dim to support background / alpha channel.
-    k = generated.get_shape().as_list()[1]
-    c_dim = 3 if generated.get_shape().as_list()[-1] >= 3 else 1
-
-    # Aggregate generator output.
-    if self.aggregate == "sum_clip":
-      aggregated = tf.reduce_sum(generated, axis=1)
-      aggregated = tf.clip_by_value(aggregated, 0.0, 1.0)
-    # Aggregate via alpha compositing, either using a generated alpha or an
-    # implicit one obtained via thresholding.
-    elif self.aggregate in ["alpha", "implicit_alpha"]:
-      def AOverB(color_a_, alpha_a_, color_b_, alpha_b_):
-        """Returns A OVER B operation using alpha compositing."""
-        alpha_o = alpha_a_ + alpha_b_ * (1. - alpha_a_)
-        color_o = (color_a_ * alpha_a_ + (
-            color_b_ * alpha_b_ * (1. - alpha_a_))) / alpha_o
-
-        return color_o, alpha_o
-
-      # Start with last map to match stacking order when generating background.
-      color_b = generated[:, -1, :, :, :c_dim]
-      if self.aggregate == "implicit_alpha":
-        alpha_shape = color_b.get_shape().as_list()[:-1] + [1]
-        alpha_b = tf.ones(alpha_shape, dtype=tf.float32)
-      else:
-        alpha_b = generated[:, -1, :, :, -1:]
-
-      for i in range(k-1, -1, -1):
-        color_a = generated[:, i, :, :, :c_dim]
-        if self.aggregate == "implicit_alpha":
-          epsilon = 0.1
-          mask = tf.reduce_max(color_a, axis=-1, keep_dims=True) > epsilon
-          alpha_a = tf.where(
-              mask, tf.ones_like(alpha_b), tf.zeros_like(alpha_b))
-        else:
-          alpha_a = generated[:, i, :, :, -1:]
-
-        # Perform A OVER B (non-premultiplied version)
-        color_b, alpha_b = AOverB(color_a, alpha_a, color_b, alpha_b)
-
-      aggregated = color_b
-    else:
-      raise ValueError("Unknown aggregation method: %s" % self.aggregate)
-
-    return aggregated
-
-  def generate_images(self, z, is_training, reuse):
-    """Returns K generated images (B, K, W, H, C) from z."""
-    # Update each latent representation based on others
-    z = self.update_z(z, reuse)
-
-    # Pass latent representation through generator.
-    out = []
-    for i in range(self.k):
-      use_copy = reuse or (i > 0)
-      out_k = super(MultiGAN, self).generator(z[:, i], is_training, use_copy)
-      out.append(tf.expand_dims(out_k, 1))
-
-    return tf.concat(out, axis=1, name="generator_predictions")
-
-  def generator(self, z, is_training, reuse=False):
-    # Hack to add alpha channel to generator output if aggregate = alpha
-    if self.aggregate == "alpha":
-      self.c_dim += 1
-
-    # Generate and aggregate images to obtain final image
-    generated = self.generate_images(z, is_training, reuse)
-    aggregated = self.aggregate_images(generated)
-
-    # Hack to reset alpha channel after use
-    if self.aggregate == "alpha":
-      self.c_dim -= 1
-
-    return aggregated
-
-  def z_generator(self, batch_size, z_dim):
-    z = []
-    for _ in range(self.k):
-      z_k = super(MultiGAN, self).z_generator(batch_size, z_dim)
-      z.append(z_k[:, None, :])
-
-    return np.concatenate(z, axis=1)  # (batch_size, k, z_dim)
-
-  def z_tf_generator(self, batch_size, z_dim, name=None):
-    z = []
-    for _ in range(self.k):
-      z_k = super(MultiGAN, self).z_tf_generator(batch_size, z_dim, name)
-      z.append(tf.expand_dims(z_k, 1))
-
-    return tf.concat(z, axis=1)
-
-
-def MultiGANHyperParams(range_type, gan_type, penalty_type):
-  """Return a default set of hyperparameters for MultiGAN."""
-  del gan_type
-
-  param_ranges = params.GetDefaultRange(range_type)
-  param_ranges.AddRange("penalty_type", consts.NO_PENALTY,
-                        [consts.NO_PENALTY, consts.WGANGP_PENALTY],
-                        is_log_scale=False, is_discrete=True)
-  if penalty_type and penalty_type == consts.L2_PENALTY:
-    param_ranges.AddRange("lambda", 0.01, [-4.0, 1.0],
-                          is_log_scale=True, is_discrete=False)
-  else:
-    param_ranges.AddRange("lambda", 10.0, [-1.0, 2.0],
-                          is_log_scale=True, is_discrete=False)
-  param_ranges.AddRange("beta2", 0.999, [0, 1],
-                        is_log_scale=False, is_discrete=False)
-
-  # GAN Penalty
-  param_ranges.UpdateDefaults(
-      {"beta1": 0.0, "learning_rate": 0.00005, "disc_iters": 1,
-       "discriminator_normalization": consts.BATCH_NORM})
-  param_ranges.AddRange("penalty_type", consts.NO_PENALTY,
-                        [consts.NO_PENALTY, consts.WGANGP_PENALTY],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("discriminator_normalization", consts.NO_NORMALIZATION,
-                        [consts.NO_NORMALIZATION, consts.BATCH_NORM,
-                         consts.SPECTRAL_NORM],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("z_dim", 64, [64],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("k", 3, [1, 2, 3, 4, 5],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("aggregate", "sum_clip", ["sum_clip"],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("n_heads", 1, [1, 2, 3],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("n_blocks", 1, [1, 2, 3],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("share_block_weights", False, [True, False],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("embedding_dim", 32, [32, 64, 128],
-                        is_log_scale=False, is_discrete=True)
-
-  return param_ranges.GetParams()
diff --git a/compare_gan/src/multi_gan/multi_gan_background.py b/compare_gan/src/multi_gan/multi_gan_background.py
deleted file mode 100644
index 2b2fd32..0000000
--- a/compare_gan/src/multi_gan/multi_gan_background.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# coding=utf-8
-# Copyright 2018 Google LLC & Hwalsuk Lee.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Multiple GANs that together model the data distribution, including BG."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from compare_gan.src import params
-from compare_gan.src.gans import consts
-from compare_gan.src.multi_gan.multi_gan import MultiGAN
-
-import numpy as np
-import tensorflow as tf
-
-
-class MultiGANBackground(MultiGAN):
-  """A GAN consisting of a background generator and multiple copies of
-  object generators."""
-
-  def __init__(self, dataset_content, parameters, runtime_info):
-    super(MultiGANBackground, self).__init__(
-        dataset_content=dataset_content,
-        parameters=parameters,
-        runtime_info=runtime_info,
-        model_name="MultiGANBackground")
-
-    self.background_interaction = parameters["background_interaction"]
-
-  def build_model(self, is_training=True):
-    image_dims = [self.input_height, self.input_width, self.c_dim]
-    batch_size = self.batch_size
-
-    # Input images.
-    self.inputs = tf.placeholder(
-        tf.float32, [batch_size] + image_dims, name="real_images")
-
-    # Noise vector.
-    self.z = tf.placeholder(
-        tf.float32, [batch_size, self.k + 1, self.z_dim], name="z")
-
-    # Discriminator output for real images.
-    d_real, d_real_logits, _ = self.discriminator(
-        self.inputs, is_training=is_training, reuse=False)
-
-    # Discriminator output for fake images.
-    generated = self.generator(self.z, is_training=is_training, reuse=False)
-    d_fake, d_fake_logits, _ = self.discriminator(
-        generated, is_training=is_training, reuse=True)
-
-    self.discriminator_output = self.discriminator(
-        self.inputs, is_training=is_training, reuse=True)[0]
-
-    # Define the loss functions (NS-GAN)
-    d_loss_real = tf.reduce_mean(
-        tf.nn.sigmoid_cross_entropy_with_logits(
-            logits=d_real_logits, labels=tf.ones_like(d_real)))
-    d_loss_fake = tf.reduce_mean(
-        tf.nn.sigmoid_cross_entropy_with_logits(
-            logits=d_fake_logits, labels=tf.zeros_like(d_fake)))
-    self.d_loss = d_loss_real + d_loss_fake
-    self.g_loss = tf.reduce_mean(
-        tf.nn.sigmoid_cross_entropy_with_logits(
-            logits=d_fake_logits, labels=tf.ones_like(d_fake)))
-
-    # Define the penalty.
-    if self.penalty_type == consts.NO_PENALTY:
-      self.penalty_loss = 0.0
-    elif self.penalty_type == consts.DRAGAN_PENALTY:
-      self.penalty_loss = self.dragan_penalty(self.inputs, self.discriminator,
-                                              is_training)
-      self.d_loss += self.lambd * self.penalty_loss
-    elif self.penalty_type == consts.WGANGP_PENALTY:
-      self.penalty_loss = self.wgangp_penalty(self.inputs, generated,
-                                              self.discriminator, is_training)
-      self.d_loss += self.lambd * self.penalty_loss
-    elif self.penalty_type == consts.L2_PENALTY:
-      self.penalty_loss = self.l2_penalty()
-      self.d_loss += self.lambd * self.penalty_loss
-    else:
-      raise NotImplementedError(
-          "The penalty %s was not implemented." % self.penalty_type)
-
-    # Divide trainable variables into a group for D and group for G.
-    t_vars = tf.trainable_variables()
-    d_vars = [var for var in t_vars if "discriminator" in var.name]
-    g_vars = [var for var in t_vars if "generator" in var.name]
-    self.check_variables(t_vars, d_vars, g_vars)
-
-    # Define optimization ops.
-    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
-      self.d_optim = self.get_optimizer("d_").minimize(
-          self.d_loss, var_list=d_vars)
-      self.g_optim = self.get_optimizer("g_").minimize(
-          self.g_loss, var_list=g_vars)
-
-    # Store testing images.
-    self.fake_images = self.generator(self.z, is_training=False, reuse=True)
-
-    # Setup summaries.
-    d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
-    d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
-    d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
-    g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
-
-    self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
-    self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])
-
-  def generate_images(self, z, is_training, reuse):
-    """Returns generated object / backgrounds images and z's."""
-    # Let z_b participate in relational part
-    if self.background_interaction:
-      z = self.update_z(z, reuse)
-
-      # Isolate z used for background ALWAYS IN LAST
-      z_s = tf.unstack(z, axis=1)
-      z_o = tf.stack(z_s[:-1], axis=1)
-      z_b = z_s[-1]
-
-      # Pass latent representation through generator (copy from MG).
-      out = []
-      for i in range(self.k):
-        use_copy = reuse or (i > 0)
-        out_k = super(MultiGAN, self).generator(  # pylint: disable=bad-super-call
-            z_o[:, i], is_training, use_copy)
-        out.append(tf.expand_dims(out_k, 1))
-
-      generated_o = tf.concat(out, axis=1, name="generator_predictions")
-    # Only let z_o participate in relational part
-    else:
-      # Isolate z used for background ALWAYS IN LAST
-      z_s = tf.unstack(z, axis=1)
-      z_o = tf.stack(z_s[:-1], axis=1)
-      z_b = z_s[-1]
-
-      # Generated object images and background image
-      generated_o = super(MultiGANBackground, self).generate_images(
-          z_o, is_training, reuse)
-
-    with tf.variable_scope("background_generator", reuse=reuse):
-      generated_b = super(MultiGAN, self).generator(z_b, is_training, reuse)  # pylint: disable=bad-super-call
-
-    return generated_o, generated_b, z_o, z_b
-
-  def generator(self, z, is_training, reuse=False):
-    # Hack to add alpha channel to generator output if aggregate = alpha
-    if self.aggregate == "alpha":
-      self.c_dim += 1
-
-    # # Generated object images and background image.
-    generated_o, generated_b, z_o, z_b = self.generate_images(
-        z, is_training, reuse)
-
-    # Hack to reset alpha channel after use
-    # Add opaque alpha channel in case of alpha compositing to background
-    if self.aggregate == "alpha":
-      self.c_dim -= 1
-      generated_b = tf.concat(
-          (generated_b[..., :-1], tf.ones_like(generated_b[..., -1:])), axis=-1)
-
-    # Aggregate and generated outputs (order matters for alpha / fixed_perm)
-    z = tf.concat([z_o, tf.expand_dims(z_b, axis=1)], axis=1)
-    generated = tf.concat(
-        [generated_o, tf.expand_dims(generated_b, axis=1)], axis=1)
-    aggregated = self.aggregate_images(generated)
-
-    return aggregated
-
-  def z_generator(self, batch_size, z_dim):
-    z_o = super(MultiGANBackground, self).z_generator(batch_size, z_dim)
-    z_b = np.random.uniform(-1, 1, size=(batch_size, 1, z_dim))
-
-    return np.concatenate([z_b, z_o], axis=1)  # (batch_size, k + 1, z_dim)
-
-  def z_tf_generator(self, batch_size, z_dim, name=None):
-    z_o = super(MultiGANBackground, self).z_tf_generator(
-        batch_size, z_dim, name)
-    z_b = tf.random_uniform(
-        (batch_size, 1, z_dim), minval=-1.0, maxval=1.0, name=name)
-
-    return tf.concat([z_b, z_o], axis=1)  # (batch_size, k + 1, z_dim)
-
-
-def MultiGANBackgroundHyperParams(range_type, gan_type, penalty_type):
-  """Return a default set of hyperparameters for MultiGANBackground."""
-  del gan_type
-
-  param_ranges = params.GetDefaultRange(range_type)
-  param_ranges.AddRange("penalty_type", consts.NO_PENALTY,
-                        [consts.NO_PENALTY, consts.WGANGP_PENALTY],
-                        is_log_scale=False, is_discrete=True)
-  if penalty_type and penalty_type == consts.L2_PENALTY:
-    param_ranges.AddRange("lambda", 0.01, [-4.0, 1.0],
-                          is_log_scale=True, is_discrete=False)
-  else:
-    param_ranges.AddRange("lambda", 10.0, [-1.0, 2.0],
-                          is_log_scale=True, is_discrete=False)
-  param_ranges.AddRange("beta2", 0.999, [0, 1],
-                        is_log_scale=False, is_discrete=False)
-
-  # MultiGAN
-  param_ranges.UpdateDefaults(
-      {"beta1": 0.0, "learning_rate": 0.00005, "disc_iters": 1,
-       "discriminator_normalization": consts.BATCH_NORM})
-  param_ranges.AddRange("penalty_type", consts.NO_PENALTY,
-                        [consts.NO_PENALTY, consts.WGANGP_PENALTY],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("discriminator_normalization", consts.NO_NORMALIZATION,
-                        [consts.NO_NORMALIZATION, consts.BATCH_NORM,
-                         consts.SPECTRAL_NORM],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("z_dim", 64, [64],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("k", 3, [1, 2, 3, 4, 5],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("aggregate", "sum_clip", ["sum", "sum_clip", "mean"],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("n_heads", 1, [1, 2, 3],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("n_blocks", 1, [1, 2, 3],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("share_block_weights", False, [True, False],
-                        is_log_scale=False, is_discrete=True)
-  param_ranges.AddRange("embedding_dim", 32, [32, 64, 128],
-                        is_log_scale=False, is_discrete=True)
-
-  # MultiGANBackground
-  param_ranges.AddRange("background_interaction", False, [True, False],
-                        is_log_scale=False, is_discrete=True)
-
-  return param_ranges.GetParams()
diff --git a/compare_gan/src/multi_gan/run_one_task.py b/compare_gan/src/multi_gan/run_one_task.py
deleted file mode 100644
index 9ca9c20..0000000
--- a/compare_gan/src/multi_gan/run_one_task.py
+++ /dev/null
@@ -1,345 +0,0 @@
-# coding=utf-8
-# Copyright 2018 Google LLC & Hwalsuk Lee.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Run one task."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import json
-
-from compare_gan.src import eval_gan_lib
-from compare_gan.src import gan_lib
-from compare_gan.src import params
-from compare_gan.src import task_utils
-from compare_gan.src.gans import consts
-from compare_gan.src.multi_gan import dataset
-from compare_gan.src.multi_gan import multi_gan
-from compare_gan.src.multi_gan import multi_gan_background
-
-import tensorflow as tf
-
-FLAGS = tf.flags.FLAGS
-
-
-def GetGANGridSearch(dataset_name, training_steps, num_seeds):
-  """Standard GAN grid search used in the paper."""
-
-  config = {  # 48 x num_seeds workers
-      "gan_type": [consts.GAN_WITH_PENALTY, consts.WGAN_WITH_PENALTY],
-      "penalty_type": [consts.NO_PENALTY, consts.WGANGP_PENALTY],
-      "discriminator_normalization": [
-          consts.NO_NORMALIZATION, consts.SPECTRAL_NORM],
-      "architecture": consts.DCGAN_ARCH,
-      "dataset": dataset_name,
-      "tf_seed": list(range(num_seeds)),
-      "training_steps": training_steps,
-      "save_checkpoint_steps": 20000,
-      "batch_size": 64,
-      "optimizer": "adam",
-      "z_dim": 64,
-
-      "__initial_trials": json.dumps(task_utils.CrossProduct({
-          "learning_rate": 0.0001,
-          "lambda": [1, 10],
-          ("beta1", "beta2", "disc_iters"): [
-              (0.5, 0.9, 5), (0.5, 0.999, 5), (0.9, 0.999, 5)],
-      })),
-  }
-
-  return config
-
-
-def GetMultiGANGridSearch(dataset_name, training_steps, num_seeds, aggregate):
-  """Standard MultiGAN grid search used in the paper."""
-
-  config = {  # 42 x num_seeds workers
-      "gan_type": "MultiGAN",
-      "penalty_type": consts.WGANGP_PENALTY,
-      "discriminator_normalization": [
-          consts.NO_NORMALIZATION, consts.SPECTRAL_NORM],
-      "architecture": consts.DCGAN_ARCH,
-      "dataset": dataset_name,
-      "tf_seed": list(range(num_seeds)),
-      "training_steps": training_steps,
-      "save_checkpoint_steps": 20000,
-      "batch_size": 64,
-      "optimizer": "adam",
-
-      # Model params.
- "aggregate": aggregate, - "__initial_trials": json.dumps(task_utils.CrossProduct({ - ("n_blocks", "share_block_weights", "n_heads", "k"): [ - (0, False, 0, 3), (0, False, 0, 4), # M-GAN [3, 4] - (0, False, 0, 5), # M-GAN 5 - (1, False, 1, 3), (1, False, 2, 3), # RM-GAN 3 - (2, False, 1, 3), (2, True, 1, 3), # RM-GAN 3 - (2, False, 2, 3), (2, True, 2, 3), # RM-GAN 3 - (1, False, 1, 4), (1, False, 2, 4), # RM-GAN 4 - (2, False, 1, 4), (2, True, 1, 4), # RM-GAN 4 - (2, False, 2, 4), (2, True, 2, 4), # RM-GAN 4 - (1, False, 1, 5), (1, False, 2, 5), # RM-GAN 5 - (2, False, 1, 5), (2, True, 1, 5), # RM-GAN 5 - (2, False, 2, 5), (2, True, 2, 5), # RM-GAN 5 - ], - ("z_dim", "embedding_dim"): [(64, 32)], - "learning_rate": 0.0001, - "lambda": 1, - ("beta1", "beta2", "disc_iters"): [(0.9, 0.999, 5)], - })) - } - - return config - - -def GetMultiGANBackgroundGridSearch(dataset_name, training_steps, num_seeds, - aggregate): - """Standard MultiGAN grid search used in the paper.""" - - config = GetMultiGANGridSearch( # 84 x num_seeds workers - dataset_name, training_steps, num_seeds, aggregate) - config["gan_type"] = "MultiGANBackground" - config["background_interaction"] = [True, False] - - return config - - -def GetMultiGANGridSearchKPlusOne(dataset_name, training_steps, num_seeds, - aggregate): - """Standard MultiGAN grid search with k+1 used to compare against MBG.""" - - config = { # 42 x num_seeds workers - "gan_type": "MultiGAN", - "penalty_type": consts.WGANGP_PENALTY, - "discriminator_normalization": [ - consts.NO_NORMALIZATION, consts.SPECTRAL_NORM], - "architecture": consts.DCGAN_ARCH, - "dataset": dataset_name, - "tf_seed": list(range(num_seeds)), - "training_steps": training_steps, - "save_checkpoint_steps": 20000, - "batch_size": 64, - "optimizer": "adam", - - # Model params. - "aggregate": aggregate, - "__initial_trials": json.dumps(task_utils.CrossProduct({ - ("n_blocks", "share_block_weights", "n_heads", "k"): [ - (0, False, 0, 4), (0, False, 0, 5), # M-GAN [4, 5] - (0, False, 0, 6), # M-GAN 6 - (1, False, 1, 4), (1, False, 2, 4), # RM-GAN 4 - (2, False, 1, 4), (2, True, 1, 4), # RM-GAN 4 - (2, False, 2, 4), (2, True, 2, 4), # RM-GAN 4 - (1, False, 1, 5), (1, False, 2, 5), # RM-GAN 5 - (2, False, 1, 5), (2, True, 1, 5), # RM-GAN 5 - (2, False, 2, 5), (2, True, 2, 5), # RM-GAN 5 - (1, False, 1, 6), (1, False, 2, 6), # RM-GAN 6 - (2, False, 1, 6), (2, True, 1, 6), # RM-GAN 6 - (2, False, 2, 6), (2, True, 2, 6), # RM-GAN 6 - ], - ("z_dim", "embedding_dim"): [(64, 32)], - "learning_rate": 0.0001, - "lambda": 1, - ("beta1", "beta2", "disc_iters"): [(0.9, 0.999, 5)], - })) - } - - return config - - -def GetMultiGANBackgroundRandomSearch(dataset_name, training_steps, aggregate, - num_tasks): - """MultiGANBackground random search used in the paper.""" - - config = { - "gan_type": "MultiGANBackground", - "architecture": consts.DCGAN_ARCH, - "dataset": dataset_name, - "tf_seed": 1, - "training_steps": training_steps, - "save_checkpoint_steps": 20000, - "batch_size": 64, - "optimizer": "adam", - - # Model params. - "aggregate": aggregate, - "n_blocks": 2, - "share_block_weights": False, - "n_heads": 2, - "k": 5, - "z_dim": 64, - "embedding_dim": 32, - "background_interaction": False, - "__num_tasks": num_tasks - } - - return config - - -def GetMetaTasks(experiment_name): - """Returns meta options to be used for study generation. - - Args: - experiment_name: name of an experiment - - Raises: - ValueError: When experiment is not found. 
- """ - - if experiment_name == "multi_gan-debug": - meta_config = { - "gan_type": ["MultiGAN",], - "penalty_type": [consts.WGANGP_PENALTY], - "discriminator_normalization": [consts.SPECTRAL_NORM], - "architecture": consts.DCGAN_ARCH, - "dataset": ["multi-mnist-3-uniform"], - "sampler": ["rs",], - "tf_seed": [0], - "training_steps": [1000], - "save_checkpoint_steps": [100], - "batch_size": [64], - "optimizer": ["adam"], - "learning_rate": [0.0001], - "lambda": [10], - "beta1": [0.5], - "beta2": [0.9], - "disc_iters": [5], - - # Model params. - "k": [3], - "aggregate": ["sum_clip"], - "n_heads": 4, - "n_blocks": 2, - "share_block_weights": True, - "embedding_dim": 32, - } - - ####################### - ## PAPER EXPERIMENTS ## - ####################### - - # MULTI-MNIST - - # 240 - elif experiment_name == "gan-base-experiment-paper": - meta_config = GetGANGridSearch( - dataset_name="multi-mnist-3-uniform", - training_steps=1000000, num_seeds=5) - - # 210 - elif experiment_name == "multi_gan-base-experiment-paper": - meta_config = GetMultiGANGridSearch( - dataset_name="multi-mnist-3-uniform", - training_steps=1000000, num_seeds=5, aggregate="sum_clip") - - # 480 - elif experiment_name == "gan-relational-experiment-paper": - meta_config = GetGANGridSearch( - dataset_name=[ - "multi-mnist-3-triplet", "multi-mnist-3-uniform-rgb-occluded"], - training_steps=1000000, num_seeds=5) - - # 210 - elif experiment_name == "multi_gan-relational-experiment-triplet-paper": - meta_config = GetMultiGANGridSearch( - dataset_name="multi-mnist-3-triplet", - training_steps=1000000, num_seeds=5, aggregate="sum_clip") - - # 210 - elif experiment_name == "multi_gan-relational-experiment-rgb-occluded-paper": - meta_config = GetMultiGANGridSearch( - dataset_name="multi-mnist-3-uniform-rgb-occluded", - training_steps=1000000, num_seeds=5, aggregate="implicit_alpha") - - # CIFAR 10 - - # 240 - elif experiment_name == "gan-background-experiment-paper": - meta_config = GetGANGridSearch( - dataset_name="multi-mnist-3-uniform-rgb-occluded-cifar10", - training_steps=1000000, num_seeds=5) - - # 210 - elif experiment_name == "multi_gan-background-experiment-paper": - meta_config = GetMultiGANGridSearchKPlusOne( - dataset_name="multi-mnist-3-uniform-rgb-occluded-cifar10", - training_steps=1000000, num_seeds=5, aggregate="alpha") - - # 420 - elif experiment_name == "multi_gan_bg-background-experiment-paper": - meta_config = GetMultiGANBackgroundGridSearch( - dataset_name="multi-mnist-3-uniform-rgb-occluded-cifar10", - training_steps=1000000, num_seeds=5, aggregate="alpha") - - # CLEVR - - # 240 - elif experiment_name == "gan-clevr-experiment-paper": - meta_config = GetGANGridSearch( - dataset_name="clevr", training_steps=1000000, num_seeds=5) - - # 210 - elif experiment_name == "multi_gan-clevr-experiment-paper": - meta_config = GetMultiGANGridSearchKPlusOne( - dataset_name="clevr", training_steps=1000000, num_seeds=5, - aggregate="alpha") - - # 420 - elif experiment_name == "multi_gan_bg-clevr-experiment-paper": - meta_config = GetMultiGANBackgroundGridSearch( - dataset_name="clevr", training_steps=1000000, num_seeds=5, - aggregate="alpha") - - # 200 - elif experiment_name == "multi_gan_bg-clevr-rs200k-paper": - meta_config = GetMultiGANBackgroundRandomSearch( - dataset_name="clevr", training_steps=200000, aggregate="alpha", - num_tasks=200) - - else: - raise ValueError("Unknown study-based experiment %s." 
% experiment_name) - - options = task_utils.CrossProduct(meta_config) - return options - - -def AddGansAndDatasets(): - """Injects MultiGAN models, parameters and datasets. - - This code injects the GAN model and its default parameters to the framework. - Must be run just after the main. - """ - gan_lib.MODELS.update({ - "MultiGAN": multi_gan.MultiGAN, - "MultiGANBackground": multi_gan_background.MultiGANBackground - }) - params.PARAMETERS.update({ - "MultiGAN": multi_gan.MultiGANHyperParams, - "MultiGANBackground": multi_gan_background.MultiGANBackgroundHyperParams - }) - eval_gan_lib.SUPPORTED_GANS.extend(["MultiGAN", "MultiGANBackground"]) - eval_gan_lib.DEFAULT_VALUES.update({ - "k": -1, - "aggregate": "none", - "embedding_dim": -1, - "n_blocks": -1, - "share_block_weights": False, - "n_heads": -1, - "background_interaction": False, - }) - - gan_lib.DATASETS.update(dataset.get_datasets()) - params.DATASET_PARAMS.update(dataset.get_dataset_params()) diff --git a/compare_gan/src/multi_gan/visualize_data.py b/compare_gan/src/multi_gan/visualize_data.py deleted file mode 100644 index 8870551..0000000 --- a/compare_gan/src/multi_gan/visualize_data.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
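The AddGansAndDatasets hook above wires the MultiGAN models into the framework by mutating module-level registries before any lookup happens. A minimal, self-contained sketch of that registry pattern (the dict names mirror gan_lib.MODELS and params.PARAMETERS from the deleted code, but the placeholder class and the register_models helper here are purely illustrative):

# Sketch of startup-time registry injection, as AddGansAndDatasets does.
# MODELS/PARAMETERS stand in for gan_lib.MODELS and params.PARAMETERS.
MODELS = {}
PARAMETERS = {}


class MultiGAN(object):
  """Placeholder model class for this sketch."""


def register_models():
  # Must run before any model lookup, i.e. right after main() starts.
  MODELS.update({"MultiGAN": MultiGAN})
  PARAMETERS.update({"MultiGAN": {"k": 3, "aggregate": "sum_clip"}})


register_models()
assert MODELS["MultiGAN"] is MultiGAN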
- -"""Simple script to visualize samples of a custom dataset in datasets.py.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -from compare_gan.src.multi_gan import dataset -import numpy as np -from scipy.misc import imsave -import tensorflow as tf - -flags = tf.flags - -flags.DEFINE_string("dataset", "multi-mnist-3-uniform", "Name of the dataset.") -flags.DEFINE_string("dataset_split", "test", "Name of the split.") -flags.DEFINE_string("save_dir", "/tmp/vis", "Directory of where to save ims.") -flags.DEFINE_boolean("grid", False, "Whether to aggregate the images in a grid " - "(8 x 8) or save them in individual figs.") - -FLAGS = flags.FLAGS - - -def save_image(im, name): - im = im[:, :, 0] if im.shape[2] == 1 else im - imsave(os.path.join(FLAGS.save_dir, name), im) - - -def main(unused_argv): - if not tf.gfile.IsDirectory(FLAGS.save_dir): - tf.gfile.MakeDirs(FLAGS.save_dir) - - with tf.Graph().as_default(): - datasets = dataset.get_datasets() - dataset_content = datasets[FLAGS.dataset]( - FLAGS.dataset, FLAGS.dataset_split, 4, 128 * 1024) - batched = dataset_content.batch(64) - batch_op = batched.make_one_shot_iterator().get_next() - - with tf.Session() as session: - data = session.run(batch_op)[0] - - if not FLAGS.grid: - for i in range(data.shape[0]): - save_image(data[i], FLAGS.dataset + "_%d.png" % i) - else: - grid_im = [] - for i in range(8): - im_row = [] - for j in range(8): - im_row.append(data[i * 8 + j]) - grid_im.append(np.concatenate(im_row, axis=1)) - grid_im = np.concatenate(grid_im, axis=0) - save_image(grid_im, FLAGS.dataset + "_grid.png") - -if __name__ == "__main__": - tf.app.run(main) diff --git a/compare_gan/src/multi_gan/visualize_gan.py b/compare_gan/src/multi_gan/visualize_gan.py deleted file mode 100644 index e5e4ad5..0000000 --- a/compare_gan/src/multi_gan/visualize_gan.py +++ /dev/null @@ -1,439 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Evaluation for GAN tasks.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -from compare_gan.src import gan_lib -from compare_gan.src import params -from compare_gan.src import simple_task_pb2 -from compare_gan.src import task_utils -from compare_gan.src.gans import consts -from compare_gan.src.multi_gan import dataset -from compare_gan.src.multi_gan import multi_gan -from compare_gan.src.multi_gan import multi_gan_background - -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf - -from google.protobuf import text_format - -flags = tf.flags - -flags.DEFINE_string("out_dir", None, "Where to save images.") -flags.DEFINE_string("eval_task_workdir", None, "Workdir to evaluate") - -flags.DEFINE_string("checkpoint", "all", - "Which checkpoint(s) to evaluate for a given study/task." 
- "Supports {'all', }.") -flags.DEFINE_enum("visualization_type", "multi_image", [ - "image", "multi_image", "latent", "multi_latent"], - "How to visualize this GAN.") -flags.DEFINE_integer("batch_size", 64, "Size of the batch.") -flags.DEFINE_integer("images_per_fig", 4, "How many images to stack in a fig.") -FLAGS = flags.FLAGS - - -def GetBackgroundImageTensorName(architecture): - """Returns the name of the tensor that is the generated background.""" - - tensor_prefix = "background_generator/generator" - if architecture == consts.DCGAN_ARCH: - tensor_suffix = "add:0" - elif architecture == consts.RESNET5_ARCH: - tensor_suffix = "Sigmoid:0" - else: - raise ValueError("Unknown architecture: %s" % architecture) - - return "%s/%s" % (tensor_prefix, tensor_suffix) - - -def PlotImage(ax, im, title=None, x_label=None, y_label=None, vmin=0.0, - vmax=1.0, cmap="gray"): - """Helper function that wraps matplotlib.axes.Axes.imshow. - - Args: - ax: An instance of matplotlib.axes.Axes - im: The image to plot (W, H, C) - title: The title of the image - x_label: Optional x label - y_label: Optional y label - vmin: The minimum value that a pixel can take. - vmax: The maximum value that a pixel can take. - cmap: The color map to use (whenever C != 3) - - """ - - im = im[..., 0] if im.shape[2] == 1 else im - ax.imshow(im, vmin=vmin, vmax=vmax, cmap=cmap) - - # Set xlabel - if x_label: ax.set_xlabel(x_label, fontsize=16) - if y_label: ax.set_ylabel(y_label, rotation=90, fontsize=16) - - # Remove axis. - ax.set_xticks([]) - ax.set_yticks([]) - ax.set_xticklabels([]) - ax.set_yticklabels([]) - - # Set title. - if title is not None: ax.set_title(title) - - -def SaveGanLatentTraversalImages(aggregated_images, save_dir): - """Visualizes latent traversals. - - Args: - aggregated_images: The aggregated image (B, T, W, H, C). - save_dir: The path to the directory in which the figure should be saved. - """ - - def LatentTraversalFig(aggregated_images_): - """Returns a figure that visualizes a sequence of latent traversals.""" - n_images, n_steps, _, _, _ = aggregated_images_.shape - - figsize = (n_steps * 5, n_images * 5) - fig, ax = plt.subplots(n_images, n_steps, figsize=figsize) - - for i in range(n_images): - for t in range(n_steps): - x_label = "step %d" % t if i == n_images - 1 else None - y_label = "Generated %d" % i if t == 0 else None - ax_ = ax[i, t] if n_images > 1 else ax[t] - PlotImage(ax_, aggregated_images_[i, t], - x_label=x_label, y_label=y_label) - - return fig - - images_per_fig = FLAGS.images_per_fig - for block in range(0, aggregated_images.shape[0], images_per_fig): - figure = LatentTraversalFig( - aggregated_images[block:block + images_per_fig]) - plt.subplots_adjust(wspace=0.1, hspace=0.04) - plt.savefig(os.path.join( - save_dir, "single_latent_interpolation_%d-%d.pdf" % ( - block, min(block + images_per_fig, aggregated_images.shape[0]))), - bbox_inches="tight") - plt.close(figure) - - -def SaveMultiGanLatentTraversalImages(aggregated_images, generated_images, - save_dir): - """Visualizes latent traversals factored across generators. - - Args: - aggregated_images: The aggregated image (B, T, W, H, C). - generated_images: The output of each generator (B, T, K, W, H, C) - save_dir: The path to the directory in which the figure should be saved. 
- """ - - def LatentTraversalFig(aggregated_images_, generated_images_): - """Returns a figure that visualizes a sequence of latent traversals.""" - n_images, n_steps, k, _, _, _ = generated_images_.shape - - figsize = (n_steps * 5, (k + 1) * n_images * 5) - fig, ax = plt.subplots((k + 1)* n_images, n_steps, figsize=figsize) - - for i in range(0, (k + 1) * n_images, (k + 1)): - for t in range(n_steps): - PlotImage(ax[i, t], aggregated_images_[i // (k + 1), t]) - - for j in range(k): - PlotImage(ax[i + j + 1, t], generated_images_[i // (k + 1), t, j]) - - return fig - - images_per_fig = FLAGS.images_per_fig - for block in range(0, aggregated_images.shape[0], images_per_fig): - figure = LatentTraversalFig( - aggregated_images[block:block + images_per_fig], - generated_images[block:block + images_per_fig]) - plt.savefig(os.path.join( - save_dir, "multi_latent_interpolation_%d-%d.pdf" % ( - block, min(block + images_per_fig, aggregated_images.shape[0])))) - plt.close(figure) - - -def SaveMultiGanGeneratorImages(aggregated_images, generated_images, save_dir): - """Visualizes the output of each generator together with its aggregate. - - Args: - aggregated_images: The aggregated image (B, W, H, C). - generated_images: The output of each generator (B, K, W, H, C) - save_dir: The path to the directory in which the figure should be saved. - """ - - images_per_fig = FLAGS.images_per_fig - for block in range(0, aggregated_images.shape[0], images_per_fig): - b_aggregated_images = aggregated_images[block:block + images_per_fig] - b_generated_images = generated_images[block:block + images_per_fig] - n_images, k, _, _, _ = b_generated_images.shape - - figsize = ((k + 1) * 5, n_images * 5) - fig, ax = plt.subplots(n_images, (k + 1), figsize=figsize) - - for i in range(n_images): - for j in range(k): - ax_ = ax[i, j] if n_images > 1 else ax[j] - PlotImage( - ax_, b_generated_images[i, k - j - 1], x_label="Generator %d" % j) - - ax_ = ax[i, -1] if n_images > 1 else ax[-1] - PlotImage(ax_, b_aggregated_images[i], x_label="Composed Image") - - plt.subplots_adjust(wspace=0.1, hspace=0.04) - plt.savefig(os.path.join(save_dir, "generator_images_%d-%d.pdf" % ( - block, min(block + images_per_fig, aggregated_images.shape[0]))), - bbox_inches="tight") - plt.close(fig) - - -def SaveGeneratorImages(aggregated_images, save_dir): - """Visualizes the aggregated output of all generators. - - Args: - aggregated_images: The aggregated image (B, W, H, C). - save_dir: The path to the directory in which the figure should be saved. - """ - - n_images, _, _, _ = aggregated_images.shape - - for i in range(n_images): - fig, ax = plt.subplots(figsize=(5, 5)) - - PlotImage(ax, aggregated_images[i]) - plt.savefig(os.path.join(save_dir, "generator_images_%d.png" % i), - bbox_inches="tight") - plt.close(fig) - - -def GetMultiGANGeneratorsOp(graph, gan_type, architecture, aggregate): - """Returns the op to obtain the output of all generators.""" - - # Generator ops for normal Multi-GAN. - generator_preds_op = graph.get_tensor_by_name("generator_predictions:0") - - # Add background generator - if gan_type == "MultiGANBackground": - background_image_name = GetBackgroundImageTensorName(architecture) - generator_preds_bg_op = graph.get_tensor_by_name(background_image_name) - - # Set background alpha to 1. 
as is done pre-aggregation during training - if aggregate == "alpha": - generator_preds_bg_op = tf.concat( - (generator_preds_bg_op[..., :-1], tf.ones_like( - generator_preds_bg_op[..., -1:])), axis=-1) - - generator_preds_op = tf.concat( - [generator_preds_op, tf.expand_dims( - generator_preds_bg_op, axis=1)], axis=1) - - return generator_preds_op - - -def EvalCheckpoint(checkpoint_path, task_workdir, options, out_cp_dir): - """Evaluate model at given checkpoint_path.""" - - # Overwrite batch size - options["batch_size"] = FLAGS.batch_size - - checkpoint_dir = os.path.join(task_workdir, "checkpoint") - result_dir = os.path.join(task_workdir, "result") - gan_log_dir = os.path.join(task_workdir, "logs") - - dataset_params = params.GetDatasetParameters(options["dataset"]) - dataset_params.update(options) - - # generate fake images - with tf.Graph().as_default() as g: - with tf.Session() as sess: - gan = gan_lib.create_gan( - gan_type=options["gan_type"], - dataset=options["dataset"], - dataset_content=None, - options=options, - checkpoint_dir=checkpoint_dir, - result_dir=result_dir, - gan_log_dir=gan_log_dir) - - gan.build_model(is_training=False) - - tf.global_variables_initializer().run() - saver = tf.train.Saver() - saver.restore(sess, checkpoint_path) - - # Compute outputs for MultiGanGeneratorImages. - if (FLAGS.visualization_type == "multi_image" and - "MultiGAN" in options["gan_type"]): - generator_preds_op = GetMultiGANGeneratorsOp( - g, options["gan_type"], options["architecture"], - options["aggregate"]) - - fetches = [gan.fake_images, generator_preds_op] - - # Construct feed dict - z_sample = gan.z_generator(gan.batch_size, gan.z_dim) - feed_dict = {gan.z: z_sample} - - # Fetch data and save images. - fake_images, generator_preds = sess.run(fetches, feed_dict=feed_dict) - SaveMultiGanGeneratorImages(fake_images, generator_preds, out_cp_dir) - - # Compute outputs for GeneratorImages. - elif FLAGS.visualization_type == "image": - # Construct feed dict - z_sample = gan.z_generator(gan.batch_size, gan.z_dim) - feed_dict = {gan.z: z_sample} - - # Fetch data and save images. - fake_images = sess.run(gan.fake_images, feed_dict=feed_dict) - SaveGeneratorImages(fake_images, out_cp_dir) - - # Compute outputs for MultiGanLatentTraversalImages - elif (FLAGS.visualization_type == "multi_latent" and - "MultiGAN" in options["gan_type"]): - generator_preds_op = GetMultiGANGeneratorsOp( - g, options["gan_type"], options["architecture"], - options["aggregate"]) - - fetches = [gan.fake_images, generator_preds_op] - - # Init latent params - z_sample = gan.z_generator(gan.batch_size, gan.z_dim) - directions = np.random.uniform(size=z_sample.shape) - k_indices = np.random.randint(gan.k, size=gan.batch_size) - n_steps, step_size = 10, 0.1 - images, gen_preds = [], [] - - # Traverse in latent space of a single component n_steps times and - # generate the corresponding images. - for step in range(n_steps + 1): - new_z = z_sample.copy() - for i in range(z_sample.shape[0]): - new_z[i, k_indices[i]] += ( - step * step_size * directions[i, k_indices[i]]) - - images_batch, gen_preds_batch = sess.run(fetches, {gan.z: new_z}) - images.append(images_batch) - gen_preds.append(gen_preds_batch) - - images = np.stack(images, axis=1) - gen_preds = np.stack(gen_preds, axis=1) - SaveMultiGanLatentTraversalImages(images, gen_preds, out_cp_dir) - - # Compute outputs for GanLatentTraversalImages - elif FLAGS.visualization_type == "latent": - # Init latent params. 
- z_sample = gan.z_generator(gan.batch_size, gan.z_dim) - directions = np.random.uniform(size=z_sample.shape) - k_indices = np.random.randint(options.get("k", 1), size=gan.batch_size) - n_steps, step_size = 5, 0.1 - images = [] - - # Traverse in latent space of a single component n_steps times and - # generate the corresponding images. - for step in range(n_steps + 1): - new_z = z_sample.copy() - for i in range(z_sample.shape[0]): - if "MultiGAN" in options["gan_type"]: - new_z[i, k_indices[i]] += ( - step * step_size * directions[i, k_indices[i]]) - else: - new_z[i] += step * step_size * directions[i] - - images_batch = sess.run(gan.fake_images, {gan.z: new_z}) - images.append(images_batch) - - images = np.stack(images, axis=1) - SaveGanLatentTraversalImages(images, out_cp_dir) - - -def GetModelDir(options): - """Returns the model directory for a given model.""" - - model_dir = "" - if options["gan_type"] in ["MultiGAN", "MultiGANBackground"]: - if options["n_blocks"] == 0 and options["n_heads"] == 0: - model_dir = "Independent" - model_dir += "%s-%d" % (options["gan_type"], options["k"]) - else: - model_dir = "GAN" - - return model_dir - - -def EvalTask(options, task_workdir, out_dir): - """Evaluates all checkpoints for the given task.""" - - # Compute all records not done yet. - checkpoint_dir = os.path.join(task_workdir, "checkpoint") - # Fetch checkpoint to eval. - checkpoint_state = tf.train.get_checkpoint_state(checkpoint_dir) - - if FLAGS.checkpoint == "all": - all_checkpoint_paths = checkpoint_state.all_model_checkpoint_paths - else: - all_checkpoint_paths = ["%s/checkpoint/%s.model-%s" % ( - FLAGS.eval_task_workdir, options["gan_type"], FLAGS.checkpoint)] - - for checkpoint_path in all_checkpoint_paths: - out_cp_dir = os.path.join( - out_dir, "checkpoint-%s" % checkpoint_path.split("-")[-1]) - if not tf.gfile.IsDirectory(out_cp_dir): - tf.gfile.MakeDirs(out_cp_dir) - EvalCheckpoint(checkpoint_path, task_workdir, options, out_cp_dir) - - -def main(unused_argv): - gan_lib.MODELS.update({ - "MultiGAN": multi_gan.MultiGAN, - "MultiGANBackground": multi_gan_background.MultiGANBackground - }) - params.PARAMETERS.update({ - "MultiGAN": multi_gan.MultiGANHyperParams, - "MultiGANBackground": multi_gan_background.MultiGANBackgroundHyperParams - }) - - gan_lib.DATASETS.update(dataset.get_datasets()) - params.DATASET_PARAMS.update(dataset.get_dataset_params()) - - task_workdir = FLAGS.eval_task_workdir - - task = simple_task_pb2.Task() - with open(os.path.join(task_workdir, "task"), "r") as f: - text_format.Parse(f.read(), task) - - options = task_utils.ParseOptions(task) - - out_dir = os.path.join(FLAGS.out_dir, GetModelDir(options)) - if not tf.gfile.IsDirectory(out_dir): - tf.gfile.MakeDirs(out_dir) - - task_string = text_format.MessageToString(task) - print("\nWill evaluate task\n%s\n\n" % task_string) - - EvalTask(options, task_workdir, out_dir) - -if __name__ == "__main__": - flags.mark_flag_as_required("eval_task_workdir") - flags.mark_flag_as_required("out_dir") - tf.app.run() diff --git a/compare_gan/src/params.py b/compare_gan/src/params.py deleted file mode 100644 index e85bbcf..0000000 --- a/compare_gan/src/params.py +++ /dev/null @@ -1,413 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Hyperparameter ranges for various GANs. - -We define the default GAN parameters with respect to the datasets and the -training hyperparameters. The hyperparameters used by the respective authors -are also added to the set. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections - -from compare_gan.src.gans import consts - - -ParamInfo = collections.namedtuple( - "ParamInfo", ["default", "range", "is_log_scale", "is_discrete"]) - -NARROW_RANGE = "narrow" -WIDE_RANGE = "wide" - - -class ParamRanges(object): - """Class for holding parameter ranges.""" - - def __init__(self): - super(ParamRanges, self).__init__() - self._params = {} - - def AddRange(self, param_name, default, value_range, - is_log_scale, is_discrete): - self._params[param_name] = ParamInfo( - default, value_range, is_log_scale, is_discrete) - - def _UpdateDefault(self, param_name, default_value): - old_param_info = self._params[param_name] - new_param_info = ParamInfo( - default_value, old_param_info.range, - old_param_info.is_log_scale, old_param_info.is_discrete) - self._params[param_name] = new_param_info - - def UpdateDefaults(self, params_to_update): - for k, v in params_to_update.items(): - self._UpdateDefault(k, v) - - def GetParams(self): - return self._params - - -def GetDefaultWideRange(): - """Get default ranges for tuning (wide, before pruning).""" - param_ranges = ParamRanges() - param_ranges.AddRange("beta1", 0.5, [0.0, 1.0], - is_log_scale=False, is_discrete=False) - param_ranges.AddRange("learning_rate", 0.0001, [-5.0, -2.0], - is_log_scale=True, is_discrete=False) - param_ranges.AddRange("disc_iters", 1, [1, 5], - is_log_scale=False, is_discrete=True) - param_ranges.AddRange("discriminator_normalization", consts.BATCH_NORM, - consts.NORMALIZERS, - is_log_scale=False, is_discrete=True) - return param_ranges - - -def GetDefaultNarrowRange(): - """Get default ranges for tuning (narrow, after pruning).""" - param_ranges = ParamRanges() - param_ranges.AddRange("beta1", 0.5, [0.5], - is_log_scale=False, is_discrete=True) - param_ranges.AddRange("learning_rate", 0.0001, [-4.0, -3.0], - is_log_scale=True, is_discrete=False) - param_ranges.AddRange("disc_iters", 1, [1], - is_log_scale=False, is_discrete=True) - # NOTE: batchnorm really depends on the model, so we will specify this - # range in each class separately. - return param_ranges - - -def GetDefaultRange(range_type): - if range_type == WIDE_RANGE: - return GetDefaultWideRange() - elif range_type == NARROW_RANGE: - return GetDefaultNarrowRange() - else: - assert False, "Unsupported range: %s" % range_type - - -def GANHyperParams(range_type): - """Returns GAN hyperparameters. - - Args: - range_type: which param defaults to use. 
- """ - param_ranges = GetDefaultRange(range_type) - if range_type == NARROW_RANGE: - param_ranges.AddRange("discriminator_normalization", - consts.BATCH_NORM, [consts.BATCH_NORM, - consts.NO_NORMALIZATION], - is_log_scale=False, is_discrete=True) - param_ranges.UpdateDefaults( - {"beta1": 0.0, "learning_rate": 0.00005, "disc_iters": 1, - "discriminator_normalization": consts.BATCH_NORM}) - return param_ranges.GetParams() - - -def LSGANHyperParams(range_type): - """Returns LSGAN hyperparameters. - - Args: - range_type: which param defaults to use. - """ - param_ranges = GetDefaultRange(range_type) - if range_type == NARROW_RANGE: - param_ranges.AddRange("discriminator_normalization", - consts.BATCH_NORM, [consts.BATCH_NORM, - consts.NO_NORMALIZATION], - is_log_scale=False, is_discrete=True) - param_ranges.UpdateDefaults( - {"beta1": 0.5, "learning_rate": 0.0002, "disc_iters": 1, - "discriminator_normalization": consts.BATCH_NORM}) - return param_ranges.GetParams() - - -def WGANHyperParams(range_type): - """Returns WGAN hyperparameters. - - Args: - range_type: which param defaults to use. - """ - param_ranges = GetDefaultRange(range_type) - if range_type == WIDE_RANGE: - weight_range = [-3.0, 0.0] - elif range_type == NARROW_RANGE: - weight_range = [-2.0, 0.0] - param_ranges.AddRange("discriminator_normalization", - consts.BATCH_NORM, [consts.BATCH_NORM, - consts.NO_NORMALIZATION], - is_log_scale=False, is_discrete=True) - param_ranges.UpdateDefaults( - {"beta1": 0.5, "learning_rate": 0.00005, "disc_iters": 5, - "discriminator_normalization": consts.BATCH_NORM}) - param_ranges.AddRange("weight_clipping", 0.01, weight_range, - is_log_scale=True, is_discrete=False) - return param_ranges.GetParams() - - -def WGANGPHyperParams(range_type): - """Returns WGAN_GP hyperparameters. - - Args: - range_type: which param defaults to use. - """ - param_ranges = GetDefaultRange(range_type) - if range_type == WIDE_RANGE: - lambda_range = [-1.0, 2.0] - elif range_type == NARROW_RANGE: - lambda_range = [-1.0, 1.0] - # From the barplot, clearly False is better. - param_ranges.AddRange("discriminator_normalization", - consts.NO_NORMALIZATION, - [consts.NO_NORMALIZATION], - is_log_scale=False, is_discrete=True) - param_ranges.UpdateDefaults( - {"beta1": 0.5, "learning_rate": 0.0001, "disc_iters": 5, - "discriminator_normalization": consts.NO_NORMALIZATION}) - param_ranges.AddRange("lambda", 10.0, lambda_range, - is_log_scale=True, is_discrete=False) - return param_ranges.GetParams() - - -def DRAGANHyperParams(range_type): - """Returns DRAGAN hyperparameters. - - Args: - range_type: which param defaults to use. - """ - param_ranges = GetDefaultRange(range_type) - if range_type == WIDE_RANGE: - lambda_range = [-1.0, 2.0] - elif range_type == NARROW_RANGE: - lambda_range = [-1.0, 1.0] - # From the barplot, clearly False is better. - param_ranges.AddRange("discriminator_normalization", - consts.NO_NORMALIZATION, - [consts.NO_NORMALIZATION], - is_log_scale=False, is_discrete=True) - param_ranges.UpdateDefaults( - {"beta1": 0.5, "learning_rate": 0.0001, "disc_iters": 1, - "discriminator_normalization": consts.BATCH_NORM}) - param_ranges.AddRange("lambda", 10.0, lambda_range, - is_log_scale=True, is_discrete=False) - return param_ranges.GetParams() - - -def VAEHyperParams(range_type): - """Returns VAE hyperparameters. - - Args: - range_type: which param defaults to use. - """ - param_ranges = GetDefaultRange(range_type) - if range_type == NARROW_RANGE: - # Needed because AbstractGAN expects this param. 
- param_ranges.AddRange("discriminator_normalization", - consts.NO_NORMALIZATION, [consts.BATCH_NORM, - consts.NO_NORMALIZATION], - is_log_scale=False, is_discrete=True) - param_ranges.UpdateDefaults( - {"beta1": 0.5, "learning_rate": 0.00005}) - return param_ranges.GetParams() - - -def BEGANHyperParams(range_type): - """Returns BEGAN hyperparameters. - - Args: - range_type: which param defaults to use. - """ - param_ranges = GetDefaultRange(range_type) - - if range_type == WIDE_RANGE: - lambda_range = [-4.0, -2.0] - gamma_range = [0.0, 1.0] - elif range_type == NARROW_RANGE: - lambda_range = [-3.0, -3.0] - gamma_range = [0.6, 0.9] - param_ranges.AddRange("discriminator_normalization", - consts.NO_NORMALIZATION, [consts.BATCH_NORM, - consts.NO_NORMALIZATION], - is_log_scale=False, is_discrete=True) - - param_ranges.UpdateDefaults( - {"beta1": 0.5, "learning_rate": 0.0001, "disc_iters": 5, - "discriminator_normalization": consts.NO_NORMALIZATION}) - param_ranges.AddRange("lambda", 0.001, lambda_range, - is_log_scale=True, is_discrete=False) - param_ranges.AddRange("gamma", 0.75, gamma_range, - is_log_scale=False, is_discrete=False) - return param_ranges.GetParams() - - -def GANPENALTYHyperParams(range_type, gan_type, penalty_type=None): - """Returns WGAN_GP hyperparameters. - - Args: - range_type: which param defaults to use. - gan_type: which of the GANs with penalty we should use - penalty_type: which of the penalty we should use. If not specified, - a default range will be generated. - """ - param_ranges = GetDefaultRange(range_type) - param_ranges.AddRange("penalty_type", consts.NO_PENALTY, - [consts.NO_PENALTY, consts.WGANGP_PENALTY], - is_log_scale=False, is_discrete=True) - if penalty_type and penalty_type == consts.L2_PENALTY: - param_ranges.AddRange("lambda", 0.01, [-4.0, 1.0], - is_log_scale=True, is_discrete=False) - else: - param_ranges.AddRange("lambda", 10.0, [-1.0, 2.0], - is_log_scale=True, is_discrete=False) - param_ranges.AddRange("beta2", 0.999, [0, 1], - is_log_scale=False, is_discrete=False) - - if gan_type == "GAN_PENALTY": - param_ranges.UpdateDefaults( - {"beta1": 0.0, "learning_rate": 0.00005, "disc_iters": 1, - "discriminator_normalization": consts.BATCH_NORM}) - elif gan_type == "WGAN_PENALTY": - param_ranges.UpdateDefaults( - {"beta1": 0.5, "learning_rate": 0.0001, "disc_iters": 5, - "discriminator_normalization": consts.NO_NORMALIZATION}) - elif gan_type == "SN_GAN" or gan_type == "LSGAN_PENALTY": - pass - else: - assert False, ("GAN type not recognized: %s." 
% gan_type) - - return param_ranges.GetParams() - -PARAMETERS = { -} - - -def GetParameters(gan_type, range_type="narrow", penalty_type=None): - """Returns a tuple of dataset specific parameters and hyperparameters.""" - if gan_type == "GAN" or gan_type == "GAN_MINMAX": - return GANHyperParams(range_type) - elif gan_type == "WGAN": - return WGANHyperParams(range_type) - elif gan_type == "WGAN_GP": - return WGANGPHyperParams(range_type) - elif gan_type == "DRAGAN": - return DRAGANHyperParams(range_type) - elif gan_type == "VAE": - return VAEHyperParams(range_type) - elif gan_type == "LSGAN": - return LSGANHyperParams(range_type) - elif gan_type == "BEGAN": - return BEGANHyperParams(range_type) - elif gan_type in ["GAN_PENALTY", "WGAN_PENALTY", "LSGAN_PENALTY", "SN_GAN"]: - return GANPENALTYHyperParams(range_type, gan_type, penalty_type) - elif gan_type in PARAMETERS: - return PARAMETERS[gan_type](range_type, gan_type, penalty_type) - else: - raise NotImplementedError("Unknown GAN: %s" % gan_type) - - -DATASET_PARAMS = { - "mnist": { - "input_height": 28, - "input_width": 28, - "output_height": 28, - "output_width": 28, - "c_dim": 1, - "dataset_name": "mnist", - "eval_test_samples": 10000 - }, - "fashion-mnist": { - "input_height": 28, - "input_width": 28, - "output_height": 28, - "output_width": 28, - "c_dim": 1, - "dataset_name": "fashion-mnist", - "eval_test_samples": 10000 - }, - "triangles": { - "input_height": 28, - "input_width": 28, - "output_height": 28, - "output_width": 28, - "c_dim": 1, - "dataset_name": "triangles", - "eval_test_samples": 10000 - }, - "squares": { - "input_height": 28, - "input_width": 28, - "output_height": 28, - "output_width": 28, - "c_dim": 1, - "dataset_name": "squares", - "eval_test_samples": 10000 - }, - "cifar10": { - "input_height": 32, - "input_width": 32, - "output_height": 32, - "output_width": 32, - "c_dim": 3, - "dataset_name": "cifar10", - "eval_test_samples": 10000 - }, - "fake": { - "input_height": 64, - "input_width": 64, - "output_height": 64, - "output_width": 64, - "c_dim": 1, - "dataset_name": "fake", - "eval_test_samples": 100 - }, - "celeba": { - "input_height": 64, - "input_width": 64, - "output_height": 64, - "output_width": 64, - "c_dim": 3, - "dataset_name": "celeba", - "eval_test_samples": 10000 - }, - "lsun-bedroom": { - "input_height": 128, - "input_width": 128, - "output_height": 128, - "output_width": 128, - "c_dim": 3, - "dataset_name": "lsun-bedroom", - "eval_test_samples": 10000 - }, - "celebahq128": { - "input_height": 128, - "input_width": 128, - "output_height": 128, - "output_width": 128, - "c_dim": 3, - "dataset_name": "celebahq128", - "eval_test_samples": 3000 - }, -} - - -def GetDatasetParameters(dataset_name): - """Returns default dataset parameters for a specific dataset.""" - if dataset_name in DATASET_PARAMS: - return DATASET_PARAMS[dataset_name] - else: - raise NotImplementedError("Parameters not defined for '%s'" % dataset_name) diff --git a/compare_gan/src/simple_task.proto b/compare_gan/src/simple_task.proto deleted file mode 100644 index 9a7bb7d..0000000 --- a/compare_gan/src/simple_task.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto2"; - -package compare_gan; - - -message TaskDimension { - optional string parameter = 1; - optional string string_value = 2; - optional int32 int_value = 3; - optional float float_value = 4; - optional bool bool_value = 5; -} - -message Task { - optional int32 num = 1; - - // A list of specifications of the relevant parameters used for this task. 
- repeated TaskDimension dimensions = 7; -} diff --git a/compare_gan/src/simple_task_pb2.py b/compare_gan/src/simple_task_pb2.py deleted file mode 100644 index 2e2b1b7..0000000 --- a/compare_gan/src/simple_task_pb2.py +++ /dev/null @@ -1,159 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: src/simple_task.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='src/simple_task.proto', - package='compare_gan', - syntax='proto2', - serialized_options=None, - serialized_pb=_b('\n\x15src/simple_task.proto\x12\x0b\x63ompare_gan\"t\n\rTaskDimension\x12\x11\n\tparameter\x18\x01 \x01(\t\x12\x14\n\x0cstring_value\x18\x02 \x01(\t\x12\x11\n\tint_value\x18\x03 \x01(\x05\x12\x13\n\x0b\x66loat_value\x18\x04 \x01(\x02\x12\x12\n\nbool_value\x18\x05 \x01(\x08\"C\n\x04Task\x12\x0b\n\x03num\x18\x01 \x01(\x05\x12.\n\ndimensions\x18\x07 \x03(\x0b\x32\x1a.compare_gan.TaskDimension') -) - - - - -_TASKDIMENSION = _descriptor.Descriptor( - name='TaskDimension', - full_name='compare_gan.TaskDimension', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parameter', full_name='compare_gan.TaskDimension.parameter', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='string_value', full_name='compare_gan.TaskDimension.string_value', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='int_value', full_name='compare_gan.TaskDimension.int_value', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='float_value', full_name='compare_gan.TaskDimension.float_value', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - 
_descriptor.FieldDescriptor( - name='bool_value', full_name='compare_gan.TaskDimension.bool_value', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=38, - serialized_end=154, -) - - -_TASK = _descriptor.Descriptor( - name='Task', - full_name='compare_gan.Task', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num', full_name='compare_gan.Task.num', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='dimensions', full_name='compare_gan.Task.dimensions', index=1, - number=7, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto2', - extension_ranges=[], - oneofs=[ - ], - serialized_start=156, - serialized_end=223, -) - -_TASK.fields_by_name['dimensions'].message_type = _TASKDIMENSION -DESCRIPTOR.message_types_by_name['TaskDimension'] = _TASKDIMENSION -DESCRIPTOR.message_types_by_name['Task'] = _TASK -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TaskDimension = _reflection.GeneratedProtocolMessageType('TaskDimension', (_message.Message,), dict( - DESCRIPTOR = _TASKDIMENSION, - __module__ = 'src.simple_task_pb2' - # @@protoc_insertion_point(class_scope:compare_gan.TaskDimension) - )) -_sym_db.RegisterMessage(TaskDimension) - -Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), dict( - DESCRIPTOR = _TASK, - __module__ = 'src.simple_task_pb2' - # @@protoc_insertion_point(class_scope:compare_gan.Task) - )) -_sym_db.RegisterMessage(Task) - - -# @@protoc_insertion_point(module_scope) diff --git a/compare_gan/src/task_utils.py b/compare_gan/src/task_utils.py deleted file mode 100644 index c0dae76..0000000 --- a/compare_gan/src/task_utils.py +++ /dev/null @@ -1,147 +0,0 @@ -# coding=utf-8 -# Copyright 2018 Google LLC & Hwalsuk Lee. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
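The task_utils.py module deleted below round-trips option dictionaries through the Task proto shown earlier in this patch, using proto2 HasField to recover each value's type. A short round-trip sketch, under the assumption that the deleted compare_gan.src.simple_task_pb2 module were still importable:

# Dict -> Task proto -> dict round trip, mirroring MakeDimensions and
# ParseOptions below (assumes the deleted simple_task_pb2 is on the path).
from compare_gan.src import simple_task_pb2

task = simple_task_pb2.Task()
task.num = 0
for key, value in [("gan_type", "MultiGAN"), ("k", 3), ("lambda", 1.0)]:
  dim = task.dimensions.add()
  dim.parameter = key
  if isinstance(value, str):
    dim.string_value = value
  elif isinstance(value, bool):  # bool before int: bool is an int subtype.
    dim.bool_value = value
  elif isinstance(value, int):
    dim.int_value = value
  elif isinstance(value, float):
    dim.float_value = value

options = {}
for dim in task.dimensions:
  # proto2 HasField reveals which optional typed field was actually set.
  for field in ("bool_value", "string_value", "int_value", "float_value"):
    if dim.HasField(field):
      options[dim.parameter] = getattr(dim, field)
      break

assert options == {"gan_type": "MultiGAN", "k": 3, "lambda": 1.0}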
- -"""Functions for creating list of tasks and writing them to disk.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import copy -import os - -from compare_gan.src import simple_task_pb2 - -import six -import tensorflow as tf -from google.protobuf import text_format - - -def ParseOptions(task): - """Parse the options from a task proto into a dictionary.""" - options = {} - for dim in task.dimensions: - v = None - if dim.HasField("bool_value"): - v = dim.bool_value - elif dim.HasField("string_value"): - v = dim.string_value - elif dim.HasField("int_value"): - v = dim.int_value - elif dim.HasField("float_value"): - v = dim.float_value - assert v is not None - options[dim.parameter] = v - - return options - - -def UnrollCalls(function, kwargs): - """Call function with all arguments unrolled and concatenate results. - - Unroll all list arguments in kwargs as multiple calls to function, and - concatenate the results. This enables things like cross-product. - - Args: - function: the function to execute - kwargs: a dictionary specifying which arguments to use (as explained - below) - - Returns: - a list whose elements are the results of the calls. - - Example usage: - kwargs = {"a": 1, "b": [2, 4], "c": [5, 6], "d": 7, "e": [8]} - will call "function" with a=1, d=7, e=8 and all combinations of - b x c in [2,4] x [5, 6]. - """ - res = [] - for key, value in sorted(six.iteritems(kwargs)): - assert not isinstance(key, tuple) - if isinstance(value, list): - for v in value: - kwargs_copy = copy.deepcopy(kwargs) - kwargs_copy[key] = v - res += UnrollCalls(function, kwargs_copy) - return res - res.append(function(**kwargs)) - return res - - -def CrossProduct(config): - """Computes the cross product of all options in config. - - Following common.UnrollCalls interpretation of a dict containing options, - this compute the cross product and returns a list of those options from the - cross product. - - Args: - config: a dictionary, following common.UnrollCalls format. - - Returns: - A list of dictionaries, containing the cross product of all options. - """ - def _Copy(**args): - return copy.deepcopy(args) - options = UnrollCalls(_Copy, config) - return options - - -def MakeDimensions(dim_dict, extra_dims=None, base_task=None): - """Creates a task proto from given dictionary.""" - task = simple_task_pb2.Task() - if base_task is not None: - task.MergeFrom(base_task) - if extra_dims is not None: - dim_dict = copy.copy(dim_dict) - dim_dict.update(extra_dims) - dim_dict = collections.OrderedDict(sorted(dim_dict.items())) - for key, value in six.iteritems(dim_dict): - if key in ("_proto", "_prefix"): - # We skip the special keys. - continue - dimension = task.dimensions.add() - dimension.parameter = key - if isinstance(value, str): - dimension.string_value = value - elif isinstance(value, bool): - dimension.bool_value = value - elif isinstance(value, int): - dimension.int_value = value - elif isinstance(value, float): - dimension.float_value = value - elif isinstance(value, tuple): - # Convert the tuple in a string - dimension.string_value = str(value) - else: - # We skip the values that are of other type, - # e.g. proto (this is the case of the "grid" argument that is - # used in many functions). 
- del task.dimensions[-1] - continue - return task - - -def WriteTasksToDirectories(dirname, options): - print ("Writing tasks to directory: %s" % dirname) - if not tf.gfile.Exists(dirname): - tf.gfile.MakeDirs(dirname) - - for i, opt in enumerate(options): - task = MakeDimensions(opt) - task.num = i - tf.gfile.MkDir(os.path.join(dirname, str(i))) - with tf.gfile.Open(os.path.join(dirname, str(i), "task"), "w") as f: - f.write(text_format.MessageToString(task)) diff --git a/compare_gan/src/test_data/image_celeba-dev-00000-of-00010 b/compare_gan/src/test_data/image_celeba-dev-00000-of-00010 deleted file mode 100644 index 8303d80375f65950322161112a633d75608b0cb4..0000000000000000000000000000000000000000 Binary files a/compare_gan/src/test_data/image_celeba-dev-00000-of-00010 and /dev/null differ
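For reference, the expansion performed by UnrollCalls and CrossProduct above can be reproduced in a few standalone lines. This sketch (cross_product is a hypothetical name) handles only plain keys, not the tuple-keyed entries that the grid-search configs earlier in this patch rely on:

import itertools


def cross_product(config):
  # Sort keys for a deterministic order, as UnrollCalls does.
  keys = sorted(config)
  value_lists = [config[k] if isinstance(config[k], list) else [config[k]]
                 for k in keys]
  # Every list-valued entry fans out into one config per element.
  return [dict(zip(keys, combo)) for combo in itertools.product(*value_lists)]


# Mirrors the example in the UnrollCalls docstring:
options = cross_product({"a": 1, "b": [2, 4], "c": [5, 6], "d": 7, "e": [8]})
assert len(options) == 4  # Only b and c fan out: 2 x 2 combinations.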
ztQPhyz%kH*vgcJ*g<%@t^th+X^>Z?2Y}&zR#ntsR9|PEGey`&?J}y!d5vfAQxwZHs z_k6PB=lw$|_8?6M z?S10k9V?|^MU-WKFy^GO^<6;oWy#X*=&JPzwB**^jn)vGV=fv9Y+^D{Z zml-A?Vs2nO(To4{PB}4t>Bb*WO5vpa*tvKj?>cD0JIS{$zpJ^zd_8f$q=omAZL+wX zKk;Kn%ZFxC1ni5*LJ9d1a7*ej+*B?!l=Dkqo?$4@wDn>ODE&y|pb%oHR;nTzP4^Xe|`jXaq@@prs;I)_NBZeG{!gWU8@4=?q@!U0Pjd z0R~1>jj44iKrvg<&jCrXz$S3Ew<%qCZ}AerV54R8u6a^Y<_Fm_Jze9}mF!2oW_<>l z-2_8yIpN+u0kk5&i->P`nrq35u>j7t{;diUz?LGC&a-9u1Z9TwX8A^=OsG>0QP$lS z(PJQ(_*mHXLo6%KT9;*|PkQ8KrG5YWYI0(kMbYDPWCT;<+$b$To88F2m7rQ#@j<_B z%-1CQaz`C570tm}ol0;KLU>;I0qMdqQ`LovQJ9I=<6Q%k(^oh=ShE7ze%oUY@V;=(EmIB95= z_|&Q)v#a88M0Tu*o)DUE;q%*SE#9iqZXliI2D(tAByQ(Ah2sYvw!qC0_{>1ClA>F8 zgzBTv)w>`fy3DJ(-E&=4In}{Ydlsw36#EA)iv@E@NYBt=VmSIbWzdU(Fo*l;cKwx{ zbHGc_SssRufCbz1@$-ut!0|>7#Va9bwyD`x?wwdPVNF7J%;@AfJn87P%$t8Hq(WOL z_#z%yyI{;&j=-F_7OxpH^7Lh+0+cH@8Pr1Qo*hhXnR@9ypW%du3d7$m#d^&Mfp@w5(1WgUZ2elhHkTlA6L8T?5_-P(Y{ORW1R}`8CObB z`$8CYgfov>?QNj|NNrhAXhMdY;HQ@CmAPf0Eq^_(b;O@?&`potb)#h`(R~U0r zgtBr({`)Xq_${TJS?K5@}F_#b~5J8BANmKPKPd|&|AhzSc7C^$ai+|mSqs|V&*)%>6CgxwX`eO)%$3%JfF-L4{u#NE?W@NafJo_IC3%W*YpCUthEGy6uDptu zW~7`LZF_OG*ulZ)j(JPy#gwR3Us;|@*Yack?ow6uW4aDUIP{46SM9+!+UO;gcQ*j8 zHtbhv&B0ecNzT%K-Sd#$w;ag#gUPnDT=A`LRDYROOU6A3qP=Xs zdW!ujlJ&knGJAaGu)}C-M8PZpD4;C40hqSj_g(E*BTNi1bZyzS+iPRUS2bu#|H|T9 zI;MTyAgl4b#)#Nre@sfClqyAPR1LyT{swbyRMlANsqQi(zK>79Mq19GPqGBzXvG2l zLLRuKu}jwNnxZ-OxKP+YIo_38ro|#1cRlG!zgK;MNmJ=mXr$|tK)9n^0-H~xm=05F z&I4q7KcQC0>n!#VYd!zZNatePbp;t|9gBC%lBeOc{(YEWFa4D$7yF9I^gU(0doF&n z(bRe)loorbMb5T+Tie(iw9Thaw#-VzK0>qTK-gtJR8?os=7kAamCUttRk}N|!TtT% zP%k+QRwB#$o~&9U_kDrks!>m}+dE&PYz@8DmHO?c41ViOykm9=I!DhnHqm^-XEPwN zxIjyO+mpW0VN|bbIPBzgpVSRNnrcPRQhHHCJ$lCcf}-w#eh^ z&%gALQ!|2#5E6-f3vM|>Bx)z?gA&A|&3l10IEWu*L3}rwo}>HpMH^+!+2W^xhsmE; zyQs(Hd4IVz#@pu;wW3ob`KG0oiso=6Q=R*lii}|0c-5`r$}_J^~-gU zhRnxQXX9y%R_aohTQ%={d{7vEM^7=X?cS@dkeoH0#?+YvWT1 zpgo!7(yVA#u>GUTYCHW|d7fXfp+>JaX0C}9!k?jRec}!aW`x&Nw9}ocYPNf?7q3IE z0(egDk9u;T+nw{INtwt;(CCTyz5S&7Puy=GROiq<TBzL9sm2e^qT%!4pvJ?{8AhkE>C)5c%FR#DroO8RCHc}2yu)Wr9;>VNSG1rs z$~NS)Z`bpy26zD&HsYSH?r@_>eMOe0icQ*Zn)jR3Q!y&|Oe!p~Efv#7r33w21`R5A zzh%6@E_eI0AC>3))N{v@_WG4rV&8Jc;+0D@a+$<&hKcds>n@!hVD2!t`w3*PYqDp9q7?BTZ#EvwwRkyluXam3)tE) zar8(-q208kT}G=17hT^;^t}#J-!Pup5f~gDX-AC88Y>iT;!kO3**~{hFhb{*_dMuG zV;Ydxd|BM`429|Nu2d9wmZxqbJ1zBx!h;rJ0E6B}j9VrXF4fwm4W?VVo-9RQuK5%> zZU6x-y_iCUp19sTx5s0~S57Ad%QA6Ld8cZ9T-DVkZ=extga>SrFI2Vti43N_oT-kZ zMTMM#k#!Ba&Bl!3nta78k-Op`@nJi`&YacftaZ=-I%Nqdz1pt*8`r&?Z~3F{nZ--$ zQaN^}F@jd_<0W~L>X%9@BnYMTE`{Kpvg3E0Q*PVE4OD}ioq#t#u??{OuvqCHdl!i+ z@O!}r_Snt@^S~7R23UVDo}4#bm89RVmih(9zSUp?@3Iho zc3juV;hl-)x-|#~ipD+f&aY^pd&urDU_BR-SGs+tqOMU&7jGZ0=-=j?-15Q%;cqkh9!`xy%2 zS1rT{edhyc`1{JWRioYzg}uCOM~Ml{WV2E%+vFRr@ArRBWgtqSLpZ`Py~j@5lIK%K zjcdZ|UF7<(Vkb;!)m^aA}Gk*feg$f_==G8hsw+}^FfZswRC!j+nlC_F_Z<=No zt{J&nlBLTY^F0Ql-u0-Pq|kr=Bwx~<#Cck~DJ@4J1 zPWl6}aw5g6>~GB;_KTgu$BjwLX9wIp2Gt+<0Wm&h#Z|PieS$(>`?D1amg-l0-KXTW zEB+AT=FA}Lga~bDm0Q)<9~AKK($FV*&r(koH8N8O?_KdN(;vR=99`NqESeOU>(mo1 zU?Qk;GIL3w({HxhW4LsFJzI(uZ=yWt?TLG}ucr{cgT$+)*@2sGRd&Bm5qUG8s!j8_ z=^o2x|J-T=gov}R1cO5RY(cfGk!9OeZupIMfl+$PH2xPLQ=9F0tjhxtH(1l%<-wI#kUp_$H0p6jgB351 zID?GW+@OmftcRbpPRK3eyAxnaxi>~)H^pDR&O$5O4K?gb(} zgpqr3#Qc*~Ic28a13ZUEE!QbCuQ~;R#4qypm6}fVS7Vvl| za}GGUqpmzdWKb*vNd?T+i{VnV1IoP^2gq*Ll?zRvw1OYT3dT;+&U;F_tlS~*M@#Us z&b@4i!c$#7TzOf3d1YBz(7-S*V4|)WQ+%E03z2v8od!5&G*5>UWcH6~Crwpx;+-f@0U_CbA#wGJovg9^$qi zbXtjQ>HYGX$If$5cS_+bz2QLu!zP(YCI_(i<*D+9M#zk>tomp6r@@P;Jjq!17du(O z(4nZHvaihd{3&O;XKw&+`*f~v09(p7{^EKk-*+CV#RkRoK;!(e#1Vql%qIA4Lt`yHTcwI)o@pg`k+|VOn zrY3RJie5pWVm%}e zXB-$GWc8hw#cbsWI`St?xpB(vt+7Vri(^E|ytX@hJLIVacQ?K1bsR;Wv&8y}`NFZv 
zPRXp^ke5%gg6L$q^Eyb=C60Oeh^(E=gY>QES1uz>S6CW zp9%LbN!Q3tzv3JJsBOcnv}rB(;YvO@JGfy??@es!dW@mqD8=)A!~Py;4KOD)vUDF; z`QDa;F(HW0%{uHthGOvfwZe6|9e-q1mEI)R-r%SKsT+(=GTPiiH>c-xmCrl#F|ool6FF^$Z!pnsASQy`xn*DSb&cQ zpTNTR$ZiBVeMy9S74k99d3%by}HKw-b(b>=bWr; z;o2UmHvFsCi>+hf6H=4AwiKP@E>G8DOB$TDv`)Cxj&rr87b`RwgD)o&PF+YpO>Gaa zK*#PV27NNX=Z_BBlX9>5Gffm|Eniqg)a@Y6kj1uy2|%8xD`2L%mk1S0x_=sb_^h$s zE~61Cf96CcY_RGRA(Z{?3miAnc+TJ~sZni<7#-~lu$%WUNOca8E?f@+u9 zBnUu1^aYFq4T4rv#8u;zIfmG#F1n6OXVm0fcsiy8P1DMX9$=Abl@R@h% zvz@b@{d1rDyzYJ7*YlP`0RFw0wzHzKhZ9h-`=1t`jf07m#VZR(GbeKkbHci(ud1F_ zSpsULiqacAY;x?U|KQ2Kg{wV%2K~2ys88<-23I)%K*X^uTvEe3$J7t`_!^BJg^rFc z^bCnaVqQB2ARoihv(Imm-yoNC&@s%9#d;Y&PhIYg)V&IP{DLjt<^OO$7B6u8BTc-= z&*$NOG*IRli3b(Gr&f3%Zu+bIb>ww2MLsdU$Q?2;u*8D+(~Wdsz@7W~m@WZVWIT-+ zOGPjA0ckEghZEQ$N`@FP^!s%&)x$i zH>cZP;gP_TT_56S^use*yTx5c7!Wb00gu5hVts0eheml-_;8!5_ma_?66e<7&x%hbCIq){G zG04g_DU9vP5(-R`>iMs%uYDeZt76A)h0{jGH8qzKQPL`EQh0=%iOM1(a&8W?*~UNO z?(@+Znc+;`K6H{9pt9ETl)6=(J*LCmB%mbQ@@^m(+ zs3}JU-}`&WM%pP_b^6|=Op$HjDateL6*qG6zic(5qc54Ca5sG>NgI`j9v4|y{5mCa zixU{0x2?{7$aiTwO4)ix)y=yf8C+jm44a063O>22_O!@^cVz0uG6-S13BL)h53-g{ z8_9QCSL*p6vr?4hQ6&LGUY1%?Ui0qIIZlU=iQo?FvKi%BOzdDWX+vT6Iw?b8a?@w9 zjr z>Lrv;&592ys5zt_t`ZFQ9j>~(*}T3>i1`xg+V?C(tVBbVIHt$|Rei@}8E>aSkv%)I zY#=e?b?brhd|Yjvll4$bb0G7(=7Uy;9nU}J_B#Wrk-kynp+FY7@}l0E$f8OJtlYu> zj5*tI;?+Os>{ATo`&9tp@m3;kjvef?x}X&FHJk?oN5{SZ#_FYZR6=<)cB~k}RvP{4 zLxH2kKL75D?CafXj^`IB-EAkUccNXV6c}yUtP>Wyo67xFz1fePfBi3zYMTWq9K_Mb zGvmdnV-FY%^yIPb>_j}lTvJ&>ZUHV}?^nu~SlKGv48)(h#c*8YW-Md;I%mt*^#~(# znzo!bjB*#iWOVcdWA>fx?F}4~p7p)jOKZYERwGeq1_2C%u0BgMmk|^PXH)On^h1@j zv=%4)9*gxH1oF&+)8!bJeY(3^)ha8?Z@on|&D%NkIo;9TKtgADaa`40eRCz(rJ!{yVDnDT?y_?*JnXN@dwZboMx1V3}EFdEpBZVXw%Z=m;@s5Ovf?T4>5| zMJP62D=!nyx9&k8{`SgF^G)^Z={ZTn+A$KjeH}t`7ELfb;EJnwub$*IPE%L+8`&4f zH4K6wW)Ra3y?Y7?#~w%9dpGZb`cI4O$iio_5{ene6Rj_{b!!mY=G@yC4rd+S27)=V z3ZrhirQ7<}?zpB0h0xRcgv6DOi0KeNlEFTG6&88V{YaK0eaQrLx`_NyJgPcI)I=K^2NT ztN88ML^|p}6@O9OUHy|BKr`&WO!?;y{XZnl=jG*5RRQ6fTRAQ}lp|gGSd6ml3Lri` zOvxE&@Vznn57PE(O%>2@s3dBq#0wmPri=_Wk8g0*D4sT+qi?@o`HA|Sd_pnXA*y>& zehp5f!SHl-KX+tVyxS!gmGjn@renG0`UUw>v@;9loCk z89o;oR%d)ISoZ^L_rlG$oSuZ_WP!&+|T)> zvfb`>*FVz=(D`As!=RsDr=^(Pe>J;ru@r!_pdu^+5GR7IKD_1#{l^rA>T~M+^OI4U z!_G711^m3n>`l;4T^ccL@$#XW9Y+%0$mdFlV3R>qX6Z%^rS)7J%Si@pEt&psl9~Ou zXu>QeUNaP86i%d`*U!9iaVN@xn8%KZ52 z1`srVZm8c(GY10^GMb{WtvN6(AWpg|5K-JvcB=Zf3mdk@);IN#$$6J>2l@c7J zvLigjZPayX9F}4N7S79O>&@#hgy}ZTw<`P<{IGQ zBM)}F8Ll@_oc)L;xK0vQ$bvmy|3V|+f1;75C_lgn2%n=@oO8VJ{j}eA1swf!W)^pG63Y_jO-lCMCD9T07 z#77eaOCfAiP$2}vUFO2!64=F~ypc5>RPhlOQ}t!$L%_Z)wl@P@T;(Uez`msO!s;{| z+r{oEl4p!>1&%wkCuLNesU2Eq@*|FvM)3J}#*1&iz?SnAjl8EoU%mTMh|^?CHman@ z!0NEyaN}aEZ@lB&kWl4^1KzwmDk`O{I9ZQj$uzDCiDt61sP(lA*tFBp5i3A@T$4MV zYUH(;kVnX=iSv4ASG{d3jRWTQ=GA8fTD`tKzB2*-+4x^)pI3&dMuT}X6-%lSlINGc zc{NnMH(381NveqQlSG7_6ydd9p;UyR4Z`u{c$q%*jRtP)i1Q8`f2;~gZekQppd}_m zXFECyF?w&z@!Zt?^?*RmFOiYIRSLk3UfBIu?;?$T*BL%OKTr)=jsg4pIb*wBE9u-r zMryt3;s z|FAoRC-<1#fhEaHFeewB1@m4Na>l%>tCWi>8^IiD#hVj7xJdH$@pXF;!c$U?6WM*? 
zIBD#su(odWa|w|?q}sViZ`6ppDwXb*$LK;lb(PKDXwd{+)bmIA52w(yNu_=XVws5N zDWnbs4DaaWG@!$&`deeUb~H^$iKDohtCd zTgv^9(sEQqeI6BR&j5gwFO4*Lzk~!&lqiP)39&CVmX%99$zwn1Wh?foPg!sPAt`ER z3jOIA!Q`$riUksK(%Q`1x71MEn%8xr`7w3`y*ghql+CAu9C<^Yyb$5d+~^cjPBUf>R?W_OQkgN8n&?` zr@$HIxfC^1cXo8`MqIQhA)YKDyRNiCw?Uk+Q}#kW8H&y-Y+XBext#^*bw9X-9fjGV z%43eHUIu+dnXvD-saRbU;b)&gya+V>Ak|WeFl(|c*(6lJnS@|C*fz4eh4Itejx z!M^rynDaK~Utf+DpkCBW7>?v%tZyHtG@U!)c}C{Mga-r|Ofmh2a_UB161lPX;;EoP zevQ0aM~UKM3?=nRvykna+jW}8R^W;c(fWRb@A?QiN7vYUpI7VNJ#5bq%hIAz$QIj> z$`1zU+O{OZfRJXCs+7tYCb>Mnskv`eKH#nLc>^QzP&}jssmKIMjZr?e;Ydl!jbyGG zeMxZ&twaFm9XymxNEw$l&r*#g=CehN5HZ9&#xia>b7)}GH}AzmhoQk~TkD&SB<*A_ zn6;IYmxKS-{UPu6IVUdu^LWu$8Ae7c3L~n^>*8WIUL9I?y-Qu)c{)Yf{BOMQudBKx zi7x?U3(oMoJ-PoKub7N(IkYEFW_5h1V7fw|#YZZm0kno7S`3?`d}IjNw%ickarlG7 zmOwbkhsTFgQ!6(Ops)MaK+NZuJ-<)Pb_17Z*5T05(}NG!V$*4Iq-#FBGS1XF4kHfn zsS6(Pw}7vM_vz8bk9&J^8nm)EfMa5-p2=%u$5DFama1UNIV*CCOEzlK>M2S-a$0rcH) zPm7D~+6eawig)*P98z6q8{;rUYgGy5txQ?&x!-<|%r2Up96deUzS-M+bQHIVA5QK& zt7qZKv^a69<2H7pqUtztpV_;5@2gr=pn)-{Z#Day=AWA7q)*B{saa4mug#og7ma>;D+%q1{6bVUO$*wrIs(i<#-lsn_r^gmRUp*rrtS zuLwEC454EJ>`FKp0fi@;4TeO!k`kQXXY{C&nE;&A+H%tzQ%W5J1C4}Rs)4R<9-LwU zIZD|w&%;ATR|iZJd%MMofSw(`i*482?{!m$?#9){kixHfMag3cF_WIlC%c#RB5$}3 zIAz|hkYE0*LGIr>pXdX|*l0hZx8r|dZ%;@P3}!FXL8R*3fINe96TBK4|6m**`lAt4 z^FbsN>$9pwC|-)RJ&X(op&u$=l3-4m1dgV?P5<#bAAH-w7_D#95QXxYj=nc6$DiTs zjepDLYtBO)C$hd!Q!cV-^UCvfx3qUTC8(dxg15X2V>4wdnx|rOS+iwp98@`6f)iW$ z-=(OiO$50aVM!^xEi%H_dDon%MqXn#Z=1&o0A(UrQuX)-WY&vd^U&dKQ-t5F2#rwZl~&B=|x7V>PPO#CTJ^Wo6iYpkde%nVZfc{#M7 z<<`p~t!ZAFSONlkXEM~rgQ|>iCt8Kw>uk61%TY9Fh4FstvjznJ*y943cAluNKuPCJlN>!%lpYUD(Ex zKwtG{8lTl8zAzP~xE_ZWtn=D7Osu8r+I@5Rt>|?XlZM}bd$Leu||OlG%3 zB$BDznikCW{XLd7o!-)4RI%XP*jMI7VZVJ58zn$&LKWABw}|0Y`c4rM9^`}$Q(@{Z zSr6K}2@qnHd;NIVha5@A>~;8?iOh$eic!R`zh7lj#2JDKNBy*<5|U=q;_J2z8q?yz7MJESV4{m+G8g`S}VqPlLLV|v<1e^Sn3@AS7Io03sZ-pZ}1WrJ?I@zT3sfCYG&rnhUdQnB>~UR8bU- zHC7)Tcx&xHgRFoBWJAxVbO3-kondQLUI6IXr%63gsC9EOnT^o@Tp9!I@g?4&H zRzqL>)CE;ylpu{L5R7SF@8CxOlSTDV25rm>mI)o!)ZnmsTLOT@28#dDACY&U8M%Y` zsXu3Z$D8i}ykFFOypV!jRGz-eyD!q!rkrZ)_A6nZS{6_8bMnXzQZv=qoZ*8xhp;TWHFpH{!|_O}k|J#04(C+QdhqiiD2;Uy_r<9p_6 ztpZ}kG*(*#%FG3DrQY0ymJ~HDvB8Z8zk;0ebG<-C%VFJi-g8?r)NfFTq&28dcj03St+Gv`+ljp{agFF!j-tg7NK+xFDKhW>Twj?ud$V_sf^H zfWZ4lPY<7la`P&~t9Yg7m}T_a_yfy#*$0!3R?ePzk(3F)CcnNH@1VG9Z6gE#?*Ex_ z1D#GKR7v98b~meZ!<2*|4C2So$b<}?jA{(Ln!vJ1zD-QtzOf%_4l$&12-NZ8si}!v zGMsPlfX$rb^B)Yfho2wT9l&>^JH1Vi`@GVa28V3{ndFvVU%t)TK3?5&hDbEdF`u{0 zvl!R4ou}q3r;OdI*j@i=n24_ASJKzkkg-t~18@@0IM9*FsjVo}TR~!7Ud1v*>6r4w zF#w>9u`;0uR6(3c%b&2BaGohyW{3*2=3)!Jf?Xpd!A*~1w<`t;uZU$H$3_SFoS*^A zTQ1l1|9sZTlKI@Gcb?<60SJB}_gY+x3NKFEIC}4hZM@O5%GcjR2syD_!%GpXtTbIY zX(^n!vc8HNK5%{}txJmd^Ko%n+ovutWAM!N?0F8+tiV6(V4wLfF$iC-WoNYj-(kiog0_Z;FkPPg48~n>e8`F*GmB}vex!F=iw|V`{c;FNuG6Qh9tW-eSUW-bD-u}xOd=O*13MLYGBx=AA7ucW-j`^t2cIS?a6s4-=fJ` z^Wdho#$Zk-yOcHEaXyg?vwDo;XzD+!jYJxx{qv$>UY5I7=QNGQ2@=9sz@Cu5&sw2k zaV4?S<~G=|=;+8SdivuiuBC1sw-du?382<4Z`s|{W_e)!iM;b~d`z2HonMY0d76k5 zL8ySSqgJ-4n}dsov$e{~7x1nbTMqVu}Oqyw31B{EA$t^APfgjEQnMrye78JqA2X0ja5V~x zN-?}QR=Y1sLPy$m?%sL(xjxNC+LzaiWo_{v0_xlrk5fyb%yBt1E^wHe6HlgT3yk$A1(TyRag;La&yY{*#D~L*(OMnl-4hB)vVV{+ zs^|pi5lwd+Y+_U5fuD_k_KvTr1Q0^i#sCs>SO5grztcOv`?dHdTOrCrF1bpFhn6+_ zsw}tT<@y!DgSmxUO$YTcW=RV57+JgdTY7W~YGOH+v_5w*olo!Yi&%a`3v5GjP0|i4 zcisKQ-`vAOyH7s(!FeU7pTbGw6T*#3rah8C(kXGWq?!N|<6Kh|s|6 zHFo*&Fcuw<6TLWgyV`4Q$;Cc(h4AO?PwsW5CwuDj0_W~Rrge1wVRCfVMR(Hk%Ja#k z3RLMPnaAuhPp2f!`#s*?f2TM?;VjAjSG;1fUMmf7f+dblny5w;Z|TtYXVjem;oK-? 
zQq`0p6r7C^NF_Gbj{=qyeu&H4ctj%OEne|e4`Bi0ilgW$h%mb>cmsgK83 zZ4(CJ^mK_9x;E?pze86ET>(SY`RuwMzN9+VR0KW@(%mPO-O&t5WxT^561RF52r(J= zrCGEp^M9MH(?=d(#&R*L$rS>oz&K18W}i_cnI!t2ze0I4g#vh;sMT;lUL>-}A+!tsCF%opt^IO- zW}Z}PdGpjPyZHyW+;HdT8|JQAEN$6xMw&1zM4RBE-Tg$@K&4NyhtfoM$ z%MeVRcHQCNHTN(oq{hW!6%RcX)f6i@Sv0Z2yn(RdS8x-rRs-Pxy_`05n9NE7G~9<7 z%B(^=qZ~K$isBNAz8mEFk(x=uiVaW_!^KiU4wwCNRPzM z0r{@0qk|w^cF!s!rob_lAi!xLBA6Y6N@9_(eDb|s6EP!D1NwxW_)UH|pGHf21{z;9 zmQd5wjO-JCy>io<+0{$J9Q8J)z@^ruCjQUOrCCg-)}%K#2K`A$R8$ZU1Um}0_=(O= zM4>b8-IVPyXI_$Q!R*Ct^y=J5_m}h(GtY7;SOR5!24;{5db|6K74=-$DS#FZM=Q+H zgF|gmKC7Sx4S#Hz7^YQ|3k9xkXqd))ed;MEvN)>(I)%`@X*3FzSlb%1BA7mo=n@nc6eWND>{a}1QyEj#HD$p_&~YAR|`v6Z&TpFdqM0({%HT5BC^r>EXL`+oP{*K0bW zg5kmmTrdIJ1V|z?R2R+;l1ee*3<1NxX?y_s<#diGyYi2ghKaDhd83O8H31I} zK^4|8?^drSTg^z|wzxD+k3g@x z_UgOZ7pl8a54C*rL-$7%|AFC`C8YdRWZ+)CX82B&(O@sy@^Y2OKs*dZ7Pw?6VIdP? zBBk^5L&~XRH63>Z@-)wqH_LJ8KI9fz{Fh|Kcl}%Ew&?;j{`pz-oHa48hx+0j)2}z} zwXN$>K8uIr9*S_W(5=yYf6?XwA&ggrDgET8p_1{@8oRG^^&EhAS|ySD*VQfIBxr>R zh(Q2t7&;aJov_o(B7tgGVT_Ps0ZT$V3LI%cJnIz)fTC~{(y8I*Wn@Hxh|Hn`JTu!R zZKEf)GwB97rgBSu)1fu*mV>2$YK2_nfI|Gu72BZ-d1BUGgLXZLL_ng0c>3I^d8h9B zwtzBz{MM&-n&(xO1`}o4i)~HUZTnTkur9Iq{|)8*vgGB_BtfBNox8J5(_oSdB$Am3 z&h?Rkh)`Or8aHZ7O*09k}?n`L0=0bKz^!oAH(q zXXbYd>l+I+z?!#jl__ zrlHQZpx6r5y$v8f=>3;pxM%;T9>mhq2Wy!kWFv5PcDVgLfRzHW@d8KdF;p5GN0tsj zA1?YbInym}s61@r4VzB=;-mf>a!1}P@b0gH z69wQ{m}GP4efpmnj}CF`Dy_E4=fDIk?oC}XnaBufYz0+8X=YPgN&0$p6DV|l8b=&N z2%+P?yv1K<1XvzBR@PE=w6macCT632%7l6w18Lvm*H;Bsb`q&0OiK|=6Bc=Re{|qN zN;fu{+`*C@svp0UJ3fawmI9PQ0TL6C!Bx+>=DUuyH0iZ(9*ds-X}X&8^V>snTLa#H zf&Q*yV7z?emjZ2GP?Si4gLyx(yMNc>{J^j9T7$wE_1?GE9u%u`5A1JI^1dC*_)Kni)AWzW1};=9sDA2Is~2jkLXH%t}d z;rYvXm7XMEH08sa+|m%Vw-YRC6zi>?bKjziZA)b?3`oE@JyB z4rT)oG3E3#2RRqg3KE3^sZVJ`@0jo5z^He@l|~7KfUK|WFX>cBZC;eJXr^^q)WCFy z2(ctJr|mL8n_00NiVm{YKjI^27c}AJa*GaecP>jB2Y2)5TXVce{IBW`=A~5kBOojB z=LIRN{|jgQvL~M^0&eJtok(_mtI8!t!UjU{^l`e6#8L7yN+G{ZQzw4}$VK8O-VUeP z!JPHuRKjC)zbSHy)32+y)V=mcZD2qnuc6a&PoR427VqR!bd;t3qxo{z8U5XHx;$z^ zBZ(UW{5haj*hZ7z0}uUq^#tMDFij!ncZruj0D$QGU!MaB1{Y{Li3^y*x;~Mx4oyr% zb8;KUp`<4-u2Xv@D>U5ppGw^Q|^n<+N~uRFyYzB%Rsl*l;dei76o>zeSIh zeQ$E8V$|y(5|ReMt9d`*ZS(pTI?3qsf%#nzq}wuzf%cK68FOF zC@fhi2%W)L0;N#$9{F-cg;o_(4EJ#!V_fR|UizKLreY;dv~lEX%pQ$u1P@Bhz5T-7 zsP=h7%BqrjX#Tm^O!Zv&^Lkwwo{bFg4_V})Qs|J@QEP(?U2gz@ zpc3IrFT<$PS6!{J8fM-HR93K5LbtRu)0{7zV5v@`@@fCUI0@jKcWKZ$!si3DW?vqR zpO4%v4{Xi(2bx*0dpHY_*34b}yGy2#*{wAAw9gLlO`}Fiz{sd@Oo_tH2jfsws-NVc ze`R4D5OF(odnJjzudTdUH#@!P4}1DkcaMb-0++H{o8|bn^exEo&zY{$Jz`dWVL zlM8LIpZU~3iDLfqghW)9h6epJtPgH$W-)=k76HvMTQ1!?*T~?2~+nDp?>TJ8hq0@y@;(t4Apx@~@z=>OYOfSTrRUohSWNoVC zY1B81W!c`~H-_xvZVw+x5dQk`#*x>(DdJ8!;}D>(XUkf4(`7U_En0T4=j}ZuBxOEX z@-1t@w?Y#xVUz}qV+0sfydOVW(+hl;`>c%)=Ii45w3T=QZdH1E{fEBGSeg+v-R>W>uGdi8n9K1J9zqob%`*_QX-ox?{6VY)iuE1OI-B@jUvve$M*bE< z`i29a=(YX+ro(To&0^V+JN4#6ByFp%HqUt}d6qMaUmd&OT*qa;B{PGY*ELL#3jsnw zg%PkQltE$i6NBi!KSK~;C&27J7kJRb4wtKTtZVB?#G%+PZQT^Xi1A!C&##2#-fKb9MHwS zynHuubi1nWA0XPDmv3ZHy=b&7njiR2q&2cuZ3iWQVV8n)=7daPJ;-x*`SGuZjW^k< zz9#(5*G9lUKBD`*@vXh+Ig~lxQ_~|o>z4tALZO(M*i{G!S-9GAB@CEOYOoSvFw>Ya#(1h~|>!$FUnI)YpA+M4-XG+hkM@8-5WVEuURSEYx~j~<;>>bYOXz5jYxgKSAhGzhMA1M+1J zlyCn5e?e0_wGr`cw3734&|W`DJqq+Ci#* zd|9Y0M@{&avx!zsKb=#S-2pe-A507_h!>im-9#@K!%6KCz{-QZHl;Va?|h^OOB2WR z1%{wJk1AV?E7Q<+GtHF@QquLj&nT#b5o5E}=z2`q3va(=0l@*ECfh-Tr#4O5tG`#< zb>z94uE*OKU=kLs118`fvu_tX6Yl?Z9s4fHXLiTvRc~wkx%Nb2{A;{^jQ=O87%J<& zucgX2kjOX;wIS4$;IGA*6VXG&gHgHHY4?IxfzjXV8jaJ#%sDLd7cKJa#Pp_=U-mla z+s#kJ<)9ohFXs3PsQLg zF7N+RncN5ls70p`294N^F^qua*ezSj{JU$CFg9;4>J8G{u5ZkGic?CTE|m17G)pgx z?=JrTL?=tc5seEME}Qt3rF9t~c^sJgZEx$dWMx>$Wvy6M0nTVpw800ogq(=$K*02} 
zcnLkUC1v2uStzi?fnA7w{4mu`32I;OArBx|p~#pBVn3v>dejrP=Ag zT)98MtH>pD)>o0_MvmH4|2|DZ>F|R$0(1MscUWiuxGz z=!7G)zh6MQTHX2m)7ro!9zUtY5E)@b>{Y>rOZcMVIT4~NX4Knd)s<`3-d9$sz=q+ z-#$MhpKv{{)-0_Ase^>wPK%P^nsKvr+WDeyUE>AWi@b94QAD9Uib|q%GHOmT2tOP}V#)g&(R) zq6rJH#ob(y3YZVhwMfhGO-cPmu&XA0^?38q&Y+fbwbl4#Tc-nlMwZ<-QRU;+c?06v zsHI|TNKmF4e3X{Kmf4p1mz=Gno=7NME;B@r`K$F(sHzDTgrJa+mw{)ak|YSbBgM7i zIS+}t;&HA%^AJizTg}F?PhMD(n-RT0i<-6Lu^6roc*>H0`!GEF^3UJbjBVFjf4c&G z-mg%Ji#y!~%w7L4dp6JN`;XOdby?+W_Y=V{!5q{Y8mGwKp{f^>=&uj=u|-5g=5oD% zOM-f7Bd}9hj2;7|??-wW(3pJ4BMM?|dXdD5tDc?h`qA~*)s>|h?AyGG(HeXo2rbwv zJN~}f$D_5p6`QJ~5n+()>+TBlV6Ha(t)@3rf84))v+wEVLK;GJK}GOgM3p`08z@w6 zI~}jgfSbo{tIxFcGZdlzlY3XrG4i8j>9ZLFY=Ww!1`vtqtSFj-MgRp{(6H>qq$Gf! z+b#noC5K{Dcv;A*a=_$wS%rgol*8!Ion@9KC+;B?{y@cvh{@oFIym-l@8)Q8rxMSE zGCux#@qY;TWpzH)XJWY5$$%&2OBn=|iV%;10lBWGHcOz&z>7Yz7P@-(QOEYtgqKH{ z2iXm^J%ILG2uUGWzH;^kQm>4a?(!PnIyztHH2KErB0!MA!kuH&IF%;WR)xYH>cJJM z6`GgNzF$#ET4(QMm6UV06*3zJh{csXZ2Ah+KA9nbTIyr3?|dZe=+%TS@f(!{ENKX> zqaJe}@oAg97ce!vxuixWC_mis8A6&E#D)&-DTX89&Ygysuc%ihRvt5Q5#_d0g6@8k znV^xL>@*E4rJ(PgvA!U6O$QV}VLNxz+X@o3Voo#WU2m{qxGkAE|C2p z^M7aEFKgTu^(g|Ws_fh?DG!x3pa(U6fp*>_*MTUiJc6pe!vPwaq`4je8;%?+h0k|; zQ8oAmCBAVsRb?Mc--!9>^Yu44R62=AB(PYGoH6QY$z%?Zn*kGmnsC0Rr|Y7_u%8lA zW3QHnb`RVZJ=;L0J5|@iKawOvq0ZSmMRKOy3Ug+?MzsrJ^`vRK!!Lk1!kAcut(Hjtu8YaBzW(+qo$^Z27DD;Ww+Q%fhPY_|5<(VmbJ;IJ=9w1D;yK|iS#yu>Oa4dV zYwKOt|8pUJ+2cz6D*q+v?aeC1Nlb%ak8P2^kMSltHZfm=*-KLL#sdvfTJX6tGbaT0 z^NqU@w6JxrVX*1^anp#^ztM@9}fYqWV_r1(&D+hd62I{_c62EGiNAqLuJrxlFA;w8YCJ@Msf*2&G zgXTrZYYLn!PuF!+#D(xx3gsFS!!IDv2GekQ-O@>PG^1{*KcdT20li+SrU6~XuN~5c zCg_8hxI&$CDL@d(T0Yv73(IKs@YDg61f9zTL{x7+) zxTkr!PkwGWGPhLmG z53T+j4B$)t;Ow=`d1rm)ySK_2%Pg|1iAV5!)uK~RRD9Jwn}J~T$B8Jla+x$xsDe(r zq6t8pZHWKT(I@RZCdm5Ad3D7}3-N`pYs&)IGTMUMzt)5x+M$KVb-gOFmXlgPS-v{j zIw(6>6JI_VIJ>lX^HyDh75LUNRXdtb8vHZ06UUZEH7e9)4*!Y`-&$wEA(=$%$?hT9gw=FtF2*aR{#7EON~LfW)A zaUogX_~mc78pBD&elH-0d>5kNx%MK*O}%#)%=3~&QFm-!K~efbIEjD<4}BtL1MX3+ zd77dVsiM;-gi*~wU~ri9O_4n5g3EFtvA1I1D}T!4?vL}`ZF6AHD}pBC?~m@yltOrw zEE_aQJUni{2h0uA&WFCf`0#~C-je#~%++3zW1Lg|yte<|?e<|Qt-lr#E)U_LiPA{T4f94(_N(uNNUCXbfhywaX968*E<@}lL*MlEmMHbM?$xe11Un{Z7#T6-TiOrM29JSdi^_j6_}fNUL~+UUmTuT$azP zl{T&#D?`d_FJJ4L>(*W=NUgY~MGGFo_rOW8cl-nSFGjY{c{(PXAVBvOqi0endnyY$ z0*Rz_q!dcQnThzp=<)ONI7>?C51t!^%}yWyZr&F+P~xD!o73VgbgUeKwrik4RF;x- zBFbQ@Y8Hj?J}F5+CEj9cs%!u#ZvRv$Ay(Z2%%B!34zgA1IjU#9HEKK9J-T`@vx=&| zHHuapU4UuaUHRy-XQfA6J)#&!2`;{;!43Li7zSgOfeS{TzAi)69Muhv?3v!0$jR_B z7S~gtu!SqY6aPH=-H94?1}SH6Z@q8Py(;i*uOaQj;n6d=?nVPUZaH^{mrBA3xZdqo zlJ69=kV-t_+K`Q8liE=_l>PoQ>jc^U+x z4fZa#248YBlZ7y`U<}$-}n2G4kShj zh{OnK>5ztjbazRPl9uk0A>A$ACEbmHbfa_#lNLuv|Mu3;_xBI%wLN?8>pIuD&N=sz z5jzySf(0uVh!_-6M*W#65>1Tn4`aLCxV-QW?CD;4=eg#rQYuuWFG}~Zl(zfJfUa;2 z$L&Ia*{FEW%I)D()=kbV0p%@!PmwaEARCP~g)G)648_v~#GpKqLpZ7`P7;HT7Z!LMBEv_mh2^#dgdgvUs%_oz>iQ$!v{Btjk z$v{F&3yCHq`Qd^T&0C+@9G7OB|IYe)7gb0g`G->KrvpPGeJ;NETQ#mmhw`6YFQqI- ze4k85sFyWKRTtO+fq7b{U6!levt3vmfM6Eu7^ti!ys#cP8Ljb8FX8AWDeOGQhZ@aBschJt?xUCD4l1(gf*>3ha( zDm6w7SV4)?_4obTzARnUAqWjY7zsGoYGr?ZU}#YH564E}oY5nl2>_s`oiH*!u~+q) zYIMP_!(>Qv?X2ebDE~FN1#Eo;wH6iG$$WPB2qMBzD+Soy?Rcap9O@XBXCREHc8x~iVSy4f_5HLlgo)G*Q=uXc#p--=W zh_4MyRCNz!Y3$+|{9ox+`d^{(T1bzxOgZq z|LjaZ<~Vk{bR3eDpyKe*IdW`GAtg(0^6q^J4zf9X{G17J&Jou@OHi zQ@3fC&SchrHXZEZF3wUQH#@TimV%F#PCuR3LpDc|`H`+INEJD_51XzJvtgF7% zs1g9PSX(>6@)j_5UjW#k`+Td|}(pSB<(%{dvF z-`TCsI?GAzbc@_GVZi71hze|`=^>kP>|U}7rK#GEmivHJ9IoBCtF`xkq@Yi+!~F^!e&rmddI$Kl zR)RBZ=7dAl=}a(t_B+y?aNSi^PJyw*UWO+N87@?t|<{Ag_o|g>V=dl_G zU8PFv-{(3(Rv3V?cZKle*S@5NFzm%6=}`1F{@*75IW) zWi@!#-T06mFGBto&f`{(-y_Nc5QO@(nJQv%99mLQadY;-NLV{&2m5e--*D9N5k%k% 
z1=!$Hni7}0M@a#M1xLQv@m<bW97aI7K8*7K2&cFrK@qL zZ6_Tm$H$3K8drR7v%5!b;rZgFjz|3oT$qetI_b~ZNaZhXwm!!KFWkr#Xx8Hdo21oi z$aVus4R(vG%>9@#UP>fP)o3i>qdA4hMWN8})FBY=|0r3dEJYP`B!oZyrZW423l&Kx zd8|H~!xE-x+KFEBN8#i?^|BVP)QSsi3VKvD9Z2nL^%cc~iuH8y+?WcT&vp-6jPHQ@%qDDPWEI`mgpg$>An5EeF8e2s!{T?c;+a@ZY03w2NpCRN~(44D}1`y zR+wow*Ct%E_8W%)G?dd_mr!n1mbY4A;O{Jq`SuO)Zvq=d%U|SktDV6RtU_Iikx639A&ef z1DxUF>XrR$*pNwdqtL6 z|EJkv@C@&zRU0uIjm#3R#{toD(UQDJdcqb~-g7pL;ouS!C*g?e`Gd8TXpJDI`SDe) z{VTapCD=FyvxtLR1B4O(jTChB*D1obQ%q!)dR=_|Q|z0IDTDZo$n1>w>zQ(9MyX?J z)O7u;6L)|4Tgz-=$3aezG`uw7c%pXX9zU5y`5+lZXj`Z-dN zCs7Xa=JYt=-Yf9-;ihv9AAG((g+z~|;3fZ!3e|v~c9Mm~<=w6NTwT;puRQDtP>4QN zdvUS!_ivjl7ARg6>oiEA6G}=z%-FyOV#Bg|#AcW?MMG;cDFDur%Roe{9F(Ji&edh? zb8}oY7mkZ5jD}Om^63QvE1#j+>j!V6|<7#>Lk|f^R;4qa!mG+=Cej1S^eD($7}Dq_}G_^3soC7hwpiRaoKZ|?)1S66nFn^>1y+wM{vn?p03 zFoT8EU^pjoX6qPpHGJ}%?{A;-6A(}Ymyftoz66V5zQ+M;Xk$|fsSI7)zFoU#VR&WV z^&%+0`9!cDVy?45OpapLl}aqL{jb~^Wsm(wYRVw04nBD}>jXrxdOkd=fL5_WSP2YD zD-#ur=Bw@^oTW$F;61H;w6t#9sz}y46IeSMFAqfT4Lgy9X_EoS&*&>)NdKu#IZFkRAY}r@Yj~zTZ367)F-^!mCou^D zLjk1)KcN3u2Y_j+e-V$IedCDz-_+Lyq?+Qc%05#|8c%T<|!CNBmv%3q8vG@ z4qtJO;-WF%6ShokUtiRR+OPRHF2WgptZ5C76Pi}Le&lF3 zZBtj;AO87}vj5_0*?WN`OO3(iN)De#`?`D(3yLx~)e0Nq=S}QXtFO5m4R@7dsF}VB zzyCb6_pD8(9S+y8n!E+Ltaw1FN^K37N^ZJW?_F!$bK!}ExcccfwS1AVbRkQqz$1~~ z(4fB>4LSi~2r$Zd^1J9y(BW&6n2~pTj<`x#5@9GbaFmRM5$(bz8`=9Q4n+VbN-1%> znMjPqS7yHIMXf^*7CK0n;WJSmL&{%a8GH^EgY$R5Z{^neP~#_uEsHKJ$7$!>_|?#f z&|BHuyk zi*__Q7~S6)1}hpoFMMTu0li~t&5|OQi0wfE)dc z3z%ij3mq_>7P0)%zJ_MEVtaK)!fy%{QjIGcM5YbJhV7ATMj_-4u%hQg7#T7_-|tG| zWJ-j<2OB8_7+sAXX>QyMq2o1pT8Cr1p6mgM3~TxwoY0j;O1s-E{TCI zU3FRrrWsU?#GG+*gctwHfHjDmy|gO$!pgl6&BAtG4h;(}-X@7IHF3w78Ie>fH(u zjfD^;aLI0X0=C3zLkyh?6btUMYi}1{MNE)X8%#JH-e0-m@?esd2mb9_kQ~MT{#}TJ zXUCf`tT2+NCxINKG*$%k-yk{B;$Qkh1;ZG|E)K2(Q6KIqmE_FY{Bk5kkO2wTJpJ(O zWkbfj>=VNzN9ZZ_@5`ItoXa;}WUqZrJ)a&MQwa3sdtOa*d@_Z`t>$O>^U#*V3YSUy z0l+~pxoirjwZaa#SUasmF)5Wmt2*wUyjLYK^+lN-UKp{$Z`^NnUNh?4Se6+E1%B)} zYV>Ul82W<~phy4%Z3CxukipzJkOx#*VZB-<1+8sP%2;w(9bepeow|DqAB~&{tPcDUN3X-66eV)Ra0gi z?mF?fD@U6OZ*E78J<2Udiva(C+{KmqUG0(eVcM(Ely-;b)NY=3$Lq2y;6=Wko7>kUa7`R%-?64sboe9kCW4wS?nFZk8`GpZ_kk|#rO&cX#VsN z>S2REVcdkZd-ijx5w=m#a@w4f)_z!05cLI*qj6rsni2Dogj!%ft6kxT9W_Ofd?;Ipd(17WEQp`ErMqw^YsaCU$Nt+Cj@#9K3 zTXki?K8flk1HMYB19Lj`4DQOq{x#pj;j`7Now}rS)m0@3p;!rn6qb*LmppbaP&UuA zokxNO1(O~_;wx?J$0FcM7OM2;6*8e(3+ux2ae6HibRy=h)~^nm1?0XeRae@or_}54 z7_NP97r2TS$$9&4y#mQW0&ofB8(iBUFSFGf^OW%M(U?X8Wrj5I-6ECSx1xK@+s&Cy z@(s**4TB|flh6>drp4D&xlXl6gX-MW)y}~EnHSt~=g&f!2$C+tpPemeD1qb-Fy9WK(q^|(WABe{okXl4&P4qGG#e#cE5-f#ycl|d zw?L!5y{m6*33Y0`mj&;gY^7a8-sl@L)&tC;9&33SFC`#A%^BpA>@~=NBP6kAc?KrJ zt;jwYw1A_7+*fGobnLV4|5hoboc;e$N0{6DsaC48pSR-aAvEBz5InYqIcvPYb67W| zLhhjQZVsSMAwjE}>-5UDBe_#dOq6=#t=r_=vF%u)wDoX7BWgUTZ1l*Ti@{_X@d(=7 z0;QeidkLv~T!z>ZGB*&-6ABO^!3i1mw2Cf%j?__?dKx)Zdu%iWA=H=(>1pZe^J5{A+aX{_FXFhr{}@W za;HHjz}Eq&sOgdp?C7Kk28U&*yqenV#Q?8pk?1zqAerJ<{Hqpb2S}>}@+je;{8P{? 
z<*X?Tsv*@|*_T=^!0kot@H(FBG zHmu$Aus2fVQkCQ<3QA9lwvh9)0xP}EDO`pDXG0xcf`Nb`(X%t{6H=oys07l}ggJ#221BUPJ9LRn`BqZIGSHg~b{h{Y2- zM)Q9o1SI=V0fKBtS-e7`Bw<$mk1a%!(=wtREBt9g)EB>T(9F`279!MEa>6MgqP^j{ z>x*6eH%?tLQs5JRSXoC(eYtn_#i8H_8`*3(ytHY?8IREn6RpfsSw1+y8@r?&Ut{fa9P@XQgjw(Pg$0W9W#(GhY4%H#f-`b0zv z?Mt-FC!yR?qo~fyWbSk%)`MzmR~pUSH#8+`k~n}+;-!^~ziyeoe3}{#L(ZLvnF{UDwtnezYF4+0QWrf7(PO zgIX(=+_#3tDcWqjVqBFrvB8c1!U;=qFq@Kqi7tv-Rc1{p)O-@p8j}z(o?M!L>PuBR zR>@Z~2%dE}8iLlrEc+)S<+O)#HCF;edyY)5I%-}a73xUgx`%JfNk5Co!WcuIE(w?F z@W3t9S9dD2m4%X$0GCn&H45J~Mo3MYPgg?pv0u8?sT>Tk+;fHivYCpr$LR6Cju+Bb zXDkiqNzjX66&UG8dRJtXfU)1g6^+HTeoN#Ub4L(LBxiMI_drEk*rr?;lZqbY=u_Y{nz@{6vzu1N?I0B2>ZrJdCcuFg+(X z%Tz{);8axkSuen=dd!1T@%udDs)9oq%`0}bm;qvmohGl+)6%lG*a=7b*5dr?vdzSv zrieK5`|VhyaA~?(@?uGOY8|Md+TizR|3Ir)W?W2Ds4^tWm5TqBs#VX+K?xfGNB`Hu z>h0b0yH4q7lsf5B1D;ki)j4;*CID-Yc#&9y3HIG*qlDG2YGsJ#7JxHKFoDY^s;lg~x4&J<)x-W~SZ`xOuA&>DEyBf9-yXbnU zG5u_YhDE~0f(T4jrMNSayRo5?W-qMVeOPwqPo+Nhb*FK*a7B-`OjekjoEpzLy+&!?^u%R7st|icJgYsVC2B(pJwkB`R^jF@8@pgb&mz zC6y~8zyfc#v2SGZvbdmyqRf^(*Ds)m2<@{4KyCHVi#Y|3RZ4A%32<}72`Y+PJ7a*B z^Plf`KiY=Q^S68~N3Af&j8Cy+%T`R#(HOl2QO5fnd`K5%mqb#%kURg3*jM+RB}R2b zH6&b2(R+ONe|MN#Ig&`;3@C%L!}!ULLiyQR4DVHsmOx5Hg@(3O;574L^O5V zfq&uxA%k4Sy>%JYW4}GR{QWysUlEZNxts%)XefuxHW<+DSSUd_Ek-RGeK;sGO~@8~ zO^c?9V~n~xH7BF>CVfq8D1PnM(jAA04@{*NvPL9CD6 z2?exOf&`s+A0IzFO~WqV){B(N{I;%-=NM?N>%-%eJo!rxLO5b3_Nhk&e0F;-PZ(dC zX8i=y9wG8r-7>(7Nf6{od=%tO8Fo_q-GOz%2I?)RV{bpMSwJ9xLmTIM%}xy;yIrXi zs^ELAIa}|9=W)Fljdyx;oAY#@) zRouKqe6&GfOVsFg+HW6nk4byR{v}p{CV{OJ$Zc3)Ze|5i5%SAK4Ny>mg0{YUQN|5- zpYm9tVtPWUppJ|}`fkuqc8S6hM(DXX;Mdn!%BwOz)P+rrZ}nOGN=aLt^u^sjH8;0A z*qfH`a{fWEH^5@9lWYDtLwIw(BlE|IF-~U<6Um9S_8tSro=7^QTNAzH%2_|Tpgnte zkdOD}yV$NGN7m`&*wyac+2xJ9U+z$@**8U2TO}8LqhApNWBY!}%wJmHT><+%a$aet zD|6sx==!fb$*Q0KfJ|)C8&gW=#^f$tIrn}}eVM}tQIP9>x2>@$CMqV*EY%*n>lIO~ z&%Z9jO@pcRQ6+DE8}CJ{S4P#<5v)Oj1*|Wb(zbw&(;p&Cs6k^{K#$ta*NgfQ;%4-r z`%O^_CktxW4L+-Rfvo*9Zcw$YCdyi33rR)O$B%bItqMi%lNws+Y(fGk3w!=Zkrsw^ zBs_gs2*c?E?KlDlnB&IsZz|`_5z;^cJ_v(;v$Yx42=Hs9T&oBcGuN{`8rrPmK*KDo zaU&^ho(BmLN>%unt7k(GO=|>f(O^$DK9akk zeK=@83ngeRRYr1RppzIArlB_<`3rG)wSQ)&`&*=^Y0*f*Q0JIsxJ3M!Q$hkE;zAWm zZ%OGL{G`;%OCqdV(YfCC`3`XqV6Z9HS$p9WFS>C18-O?(NOb!qwI(~FEZG1^*e<=9 zenUgCM6rr91WL-I#R*boj##wuiE>stb_8&iy6>ML&E!%JnS{}tV7u+B7GLUVs|)0p zn(!mMYn*EP@elIyW-B6xbr9s1N0cY_#TL=$iThW5AJ`4~fmpN*q-4W&@VN=|M9ug6 zT?g&>Z%~3@W40EqPPMP3zKPW%3(0$RKKP)vajKPm&5j`6eg06U#=$M#LOK8hw_lF> zUHI?>8Zl|FO<9hj2!Obvc61@ zk)9e;=(E0(uP78e(P!*lSSKkUG{>(%`x39X;D=g}fC85+%VJxPze04Q*JOCIR3Ucq zSRWyrGkWe}pFo{}M3J~%{)<49mhsb~ZioyGW{@3J7Bv#MH8YI>%xprEz>+=UrX@*Hdc%>SfY&yn3g6#W z#}Nd%TFCmc>k6bPT1RJ^BmNyIk=6eW6mpR8f{L8bht2Agy_fug?efU#jDa4dAV))6 zd=T0w|9p=iO9oru*WTN2zaNcBxh$rAAM1P^4vak=8;F6&`J()#jA2E5R9OungZ-gB zTuO9o>VlpSQhW!Cn8iHUJL4PxH??<{_=I&a@LAjQpR{0JVbn_`inJh--Q)xyYg#WLhdYUrBi~ zzHcUrsBs&6GyUiC_bOGEU6)Xmz}3?3TC^(}uoQ)T^O2`fPQySFmFnP_4Zt!~78A*J z?k_GVh0~jz-#4~F{r&igfP3FpvQTp>QPP})jox z>^@}p60uBDTBj1Uq#B!)<UZL|!Hf%!dfG_xCEa{nX?&i)3G|(U*MLN|VDOv#9NB!H8PI}p5jzK;76PEz zOZa`;y@S(KU6hPr*UX0sJN7*FGu<8xN$1;szevcyj4&5-h&c&dg|;2x`#qyJS7<|)@dPR#)9EDy0Mn{`Df_lPl4B!2FfmbRB_oHlPI}i!ziW|LV`|9e% z4k(yIDPHSDL#Mov(WW!kUNXpXOf9yp&>jGe$WfH;Sy@D7Z1-xR?nf2s$nf_@cYE8X4RyGdOa9UrxGv%i1u zd3$tM(6P9ajsJH25-$!&YhZu6dcROy`TYFPJr^V3_T_K|(@tS!caG9OD_IW z5#aHEx^QZEtZh{Sv1cf8yF~C?IuqqpsdNN z%P$6eniuuLZh>bGRXKnltkoDg)Ird>656liwG&fviSa6&=P0}JHeKJvMH^*w z<%nbYPAB~xZ-#flvYGG7SVI*QnA1&5)F+f$7GzeH2fANk?s%+>{kgo^w)mLQ>6wP` z9&OvW3OALF)%b;~TC2;7UKS%6oXCaB5zfhz(!PIvV(0C)w%(FZkcZavts;$kCm755(o}MkV~e2&d3(gLo^g!U4N47C;x9kP*e*GguY@C=aeDJvNU 
zaK>oJhB`vtw-O`B`|I#wRYgA}tUD?83;}OstFtjN|Hy1OHW?W_DvBQ(cpTAn8qI~a zW47CT`0_Lx#H}UUo;x*nHdvt*^R479ar05e6PFibqgRvle6YM2L}$-CalDTE6+Cvr z1i`u*>3;r$*=#&a*7>R%r#qI4CTMlB6bn1NFb5dK9J54++UteCI1BAkV5ZyNceTg$ zZgEkN7m$9-nnw$O38g$aeG&@(2MXD86dn6HK@!$SF7IAWYYcug4xfL1{xDotQBx^} zRs;$;b6O2|`$*p9?IWFX`y%ivZEovyJ~u-1@mf8-dXd>GI-ll4vg4fKniKQcz!X~7 z==;UY@eEk!RWslH0vDY`g5b+K)>#56Kk8bim6z~}S@!CZ+^ufvAuH>)&u%KLlDxVi z&kJ7{dQOei&Y<*re6N1?f`(-floG}D3@071x9w0!jHSiY2r3B(lt|!0Bd)Hdo_s#n z-E*>>V)vGfhLH(ESV5fH<`^&1q0=O$r`=5mmv$-EWGZfKCtsuEs+0`}G0eETj+;{K z`suMPZ*e;=vcjcDWrAqJ=ZK#(`2S+cYcS?`7eihOkogxg&z2*s`k!^ItNF_Dlq2{@ zNDw#f_LE!kYo^VtPT z3%iwe_eI_sk;ZXsTdi5b_q>AG2s{=?$EUONt zlBc)B0~hN{K(rAB?h$6^v_7Xfl^HhaP{`Qecw$tsGBcy-t=ED8`AE8v@F#ei4F?*= zTMy$`qbk+3d^h$g>ALc&%`(-*3T?h<`<-t`9H zeD~=S6}8FLt!<_z2Og6;zyB0?u&QF<3+Iss^Pme^#2UXmKVs32BRsS-&=0jvh!U`Z!fH%L6f*Ct3c#ygWTbRAjcjpfjetv;U92K8NfjKEmX=}w0PQh zIa1qe9G(aMU?ItHZ66kGNqZxE-#~wL!p3IVjtP%9L*F@TxCD9!j@L3Z$K{!|6&#JK zwCAopZ~{I(i+2ZJ?wosz`0pB2%VKQz+%QX`I9nj^MQA_hq$f3x(Pt(RdC{{}3U4tv zR4LaxeT`j$0}M8P0aqjE-TMO#kF0ZoNb>Z=shW_M8C^{squoB5U(Lq}Nc zLX@`i$LrA)W*6(|gi;*O#z4eOKi)Q|WoPUEtM!(>;fS)Ey8$E`y0>gTdf%jqN>&}w z=YI3nj&3k_gTZT9%9SY7CMKMLaJG|;OZU{lh=}Lv8jabqMus+i`yWzdTW!-A;3N(= zV_#~fXC))Vp&jRB!^ObG=nQ-#|3yim9e^V2@~fbdHLAaJ=o&6egI+2DuUxoTbI8oN zSh3f1AgEgoC{dUgYiJe~eB-_l?-}_c>I)^!zOVpJw}|K^mB3r!$zA$0=k{k&NtaaN z#BAK@I&Y|gdCv2xfq+ZpN-bzgpnjmp+FSCJf?;$3AxMdHB5X5pta!qHX(^UNRRV>R z!=W7iVp8Ed_f}-%V)mO92}d^v91nlEkt)?Ym!x9+xT)j47lDTEW({BlY+4BI7xZpv z;Lp22EV4I4J7LerEj}?t(|s8cydw64$pvGj>o*DWBFy(d%;>6HBbmJNftPFbMsg@H z%lh+<{e!)h^o$DGc&X8%L!UzBRKV+VvvKzt%tW1kMjWGVLTLX zuEEsxZ)EnuJD!VSgHjfyDHu5+D3XFFj-DQi+kc2v5qE*~VEx(lj=-N6Q}{A2?ryiQ z18=;1#Zn{c*-ZB0U~TzRMsZ%;qM;`Nz6+Ks)fjIproRGPO5wJ8RKp!RBNJOip4 zDLnWg{03I7A}0koaLaqIgChZth(OHtP4aFShDMYG|Y!^=6mc)LNJ>L&D3UgzWI zSsJV$poxg@B{)_;MDTLbWx_~*m@vJ#>NsyBHRp#u1;OiJ3vD_(@O{H~jMnI#Z`?!~ z_cQfG&{u@fD8SX&n8oFKfugDga(asVG{(pO_4l>^uE7YqdN+B(sH`Awm#+)PHdo&m zcuu`)U;8nH=1-d5EaN_-&@Ei%KoMu)*8~n>Ooy?zxBp@GF6_&1HeavoskxH%F2~#s zXznvs>xnD)8~r8}uQmQJU`>UX9j~J|fXVb+*H3}_)@+|`^Y_oc(M-n=FlBxzJvne% zD~Wu4fC50ZP+h6Lt!$#Qts7ATP6ctL?#j=(^Dq-d`yBYMgk=fi0<2P4!rOlJa=i-S zqp_sd*46%cBSP1;{?5yJGW~ufN^D*|`l}4pmwx>LA>?THO9Ok-EVI-6CH;HXh(1iG zs*Cj`gk?1suijG9;a3$Wg1STl{7Uod2Thf`0OE<<~%GLT_$5fdBW{9_2QjTst^)W5fiennIj zxhD!Tds&y%VPa@1Nz6sVNFO%X`JEk$#tZe!~f`66rUVW{v*26Awl!5;&$ z8V6varY#A?U3pe6>=Pv>yCRQd$HUFdouJTq)lv^Kc)TrLkrk6;_B}kP0<2Q^9A@B5 z)$7tAL){sgECtMW*3@+EqjM#q&wa!5t8HWear@<GP)Ok%t~M! zMPl9L_<})=9_(}wa@DYY5gM{nC%2w5@pj#zRfpO}P+ZCSiI`wDwyE4tzo6IPnL)m! 
zLBS{BiT5lH&v}-hs|FeR|?ODEnl4_7$`2xt3yi;Xj@gImZ3f>J9IBv*{mW5`rs;VVGFzGiQ%I z07490?J(lYgm)fP_W0ohZ#Rd+t={I+9pHn?A#+u33TgZAyV~3r{R28hyE;D&yEJrk z1o}BTc@z`Jknu#sch<_;#LoT!NS!f4g%}7DB!7`I_&7yJMzTvm%dsIC>QB3m?hn0w zs?fgrElQ_Ka2!xgOk|ayUYxF=9-wWgo6k>?7qnL(C7={>>UmYL^9<$APMgPiE3KXpksUZLC}F3)AIZx0hE9C)9RRdRA;)}gOE z8L1-8$`@_@l`(EETSGP`rA79T%oW%tWKY0b#sD8Bw~ndx>uSIGn-jv8gN$V21chA< zche6ugbCAvP-o8Y8sCd>zaSyRmynhWWr~mg)Ag;ctAluw=v@~NVj|v6Y0xb?$P#M2 zLKV9bKvwJIC@#K^R15|EIeR#{*!6A^K8dIX4yKo+!$>nwu)f#PYF#^*$37u$3Qd)n z_r09lt-`|!oZog$3rCjORir91Yu^iGxn*c9zr4Rm z@(;;Om1_&*i~v!(t++Gqx{%o=7Tf9+_giU&a;~6r26Cc z1cw9vSE}p&Ly;f+;clypwyY3V5TRkX-Zx|@#%nIbC|fj}2r5xQkiz_;$`K-?Z$fzW zx8jB;HR{34d;%viXEKZ)}v>4Lf`Sna{*NV@li~ zOtz{{faw)|Qj^I9rdY=OyafQccFU_(o|4yDyRc#Bss4UAYdIEq8O*gQJH(45$vlc# zNTgoTWEbl($-62gV*de@0nC93bb5`t6Gx7=sSX^ks#S-IXN$(X^GecZIa%VZF5{%4 zvBxLz#Rez8Z1^Af%>u$Z%?U^&*=QN1k>(2fgT$Q>vYjn2obe7vaVNHX*)She2jhvX zgLjK;%RVzISo8BLn0ujalfKgDNGI~+BQH{8_1|9lcu=J@IpITu!dM}pl-!Zt*}Wg& z`M02U8FOux3fs&Kk?haFyD$gi+)8P#!9YF(+7g8~Xc7vx-JO13eg-{V*>Bla16ta< zdi1LosZWv5NL~OTHh^EG*cMOQ zDA0=c1gx{S7k)s|63tJKtZDy#p}&gvV9nHp!3+rlGCb2#ptl8!7Vp=GT0mrfi1XRy zFp(f?56H!FEcGCFWDL?@-EXB*vlSx2@|I*>h@-QgFe;Gvk)ZOu&NzG5 z1}A6&kemUq3=7Z0=B^hxgz$z1O@1NkqW(b3Qq*ZAPoD$35PMJCxaJ-BJLjc#5(@06 z9T%;c4o{e#GOPMTRHqPS6nE{RHVo;6ga)!rib;FAtCJ@s^p;<7xh?~q<@@N9Fi9OoS-5Qt>_;z`B$ZvNlilq zLgvv6W|ryR6fOYJEqtCn+# zVh+4#yG{2lzBX9HWM0Zg#O%HMB!_6|X+b!~&ha8sxzRZ%zlptv?8SS>dUuSK+l(G$ z=g#G8>8IA;ucc}D{o)$>zvrR+U-oDCf58_oX)$FW#JC^R&Hz-s_U$h4$rxWrbuCzu z(olx{qjpo2N{!ktuc)goA70^f-<}uk&8Lei0T#oOdPr8?(J)m;H0RS>leP7=Tz1FB zv0P-%Ow^Z)B5rN{)_XJ^2=7oBD3Vh(lX~*ycr+j*4^LeWJ&`QtO#||dvoJPW3$!1Y zxf=4%WzoYT(xys0+t|y2awtLc-yLYUpKLKjqF^v*i5^KmiayP3>E-NbT3n8w)n7#2 zw0JM3_ssQy#3GsttO0sx2uO)*l*SJN5~G{~txZ)Hec;lJdk5_YvR4mHN4M$;uSJEZ z9Ns;-y6p0~|KqT{{<`KV3$YdhZBaYZO&>N5eQ}J5Q~ONB|1ZCeNb4nJ>CkusC74bF zJ2_A;i{=fcjEqu}W)xHR*1 zKu07{aH;z3`ntH^+)?vJ$MMo36qz>z5y))PzYnj3c++O)@!j* zY_5oGw`Rw*ll=+*q!}G(p|vmv5^w3*oc0BO@(TeFg@^pSroAhu09;ZTuZba&0YUnG zH&J1e!M?yvCGDh<^diC@`J&i_Lb<>fs`q@d8Oc6#NO%3v!kpg>VUliWwqo@5G?B|M zAmDbVnA(RuJUwfA^@Q8VO{vCkGINtTUW#86k1}1`c=Tgf)7=i)ziEqiP58gfV9-`h zPLr|;=Bj#77&Oov+uK&Pk@#Hpo6 za!Me&ZKmzo88tL8=D~y#MgX6s<@%cLMeRna1D8vgkl71(wv=@*$jBIy7+tnZOBee3 zPrEHB=Oed07!3Z!1hqbfK=@b81aNu|*L?*bEM5#=a{AN4=D0tjw4#BV)s9k+E$m8* z|965`kS7S|LsG`~W5+3Wm{+9$xI5_d5jBKH8BgB-jl0j+oEX@LutjFT7|?j) zM;5-!(j=042cN04L|6^a@{99hhk=k9$)!3^{?m|%AmJR+su3pY!Jj1@3<9c4%FB3S#HlC;Lxx$W>CW6*I@*i^ z-hE{24BO3^8HWRs4UFK7A1#7_&vcg}rs8(a*`=9^i>%~bFvkW?X2Oq=VV;Q^)Et@x z*;&BM{o@G+C45a(HuR~y^h}~jgPa}ZU&gKXgPp4Gps!6HJT*x~DV;O6FLRG9Vxj_ly5zd_Ang|a<+@y||5Rc6nwg&FHPO1Myk zj#@1*PsV-95D8A1gS6>~nH#}u4N}s;Q42ErcW`SoiMO}!m0}iqCx%>T49~XKBMI7t zPIGiyZDWO61{z^FJ%9U+R?Zp{z~RU`yxgCy|If@ogx||21&nbt&Jyq7fA&3ogjKNK zY9Argz3|SijRCTqY4CmfeES|$YJt?Xh@C>c+Xzu{3lY*LuaN0!%TmDdd5yLW4P=Bj z-Z9+u&~*}Iq@b<+RUKdP>6n+_jmTU0AfPucd}pIU*y`-~{&2@v=*!oLY5yJAroS@> zsO_QY>ouNNM+X%yPP?rpHNg`r=oX&DyoRCUI2cpMP&smmJ;k6-w<=W2!#ynQ*D&Ri z-reS$*e86dc#xtuN!eHZ;{{M1bPyLw4BdqoIesWh{IFQzdjWWSxHM&px7F6rttFSk zzd#J$HDBaCAt20OuO=#M@YM)NAtHq@5foICdiZ%&YmU?_&_wxdYRy{CU>tnSXVuyL4#MHltHH`Gq(MN# zr^#KX&FP01M04tQa@qXeF@X$JB~o*AOo3IMImKM$N|R>hYlR(g#i(H~twMoK%V&zg z5Q97^+YDu5jcxBwIc~va0Emz^8Cro6cB5_f;!gV!MQLM=P0(nQ4;+mFd&WBy%vyTz z7pICK2~T9v`~a`3TjFXaEX&I;*g`c5C2~VYPu7wkqNN2!ojG1|N!Q^WpCj)0Hk?dy zb3gw4EaY%Rq2+cC`%{OsXYi=+598LpreWk?aeMG<^>lnZ&aQahyk8}%zXF;&xmVO0 z>eFe|_|031`r$kI-DC1*J_yxlzo#!d-hYj;m$I$e{E;%7@3v7`(>2(!c+%Q z&i}L`K=aL|roXT;xIzX}G?>m({E{*e-z5`D@Ll*ycmn*{85Ts`NO|fq7zB(eX?zzf zfnE^rropFdpYDB03!h!qNzfQ&Mx&)b+oBnlfXQIv@eWiL|NjCW1mXLnp);Ax_=iQJ;uBc< 
z^lhGHJ^%gnDS3W>dnE^Tdo-2t{85(K0Nd^mLR7^NLg69?FQ?N_-|u&I+ccH03YTI0 z9jDMXcXfTgK6A|}oi8!59vi9vH)-nMAp)*fK1yP-I-VBS~ zb|OM=mI(+@%u?1)WXYv%XxG-`6tDBbsz`*u-Fbst)+1@xLOPJvplr-Fet5VZZ(9zOx0ARBO$eR4w zvPB`oA*7L>^xkJ4@9%^jEh0qMb#>cnjVZ?QqeoW{{lR%wJ|}OqsC0-$&)BX%F7aBjFI9r&BtMUJ@o6k8boWYMq#;2 z4zVx*Ok%ot?PjJn%uTp53yW-(x2;ao@7MT!yHBPPA<47OFy^iayhG-h{hED00030|2$ngP6IIzo$=V-Y!E?PQy?T7F2IdB1y|q* zR5X;E>?YpW^P_N-JB*&*JiSkRetEmBYaVui=USJVLwKqj!~Uin|n9t0rtu(VPbhrv~` zwwvWW#kAF?=GL1IOwQ*zO^*~@#Qp&Q0RR630Ab#S#*xwp`~Uy|07*qoM6N<$g2B6- E!wnuzlmGw# diff --git a/compare_gan/src/test_data/image_cifar10-dev-00000-of-00001 b/compare_gan/src/test_data/image_cifar10-dev-00000-of-00001 deleted file mode 100644 index 54a8e9f4fbe72d8ae1a8a28c21d52932b283c5f9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2478 zcmV;f2~qZ*2><{90000t!StdEn-dDQ5(*7zZDD6+FJ*3HZ)9a;5~LCep%Mz75{Xbw zM-2)Z3IG5A4M|8uQUCw|AOHXWAP52g008_L?V|ty2<1scK~zYI1;NR$WyO5};2M54 zo#D>+&Cs;F1*QQInVR-(3#uAxLP-ttBsqcr$;?z5*ea3p+FGC5FOO@ zlHm-rM55>HKvGt5lt=|tEt(9;dzW3JEEYD9z=)BwMBR1$v0G5`>Ngjc&p(jW>ifH& zkGJ0_)aznwb`S(s+1DB}%b1o@(i}YEh1Y;M#*`3@a4CRj4eRVnrh#(5h*zl)=k!9d z7a#6+HLV3=Dm~)6k^dl14e}7i*+oZKp%@@wD2zhQDy$SDz$1|X7-qx?#!IB8w@`$t zRH^w2UWmS#)SyIZR$|j~$zit9aQ=&beF)dRdGMYCYHL!3Y)Z@|RZwvvrR0=LqHsYa z2FZ($({a!OuLRxFNJ~VUyq8+pU}Fm2=16l!zEOC$KiQt_U;p_{);!K48A3DEuyM(o zd|1d0BzB+6F1sATw9SUG#+Nyb6?cp`c~dFjBu7$tIZu=EKE_lqniC0*nw(W*uDIOP zAMJ8q!PzlP5B+;BkBrnb$;>lQpjETLp}2As1TnfxhE}ot;ACy+!Sn&BfZ&f>hY&G9 zt7OVJ_^_NS+H+b#x!?ATMBTE!D-o~C?u)vV7~qwSfZSBpz)7APWv zyF<98|Nfu)s+N6iHKs!7hA~{t#XsQ+xk8(^SxCVM=8R{Az~4GV30`6EO~Hi%MkU2*LJqsf5)7djGRgsSsN_r* zT{r6CF*qYoqLlQ9z7Qs*V0d>w{E|mIcix#0a!Ls2Lg4zLQ-W`T6fFee>t?a=P51Pa zT}2balu}%PB8x~7+}LqCKA7=XLPSy^@5a%bTz;tPrA0%m{gFhaL++1Cr3qZrv z=j3qVf`MO9#zQ0MN9F?R*e*mHL)L@M4i%pzlPEDkWl^}`=FPhzE}xK^exUyyOB(@{_lAb|H2TE)J+= zMKWfcoo53iEhNtfQYE}~J{F7fZom8OKYn(8wvtVAH2UpyJ9-4nMCq^`mtK1~ z*{ns$-iMH!5)yGBI7z&n4VE>2|8Tsyd%_qCE|nCLA#F><>{nOqr_XjLjjMPrl>GAk z|EPyLwTDiRS_e!ym8`{;T*HN^fKeN4NUd=O>`V)rdh_j%KP;8R$#X*2+x^w)ItTmL z@7^X%UcFd<@qD{pwtTsws`>kmKkBDjrjr|saNN0!0wV#Z!6r3BH_nXHWmByhmHFn- zcUDtM!(~eePWAre{PJvB*Y}UdH#bMMsIFH0`CiqEiPefOHt*iuzxnHrLXxN>BwB1n zLkd2|gi6J6LG%w_y>z4VDHf8OMy?mFjpFSu_nn@_Vp}z>nZxbF0PXh6KmPv9FRq$0 z3nA;>X{74*_U>=5zZ?3|8avKWNr4eL4ueldR*W-NB0z#^7!^xubOt|AMyqMYF(SsR zanN@MeQ|OAo6kP}!#5~SnSS{pn=9Yc!<=da{V~Hd4Dr`|MAO@|LN0brz=8}|KC6T*R~-f&fot1`uVTe z^4QPLE2Y3jug$U{L=is}1a!5jQH(~recLuN!SZyeE=0KTR=f9y?)qTwd!v2$@?U@d zZ(n^*_5HEyX6J71AFj?`TVlsA&1*5>-FwPV~*G&~t zoKo-;cEti?bHv~NbbtF~BBGyv`tqw+A2Rn)I5{~z>!)$QkE+>HtX_Tg$z3zG4Y7i1{G4AQObwL!96-}TqWaWdt_v+X~>+(vz8P|G-r z$<6)bhyMQETfAB{4F5D*GmJQAK`F}tVH!{lxUA~Bshc3?$<74FM3`g>Zo)_kc8BnI zbTN?q34ifvZN$)JS*4oi0ze2bOo%gfVifuEbpP-_-AyhOBw_?HBy&pN|MW93dU3LE zlmbQ&=14#$vI?&m#eno+;KZ z`+oEq$yhT$BXJ?Te15szuG@C0t%+Eu3R^S6E;VIW!A_j4Hx|zvlRP1Y!9Mvr*#(4` zje39c@cM^$0vAN0RO)fCyVLFOE>ClbF=?9V(9e|=%eEnybX~8rwr)lqrr85BA=xze zp%&HE`c#Q(?1tGG0KBew@zc$1Z?xVo!3P`y&4n}y4Tod&?YgZvZ6~8srjvGvK%UGr zj0rnk3?xdj090s+sfMai4U<*Kv7o$KuBxgcIL8!Mt$KEGVx4`Oj?R>rkh<7JAGP-g z;Sh3(K{CQ9_}L(gQ7I@9i7KG1l=A;iyTY9%tzm=!00003~6m)XJs#DZ*py6bP@*&1quUjZf6P=3Jhs&VP|D8Xk}?< sXmk<<8UqReAPO1^5ov8t}s$jPb6VW~}2cA(etl3NQ8 zpD8sgt**0tUi18PIR}G)p9RD7OmQvi+iV;b35+^*3qUjuQ-pF&zQobpujSB&5eYzS3%V`--^x96>DIgqZBzSf2SMMnvJ0n7Y-hr z$;TzYv}&{C+VJd@Yg3N?6}-bDVaUMNAkdeYX}Ndm`(Wjr+iu)Hc}a-Fz1Udo-8#nb zH1+AZ$zq{mwJRSlixOz6p2)gV<%@2=lwb1xknJzLm#h?vDPrfnxckkev^|?XEuX~| z_x+RF6A#g-b8A%D_p8>G1ZMAkp&fF3c6pG3YPeO!&C6kX;yLr)u}&1sZ)6Zr=-B?q zAZ_aF_JfYmW#`v(ye*nPn_1+7Vc>>rvvAK3JB(L9_1pEbK(yfF{soD-2Q0S-shcTB 
diff --git a/compare_gan/src/test_data/image_lsun_bedrooms-train-00099-of-00100 b/compare_gan/src/test_data/image_lsun_bedrooms-train-00099-of-00100
deleted file mode 100644
index 19dbffaf4f22e806c6e4c77fd9d7b473bac163cc..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 170141
[... binary patch data elided ...]
zrw2-`tp-SFE)Ik_R;C7Lo~Fdlf7J?242BOAT2y&%gMPeI4`5mblk^PzSAWH}?WwNh zMfSB{<6Pib?7r%(3OTI9=v%NwX*_J>Cvs{-?=zBn#Ex8O2}* zK+Rv8%GIZ)yB@|yh{?U~lJ%(tvK9Z_lZm>!blbAIYe!K?pgz#Q(tTkmNfpJzLBTB; zN9UJ+ml0yG;`k@;_j6;2Hz{=*+3K?wx`B&F803PFKY^c&mX-ss7WrdO+BfPiHT?qh z^qU~m?K|c?BX_M|l~M1QHg`}17?$~NbBXP>mb=_~{T#<@4&dEd&$~$r%6P1K@sx;~ znm~89j#33|P^W%ZfIDjT%c?TCy;@1*>FWMQv7kB-j@!`~9eGOIn-!YH*=!{d3KAkz z6jA$_=cf)E+yunR>(*UpZBDR80jN!%_QY~Ba7AVv5Gy%9Qgs+Oq0RzY<{e+^Q+B4o zYQ#6y`}}hT4>0_F!Ze0a*&o#_0SHH^pYNpWfyd;Yb*$7sqAD|gg`dx^ErIFTkhmtg z;leCmm0fhunT=8q1g$hx5NdU)GsQwgLgboA-L3ZdhP*XGt9WjX5~y$UTRCTVNz^lu z?BXV1^y@OEOL5t!9%^=L+i#x*y!$1u@!v9W#Oa?OIN(QkA8UG!*5Q z(0V;uq4wNcJl~|=x>ch|UPqZmru_k1Q-j!~h!C9FIe5=DXWhYapd>8#xK2Lg{B-7Q z#}ND5#USd)X2fNy=uX4}0d3%Qixc(6M!aLZ6K`FgES65f5f&ERaJPX`0btWugJmhH zpyNn?$DbYWh%j9fLLi`-?BZY;rZ}F2u(FQtLTliXq9cavL1RxFZi8RQM}S`c6>!34 zzl(Yee(LEP>n5O+eelV#QJ8>6tfkAX+0J?_b+BY~jnPQ0u-HEpNO^fv$FA{4V^;qT z5y0kHxqdJAWqCTuBMi5?dK{{>scg+W^NXZl@yW#zJK&)2T|CF6|5yZu<-jbkNw z@<>4S>4tWr)3}s$DxKAK;lSXW%xF?#BB-L`9vMY@j9wGghfQpw7FmwOb*va<`}x}X zhd2taZ4w29mV34`@oiJ+QFIx{Gv#l5RNo~QOauPVlJ`5qdbCtQoZ;f&m${2o^n<8+ znH}rcB7OMpjs-&Hc-Dbs-3^&vV8Cq%wTPN2`Zl1w7cgWr+o0AqPL0M-I(uO78JW%)G4Lg_-g0x%5^}O8kemHcp-5lfX@=XrT(>(5 z@~-_U+=4^(0X3)PMiiY?YXtkg(NV^B1(&%*@egpMw>Ki03A9A)_1HbqzuWC^miVDG z^^bZA+(Yi^VBeG8_2POLfx?}9mjg(@LZ6F70r0+oZ>t4UHka<{*pW7gfXuBqJU%z> zp_mgM|58v-j=_%lL+qe6U)`=J62{(we<((ygE9(yga<5Dc;I+j7{q)`%T3o zD#MY%=K&%XQ~V)g0)lzGwvFLR1x!}!5>RLv3aigJF+tI69-NA!030!!73h^3B2;N~ zo*25bBvPQ--8g|?ya-5nN>#*ATp2o4(4|e2R-52Hm)=8rZ^zgd8uiI~&I&W$*)IH~|au1V&cU5M3iqRJ#+Y$nWi0*Gn>aPG9PIo>{ zqm-!i)kR=84XneZPFXz$LmH8#m($8=KpLHOq97H{*=>p<^{~_GOymI};rf{zZ>*w$ z2>?wg3!8u1x%N4}u5*NwcVa{&+izzD< zVqbwE_l00a`W*ja>J_oI=aqp(CzHr36EAz!G4Xa3vF>lnOikDDv|yt^J)c#&c-ej( zu-|&ipBhmA=;-6QoS!&LqHLihlpCfD>a9V}^dE7Cf6rQ9_2Qo)akOI?>Z*x8>qXIN z5?=R*A9yeIWGyB zF2Z|UyGedSH%R>F4Ef%cp==JT?6ZZ?{V?;G~Y%LdD}TuGl{Xo^{!Cihrbr8wiy6eRa`5Ciu9R{{R+$h>H%sCx|x2TZpuN#8cS;n5D{~I=Ot1q z?UJQE%X&c>6L8Qse_w?GhsNfh5eCYb;2;8dZHnBC!~c%X~0pmvxhLed)>=8F#T z)i)&qB_4sZUw%>cVn6l_r(0p2pQ3NQjWWwN!?P|jD-oQa(o%M&c-Hh;Joy(}s?H!6 z&oCS&rGLyt(l%JGUy~$?e+&HuaCmrVV+VjLbHkD2h<;c?z##_XG_j+r(3#`fGPC<< zv3wjevzvBydK#`*^6<4rSQa#^RhVpex~zseco7>$nwPoc)Ds6tMh}B7V9%X@#udxQNnJKFRTLrXhMU9h=sh!})N<#dumaW&h>bVMK4~EWn>8^J7xWHIMB5vGH?_+tzUmi<8wrpN za$Bw5yLOadn?4rSd{{6NApNY-(MfnR3Ow7CGHn>IhAa~PrRM#(AWlVGOf1xKhMbgS z<3Ep9n#sW|c6QcsGQh2~p5jSXAPh)c&kPT-n)K6{J%y?I4)LixLA#t!eEO>po_+@k zW)}P-0!ZWcPBYixx>?kw);hj@Nv7PZ#KZV7bOlYB^BQ-}sGqsOCP6 zWMVI*=tIp*WTjV24lu!i3^I@3hC9T%cH=GUW+=7DbkzM%FKwrU*LqrEcR#&v&6oN~ z+5R1iS!`sF%uGAMas3JH1-&{71AYV)s?8K~QP^Rx1R&t`+KjD z^Q%_}tnr{zyen-VblujW5*9|s^j0*_nz0C)O~hOyvS+k0`0smPTr___Nd>nHe?qv) z%0UuqxQ&1~Q=o_m>%TKclZmdv!erb_L?~afi+2vB30U?`H;$V^E@jaFF)%han1_)2TqR&hDlpcgJ zp#2EKU+Qkil!Zx9lIW$>4)p!E9@X;}@P7VE5vKu6M+H;~khWgsu;mPZdra&@P6#6% zzm$@V2G*rQA(G@2>R_m{&q@ypMV2m8o*`J7xQ8<-I>8j656l4z?ELjkC)g2#12Ad( zrexJN4WXRYw4Vw04!r!9-OTAza8`PGdJ(?CNL&9buN!>tkrR>6_Q4(gGOSj@J(L}wTZF; z6)deRMU|#!OiZH$hn@>rn^ggQUkIu2NJ)}y0CkI)8au@Sp?AzWyVEySo`4kcK%;jT>FA4ljN@IBP`C z?C-nhtP0Cb;BC-kbDlU=#YAQ)BCVUQ!wcKu&-jN)kB&(t7Rh?N80WJxM&)@I5IJ67 zo-g%X+U46UKX)6L_02~y2&0L=bV0!3OkT!-hcOT{mq%o(Te5!2uP2U{OSEx; zH^-XQD(971o^$QfwYut5iHlppIfMRY(n4o!xzURVNrSRLh3gh-ko%}CT^d3G zA9}2~+}2cCUFt104OyfbEUm%w1M4u5d=dhS?YD}yS<-BLLdIDd9&0*iTUFT{H*+!0 zWcTV;sNQd20&ac%Yx>jdmKQUVvGTH6@^S;rk)~|?4WFT)m&FP$bF;C+G}iVL05H$P zIctkJwh`hXDF_AGp|=b)U?h?l-iSfszs;+T!3p(yF=yD>QE0Nx|Yu{%?D8)M1Ou{Hqvl9AdD~^?ObqdD} z)XLCXa>q+3kf&BhLY?iCA8v9_wL9FrCz0rMaZkOjAxo`)_K>00VOD)X`_~UOeJ>JS 
zRF*0~J(Zog@{%swIyg88Cce2=N6lFQl{gS8%|nYf&5@nKv@@=(f-eUneR@WLByJMaK}&_ooS zy*FcGD44X%@5dfhSlU_MAc9E=#ThFVxwV5%y|9H7#Re$Ytypqdl&CVc$7 z2KMQ{9dYKSrfEEW{<4cGu9q8eP;zU?NtXw_EHyfFGLov zSy^Z`qnS`?lA-`?5(jmeOU(UvKq3dgK%c!Pk`K>Eq3cOig6 z$fYq=ss4h^&p@As-via56Geo1t}PM=;0wZ)IPLz&T02dR6yUk|v4?|j>j)pYEV|llV^llo+2u6;ZEeDAxW!>tcGQsJG}2?{3kP_3QdCHWUX@1LDxAy z%@$q9;qGx1Z}ypN@?UJr3^d4P>#i*S{Bes@@;z{SmA>1Wof`oEoY|pLbcd9jWKBy7 zu_GolG{0!F-HqezhMx7~cl&Acj4Q_Qt>4gp=)YFIxr&f8kxos0b|2A+cd5%1T&{J8 zah+)Fue=%P(R~g7a;yFO-fuH+KXzC9PmOgNgU}q($+oi#p z@nIAbCLyLzlxC=+0ImD?yz?lS6B0?>p{juT)kJFx!kra;goLM$ao|fDrCCtKBnp_I zICekDC;u_2E}33s9_0(d^=FjF8x^}JMY1%?ybmBJ&KJ7BEwcvPL|yxfrgrRM4{W|# z_^TQPvm;lvl1yPji*1Zoynl4GwsX4SN3{UosbhjeA;zR!aP4ZUUBvg-RS!RJkZwgn`nt74+NgcTYZBKrq1} zomuaDOSio7VBYoFJt+j4u<}o>cAq^$1R8Do=2Z%2JPO*D4pmx6ejmVyUt0~#+J5~G zll=NCi<2Stb-UT^?~*CHJdu`A;&K3^+>NOs@~8BrQ*T}qLN=&d~JHGHYlb4lky4hOa{xO8ONH*bAp({OjE=hE;aJiH1a~WK% z%U^9qt%qDy-vlQ#&f`mB|JZS{mO$^+))oOghDDTU$P%0qYhS6E9&P}6b@BktfT}j& ztU~N8m_OqqNyXUN%Zf-zLi_2Jul%=lpRCjbQWcXgViCy)8=umOvwVtC*B^A!M(5a; z`jfB6h=up&mt*D?M8Ln3{Zbz4^m7s=Ql=xu+QslgY55!HN^&g#o}2n3L$q_I5^ogm zgwfQ!3MW&=5+Qne*SSW{I8>vH?FaPh#}tKsjh8UY9#B1x;e$YaUCFK>*ubFt>!dJG z(Lz4b$O?ytyfJ?fZmU@K@&TJ;e8%QyUxX?pMe^HQot2NB zOmE)IUTwMQ+^aDd7I-l87S3-gBe>0*F=}MY?71RI_M5_jy7uCso45_)_QG$f#EWn45 z%1(nzS1Q@T z^q}m9>b}{VIJh|YAmZ}R`D<(1*sumk?Nh#dN~4xn@pb$7-HQ%PIiITQ4~~Mcg3OEt zBl+JZip&an!bxuie#KjNJBYfyxmA?^gjc~l=*siqIiaJZn|G9Nz0Ys8J+{{Y_Yw6) zPQ~;7gCkWyf~QMmQ>UR6P^NJx=~UVWxwIYyKi3eYkX=Y`_@8+E`w90L(&ut5s~n(#?yml zNUwzCt+JQ0KC{S>k>bGmxt|L$JNN&}0ol}Xqk9a`YSi5Bd27~c}G&I>r^nL%&{h&W4W7D&iDI>xwwx^&F}STx12I zJf^~*R$8aMyI30hL2-clY#53BA&ZSQO%n9A7P*s!VmPT8X-GedN840I%)egS3%a#|CK%=(q?Tk=yu z>!1-{fT~w~`@C2Zb!K2x`QonX+GKyZ=iL{)*(QbKN_pJ2GPw+TfZx2M$T{|^J7v!6 zeod)Aot>5yP;H-R$FlW{Fz*@tcfQKdI1g}^MnkFzamAA%%nFDgTNy02hPx6~liJpZ zG|o;fzMHl@Q)hnX{rgB<$;ceos&^d3kBQKoc7d)H|2^{?eJhr^ezxdmHsP(7b;lzw zEe+NB95;&0%oSSLe1JW+l9r;A3HXso!x^`nrVEFQO&B=gG4@;>G$L7AgOzCA-v|5r zYsN^&p@)%Mh25q017w`{sl&6BQ&cTuJ3mfNIHpj6IIqr$NAGN889#Pp38h<35T@ZI zEL|t`v|Jvib|eHJj}1!d>XHBRHeL21v|Nh1S~tJDSbJNP?0z?6(m_t1eF7Q$8$#A+ zlH-WaI(t|`&jAN=St;m_!IIKh*ZonvMP9OOQgU1Y9FF7VrJ2O^=3~W^6$Qd9*;r{= zW-k&_mg6s7k}fxBm}hASWJ0qYysZ5hvw=VoC=wUkIoehNEJ` zd$a!xpExogU46r8M+uQQiBiuzqf2ZVDeuLsaMfZm!j-9fXRxjoild!bFYLj zx?;*ezc$7cXuxQ^GQH$Z-w^J7`kg#!A8zN8qWZg}63d}0bCs=oN@>IxgG&KgKlVo{ z^W(>*jsqGBq0~Hp9YN~pWrcvT!B7pHnuUeq0s^4b*}A13#Ps$Ff+j5MyEX4jZJDB& z>BRfa3nw~GfRz&xUz?y?g*ISD#AYE(yU0HY*NCQ$r!mAX#1${N6sgVaL59bKe?H;e zezFtAq=Yci?s4<ya$Vy>vh5BLPn3ThQPv3#q~0DwO^Dk)jsukWf^mSv6asT{LI&JsmzGU6AVNL~yC z!ZMo4(<1QKMXTdCGqSV!=o>Zg*>o+6-rJJ}oFl{`V+C?S19Pavzg2 zDjrX57qS#F|LDcDO_8-iqA@Z(6=_yitO~P^?XMV*bPR)P7&}o!^M-SB`GYv6+Q;3T zE)pD8_JU8=<3mp6q9nbrT?2mMuhZ3__42V}4lEq&5;X|NQfb2;(9n7+rZN##U{$w;jxqQXx1Ak>D4)bhk>f(f^RWI()h#h?&??m+h#38# zhqv)5OQab{JOC?M``WmLXGuQOjbch5B^TzPBqQDZrs?}wXAeg1M!F5>Bln~ftGV5; z!>qsg*~B?W)gkQ1V{kWG(ME|a$^l9z<9%2CJOe(WdjU{aYvVA*cTiyN1RCF4S~sC$;4(YgkYce`thumP_B zvCLEGP$>V4yD({O2-xrOADd{9ku)`F+h0QDm;0s1yo`0n(+Abd(JqgT&1p0T`tR(c zm(y4+&0g@%7~J-zcxorq5*z+m9O9jCb0MbMHNQa#5{o6hw9(!8Dedb9K*Fa>!@W)GsFR|(bQT=n>WHblS7U+{+^V4v zp)EFHc6=6{kOn7jZhE&&pqQjA1F*O*2uf_L$c6&aFdPa^_h%a#@}(IL)yC3D+g0fn z1_(fb`tOSEF*mgS>vn1B4bl;%mC+r7yRIcO*b0fCcO6a%7t02=W$!untOlfddWl=S zjK%3ud}bc+2X_{iXd`5o*9dJ-=^cU36eMu!tgX#`=rv6i>sL}>Qbm|E5A!!T4}>FK zf>NnQwUi7zB?VxF+=Dm1rhYT{OO;Z@d(VfJrKX*R>g7|+*Si@>4OmkcEvkSt;}Nvh zk#_5+0UQ(TN!qK42aJIG$s{!{va^{1lR%)N=7KIz1@6`p^9YLNH07DtaDssLdSP3mRPFZOI4yQJ^1Y5+h|=fS<$!5lJdbVcW^()&~oy`5ZGT!P>PFaB@_0TdQk!f%#*M>I(Q9N`K)2nD)cwMb4Kn>d{O%~>7Fc|o)C3A5+s 
z?tSf86y3^@-iidWA30?wrU&7P_`oVTyHj<%T$Q8M+BRmTF(RnNn-mZoyA~c4oe!NH z&e#%;t6Nq>P&jgDL3-t$UK3vvXXn0~7|nah+1a($Kh&R;lh-viP_!>UnOz8J^Hy$` zmpj0kuexSB7Qr6>keYWidq0uhFaGZV=0jTbz30EC8`EVt$jMXzvtOS1O1S;fvVPcc zt8imsZTvZkE}NY!x{bALP}mx_0Q!V$IQ z&1)_v>s%qn{?@sT$(WHh1=hy*p&KCLhMcZB-vU`Uwhf2%R@T?wM)r;KM@IpvX!9zO z+wiB}3@VgsY1X)BO+&Xq$vO}Lilv1j18NQ4+Hr)6J`|H0(c0x&eec4Y1~&v83SdQ~ zD(JekPCxN;?`s(DDn^8@av) z`KwmiyfU*wzMs!|i&6xq9wH5#OecOKji-#9@~Cz4LBbu2)AYkuT53$1GTIpRchbbQ ze|~6(3w33g4CZT$SdTl*n0aWKh6$VGem#9&OCEPOefh+kh@HlY|WZjWdg)V&;x(MXb2SwhGE{&-NDF=yt$#M0ARSidwaNj1F)2> zib4<7a6;Dp>^cBLNL^?LIpq!kj_D0(?>J^a&2^!U1cUYy(k(Oss?+ku2mX6lM+2p$~*GjOuViK&5CGucED z)exzV*i=P843MmBQW_Ll6duCUx^CM%S4VRdm15qu6CHz-j3Y6wj z9gbcL$O^$Jh3FheQ451m51w#FbBAE=qHU=|WQvJXKSZ7vU8rW|5=>+cRh(_dKPH2D@&_lo!sZ~=cB0^~+3+5;W-nLR_ z=7<#fpwK~43|!H)$6UHM6$f`TU*_%fd|uZT3=r82T2&EHy%flZu-idY2m$u{Fbop~ z3^8Ux0X$qEV&qz@Biz4x`vy6<0y?>ws-i)RDTZJMk53P~-7e?c&TUziZCU!{hlrL| zN6Uz$Qe%kIZbT{LG##ac7?*W@c=(Lqr|0AG^hnq1=Pw`2x-6%U)0paVT9?zu55GU` zhse|kiWj9601zEUG_x1#0U$cMx*IxlrCg6{Dy7Wx`uu!i=IiU*l*iAXKX3B!+n@hp zJWSP=kZ3DMU<82Ww7T(br%aiIHv@Mc$MNp&?q@&y+5P{y_URO4H*`ere#9>_MN@d0 zYl2<^PMImbbYU1mP zKyr2)0q@5>=OHFakx46I2;c+Tm|!Aygv4Q87P9E>0XPzhDK;hMz-SHz7r_@n0pM`hZ@LRHBc!_M zvXqkLi!-a0zZU zN34d1Nv1Ipgg|`QVQb*XhHmCe(0v3E`W&r~*X$+T3K4-JxU-VFS*t1{^Smx+Km)U` zSO#Fone#B^oMViMiby4*kb)tUx;X$uE={*>t4*q!IhyITA4A~SRlmHy94;xPtE;PJ zSz?TU)`vABG7KRFQn&8dB4z}n<_0kB_F9(rKl_XQ&3f7__xSXG+-E@0(_xxnCZ3K$w#LmniMCPH-=Q2k`=xPQ4BT)AMuI@hMNJNpMJCt=h z9IlsTiGP95=UENEdjGTUKYcl$&zqD_kKY4JDRU1?wWf!|Z7E_3pa1-43u(3RSh3PX zM5w3_)J#m(MYTB?_~Yq_VBRV>$rLy^qY^X(@z#(y0L!*$8WJ&D zi|Ay+%mmujnF5bP1b}5(9f1)%nlU{khj)C;xQ%)ZlofMp}1?P zN3&jAT_%}b3g!+75MIrk94_%9;_m3qPKjwql$_WZw}$RGx8|ycJirik*ZZ&Dzip=< z)^%Oi)tZ2rA__DW+}8Q_tG8jT&&PEHLh~=jbBxjLDX@+S03o2GFRfL@vaR#uaW%y^ z*Vof-LeO9(u)cqDL*VEGf<*G*jld91M7m!$a&iog5r7#PAUX#KkyBtsgPuwx7NP+j zZtpj-xzxP7+FBciA%=b*Lu)8ftw{%Z0+E4&1EMn^hRY3+Lm&C8bxWauCMqs5CUO`e z1JtX1yqazR0ES$w^AHG}48a`TJFxSTJmCi2jkaf;C^%u@IOJ(e<23BbTF`KwS5*f` zPI-Sg>~;qVY-Ui8Y9`X{9DN8oaZ$JZer&Cwnw(A{W)4Z4gx*hegDh3GCWx3*xVyV! zW~mLyr!jX~YHI}(i)m@v)EFT!H=|{(?6{P=R88bhr}N|UGem~KV+dRE)A`FfpY|h7 zSG%kIC>hE+hk$_%(XpE^fRQP6dpvXiCs2o;oCM(FP9oxfVQMKN+`Q#<%XCb2f!eq(r8-kOD0mARxFy0~GKEZtegm2n6We?-I}n$Pq&j zHB&%Ba6|_e5mC9}19EgArqR_~A%eg-UEN-C#QOtY9;egklD`HJkaBl;0i^Ke`0#l? zo}{`<%0nWjdOlBwiOC$rth83tH}MK!p*mcmZC>HzMDkN_H3 z0!ZcrXb{Pf#qlKAGz?+yG#-x?kINDxPrHy}8=+oJkO{!&5JDhOZEgx;5Ri#N42o zFUyzLx9O6~6fh8(ReLFje)?7hrXGx8==>7){5nxVBy{e!&X?0I4l0690f|Q3jXQKm zoQQ&|tC)f#1!Pi3Gb78YY9OMa{}15G)#He5$9=| z#%b8?c5!6!ZJa_Zfrv6IKmf;QmlgogfAU7Y zc44huqB7l`i1xc|02Bhp%mFz>;)oQc-C?V;t;_Lzem>31T2x)ZDM}y8ShFf7>ebA^ zLrwrrtwqx}c^pmlgu%tos9Sr1xvP`Ckk70m07&#gR|CZE8RzPPM3K3o7pZHTWBg{! 
z*>u|vdmd6`3lu`$)$Oyt7)}BJB6qOi?YnpV$yOC2dSzGs?oZpg#;wHlJkO^kak!dB zW{d$Gx|(`vZB|n!Km;w9MqFB|V0L8SUA$#Lm z%G~Vuysq%vc+c+&0CdhMGY=t9&O_62IMimFKPMh`x7Vfm?iTm^!?)l3o3hQz@p*oJ znCEq_x~QA#y6EGxOj%WBX&d_aunWwV@@-%Yfgo^cYG%YCri`4Cf@9J!AP*P^3K=P6 zjQ#rZOX}wZ_u&}CTCXkvET)L!G0+qScQ;WrG_x*vyyRsec0d~X1P~D&#{I!xoMc}{ z;1>`G0FaPgydwI?wX~PNe^H0M@_6y$X#=nCzPmRgYXauf-wHAI4%HL@Fd%hP+q4tQ z2@jFHJJ3S_awLcd;&OL&Fh@sl14BY!M+&XgVHk3X$XM!TV9bmNNU)sesuBRiRHi%!&+n4j!>SBNT{?qaF*I!?!$ou`oZe>}X zpJf_SiXC@!Gf`_qXeNlLstOGN5X@Zw&AmA&x<8lFpPw5eGE-uXL}7|-K+8HiY^tr4 z_2Z`>niWnM6KrkU*2+L`E>gis-7Ao)G4nJ{_xJZF) z?Z5w*``g1dKL@NghYYeYC?lkyy{x=T97=Z){`~ml?zObmTFk?4w==V4Sx)m^)xdEd z2BftTQXJx#sL6uf%!^AwRdR3#BG48Qkr)7w+&WLB&xip4ocb^xfch^Ga8aNgSI^Xah5Ipz(ZHKk#^e{-Kx{B(S}%^oPexw~;S zcZf0PG@y#RIjHyIrpJ5Hi+clFA2$LhxMI)qYDz@RJ(Xr)N&o~vx}Jc!l+`?_wQbwB z^R~6+v^?(i!`8N1WK5F^mLg1(4AX-FeBb5ZL#=^U=zz`EnPf~Z$> zLHzXjvr5%<=34e44#N~Ff}vx?OPX9T2nOMjzu6B8?8Pks0umY`QBRa={kY;@C#md) z!}Zk;<#d?#zxeuI>x^1WW!26ew2Q&l1t6eZ8i>daD9u6$PJB9@PIGxW&ZpA}q^3aQ zZnqmaFwDzSL?=`NMN{s=2>^6344u;EcBxMQcNHpx};Q9-cpb{6t~obbfxGr~P$n?TgM< z3slJvIYtm%Yx{h%^A?KkicQA5JkhQS@fiUXS)`K57=}E=F7rkT7)YE%Rq96S5E5WS zgJzdv&$AiRcc2G>EHtv?rzzFVs$rX8Cxitg8 zVHk2u$iM)BE_Pq;4lh?`{}^ItB(`vg)iy+YjU@5cq5#$0%+2XCBrx*;91wxfP51jr z-H-yONMKe>x^Wi<^Gjy4zibu*-?pvm1Q3ZZ5D8HTJuk6G6FN8(Fms3z&_WA0peU@OIOT121L!{P8; zbS_OqmURIsF7-7(qjz4&{L&Kl3i{$6zir#A4M;6gN&$cv>6o_aV(sgT3 z$D@civN>)|=4G9iZC%%@7Px?Q_Y!TjM>(2-ry;v4N}0fSyS)yk$<+~KFn2}*NQMD0 z0HOgHHb(+vAOa*LL_k0!h~0Uxr=I{K5ycdC`|;hITiqVU(XXzOZI#?x(J9^aRcq$n z4+1Ac0!QS)6b(t5p3lqiv^+hZn;-HxjKd*-Zu3)<70nzfQvYV_ekr7V28bRRzatX^6NMqCT^feKEV}d}5)lcRfeDe2QVN887{a=irJhw$M2tu+ zbeRhg5Rrpglhz6%#uQSB)Kf=Yy8PwS%*W%ol#+8EhT)dx60cSnvdIo(JI(13vf zaPSWG5fKrJR=7~B{dMf%?glC%Z7s4D>LoXTt8KNEQjl@veHd~?mLlpcZ~_GH!5^1c z3v-8QntCPyTqJmmh+!CpuG<3WVetT9VA4m$B7)=`aE!Slj?21=DIzPlI)rf?cKbK$ zy4H3gg*ARz)V zA$n`gniT=Wrh@0w`sK^>)8jMM$LVl2?XH+91Ofn6O%!v^)41!I*26G?w>4-=L+?_u za?aLggx!C897n65-Khr=nSlt6UaG&2={mBT!pjv)lCt>(xCs$<|Z zpV#Aj?$H+t*zIpI#7IWahnGEj+@zyFU;;v>(Gf$4VHn55Fzf~n){XWNfD>RgiiSk0 z&|gg;M=~G+?yU@30JBbz2PI&SF%CE!cGLT>-_EB$dpk-yNj(k0$ybFPfCJ!*u@5>1 z0xGq&r8L_RNW_k(xojTONTE%g}l?bSYorQHnj=vUy0&@p8}Zmqg%2%(8}GiEdh*cd7aq7kF~cy)pI=sj{m zkq|kvDQb|yfan$+8;{~mK}8I#_F%YeS%)FzA&ujh_~Ph}siMu_A+2*gX~oMVD%Vgpv+*2PS} zeEK}E3&rqm3~z6*rnDPMsVcHCnu3zMVz*U61a(zKxUlD`o9`lqtNlLa+y$(T)^~3o z-*D(u1MC@_eUSEMcm3||dsTgUcrK+pJsuB-LmDSDLt>68T z6b(S#>_zkSa_(Gy&(^equS6uxN-2H*wMdJBTWdoM0FYDI@ArqB>rx7F#1I`&)T^4R znj=F6b5m0FCf*dAg97~U>4R&7tz>KaoC!m#iY{r~b&pvN>0DJ%h^ z8@n?Ab(>&dVCIa81Et-NBOXuZVC%Chx5lTPtPBpjRNN} zv@}dZkTFvrf-ppLZMwQSA@nI*U<{Y7&hDa3DgOxi5;}D9^Z9=;B=004ji0s}L| z2r&Q$U^WkcEe0+TT@afZc-%rz2YdwDdC8bnKv2E~*4dTP*Kb+d<6*L|G z*Khw&TAhaMT3k1D?XY4`hD-6{a||z9)2?UfsI$L>ufNtleaN(~Yrk!VVVLIW!-o%j zNYuZ*+objyiq?i9I+HlgBDJX+fO|PFxYkhGA&=L$`5Ji-iNTVcRGl~n&M76PKrX=z ztf2{6cjO(7C*i2bx9OqCV%C6=kOdmRMqyP4*ookJTF!57Ze8!r&jH0x)6>oFfVIU| z-y9CA`sv|^Q`65MKc1SGM&OYp(Hc%ae-zYKp|_FsOs8wX$Zw|U*3L;IMSi3ls0!2|}x z&@9GS9Fz>%s+UdH5CV@88-@@J}#WcPoE#gVLU#c#S9T! 
zYpy0D^E|(SuiLgYTPbMD2n;>Sc)z&tF8$6c{+Xc_YnwhTb$y`@?iTEe{V5W|nizImQ^9h)5G@ z^P)klPO=}yXw>FKmNS&QUwnPr9WOglBO(YwNQ6C|zMBc41GpK&3w{RvI&pOO4w4Fz9A|p!2_&V$iVAc_4H$_uWab<$$ z255m1<2deMEuAQ^ZL>%viVCh4AD`z#8UW)D&&xE7!n@!9{x{!0%)-oJpW=v=IdjUx z7>3y&E|4;@1w=$O2hiuGnwT1)6FZTDh>8GlPy;u1q^fEPXkMx}Xn;&%$Z0xUUEkc_ z-QB<4@ApfJ7y`s3L|c>7wG#t zKR@?(>U25*0Pv4e;g>~}z|_^iz(q^*t+ut*D&;p39v&WUr;#bvDoTdPj$q)>`|)lL z`0{8f07k$)Ti82GlF40bQy}6PkjTJHq}yc>I)|;Pj|AP?gtaYx@C&u^?eI6P>stSb8xtw#?b+uw`nno2KKYoh$!itK* ztlt&17;*v3YGX=c%B85P%Cx|gDCI5trfCc@ivWsOM5_+oN@qbpz0Z+=>Ku>&0gY9G zAjCchCR-5D7}){F>|(oGUrdJ~rGyMq9zb*OeaVa1>M)#KY#_$dVc_mjw+Z%YI(#)! zXj|$_X_o|L62_(RI3QSMzM~Kkny6$|(ISk7$b@F8il|H#xj;%lq<}Ck0?6!fPPh)O z8l?q}!|CPgqyOS{-+$G0-AMfFzx`Vueee4i<5#a+kGjWVQgc!%u^u=diNRrF< z9(S=VL**PfbX13x1W1fVWFmFU;nJ5Y#a&52uts0o`WRr)-8CLI5FV zhH65l5SnNTz%zPld<39nPIFqojP4(#5M19i(=@ehi&)2i3>la?2CrHSGcx6zEi|ju z3Ku>c_P_Y#v|WEUI1P>+qe?CO>yM>+mYyjs0DwS1>=2;Z>EM0zK{I1=W){e?=e(qG z9B*&0r)eT`OG?Mnslw#Fk13_5Y0_Z^-|Vxy-7gHjcr2wD;Kjv-$zqbwat$EsH{TAM6moM9-&hg|q=85wk~hj6?`zma~=&CBp)y#Z)W+AroP|zS*ZN?44L~ z4qVh+vWvS0TI?@Mh+}D9mguZ;7auKrblIs!G@9l-4zrjtPqFh4c_^BSNXbhnscqXj zPXSTUb3KVjeu8-}fCxZI%sUQ^Pet`4NC@OqDV2OW&2Qe005B;Lt*#z_`0M}V*LPR% zUB8)Lez{-ALK(|a)Ddc7A39P^ID=BNr+O+E|I1bB?e)Nz3 z?tlCD>+LQTY1`J5caAoj&Bb=N-EB6j^=kdBfDu`<>37d}-ZKD6K~pNpm_3Ld_Sdgo ze)$i7ub+SY1!D@)d(T9m2x?*ogaiOZWiA3p-up90Tb1$|$W^syU{!HMmeoKs+c@j~ z<|M{#+qroer!fOG%aV-RQpz-qiSY!r6@BOlfh_r2oMaOw5y?DZ9S#Fnk97kv@up>r1zt1ela6coc+5mxj zx@|e9NDnH8ddCwMRb_;(iSrWnw|Cdm+q=WQ>sBe}X_}U05x^K@N~tZ`7s$nqvmTE* zHfvQs3}X@b`o$46fi7fNNyV%H3R)JMORdYK^Dz=_IQq5~$BNM$5s+mCd_d4wL?Dr)tQi29 zScyH6fsqw6Kw|RF8A`EK2cQ6ke0w*hbT=)=pm?AnM>Mt*zte|Np;H<+>KK=5| z%g?>IA#Z6>EI_~oX+}z@M%3rZLS(3d5!E6n%3vKTK(U;coXNnLz4yM6Fr>P@O(GDQ zpf0AOd155?-uWi3+F)k2JEoKT@*lq__McpP@tc3}hyVOv{CQ6NqyPGYQD!!Zam|Eg z#weW&s7tekZ=MG=>j3~srGk6Okj~>k0ESe+LG}AnP}3!YfM%|0xTfjSSnkTa3YZCx zhXs%Uz!3?OCq@IX>VJuV1rZrdYIdXoU^&f1N|O zNEwklIA4H^sEB|mY3_ovGBdNGU&K@~gGLGQi(dS%w zaVcsUR1}g*Dl+FJDW_p_Oza5AX359-l(KV(9_zBS6wO81>WmaH86X*>c~jMbQlGs- zE&>ZVAVABOv*--QcG%CVizt~u7s!W35Wus{6fU>0-a_o!E>B;NzkK@=PV*yoX_8?P zCJ+x6RGq1Vd8k#k)=PoTfQ?Ml2h~!nB*OxZ2!apfJ#qutn}{hINI}17$dVm8Eik6c zvwB|;v9{+QUA}vmhF4$Q{LO#*`)>W{qu>80fA(i#9P&T>mmgSBik{dF#}jC2`mT-Z zl#jKi#>{|_CD&no6MC)(gdys$HV-&BCU8_a-e!hM48)5R_Axpgjsu3Yxr|`NL3>Jx znao&Gb6!XdO&L**sxd5j4+sRxL`0-1vG?ehK(%B;V29)x48a6MO){7uR2&01c1EOV zq-rW9Co35(wXV^Us}G(CLJC-4dc0Y$_WR?_-M)y$;E2FL zRP8|uR7x@SbqR;i8KJ7G6e-DZFaR{If2&hmgW59Bh`3s<)?GWDj>CBLccdkc)6g_c z)ov5}EShrx;-+i86@+wkbrC}FEM}ro`#5M&A%^8)LWyW{f0sDo+TpmjtUhZu=V`dT zzPY}=nYW{FViUVn*Y@kp&E2gibX}LEh=FsisCw_Q6z^S(7-QiO(1o^beF#q;KLSl4 zshZz0`|49(`?CN4l@~pvANK*VUOG#qMxUBAg%GyeZH0wC^nGgLQ-7x6tYTtXz{DVx zvgA}!&g+E~oR}s#OvfQB88`rII!5Ya$daZI9b#bby;mSKWH5&sAfuRRwvzAiSd@y; zQus8VD4w1vSA4Ei>{_H0|eS0|m@8|O2x4-q9zx6w>U%h?#*%x=?*tw8%@j%`J8>5S4 zgorizpRtp6{v01p5xjp*Qb|(8poXdnY+!(n*b|~r&S=gd1QqPpj~2QPA$Z4HM3z}B zGZ>LWFvIFUR~0120E}pG&$?D+G(`ZFQc`5JYARW>sxT3P0zeE-4YOe}K^2F99vNKp zT!0B(0YW`@`^;=!gj_(5YIObf=IxuiXHVaWv3>sh-4NVizn5IP(69Thjp50YC%D~Q z-@N|%>#t@;zwUS2i;Ih^zT55AkH7frE9b&$wF$u;55wtj^xh*XV@3RlNUkWkiuXWr zgjPrYQnzF7+iFJlbe75=G6WY2aS+_f6&acD*U3#K!M-yIl0{c=-JD z&jk#Lmz;uchItZ$5W?ZwNzy!<5Ow{h63d6DKY%miNMeEa&wKY3?GA@tkd`S78JrD7 zRqF|6m4H>ncz{I6%!H)gc_s%yDrRDAhHS(p1_ENW(GhbZEU;UxyFNGG1@B8rGg_II z$pTkpX$_!o_V%o`9ii6ECorydt6)-cSoPkq2rwZSIz*-O`4h%vTEuJ8NrfB!eSu6zIe_eJE@%a>-z%$v>T$)l@ped}9~KYH&+ zKl;&Oe+{;N`uyn!AAH!aFHGI_tNr!MTP*4fAU445$PQpml1^h+tR@5Xb?ACWZ{odu4QYH@AQx?{jQ-c`iemwij1Z zP9idlV`zNicpOKB+%zqt`|7K&a?Xz*KL&uiySr7_i0p=8X`ANJTdg9EO4Jp|%hIXpJC<4*>;Ktx9-E4N-FzB8SLz6}Aq}au!59 
z(~(N4bj0;~tzblij#JL3agdZfaSYK^fyA?eC@6{s2q0>{YY&IJyTe;e3n}#L)impy z+mR!0HXF-x=W!j>L;2vHr&m|fuXmeG@8iXro9S==&0oL&t#5z({qJADdG$($BTuGoz?3q!D>Y}DZjLsuc0U<;mSIazI=izD_RGYMv?P|SU zZjG6lx4%$x;juvF}%Ts%R^1@Fz6p-JcQJ;$+iS=8 z=FRKBOb=^MStyV6QmwCC}-@!brxO6TB=v$v!j}D|Vtbh&7yyyFj!%zhk zQ3(}`shOzqH>I0qHlK!43Okqck|n8@_4?6!@4aV)qE;dKxhiMbGqX*LY7yg{CDDSs zR4Ig%q*)QsaUgUef{vXFB8QsS0KmPa7uBxBeH-XZ|2bz#qIK53Ua#x_^^gSBdaYVt z)Sv0SU&`_P^NB?BFpl#)t4T_!Do~(^mAZTh0I*@a+O8~ZgJ#HcN!f?y_B6kkJ`;!g zGLOe;6Ty2Vv@zekzv}z7xiAAB`;Oj!|MKb6bra#q(lW#90pUk z3~9C5y!hmkJAp$oYz;B41*Gi2uE7>G~+ z2*D#T7z4E~I50NNg0-YN_gUKeEePu0%Sfg0M$!vDn%sCW{z00PV)Ba zFMs}%fB4-$_$Pn#$A9o|{vZFZcHNYvu#b0lZyS!26*!jwl}-;@L`2%q5>ef1f-0J+ z{Ntu^j7;bejS$q3ov|y+;tcD)WL83jX|kg${X4yPEh5`_c1fUGN^yh_hf~!(+*c>Y zd?wXZM{o|lg1Bl2upU}UTr~k{&O#taf@nYrf*@cegg^*@>X3+!{BXEC9gb$^oSUZQv(G+L)&1%4<(Ho~U3~obYPZ^c@yQpb z{i4EOfB9xSoC11}kWwKvRIy^77^N)pnA1=+XBILb?}&VG-m^0^kisAi!66)gRMoVo z7Aqo}O;ioVtmL8q*~|zRDW`dIV8|3R*fQsNP6EDGd1+o!Swe6q+5`{eW=K3nA3$pj3nvV%pO^{EX(aOuS5%)E=#_-+26d`kE6ALfo>bX z7t`zccHdn-^}K50*1PZrfBd`4?akLZvvXNSGVR!#i1V&OLj?>G9fAV`DS-%)k*XQs z88!p}=ICLDSiSxf4U{~caW+VBZ-xVAM$Dy@To#W$BBZ=rfBD5fmh; zqg;?*HAs41WzISz(k zsHO;#bCz*7#Izohqd`(GX%aCN6?hvG%o4#V*Akt?>)Y}Ov^M3$J)@izJ7Cidkp~5 zho)<_$T&}*eeu=J-5sbL$K$U*{oY3(ee~$jqyPLjfBV^|FPPe8E{^=FZ$0zrxG!lj z$PBLa#HMAQbUYTzfsBZpHboNp$SweYN@f#eAjir95z#Rlo0DM9tj;9(G%mS7)3r*T zeMnLqpck=amL(||HL-0&%u<#-TPb;(r{fXSR;$frzwX#C^8o-3hr9j$2Fb2qM?`>n z8V*+%Te`ZKrm5??ci(vj5#PLd)Azj#ah^}hvN*?~ZCZA|l$4T0LUWEh0H_I?A}E2@ zO2>uxu>NXD7_q1#l5-9b#VS}9{JI-XQ^gW>>#jM3VHiI7f|Yy6U|j z$7MoU7E+hJDgqNZRRJ|qsd>jVO<9bHLUc9i)%*DVT!k3pgJik>O^qp@xcYoniERjB zx7*#`-VVd?&F1tyKY>)S+Ax`+nN+(JL<%9C_g7h~(QD-uuqR;DltmXSlUf2mff8Ec zu34`)wsIR+K=U|FYaW*sc=mU<^XFeIiqLj#S(=&Cix*P}K}%kBA#XP0F{fn$3S*kC zzq%=%D>nYe|IfcoGyeV`{;8+-@q3TH`+MJ;=EL>tS1zzeoTnM@UCh4OAUp5GNj(>g z&hXze8Eei1>Lts>~>I9zoO)FCrp}jdO%d5Re@kisaL9Bl*An$G`g1fAv59 z>p%OK|JRqV#^dRDI(_Ted)KeufI9<~2vDqAyEu36KH`Xp=-g})QB?^MOtB;*VQ^4~ zJ7V5@AT|J%MXjKMnWW6aah9x$cdE0=LTEr%R8-6T6<|{ausWM(K&&SAYKEX_rV1HS zF1d&)5IZ76qI&1JA~pQUdDX}Og^U~{8aOntfT5S-6ozRjk|*zAnNNq~-QDfmCVIp^ zrC<5Gm#lyWI|63t+k&4f+KWU)eEL+prnuh|(I~-4gr0f*p#y8GAd-iOarsHwH-`^n_nl^1fv^MyQi*46+ z)3iA9h_)*6ND+BZ&)-lKDl9b&;kbOtQ|BeM5C2IsvK!<14g zbKCZ+Y8B+pj4DQmh?WX@0sxe=ptBU2rYXj@q?DH8T!_(m7fLD1vT)N>Vcvrnq(+Tp z8ci}t2GyLGl!}-+7jABD9v-cSrC}mE9?sFE{^djN^ZfmHo2IGhZc0g2V~o?1yVa() zoNEGAn^{DJmPN!sDq=0=S+x++JkME0ie*tLCT91`pN1-Ef|k*emc_)+tM%pm(hL(3 zo>hNnmP!TyR5ixx6^wvH%*1G!_rL%Bl=U=B*LVBF z;c&d$Pt%lhE_um0r_w2F|-? 
z9ENFJ$T>rs=LENh!`)rB6kFGZ*f=&RvMgnu0O0cKa-L@ZO=-z#J`IPIoo8~MkEa6> zZ8vK$I-O4QoZ7Z6fw$Z3CU!%&8gJgr^SFRsw0_gC`dAzR82}j*r~#@}fqC7{H#04H zUdCx$oO8>PtwM&F&uKYPu@c)R#>m6uf(wmbg=W3pHgPpgbDFbvvG055LP}{Ir_!ajBa428Dy1U&U(7&!0bksJxiDMvXc|fdBSYAFw4S@BLYe zD216~+pIS0+9#^RGRdhL2aCwI?{dx}8Bn#9ToyAShc)@iqPb`)qFMpq_S{8iJZTQ7 zh$@oDQl`^s|H&^gthU=N&c`rcH}kYA(1N1&U0An|chjiy;yO5(rrUl^-bK@tlZXX& zNGt{*8BQ6Vzq>vR^JyIV_3HH(U;N;2{_`LH(?2d$b{{->`n`|hKmK?bmPo$!T^r&F zCaf?31VmfQI>cfKiVO}CooR6~!XTr-Ao96$c-V^y8=^7GF=gD<~$b9*~1NlNb5 zn{Klf#@X`iu%9g@6!qj7Ne#-c(r;BoRx(Q|&GX`kV;>zkBUZe3WHJFMYIa_^0D839 zzr zGo}Zw`7}-QoPfytfLuFUSU*>0Zkk3_V{DtIsf_n&JV}9i$ItWZuwwS*>go!^6wO;_ z(p|dsBMR%y?&OHS8GTAtVDdgJ%lJ*5$b(Y(AI@Zz4nJ5SYZNIl;{& zKmFpceoQ5XeAqWmK6tiz5|;NLt>2c0p^35GZpQff+C-KmGi9u$q80-6E=CX00|6Wh zWW{O9mLU+|9FCv<@)v*egTMLRKlu|&;pw+O_|EVA=Fk83N6Ty%{Z8jJSwXBiJ@Aa+ zJeW~MkpZA05piufixT0=w<40Hr~pFUO+!GEEXd^85i%+oAtJLQD1_`07}Ww&a10J6 z5a;|jjsN)LpZ@Vbz54dIf9P zP^ZOKs-qr}0}feg2M1LOqd8+HK(#a_1~havOnOpw1VEy>ZtfY90gE9Z0l<0CZ;CZN zG)z)NWVKqIu{MrF2z=h$Ku&^&fK*p<%rYRcQBE@=f>066MY2E^Rl}RNumA49{+EwG z{urHay7hXqYgXIX^r7jRrs=v>-}g<^tasb>Zi{N){hi-bEr-*7S?1gQofu40E*bZC z$43{B;;NbRczfE9c@`Ak`o;%F04Y`qFo=nnv1p!`l5$y+5(NhyluaE%E@G-60w4~M zh#WJaQ-kPZf_xh1q;6SkI1TPzicZdjBDO4x;@LSJ%|Hv9AhCDkh>i4gI@QOfrpNO< zJBO8L%2zv0;q~j+I;RflQP*uRFV{Zrc44&&EQ&?R%po}Ts-Vi% zH5zM<<6LbdgVCIpoHG!WQv9#hJ6)}Br?eE)mG?w+eSI?wQ}hj?XZ8$LV^JG_$Va*x zmYLf_a)fQy2_iD{>3A|(Km^XqHz}(yI{;_6l@U@a!p{jS@W|ePdBUe^>l1JJiWTu*s(0j?&{s9>6t_08%O8}m|(YA&3QB@ z9?`QaiJS*^AOfL<7cUP)uv&%ba4M4?_xr#2i@*5x@BYp{m#asQzxzAC`O6=FoKItD zo13?X6a}$vnWBSx`K1Cfn;KHB{Mpph&_qKF0L2?>WqNXL?ctC{hX_m+g=Shkjf_KJ z=Sh)BJwoGyqGm~Hk=^EES@Nf!e*SCU{_f9z{PWHBVjgDWvuY8lUc)&e0>U&crfNm% z2n@e*oG7!^1)61+l878|Ru>2vh)XFRO{kLkEz~7BB+!yIt7Jqq$X(Y0ssaMw1G-27 z`+f~|-P1dAe0BsE5rwkkahP+?q{Pf6X8`Dol|k;=M?|g;VZ8T*t}vEd?N6*aEC%OJ z!{N=%^}J;C4U!jhAvP{HeYfsb>x$6pnzlk0+KW}!bs@ON&mIfnW_OX6V(Pqa32@!F zZ{M5_!)Z>Fl7%)j(KGXTMW?QC)$xLJAy^SROPLKEF@=x|n?eE60t$juM<$q)_&z!$ z0*uZF9|+h@<8jr6yZ!O*bRFZiX`Lfzz@cT_?+@kFe(-GfIBuv8kTlW@H&aNfnkcdjxV(0D|kryCTV1ajSk8 zV+>8}oaeKZ^_@qrX`D~Tvhu& z$D>ELS*;GI+t>x5O3S3GV6MgT4ug>i%d#v>N|uBJImfZ-iKv40^nOK4Ri0(%M41_z z2B0D*e2+~-P_lsI@slrJ7yZNE{{H{z|MGuctyVIo&1Q44-BwYRj{%r(j(amZoz4W( z$|tBz*&_@AgLAG}nKg-tCyK#J89FuqFlQlzlNKx1wr!WUS-KSmv?OqJm_jI?i?SjZ zLqN@aoNQwRTUf-7QD2zHOsxcRA;A89|Gcyo^V%vSNzS zvsbf}%n-5PtZr{_fh?{} zdG8%k9WX9@T7KdTFxw+Zj9ZbNv zAX%5W^i3ZwuC6wh&mUb7Ifqn=PSeKY*oWZCl!gIv4k7qrhiQEN_-VhnY??;rBaFN< zUbh$m9j8OvWGqp{1lTB`4~qhbrK}3*;wE+%sVK6T%oIHW7CLV|sj3JW$j^WJv-D?w z&dzb5Kl*2XGVjO#{Fi_A^zq|2;|&@}${80&9Go-Jb7$SThkiWX!-jDxOcY{lR{dF{ zU?%TG72Zlk)O$4H*ZCi%M zbW%B%JWkU>nAs7XSCpNa8mKR|{we@QW^5Rg&B!Ox1#ASIP|2i-3+U04IA%m(A}|aF zqGkXvPZJ>%5PNUbY_XkOzpR#}u!y80sb~^O1r~#>H0GQI=3-NpC6&2IR#Vm6H=hqx z*#uBYEyO@Xo6U*|x){;&{^r_ysNIa4(*Xe7(Y0;sJ=a|lk<8A|!!itqToNLL;MVK) zdcB^esrEW@&P~$<s()m#Db+OxSWGIUYX;#UgnZ4`!xQoq2Xa?^hx|Rrv z9HCAOe?3CC9cQ!WaS|BC-S4d)%0qPRopl zDJ2mx#nU(dP}l1;4ODXN@-8`*QbNwtFs{fmTEo->uA|#_&BeOQxtM?zQxQ`sk|n8P zF_39F?_}BitM>4TT>V3>0o{F`zpxTfDe8T6ZD`wecXin`UA=?PsO0nBh9e>?Z}zI} zT?lN3!3XQPWZ7;n+OF^Wz|18V5hDjp7uL&a(|eeCf4WUeLbQ3D-o8!Ae!9MTvg$fZ zL&&EmJ@goZ5(dW<3>kei^6CT8W%XZ8Pu_p_(QkeCKmE;LFRU`B)&-BGsWc&Yc96vrM|Q!75CQqsu)i&(%vy5JOJ0f;kPOSD_gFaJHD(Xgfhx0-Qqtw+MP(J# znLr(wj^pX^^T$owxzH1V8cUI3fN7l9tIj}G>>h8WMNt)7&1R-&UnJi+Hbwv_Bjy<( zc^0sw(gn%V!UZrs|y*}=zod1Vk|Ba?S+~Iysixwm2Ywxv>0hSpgqs|^R z2445!VzYWo-6=L~+>w)MJl-CT>7;A_Xyw~r(7UFI0H6XXm1&$eSAF#C7%Th)Kmp7J zZ#Gmf4j?wAQnZvJ#fa;sq9Izcv@DYf!bpzwxTd5h7wp^*pbJ0pS7My;(Wu%&hPGS{`rL zn}a(5+*_5OYYH9Pdv1Yg+!9@M-I5(rNCHy<7y4PjLt~!F 
z@sRn&tHbAC9+%ucdHgQyE|0@7-Q8GTL=tiE?4oaP-roMzfB4JqfBV;-KYKPEPP=y> z{}2EAPygyK|3iW+eQ4qeK*^+@POE1$o5(G2~+{Imb@`@i?w|MuVf#dv#rH=Ry- zKAlcA31#Lai2^r$6GHI!FXzs;FCu#Ho2F^ow)!mW_vd1*dPdLlJPZQ>%nQO%avqz8 z+J?G#anbaf&68oc#mf7tc3q^bN`?$sE9j6A2s4r)h?&blfU(_lD<>k$JZCd>-u8oG zK?GtTPef38hS7OYL$z#Vs=Ab%ills)-!ijvftZ0ElJmqs{OICR6qdrvmgf|vrOi1P zm9kLEGH1+%v+*KvUdmjA-Q|)+M8&isv9_yryJ~muw9B%XRwmZutu&<3X>__~T6q`H z_daIRqS?eZF#?cTMf_omA%?zRUF0mQ$j&)4nWkx3rc%l@DJ@Ie_eA8ln(CnU^~+3c zVPx zd*_NCHh0Zxj#EeP=Z-26>Y46LL`OhVODSbFNm*6zy2IVg?Jx}Ioi)5S%tj=luit*b z!6V`@&TsE-=ahZh^y^LN+HA(&`L>IVr&6*MLFyoq0jNbEymvAB%gyeC@9b`dbaxzy z==sw}-+kxuaibl~9*YQCf@NA%;508dE*Gn{1UeQFaWoQ&3x# zvZx}tY>HyG_;o+PV1dhaarh>$Yv{jJ1An_6-ADRrOM)X*wQ{r_<@?=4~y~ ztBSpz!6H&?kjgE1^Tmv4j7C_xmN$Kf0YLM5(>1|0P4G>0E;#R^_sl-QEfCgCi=rUs zbyvJ01UuhLsvse&>O{1)C?#b{nuHaZ&mJa+VIotuI>|>bj0pcn$b{yEd z2}Z0cTXO3br9oAvN?0H(i3UNZsu#&YhVcV`__PgD#3U)-SYx_FFC!(QUwbe%xsAL?_UtL}->WJIE zyRc%_!vfLJsiGO`cuaM&UZ0t=B%;C&AId1z>imA@_1vLD#M<3tz!+m45q4eYy$>Nc z{BMBJm+ya{nGK+sM(l3Kv~;WW`eLVqxkB*}jGaS>s=1Ws9LqB3xR+9Lnp1Mdk>aLp z`m7v0Z?_lQ^|s>~hVe8VBL|(6CT*MGoXeoyX4`MpLb~7YhvTe?n`ZOqk+a-N4s*#w zZoS@mACJRnKb)p{UWVhkiB93K|HFU0y1M#T|I?p`uI(EBJd$F6ArKlWE$1h)e_5I)Y_1HH1{k~2Z>xWWGIUbKyKRpb?@pv4^ zaTtbqp7pS=vF3sIc0X()=UmmYt79UH-h~h-Yz#zl&S@N%Wlj@HE;2W7POB3YJw1 z#sVg63TmV#X~~*Iv30RF$W&FNKq}_Nz%4h2dX;Lf=Nu!Wk%7!gpoEIPfXkYkqkynm zt>z`CVn_iWb#)Vl8B>}ZafnS5qKQH^N)>egSX4wy&f_>v!%$LoNK#sI+;!dKr%&3x zb1pOu0P-{~h5(MmY)Q!gICyt;xfbP%b$5Mp%o$YAA3%%|fx9j~ee(Rt)uX#*wqjP~ z`ue5`anr9?7i;HSU8QG*>H2oQev3NAZMSRN%cj{z-vq|-5MXXyY)CO+(=`rww~1zx z3gj$B@?7$qlzB$-7mY{6R7w^@_JO^bWhrS<#FDZ(C$55toC6}?u$ciNLIoJ4rR3u< zOw$5DX~{9RrD)EX*uQ-Fa_YK_ypX?BJPb=5r)io}D*L+^RzPwQEviMVBuPcHcWpgv zMWp0hg~-*at)4me*4JmemiLi}C_vx!!TG*#pFFyn`Yzs!o3%*JOKpV^kxDx)%bb%~ zp#IV~MH~->k`@<(sGgP~%Rod~C8hL6ZygAxL6^mo64A0O$FE<$eS00cwd1hg?_X2u zu9~$EB{^5bmW9n-tXF*(6549B-adXzA)xD*<$9PBTEng_iHB3--Z|oysPj!grmVCS zE7=uThxmGb^B@26-+%8nzV~~7^oLDOfAr7(Sby~8&wlo^Wlr5{Rd>>aVW6YRlJ;q zUa!~NZQHLxjO*RSFA z5F(go^8vjlL{JsgJgN|wlj6a%BOgOSB!Ds}U^P*2q*~^Ap35jXTPQ@-_kGuOh^S!B zdojpGiz)>�=`YhJAE>bV=DeKm!#h2?}K?#S^7z7*9iN0}u{*8BYUymyct!c7DyP zrVTDGF`Daq|&;@jifo5Stxu*?dICIV^-QcSX>qM4|i4-j_FN9Ha@A48~_e01kF z&^Zzj%?iw%(p(q)L5)qzGTgpBI;wYnl1lJkcFv8Zzq`9To=(i{JWb1TI-RCvUa!~B z-g)}y(W6ypM8r(SoTt-}+}!o8<2GA%C`A0`_4Ow&-hB0TdG=m3v$tQr89M8}`%ZV& zAtI%OHaUxNg@H9mLNxEak6pjArY-xox65&I>O2JRdENGiNB~Je(H9@VH$ib+Qlog$ zu73UxKmNP_{9pgeV<; z#%^ye){QjC=Q@uxZJ^2>>qcVmG_!*Lb=$)NW0T%GST&Vw_C58=!nYY)yia6 zOGd!t44o8^VlD$ZHbYfqW_Gct$odR;vvd zE}Oh8ON?!d@vuKlla^u>LZskaUgN5mySREHdVn=W-!Pzv#Kw1<-q415XhU18su-gW zT(xRu;++$boR{f1o%41OGppT!ob%~)0)W-3k3NXVX*l@vt|K;0v+i1ER#kDj-Z%u9 zrjsP=nAcYgV@E`5T>NzF*;Vn{3gywN**NRkFy4&-&@|0gFJ6B6@@=+{pmmb)Bp8 zkN@GXR(W~%op)Zpe06toj4@IZ3_zLMzPsA2o2ID>_3e7y_k9)Mu2!qfW)niFbMA2* zYnS)Mix;=IxBLBmS(g1W-re1u4#%4FYmrJFlUzQ-kBR7v`>ZyPOrBV>n3#*vE8N}f z5plcSmSL(deIxRJzct;|VQvv|zoDZR{-w8lkCJ_NML~@L(HV#K}D@7y57(IA2 zDF`0XfEd|{;Ua}Nu6Jw4LBQNZWK_oD6SfvT2RQM(-~aN9(=X^}VwuFcwX)RQ@E1$dYuPb4u{74|_sGDJsb%695DU7Pxdo zzO%vQL$7q0uRF-5H(FxNk(%qL+8pe zOym$s+hE}n0qE>~CG zrlH`#g%F$QFFWhIZoA5=6kXePT^oaQus_@)2|FNWF(5)D?*bXnFpeoD=K_)DQvHSy zaIRfjU{a}l69s@JFAq&h5nYzWY+@P?cRwR?b&nzVv%qW|=grmA;~`<=`pqS8LL#4p z$@R-AF_I?+lQ~V-Z{LU&WV(BMTS_sLsx!=GssHJa%uKD)rhi3I(lo^w5ix>z&zs0G zvI%tDJ4cR~9mW{jwqZt9eZFc+aVa%m64(Xj0}&H7J~WOAP;$|dst|fv;Pu<34J;z_ zIGT7x*lc&(Cr{t~_ODmplLnXlazlmdJ_mx5sdwN5b&(u{Cu&9`^!@kW-|cn)FwJw_xgCyoM3i&hA8ud2d1*y*&ea|>=UiLM z^E}VvR9ki2#${P5Qm<|X*U6)W^Mp*nL_}-R1Zc~G`RuC@0Cru6hz;{F4CEYp&-a}m z$yr6>>gV%wEKD(&@NAe0B5XPd@iCc<=F}C!uXtt5ws4*oD4tx7$tMHO|AT+gv<; 
z)HpWHSL@e59)I}t=l{6A+~fmZh5q85_cqUDf4E)7Nzqo@E-h1;23sanbrrD;(Akjn z-cfEzhKuO{Femd+Oi54@lw^{Z!I%Ku)j1ywOwO@))i8xU6%|ntP@w`&5sU~r>bkaV zo2J&brr;!&ticZgSwyN=u0s?NmzS|*_2@S1-n}`c3_h?F$vM~WHZvrUtf#{;4s&;! z+HOVO8IlU7tRUok2q9l>wvX29hAAh@#gbNPGdSmX)u=EEF9~u<#CoxN1So1L=RAyg zS%#cc6^6s|_U+qgT(7Ix^_)n|a{8oYrA0`yP_W>leK zl#IWf;)RD(^tXI)#h7)Gqpo>EHiJ~WM`(xKxfI%X}RR0uJ! zySceJm%B{9S}QsxBC**cRhBa|H(g(EgFZG7)iD4#haSny(9?tRu)bhzMy3=IlRHBx3$tsE2OpEFQ;MGXQv?LRWlmGxK z6EZLdkDkaeWy+w%O8w*%p-xEL{n|>kjZ&*dC(6to2!If|X}S2pO zr@10pOhEt%qYDaLv@D{UazWu_p~Da&g+NBBNKT~)dLStop0K}i~vwS*BXNmkq|JYv(=XrMDzg=LD1Ay zI7YR#Ge$8pvJ}hhnSy74qFO}L0;1dPjsd`gJO*}-C@(xZe3b$s3K$SN@7umpyngli z;Kod$)ihnU{G?HC9Wx_kb-|EaaK3BKtwl2c===4MaL#3O(SP{ScUGI-mtVf{Kl#}w zKloubnexm;Xv)Bj$e1|zwcog$66o#m`0|SvKl~)YS{;6unV4#UeAUw{1Z$6tQ=<>7Fc=ef?aR^9<3nt32#;s<-bTofvdi6XL)Bcgyj zFG&y)B?D+Kl8dU&fO%S&*}P+Ba|{kxi4mrG@!lhGZMucFt?izs>+7uD)BvDb1{2W; z^SMuPuPLoZ+IqdNCsxg5bIwGxUaw^|X7Uqy!OU`8}X|g=GtCmVa zUC0)%1vQ%_Eutr@cy%CB2B;HE#XP5CMXZ<^5GYjN3oU6bsZ2aQ1oT`qCp(@7A6j&X zh%tuPwCsXwf>TrUwCdY_#m7-Vv}R2)geE%GGNuuW0-0)?2rFx(&OX->ycTK6v(Y9G2Ug;pOYwo9pBC+q;{ao6~S&2dV%Fjfb`k!M9*h z#JGWe`>b8VG%oYJ_>N<2DKwEtRg)s9i!rzW=CUn|LI>o4mXwz%2~A5k;oMP-UJ9By zLgyTzgFKJRv^1-W7cahU)|-oWo(uDvuV1un`-jk2;FaP=nz6DLoWw(0t`0CLG zv&UhWF0L+(&^JBB7JUSUqBhOtILsKg?PkM$Hz9;TkG}KX??1mdzIkK+g_P zMP%LgST&{yq6$gt`tET!RkfFxil|qK-Z8pV@;7rFGf*)yA|liJNdi==Ad#U34tIBV z0Ki+W8C(?&t=jH9<17M2!SHP3Az+4{JbTaHDV*&Oa_a@aArTP*nAGTB)wIi!ivdw2 zVl1X2873e^=X_)bjuT@9UC`Rtl7T7^`uQ}!n7%SCIj7f$FH%Zhe)i&*KmT;M+x6>J z+qS-mAAIn^YO}uBU9DCt7y5p+y|@hCdy`RBQ_0J+Ow%x&4yV&`nT~=YB}ti36PplL zfE2{$?5_u)W?~>}3ZSB>N-C%(aOUA47)i>SOAYh@07grZX-BQyRzOT|Z9o z?uca=()9MzFNf27*iX}_Dh|v8uwgHz2H6w3(6;?<)ow#*5%`eawEYSZX+Od+fg!Lf zM$5ERv8Hp>b|nOFglca8<^WRPD3@X-=Og9lm&Z@89M)wSWKW@iIq5VF2$L)+yYX;y zy+|p5wpwvJ92`7;|GoE~KPD|pIx1!$Nj`wVhX{P&=)7~3OCm5;6;%LrSX31)Njdiq^j-l?fJE-~-NDSPoCSL#ayksh%bb}I1CoT<pnhSzx&D3;SyPLr8|sPEa#JiDsL z(OguO08girnIV8DY`jy){5Z|aJQq_0HV`qzRK_Gt8=5AB5X_)xslyA;z?4L@C?1c? zs_&xn^|lJJi;c`n0oAL^3y$&bbX=rdJ$}@BynFL?*}uWlHH`0F?Rw_`Nr1I=7!=gh zu?s#><4dgRh*dstjXo8YcScB}6%~S9eNDBCYs$Vq!vb2>d`_d}ukVDxuq~chv~M6j%MmImhHp5>`~f*~Y4F zkAfp#PdY@bP%Q%X-g%Ieq!e+Wpa!bU7%?!=#y8@vXvuTR!>B51y18f!p(vEOE$FqFs6Jg5g@{c?$N4gz3ahZnsH8Q21vM!rx2Y( z%q1_=GN5kzK-j9sx zMXls%nE`-#QwLH_6KgLJmU$T`R#~-8>nJb>!foHAKQ70&zxJIEpFO_{k<5gE zs+l&c1f<}-fXi9bifTqxtDE*(R7ACy*&?U=8Hyny6ELH5ZW>NRfM^KnoeO~>um~sI z5D^f1vs6?}jS$I+sQ`*;QDye*9Y*0cz1PU=7hf*J0j-G0;WQmy-`!3Hy z>BM-R6N9&JB4jqMD7w&;XBV(8~7p#7yA1lrqgrn?+T~ao;rL93U%@0v5oM zbCE*qT?}9}kIS46uqQx7=bZnPLS*lRpem17oBrZrx8C$I`VcBt-CuTV%mrxCk_gNP z*9F%)zALW`$*gYJR1vnC*@gw$IL8erGDzdnrm?4w z?Qnu+nS_Cn8C?KkQ(R`6W>XkD2m#)U9vjp$nyORM@l=S~i{6C|GO-Vv*a4G>qR-&H z`Dlo<&WB}QmaKqn*PrG*4zpx&=)8C1Fw~KllIGLAS)CA|sNEg*OUb2_iVa?>5efp_+vboD zL=;zB=Nv1FD4`3nCE!vDauzKnSuLv;SQJnR>^QF-ks~A&MsRGwkPQ2-$vLN-6@iEm zoT4sJ?xtUCaY{}B2%H-M-hKS|58nG|5lAJcaMAMas^b=>n+e_I9(8lJf#T-n?fCW0 zxM-kE#0HR@7B>Yx-t^mT*F3t->9Aj7-yDyFFcuXbtzA(ew|W1neX<@8N14-CpL{k9 zhbOyB0$WAw=8OH`{VY7bZJGv$p%d#pRaXx%^0DpvH3HYPj);=ZjL4`6C{ny3Gsh4e zJ7r5!aw*wiLKagKdAHjEfQdQsZA14Ye|5}oZ#0r~UdmDhJ33`6Ma>Fx<*PDt^nRKq zHj8RRu89pZ&*Qv5-I(X~dhMOBP1!m&Ejc^qa+=M|F*dQOqNGJj2(dB==XnOO(1aSW zW}6h1fC2#mI06JCl>wZ%h`tFiB4SEuC}kpQKl)CVa;S(!ZY^3dH8bFMQz;DCbzSfM zvMlpFipYjzox0ZkF%dN}u45xy;n2c{0r|m*A?Mr#RAn>KbS$M{&Rk0DS|+wK=P8#` zfY8@14YPqchj3_`s-z1cG)?pADmP8DUTtG+7~KhNAE}F8?6Gp1Y(M5(AKbe424xtI z*n(MwWvGx5;0)7{4_&`G4sX5hFLooI z6C#siR6DI!tL#*p3u2&Pr4$tu5tNC&3tq@ow{}z2>Na7;iewQ@ zx#;msIS5U&?pJNwg6KTYUEA+=yHa$S7xoRIrFl_A_R$e9X+q$x>zt#KGKew;P*P!! 
zp(HyVkL*^&l+scpGrPGI5h-e13NWf_$ui9|`v8*Wd48+u>#sX+uxh+xeDyk!^ELS= z>SN@N8JKpv%TkZ?QqC)j001ydNADv7<(ySvS`wqH5`F?hCeIB!@PzCT(H_UgXRfnp zNqL^fQ(h45lNTS?vpVNoO(Tm)Df-S;zsz}>7F9is(~^q^M8wBWo-#9n2F$da((Q{Z z1(0th*gbmGtR6QuY@dbWXg5Q?xxMbM-sKR4a7v4rbgiFF?+*K}X%2@|P4WqOnwEK< zRp9yacj2jZ?7P_fpTeCHx0IMAU zV&>pNtvUhD&N+O)++g?nrBz%~WrVl4xA)!0|HcH_?o9z|Q|w_+!g)TIPIMln*Y;=E zb@h6xYE=?>@B6+V@}l?V0uKvlRp0#ZZ`F5RC6Qw0h`jd-P``|cTnP2$YD?u0gOq(jmr5wq$1U{^8Ag1Vm)8d7jnESsU55ZQItTrfC}ID8^WKbZg|DCDq>) z08&cxJlA_>yWQS*i=pOvbrgH1Y}ZJwTGv6WE`00RVOY}n!g}wAmv?67I5bVy#x8`= zh8UvrOl{{vFz4DBn2DA&q{Sk6Q#M1DQh*pxjflgl-xMV^OeqY_vEoMUuM$ev<|M0B{jcEnxRl_i}{r{Mf%-RC9U*|F(bBsb25 ziTl1K7jl-Yxzcl{XWe{a0Uo zee>q6GlL+xlzAH5cxs5BU9C^Y^_w?ua?0e`%%*9Yrv!xavOInANZ71Y93EK&urN^ZkUqE@T_T80%24@(djkI`oJ@C}66J zL=O{j#9!&Estah%@GIe@2D`S4_bq>_&r*#bm=?s>3v!SxxThx71E{Wk0~Z(eA^0m`;nCq2o&OK8J~WAg|$>6mIucOLbtE zQUW~JF93jO51O_5We>4(yS4zKw^#dj8M*Ux%5I>D)Ub z+H{x0Fy&OpluVa--sh95sw}Z_O{5TEyX&4j+I4-mSw+zq4b-ctfN05!SSjKZyqYb! zQqa{KDWFqqq}Vv8({eNq(^Am6rVUNIK_fJXOfiJmHBHz05Z>Li$K!FCCIsEA)}f7g zUQ(KeVRGyZy($znwd%pnB9kG~G>4|`x(>(@5umQuD@Fu8i#X7Ly!Y(QY@X-sYQ5QP zZr)z!oY(#8@uN#30`yXgNpg-D(a_O6rKV|Ih`bc7oMXdM&|yLE(=eRI`SNlbV?Qq$ zh<8^za|EX$sTET=@2?j^M;@El&sowU!<5vh>DFJonI7(VB9Dw`G-0eGnqMhIR8_MM z;~g_svb~l22Zs1?I8=2zK^-C{)uO78pES!tP1nBr;d{^C`%r+7!#HOwK-xun( zkH6boJ$v>To_@Fd<3IfH?aLR3c~~!tYC)Z&i|xu;8$FOg2_drcb$vg^_U?GPxVUn$ z*<48Yyd^2l+v!&^$P!YV=3d+O|E9)vU^8wHAFLw9H(oAT3$L!`|KJ15OncjWpxmr3M z$L0F;=Jo#VtJ~G8^WLqvc<Lfn8>wOyL<;~mMU6HBEDSyI(Y!u^1Y#ArU%sfXs$a z)HEd*nzrS(Zz{frz##yVvI8{m49u`zZB-SKFLzrIt)=*8vk{TI{jntVp#{QmJR)*z z+If+b%?Bdy(K!_nBp}WTtF~3uCFLole&wO<$1vE=Kj!QByW^NiTVhBwLYN&vx zVR`-fu8aM8y}fw8t0UfJS;l#u=egbubzppt_^A>S1AwNBRrAWs=+NPLqw>+?UG*sk zENG%yDhMx+FP}fzano;~JYPhA`T5tMzIb(7%4uA(lzMK13W_*SZ4BRT9>0BieLCEQ z5ZbnNu~VCF4$~pMefsR3(5z^n$6=bs=j{{c@Nl?=qAJ-Tw?2S#sQ>`63qCZyX{VI0_lH98?zg^m z@%YK@_1jmUzj*QD#bG=p70(=fV98lzn#RTlRRwK4yXZU;R&izx^wkp4iQsQW^wLJ;4G!2RPHSgoOA5Gt8ulaR|Hf8E1~<< zbpW$qu@pt9DRwC(&)Lih$~E`B|40pTT<;CT2JdIPD=VaiJ5P$&)1(X;F)KrVkX$g(@p1V~$jwfV{K1A0z zW>;GSfK2najN@ro_p83|Py0if$93Os*Q?X%G;3j01A-z7MlBS;C6yS}`PlU^(P9GV zLz)EGQHXiUw|57i&J4$KM!ro;SxO$4F%^Iq0uigGo3!1mmYD2}!w-M)#See>`952>d3+elv?LL6gp6h-1qW@ExO#io@2As=nfty}2%{0irhEMC znJ4<{i##m(FwTKm{eT{KrNwCersY)Q(8-~Y|ai}jCw@YkPw^6{$o z?>&E~@$mIwFU#ygLk22}KtvQmNGTIL^j-kwBC+Mw<*r??wilPpcKznfoAGeGxxUe2 zk9HTkem$3LW?G~q>6(TCYw$=UH40QG^$Hc&B96H}z;G@Eih0jOIj3*-dg@8=aIn|s z_M62^wQ|IJ*AVAi?RI|?-dum+nG@7C@C|Hk{U<^Q70U4t#cBw1&XJzcX!R3+$W0z@ zfEuJ8-dO#aXHJl6ZRG;MvMj`mhzw-aSm=J^q<(!$sa|<@XlC9=B6tYC&YgQ|t5x?< zkRsx;EWN@@b#r3}Fpi-R8qM>GKX0gIJXbIwh{#9FOdveiPWgovighx6_2 z?QXLT(XR`(T3actm7Ck0*)it|2FxCT4Y&`sL>GaqCx?jG`+1vR{0yX9|EMC{@;Y&P!hhOq#uM==8P3@{=N0H7a}RQGf!fBeJ8 z-~Hpq8rQ*cYpTFk3atL@Bj0E`uo56 zTTr>bzg>^dS_(ODttvTU9z&d_`QhpN!)Y<({i|2g!^b73rN9`&=I-V*w7Yk2PKU#9 z|KZmkzW-sZCAMM=MWwaoEE@H>Ch{=23r1VJ98^9O74`R3|VVa2mfVS3px*FUIUMc;}eY3d+ zyME}4@XNJ-e~nrF{T=1KMz{XArIeZrBVB`Lz~F*I#9Ax#$&Y@B_Gjzv?o|&NrKF#N zvzDAQ5t+$VpUA8N0P{sXvIo;1Rj=?oyqIoYz2thW+C%dNQtGkQ`T&geU7@F<&bcAP zw5(04v}Ou9=f1hVdGn@~Dq3=?wN^7JrPNk;yIYZ3RVABf0WC4`Fj!F}g6NsqA&Hr( ziMxOERZ}@VK0h2zfT<|frs!O&+FFxbq*UmB??gnzWI5*=2gkhIOgZP;^2d)KZfUl}dsWv=39fOO(L0VV9I*hl$52-v3 z!F%>0yVOMc^VD5{U-ncYC#=kYTo`zu7yhfU8YmeMU^!PbQPo}@FcNnZIkND#^r=)y1Xi7NxB%OCW zmH+?7kC}9=;^Qc?N+GiM%E~4)$0jG7kexje$B3-rn91glI2nf|a%3FYd(^?P4;}j$ zzx(_6o&V22=kY%G{eHc!>v~=gd0)C31?mfIWne4a-z&Uu>JrVvC^Pl8P6_eyHu~=q6BhEaROU4+UzO$(|o67s(lFmuk+mr|3qAOYr9^$+Ptd+~@qBVGA6_diD}js8--kz9dVHe$!Ak`1 z``XRGH109DX7j3m^4$X!fBIA%^1TYbOZn1R_N%S>*S?Hf>eZXpaW$rNbVEXOO)cJn z%tJ(@5WG5FNpJ5`R!1S^3C@s1%I0E;* 
zDb7^u>i!*kcb8O(pMQak-G`Osh_hGBX5zPdw()!WVR9B0+%X)*6ufeTcU(vHY#{jO zf>Gwb5lt4W$ZMdj)pwICe{INCU$9Jh&Te4c~?})mbD0cQ+480!hAS@0A?Y1_?mp<=MBlQvJA# z8g>ESVsA?fmhDz<@`d8olZF<@{dMGtUPPQe%nL8}pwg=;lwhlSGCpXZZ9+Z3oq~;z z$F6)#!9!d$rP0@DQmzV(k^Vd#`HRq?OQEAl0L}8&Nte3XP0|U1qqg_|kQd8bJy8&Z z#!F2DSS+aRtoi*2&#V`cm}J8}LgrSjSybM32$#YBiNJ7kevy6wXiiyHbxYf`O}V@g zuP!wN#f+KbS}3-6yml79y~UNTL>)Zn_fuAO|5X>Jzm6Z}zD`t$o6qeqjz5Y#dkAcL zULKRSJII^F#mJzDi({+E11rKPJCN1t!hg(94Nwk$YSeL>)B&_wa~GtF;(&mK1~6X_ z!e(AH$rCasy4E=iJAE1~YrOtvustQr2AjPp1(qtB}UKRI-`wF{(6I*6r{OvW;BC2#_th&|42zy)tJQW zvUTCZ+Zz z&HjWdVV;@7AE#e4qr zWrlu&-rg#3)aO(e|KcJwWsI(1XK%PH{NQOWssd4u9|=f z-uIv^%?!r;l266I3JMa|>1L4!^CGPm72Ehn%6>e%8CO(INfA_4m8he<>A_dJe9*R> z$K~Pxaf?9+sqWX;MQg?+Ky)m}*BlkW?McJpllZ;kLTTnXPLOUk$2(;f!4KQkA0)k z+bKgDbn=AT;XneB@#92Ep8!25u?VKuC;r0w=<9SWWD?H@gA3~ek-T7J*>*K%@$65K zU%q!Bd0URcuM!RaSzVRj|88o)7YC!|wHqeRR9Th)0u&$x}9I5q}kh!BJ3tj_0q1<=xUB7=X|uG+jMoC7-;pHVRs1o zSVcRcOGO|F@sZlHV9JZ@i&b5-19LyOPlFF2Gc>b3PA21->4@db-8UAm2Jc?|oa(y& zZ}((RnWaUJ2AgcOQA`gsA!$F08ZSDU{C*>s=H)FKKO6;;RhjTUm(BH;`>&O}(Y>~v z!%f~HQ`$>Bq?r7{#QeDR2)-jr6RXL?sTrksqVlK_MUqXCB)xC@3XPU>1ND0n~} z6dIcAtFLt>)$MP-q_(=B&fB7K?O60GfhlsJ{0}Q-MS{77r}QK=N7q9-!wJH91zS@F z`|aVI(sqRVM3K91ZP#cSuLjF*NQb=3$}Hq&V&Z0ERnI2QHFvZptacst+;jaG{Y(6H z*Q%OwCI5c1Qv@FJWaDKdIYc%=-K>8wZcr-a<3wTs)eo8DG&~F}JG#5$b#az`vELYW zw0FFB_VQw`)Y>_T=DoA)$qT@Poq7H<(<^x67dY4>@qu3@aFH@F@4|!qx4HL%9_7 z5z+^JD)Z0g8- z5)k;^!-MTVVZOJ`wMu4#8;%w}iwB+Q{Fc{xYRXHi74Ji3nbGL`)YS6;78E4eY%3&T z!FFXyi9urxbUliOJ5DUwX`d6q#evM>clr(O`%>bbTtM*)3XjK|fdiV_8KMWNf3>~N z`xzat!VnXOlg^6x*Ixfvw9L=+jYpK)(%*RQ0-Hf`2`G6AhXNkoI}@fu?r%p2wp@}? z7Ch{E8q0h!iOFLnt8KVhpzIEcE0baD`u<42KG8*U$S612NABM=@|c;UGV9In53LF_ za#DhClyBVR2WucYe}0y*Fx&VTKinqU?OdcY!4MJuD52u0AU^mviksjwxAk1sKv2J_l}}BEUrfNII}T>H?<=xbms=(44~oH&FSiT0kI3`e5lNPCm1@7=nyv8# zI>K2wZ}FdVe$^Mok%ty<7cv1OZ{({>C+o@PoD{5!zJ9XL?RiU!T(bwqyEk z#1tFtj{aWj$Z1umb;WP%!0^N7$lffz`+VFgVUEDTz@S{p+21fX@1f=26oSJ7{#yjf z>0O3}6$yEILH})XC`qeHf}VUy%Cx-K?x5(#Aks7SdJMu3D#>K7mppY&7LBu4wXYZ< zkl4iH*uPRp^4tGN3}X2e;&XI%rUdrGCWBa)^oU=B3v%F0FT2DJ^62WTO+O=e#C{|q zG$8G%=+pxKN_99r2&H4+uS5%yTLptO=%hc4y|-y9j5Eo#as~aK)AYFa3~mEXq!7o5 z#Y*3kKETrz=?ICel&rin3sw^S$>u(m*U!PB{?cVX*^=`ogzMycrxQqvrMfghr@QyC@6TK}`Mad?vUGe#uX~kBH^JR zC!lNSO(#d?MdD}450x>d^nT!itM9HFGM9e*WJ!X=i_<>-w-FH;iU~MV()4Gv!0@Ey zSX*pGsOV^qB{xua4GPpda4V)X-~=s4N;0XzA%n2&vK)0`VFmi zmcJ*+=AMwwk$wZ{+F|G7<)_>~WTsZFtazLVb`(I<%Q%|~-;qbwH>Bm1q&?m3-I8bI zEWT3kb#zE25})T1KPrXNzqkPC(_KeQmwy-A-&l5Bj>`Uzd{H?1uTd0BLM}!Et?^c} zUa_>a4!*vzMcaSQLmLkdfpQBk(y`h?+>0qIBh(1Siqcn9u$&DPShuqfaqyGSKh$w~ zL5`1)_rBPX)zZ?Ne<8!49QO_`U-O*J+V_iqv%(PjqaS6?>56a4kNj)NVRf&!-tdKO zgMTLlzi$70BXoZ~Y_9;n@6bVuSit_0KQb-#%ZH=dEjO-e#S4aVo@5%E$L~S;4)7z= zkslw+ykop`Ky*ZGw%S9cQbNaFVkfm5=1pVF!Qz;xG2-+V%RxS(rp47e>vJCodpKW&D%Ek01=SeVT-s|vI786q{ z_d-Wx$0CvZJ_<4O6HL~La^(gOAfy!*8&}J=&akP(rrBUj3#O%=selliS3dj`lK=eG zuvEfubxLU_G%;Ye#==BEL+o*{5abN8TDsz@J8T9XH)w0G}@5k)q zA|*tk-o#wv&1uq}oF}a8^TgL>(=TGd#B<2we-uwT$KF&X)|TR23LKDd^8CZ3bW*R^LJ2x2hkpmzQO1DT z)lkS%$onulif^579fW}b>79jO9#J@^0R(tHf<6Db95unGwdM*fNW+AWvU{oc2}hhe zQpQ5X{QOTg4{cy-rQF%X6RZ8m;dh5h+XOPn4xb}5BH$Z zx{eU66eD+(h;*Jx!H-<$=eQ4HhEzzu6Y>n4`u@JL>XTDbFnOF^ z*4v&d6)QW7akxC4T)bS*o!b~aK^Rhrvlwa^2xd)~iikSAP|x$5fo}T3yK(V28}~BT zcBztI-x1Li_L#ulmFe=elMrcyN5Z9O#v>lWM5F zaZFi%3j_=}Hw*`UV_pG;BDTg%d#*SRhcb`Q;jvGLXpKdzTpv<%`MT+#CU8(KEIwbe z)*WMZ$bRem)7L&%FWEFhjLd5^QAB2KVb1;q9#ptUq5Y{SieBdPLE8E=yv++G?T~=( z(DsniQKE;-hR(}N4)LC5JekE7LbN`b#B~QrX`QE9Y5V||;?iMzq zODv!scTiWJB-y8Fc6n^V3|VGqr!QCv?JQ9@eP$?f4RNiLIbH|076t-g)240I1srDd zKnuV^Y46pXk$2hP+1Xi5T+{5WST!%{(iwk6>Ux>vS|@qY^)QvzrNJ>ZGicQ~o#xm> 
z5%CdU+lBVfrdn%{T-n*ZS1S4MXKqG?9HLicTGJ=X3D~92tE30H0mA_-Fqv-Vcpqfh zT6AZyHm#(~*LMDoawmfuM zdHi`Uu>O>7vb=4e=`iJAWr*(I=Fwu3<{p*ED~vx6Y@NphhsiE_1p*h9+l!ICgwjf8 zHMWnxP9O@kL2`x60NrBR;eJdzbbXD8Rt?`)cEI$9=bUFCf7;V_hcvI}yGO_i*)1d^ ztj4D8pDKmZ9Ma6awlmH=<*v)UEN?NsY%J`lfx=1#VJ@)vE+=H!px0!Q9E*F%M@#-< z;o3KGQ#Pw(2}%#!rJFIB+%iFGtN+0@fOhKhnj`Y;X3KtT?d9)}`}n=)+D$e~ZtR|; zv2yfAvkFU=DURSr%-c^Maav>j2*FDcRU*zR+;!oVE7MmNh(xUoB)hvdBlAPjANC4+ z32djwe}CM+wR6Z)LgLr^A9mysrsXF+WHLNT-~m3#n$#7bKwht}?@cLxdwFOs>EzRm z@xcuJ_+plr8SK9m%iahy*S8_jSNkcca2Ei}6aP>@;eHbvOE<4)>{rwB{S3D7OL+xV&_`AC||9d*9St@1*~J?{$0F}^7% zs4yImi98Qy^N(u6&zQ#RWtsi5f~7}ipnY&XeZvzPX*Cngr&_zaFKx}L-vV3(A}4R9 z-`^?oL6avG-!50r<-oavFwC^4fT}?*o)B>IoZRgUaV)I6rNXK_II2C4J_nx;%kCC!fUF# zle_RAi6klTHuqNGYeZ3T0<01kU4oi0gD?*`|0~^NH2fbjM_Ea=o=Nn0WCp6AC38JJ z2(Dr@x<78^F#qD~5I1*$s#3}5iNAs;wi5pkooPS+qRECZhxpTaq3B*3pS$^m7`=2# zGQRu$-owKyq5osBSfD?Nh3DyoBz>0$t(oWV`mXhV#*j5xo=hJE-r^6w_|=EXs;W|V z#?=~%!$gg_^ZZ6(qNQiMT}+^tetw@GI=(C{-LN8FSB$HS+*YAKx*oL@rgCE#<_ECS z5f#;pc0HZ;_vs2(Z3RmU7!Hod3H=_ZeD`@p+fgY7{FHvn=sNZOKf~kne2S=}kA9VT zesKHjHkYQ_O*tIek}(-EumH}#rg`Uc+)|Ys?Kcc2m~>(#nv8FB|0jVIUd_fgPbX7d z1YK@<4*n`h>0EqKwk;>8%@Q1>S%n3u6q<<) z0R;F>Su|SCh!}bH&L+U~o5{Ui))${*GcMZI(l78%-2j0%dc9F1FbvpxF#EYhmTYm+t9O>~&Z!tN_QoRHJ zY0K7?}Ii7g2KiH(2hr5bYLpX6hcb+uR&!~kH@`R;zJrRe~vrU&94rd3mLEbn5 z33o0v>c8D$TfJ{j+AO~iTiU){tOu(20)V?63e6pH{O980VzaKp3zh%=h*l=H?F7TtnO7TAn)v1=WSj zn=|-UdjRSTd3Kigja-ZVkMs3RYhEvC;Qh)o^1NJ%^60PtY~q%c1_FitVlTk4_e0gE zlYIMrTJM1AFS{!!rfOkG=;WKnt-HS)GJ=Q;=Vt`f$n(3GS6Y8h%K>68jW^L(3+f0>q`!IHZ06G8}{u*~x#%N`ZcQJOp~A(BpPzr2w7=FPa-%82*k z#7bLezCcw)Q_9~FU%zEX@B*eE6_ZIQ3hqe_HR+31^Ckwi9}UZXPu~=KF|Nb%U7;AP zt);1|qykdAAvl7hM_2K{{uN%7Uj}z@12qOM^WK4G+2x;SaidaSl5}p@CzME|kV3gE zSMJi9ze&0wTjWBEHMx~-$7>4X)40ncNyQHm>+%Yc9`fs}wgvy(g0e348bcGQl&VBTW(Z3sPtU6-3(e2| z(?s+xc&=_slY8asHfO93#J~^5WbC@mrGNhZ;C@gbeBig{R`^q%Lp$jfgIyY}o>CRuR^^7%!Z zXgPq9y#*>81n!PG!KCro|Nq$YAN`yft+*JQPTESSQs9oa^ca1wM5m3!cK;64*AfCmq^&LM3lYo>o!{yJub2f!nj)n)BR+mN=@%nz7G`m zhm*%HL|>QW57j6jSer*4UjQJ5Fc&DT@oezT^5gLjWcOs3T^>{!y`js?PfWTtG^ues zPjBk-yPF6jVyY5u_8O;902EFM#0ylx5CNEJ#Wg`Zb9i zWHJGZWlCjow`TL7^2R(dw~#MKQ#F$0#Qpk!EEgSREc0>t+9QRBT&q0J(NMT&>4dR| z_>J|>)`xMo6GZ7Y`|~`Q*0}Op4)J6b^*gO<>H`e*M2lD*T(AWk&dYq(G%i^-U;C+mGmA{a_Pb&1#)?O0VqH}^oL-_WK*v8%;_%{?bxCn%)aOyPic^)=O zEZgedQY2<`wk*xd#mb3AF$|{;$1`)j=C^BHokP+iE3XIm-EcHkqbn!1&JT(A;F0D5 zNEu*Lvd*j?B{1?NNCPBX+E;(7#%Jmm7YD+7=y4Wvzh54Ox;%^8yCLPQ`~s#q)dP#%UtcWgws>kx zXjGnTv3Fv5a+P;05Cfc~syEZo_l68;XOL`n#rb^2

^vgqTpM$BHKJHRgOBIwjmS*cSlu<$ryKBSvCdV7-V+vBE- z2!P%<3=aLvZ`!KPY%doZ)!W`*;71Wvmc?mA+RC$;R7)0pWx++;u}Uc@ixyxm<}RO> zKRU+}6=yV@aj$bq8z1yvW#LMHhuY*|i191PM{MEOz|vO2gGe_|&F(0Y*rC!-`>;VM zTnov41$M|A%gg87^jDrB61$#L6p1t1bZy01n}2A_1fI#oNZU_k>SDoxUTT5GP`EO8Ds^I!fz)wuqSH+mx&njuZ+z`3 zeyV|}e{rsFP981nDK+an;%HmE(jgb&wKO)BNuk5Q%FF^*%SGOl6enByTI-`E7`&|e zr3~?6#G_Ec9NSW}`9I}mH-2iYW<_Zk95pfFSW7Gx<=_IpkQ`?uQ?#K{bzq zH6=;BC7Gu*9bbnOyGJuVeUtSEW6)n`w^xIAq*mbcQ4di*p)Aqc**K*k1~AJxGa|cH z?`jbQtO6dLW3^z*ulA^*rCn31I`lF+a;_CBEg=vH%#@UQM&)gy0A`BWS#V|d4{vhU z`5zZ+}x$IhK2?S>halaeuaou{u(Iyt<@8w+sO# zCjN(+xB#vM_p>ATc7_-6US1A2UhCKl5!xOI$jhl}4-L@9rd+)WPMfNy`m0ui(y1O1 zveAH77@}6C2VER{EM*u2B#mdcVYg1ON9os#`MDw0#g&N_Qj`|M>0_U?ZnqtE2J`iz z)6=dNsU3}L&}jLKW|}&KyTocX^>)H4$5WH;-@mR{TC_9_hTbq9z&96sn8Ph0i!D)x zTBQm@DD-S@uFbq6PO2D?9B)@J=d)<-gpQ;Ae4Wzs*CW!r**Af7sg*7F;!DV7VES9TSZV+;JdwJKUYF+ciQ1vnn(+-_;4YJ14az*NSNJzkJo}$l-K4T z#`Z2?-R}^43Yu1dPL7`gf0Ys{b3jFi>j_^ce1+jTUS2UI0)FI6j6AVt)lvRoK|a%K z-oy!e>{&Mx2re{H)OnXmLgtA(7!M&Xe7iWAbm#~@5;bzst0WYRbaJ_uX2Ldp*j!HLc}i`%X8O+-+dq+bziu z;Rt?)UJdv*i~XLm<*#5cOfw2-6T$V8F`y|siVI4 zGE&j|nX&ru7|;M~-jtv^?{+5cLRHI7W|?y^1;<1FFTLOCDK!O4o&0g~;v!J6wVLfd zMVu4Fzy=X9L|%B^24EODZ`4<#@+WP5srU(PA2o<=M+-EXwtlAL1yeRR#16Xg%o=%r znOn`eN6oDLl-G$3aXwH8J~NOXw$CQGFtOWO0_v)vth*~(WaWhhLjL1v!l2~+5I zr<+IQ^U={!?C3AXq1^4jg%=Zz<`Rh7;`~xE((3s3`EzgC*?D3Fc^1HUi06MOXJYFA zb048OHUS2$F$2#2VmUKf^16c5I>+OoaOV2TY!E%K^SFMb0@r(Zi@%}$#Z~RV73Q2A zKPQN&$rDs{jyO4ZA+Xkj?lnRzv|-iOah0%=llBi;3)rwUsBTCPBa<2@){*inzztpz zf@r+DP#CdeW3{D4px8F91`!TFfQ+SJ2w#QL)% zeU)Qx?7kVIrDu=P`#dAFLA7FMvg5>aR|Sab(c#2nAhj*()M8)s*M*DY=mxU&v_Y^$ zoOfI^$Lq&hg5~%O7e-tsbD^q0wIDp8fQ!Q|tBkIyhgz+dv66}j#YUa8%KUmWUzLQ= zbVDEv4fVbvO(f0C`XgTQ#v=81CARUf2J6D>EEMg>(8NS!RBgY++feRi@04Y?ymMyR zN5jFES@xeBC-Ml}5&V5xZX(i?BVb#^mhO|ENK$7!4_ZVpD^p?kQ_U|$LR^}lSiDf9 zXk}*ccn)v;_Hr5}M%?6CU{4C_z4TqfU%A?MQe8NE204etKf;B8x18uRnKTccxRUul zT)$Zv+zJRP6pjyP%-iXk4Vc=u##JdwUp^_^VA5QS34v_1L&A8ekM3!7YEmQA>8MiC z%q))s(Y3e}W)5=?6hpd*zBF~j6tQWb_n|2vFh_sajrxQ0^_mZDhDLV-Hj}mk$@}|b z*ayS)x15y=8O!nV<)|^BlJXY+U>9)GZgo}&=Z|NY$td9E&A5HBc;%JXh+0yOp5-*f8YY(w0MzUKE%!^e(l-UIQIF|rWe+3wZ0m1KFwe50;Yzx&Fk;BHhSw4ER z_sSNtzyLCLpS+p)+G_lp{y6*m2*hQMTGEA3yDjqUW*P93xBrr=PsWcJ-)WZWSkdD6 zHpwF16JVz}u=v`%t{_BbBVY&JG9mv+(?2rO0UrM9mC5*a;4FXh(Tg~`c@7r6mbp3L zU$aQGu<7e>t>qhUNeT;hx7Pg5BGs|H@@H>vvn~=SSuh|i;WsC}f6}p-Rn4C7yi)lv zvAP}iI(3!xg$PH^t*5(T4Ve6<+~B76j*i9ot@DUc`L}SRstL2+yoM+imYWEjpk{uU zGwo|&;}~%`{X9Hlll=E5gUEBuHlXT=c=SK-M@WcoQDWa*S87a-4qT|>AQ%!Scbq#9 zyX{h!_?o*UEkO@e`rx-vC9S6aZ?i9?;G7;q(dUvt=6Ye#pZT@!Foa8CHuf9NZK_oFy(L=c6Z}^;%8RAnaMgGSd z^f&QxV2bD*;g65$M6=ni739{S^$-S{rL5chTT6omECV$;ly+);0d{cFXsQr+sl>=8 zhCCm**?!cGT6b5j)8BFFi%nyG*URkZe$XrfFTEovxV{Y1X2dI*?NZ|8@*3WoPUh(e zj(ngGKJ*H`(X%!L_Zp{y!&I+<$N6$`X|ts-E7UEM?CL-~L4W`Mxps z?%dYmzW1%X#TPQ=qq_F7bK!KONv1d)LO9e|!MYaQLRh z^W*KHmal@{5K4YF6K7iwdylW|<@rnDR~}(Tm~sy|KeInfM=!1{d~aP%o~*BgC++fC z{NZF7?3&VVxNWLm8Q=Ah@R3v95j#59<;B~gy%13r{;wgv>Owf5V(e~C`*ptS0ZF<7 z;DRv6uvf1fVZY;PfmXM!p%xuPkMwA1t%W_>5MY<0Hn%Msr~Yj%%-Z{~1R>hNn4-Yzv?fAggv1xvDKXgcrus7rdOo3wplGGEprThQ zqS7lqbocL9#`=T>M6#}MZAEn3bDm!L$pGEEAJSt8Z8gJAVF!Hx zt{2xL6YKv~^!XLgRybV>2Zf=H1t0xz$tK%777R1RFbJ%M`DmPa6QL?@T02~hl?Vj9 zm+#SuJ4c}~+Sg?hrWe$ATJQ8B?hp1JP~6=AQ7{#7l<;6dqi{1QD(;yZZ5*_ad%*^k z^0HytQXKoDcvLFQ?*oOtnVE?vXy);+P>;{~vZ*RSS+#3|I4w~&*Kp`p=rE zmgN5)0e*<WMsCzQQf{w4u0kNLlQY5D&;K2{1RPUY_~!xtaP}o8L;GKz?wYW?asZ z%>Wl9@KB=`AYsBo{r@_nyuE_7FWOk6)B9Rm0P{1)FrTZSF4wmG{NPbjYXvVf!sY~am!fMnTeGK~hZ|hJ7rfzjw8)rH%Du`tB+OHPjht&` zJj7HiLo18Z=tJvP@zHyrWJIu}=}_<%Erkr=cK@zXs=a$WfR`MpUYjDJMf-N4V(`gz!{G=TozZuG!;g^!HQ4Us%epf^L 
zUtnU4RL+PLeN{7vyh>rtqw`yi{fZ*u{_qk#X-oAqkS z9TW)Hng4OeK;_ktW+^cPT?=sUIfF2E#?yL&Gu=oK|HsIK6AY%Q&Uh%E*m1s-3+UJ9 zt~4{;(YAQ1WT`L4t?)ld%{p8+h&g#cG~K!)o~FE{qw^jYiCnUGo0#ZA!wWO zOw-cZfqDv)#>i)nHF+4kqW}CH$-ggEZnj-lQ(bUvZf=fq;);N_n)`@{MvYLh6ZIIR z{ZR%Il$)yZiWuH_Kz5(ml9`ozp+VO@2i0%MH&fx1cO`v1@!6-UtGxX_^)q=bW)ZEIKZCNoVRj*zp6CoXV9co!~1f`6kEYksSW*WTna z96Cu3FU{PE^8b0DL5GrVx^0A5@g@(44y9DS&wP^fIHfb;%hjNT+Zm*RsA8B0 z9UV+)>4zGQe_hbR?Aj=pfXDB}rr1!?*!b${Mps1%8z!`CIHxcgm?yaj4Zc}wx1#Xm z@wgxNka?9kf2>rpvuE&y8c1qL(#7UKtLj(S-p(|KVKS$&{KqmJDnM90_fqEO|qfPlj;}g!V_p0KK&C@#* zqw@Le=A>@Dlr+e%0T0okRAYGpU`0t$etLfjnUNFl1Oi)V|JD&qK235sJNsWzeA8lt zFtN}^G?l>55*+Aro8W_#%^m#%$WN%!ibKdkprKqUQmAMTMLw^a7qZr=+>nz{MwFI4WgWdOM*PY}oB_!`(zH z93_(K^6W|rr-iV`)C;T=J!aOYqmbx*L{jo|R4;e5SZd20Ou5|M#$xfQOyx}YFiGN3 z7f#tS|0{V&g}ilfQ0}1daS*0ZqSWW{Se>IC<5qp++_E4iqgjXV7GG zbT@FYMCF_JPO;ilKZ7`P+JY=79a+(byT_b-TBPFr{r&Fb`9wO9?+s@fa~8Piun$lC zR&QQ~G@JPDaMm2O+H`P0snsX%3X=f?pS-rkTX8QioQ^*|OOnDC znbLUL4G~E76u!HTRk3<9-Y&vg(1xt_%#) zd1Ocne&UV~zO9#m_tma;9B=K%B!I*JZ^Ah-^&$>>t6|nF-U!B$HsKL3 zq~iL?e&Zz}+uC8DPoa41+^ftz@m)UBO`}1Z8dmX@vrsnI?wOY(YV}Lmz4QCKQ8;g} zLzULSKw&vcH?7S-VaeN^}}FbDyt_oM;kl}f%~YY&w9M~7;7jjW|hK7nJ*oSK1-c?{xCH%w{& zn1Q*TgbGyvVi>@5Bp#SRzMZ=oto;lK@5@`+v>S}i&21q}M8;t)HRYrn+XXM+*_L!J z2F4?J|Kn4??)-c$NOtsa_4yIe>k@GOo}L#jke@DueVh040+3qnNc)ZmWp7#7s>{Ck z7*u~y3wn=P=%aXaj>PKSpk;eZe&)@aZZ}|-(o$M#Cekb>rR>uUSS*%V9eYr8ExSG0 zdjEddA2I+7v`>^(^DLeyS)S>W3PGVt?hIckiyAA+#kt+nsPKC>MSN>fUCK#{anrZA z@Aw8UQ>#$}{@YlmUEUf^bD$6u#SvLLNg8X-t(epkZ`#p7ve))K@Qy-HkKiviL=Un1ZuGr3B*v#AxAlePZ z<69qWk3r!UP-Y)ZF07A#JtR<+THsl$f-VTu!Lt`<0)1mi@%?-6-k+Bn5(!p90QD~P zg&`(Sln)u*tMzU`Gvr}g-It{^$Q2MU!@Ep&l{5QLDf8;}x6Y=ve@R&={=g`dFt!vX zs-Lk8AqXne$&!H_Ze7f!vXgdiGF1ar;1XpBgt~KYh=B3A<-IsZ@e_Cn2lz#*M(%lI zHbPMcjd>Ox@OQrdc*PX79ARc@K!a%UO@OS#FS`AAwhLhxn{8M^vF~^%yzy;5V*lf1 z0h4`U*Cuz}cR#vL8Uh?uDq!InAI-(`R=hFKW-!^q(~8T=i-9xY!d z-jBBCrF6D*jhZ2rl>sJG)!1K`?55cX9Aa&XS6&j%7hB~q+U!#>r6}2uNGa0vUXNs> zic_Mw?f#Fsf+ABL8qWDC-ZZ*5n8+X6z14ojmZCA!S1#}89P>Tj7chCk+<*6#sw_I8 za1{OO0I-rxKGZ-%HCrnGgq$=N0)+5hwB`E+gicha{)1atF*g7KDz z6Bxv-PB${pHoma*?d2D*Lrj1!3KmGkQm17w__N+ak(04>{7!Jw;wKH?efP4mDtW@} z{KduU;rWs3*)g#4S_B>&O&SR0@t|%yX=_c>ja#n|1R40Zo(71zLgMDVWK&#(PEKA< zv4x)lH3>s1ib~4oA3Df@fZU~8W~$ITqPvWf4_siW_7GX|`0G`O*z%uUF*4f;!$E*$ zEJvJEaO4(OYQ!7zXDzxvCnqh~I(Ou;*2qBU;;t#nk;SA#+lIUU^+0TuMSz`E%qt2h zQ0t4=6PCU+?V+UO#<2&Y*)Hnl5&_iZU4R#cy2*v$n6jk&nF{-^q!wGXcj$$eRa`kf zJ$RmLRTueG7hJd5ylp;jmsER%oCoc_u_}Q|9707ax@heO3u;t`jUAkKYDe7q4 zJ*`{1SDbX-Y#%TB#5P!EpvmcPET$%vC8PhZ!caQGff(a=ebo9S*F)C!2e zmj=9P6OhRcuBJ8n9K6)X$;5p-@}jyh{8fZQjR^J&CV;wm(NiVjl2^M5p7S>wKH~A9 zt1(BsC$2TWNme$9xH>wzes+gYQ++I$oY58iYJn7L)JV zQ$ISZ6!@{p8tU3eu$G51;aL-yim=1_t4oKrCN)5F@=-SHpvBw)8rg9?oZ9a@~YeTzSuqKro7WaE(_B`CJJoHw%n4q^x#Bsf= zbj8rn%J`rX*pvb7E{w{$=5vi@y1s%rP(oj%rICHTgP6K_)E5%ju%4h1`JfhclsTlz zW{pr4eXi<-)HrDSZEOZwN$Jgs0l@#I7k)ZOvM2k$loz+|VC-s8M2CwHnu zok8?OpU$V`)v-Jok>z*2nv&@*@m0R|1xIy6n@RIK`}-13QDeUA!k>xH-y|)Yz7V;_ zOQj(3coqEV$fe{f`C?BAi@7D6g=eb3l9eO=>;HQ8Qt_hSOtCOCV^RMVI{2JDx9O6z zO!0U%h;-QT`ww^jsp8~BNWnGc-#B7eXc`|U5Q!)yscVA2gWRHop2^%VRzsPV$la9` zHmx#`X9Q7~P+Gbbrvn)Pa}-{#=UnRVME_?7MZVoQq^X@zE+*Wju*dCgQji+;pg)B6 z3hw_XIuCa?-!=?~;%ATAo79Thqoh@WP5WaXYr6Xen? 
z{a`{#O8y|wGd;y*v@$AWI;ErGy-;oVseP5w0;jmgM!8ieYEy<0rnvuM_POBNL9pnK zNRXU!pRX3wfom2mA9qUkgd7xTM8RB~%Er!C$iz>QLMd1+VK^Y`FPHds6j|9;MjGFX z0ya52A4&M(2`V)-iTvNk_OoM|#Oe!~tTR~lo@i(e7CtiksJ>dL_tHrtZ-KcX>!By< zCS{qPzFfzqR}I2h-6q;P^W5WT;AdND3@J&%=5BWq0HF{VfnZ4&KR>eHd%rp<L)b#1HA6wOo{Jh$em;Uz>(R58CDjSYmE7VsPLNQN{x7kDO=ul)1#cT!j5mOiZGy{_I0LA}M-Fw0|&soI}Fa9cQ$%Bi2(4syc zr95ZVJ>%}DEb0k?5U5mH7$v> z%>FQa40b6>QYGFJw|g6`TtPlV2{RpnRy&wziH*n=My>POUF2s|2zxc=st4TO{rh(L zrM~eeb;C~O;ejN3SAtour+c+%?IM}I5^InV8I?{i%_sF&kMRGJDv*f~@zqn8DyvFo zJ`9j`xgJq?4{{!ep6LyFv3!NKvoMfg0s@=i0eOlZ>2JsDTrAwPE+Q6>J~l$=30# zR>e;iWFQFjxFN8HQ7hOeBT8h^8@c+A24W{Qgvk(*-wjAJGhrYT`ATCg$!$#cZqwU( zRQaNFC5Tjc9QyjrCnX?d;mb0B#X4lK{Ay%NbSN(1-_IVfR<|Pe%#K1S4joGgnhI?c{1$auugOwoU8-CrAiF4@Omi;x%y(qeN(vp8JYV%nGmhN#f%t#=`FF6(Ii zpci?)2C-Dm(RCjwUYRg8GAw{5pt)jn^Av=Uc;H{&ct5TGG3*&<>~T5Wf$H^KjFr5E zyvMQnofUIQo`UNK)QI={aT(u-aH*Q;wF_O|gq2{;WsIu{sy*7{yC{T}AnlQOA83(`PFxKlP{8#~0Q~^S?xQJ*1Dkqw zsFyB4!vsdC>1C1e!I8-qhe1z`M{N1hldy+@#*I+X&G2P{JFkx3(EcwOWo6J2+L<|+ z=7nT!0;9aw_^CaOzCDk}7K>Pgv%a+myed{La%B=X*IIx2yEEd~4+^kCl*KXzljWk+ zDCtC|b4!^^;!=TuZJE+KJ<%#Oc-#i*j;3HAmKi1g%uG2SG;2DJ6|etK$NnTQ1O$H2 zYHX6d;M3Uou@CO;=Xc>m*=7%e3O--<-lDLGD$8R0+Kykrh>D;h3fC#0(NnYT;Pr+I z!$ULsznhA)W}?cEp0eS@$Un~(6*G3TTBI2ZoMvT>rbIg@8=k%zzs}s=+1WYUpF2EX zrLH^^b^N3FTRY%BiWAR{p;TJ)L(GokdT%ad6Jg8pO!I5d+TJVPBNaS6iB{7348xZd z(2~Wym7KX+%dWePO5v`{1K`5Eza|sr&sRi%+w^b>&=#&Y3YC>TfUd6oW=||D#RYBm z3CHV3?D{Txs}g^p8x?QBlKnOLY$GmSf?9$qdC_!DG?3J^ z@U^!28-`>9%5c&sPcDRup1?(EiKPHro}dK9h1g8m4A^*1w>C$ zvZn*T(@Pbe+u*$(^OhZps#$+%iN}2HJJX{-VNv^XQF4j^ge9Ds=cF2Zh1>Jk*vTf= z$}Z}=R24?c!oQ~5?l&G~AKr|PdY$CsMojn54QV{K4QUoNmnLRm91G)GST*f{)uIh> zvPXZs4V7~pkob4%$r#m5^O2;7A(Ig1*Fjcqdu0BH*ziD%%=@XJyyuvk`5Y2cI-dL@ z(#Is0u*uK`A=os`@Da(WejG3RZ`TYtBOc3Ws1o4hXkVnqdMq(kT%D$-nq`jI{0XSs zZD7R3QQ;aPgs0iTVg};)yeP+-{h3TheGacJAwlQ!A4Fv78hx{7rY6R)$haZ~o4Gf?IDswKJ=g8wC<0L%E7{U&3wE#e*&N&Ur=&U^9bG2WiD7v_ zEdBso=VQcEO<)Ha`;o&7<4|~o_lrCjK#4Ry(bhn{%FIO$!r?4Hz$buQakL3!?K0N) zX~kU_s&LR^@zO;!K34)L3)xgSTzA5DAw$v$o=gbI{E4#9&3f8vxn2Gj3r1a6eGZnv zcMC?AKsCAR@?3k3B|opix>~Zye$@VFX4d?Jl>-kwLCtq)^Q&^+@i3v-=D4 z>e{a^oc6-2%qL=1B*I`pBf5WQMpw;W^H7t~;dUi3eV zDBvU+8nme>4#4>IaALkW?qcfn369>rEiKiA&x8v-nYKR-%|mgOd2UEc+sBHMa~ejE zCuhCa|GRZ`=OdU357{@K1kU-`d`_+tP$CK?5b*XsT?A50*7dM!&%B|f(w3*T*-e`ESI?1^x z_iMGWZpn(7Zq5(3c5|J4L!u()p*&*LSaf6Tpm_m*Ge!KQE^%;`hIAu z$mcIPD%_^}PGzTsJDPvLisO`YpJEB>L-4(M9*Gm;6WI&5>-oCEJO#mw`A z&r1`AwJ(wR{;f%ZLT^)1zzW5AB&8n%8?R&ED2BDs>2sfqs^Hlb@Zc4|VdFU9dJ}ea zLy6e+q9Y?EO0tl zHadQ0GE9V2PiG~;pGh+d(bPRaZLd1ysV9?N%nlF(1$zw?}8ULd>$&yrquWeTQ`Y!lnMSi>0 zWmT&|O#kKC&W$y=3qroDoi}q5)J4I|=P|esDQlsR3;DcpY?2q_a5?Z^;Zy1(Bq!o~ zR~b1bwad%EfJVjaQ!)cBZDCv7Gj55d)8k9;@gn1`-5v>zmk+=X#9k?b+_)bYqh$N% zZ?_C`oW-qX><{pw!le(SFbXv1Fp(mss1FBPbnJzAt9dW^C1MfY=H+6Zn9rao^KNlH_;>5$&qq-65qZ*Cs%PMZwF(4b% z6h&R83`>5Z(}Kq7-OS3}SS9-OTB_^*Pfz07&9Qc(+-KpDSOShx&CeIiV4Md$+^z-v z@YURLth(f8l=-vZqk=d3~f{b&CPxcwCh9NrIbbAwzR8&5D^yd^m` zMATAKoPQhrvLQRC@)zRrn`xo8c7LR|KB!K7Hwj5t*t~&$5Y7K7-K-d&iByO5YVS8| zs9RL=c!dG23#~Ik+{*4x?VzMy|GI|I}-wj0njG_5(Nt{W$>dXcpT!Tp}ON(AGfhV;m*(FlC9FDZ%a%;s6nW~ zOyiDTEOFI-+s^H7^nrVNL6akOBqbB)BZM2YCFSZZQ>oK~$0^8ec<+oz9B~0(RTHPK z!->y(W`yK*1!8K-JovbL-%ov1C$;g2YaS?uW~YAjIjb;rp{|=<1oDFE#h9kOc0246 z9iNoRG*oPLe6yA4d$tPs(DGXE|A6~FSNl@V^q2)3A2(}1CMlG>bNgH z@*JV>;(wuad=g^nG8F)_Qh31EG`sz+#rLZbZHTCS%w{;Mmm!z;ZCYznoEhh5LIU4{ z&PT_fZg>KeT+yR_hsapttoDz2U+i2`o8FWS0chY|UZUjAN&VW(@uuH< zi}BBGHsYo>MMzy#V-@csUH-nXq0%+f5f_(0TD_TcQ^q4CmjBA{NDbl9+zt-@n-}xE z91~HbO764$GrXs~_uG$frLTC6zofMvUEa!&eieOyc$%LvwWT~_G%*{;KmE3c- 
[... base85-encoded GIT binary patch data omitted ...]

diff --git a/compare_gan/src/test_data/image_mnist-dev-00000-of-00001 b/compare_gan/src/test_data/image_mnist-dev-00000-of-00001
deleted file mode 100644
index 21070334536b3523fa680cc3cc1d530a1e7498db..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 400
[... base85-encoded GIT binary patch data omitted ...]

diff --git a/compare_gan/src/test_utils.py b/compare_gan/src/test_utils.py
deleted file mode 100644
index 06a7d0c..0000000
--- a/compare_gan/src/test_utils.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# coding=utf-8
-# Copyright 2018 Google LLC & Hwalsuk Lee.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Helper functions for test cases."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import tensorflow as tf
-
-
-def load_fake_dataset(options, num_examples=100):
-  """Returns a fake dataset with fake images and labels.
-
-  Args:
-    options: Dictionary with options. Must contain "input_height",
-      "input_width" and "c_dim".
-    num_examples: Number of examples to generate. You can always call
-      repeat() on the dataset to iterate for longer.
-  Returns:
-    `tf.data.Dataset` with `num_examples`. Each example is a tuple of an
-    image and a label. The image is a 3D tensor with values in [0, 1]. The
-    label is always 0.
-  """
-  image_shape = (num_examples, options["input_height"],
-                 options["input_width"], options["c_dim"])
-  return tf.data.Dataset.from_tensor_slices(
-      (np.random.uniform(size=image_shape), np.zeros(num_examples)))
diff --git a/compare_gan/src/tfhub_models.ipynb b/compare_gan/src/tfhub_models.ipynb
deleted file mode 100644
index 4c562e1..0000000
--- a/compare_gan/src/tfhub_models.ipynb
+++ /dev/null
@@ -1,342 +0,0 @@
-{
-  "nbformat": 4,
-  "nbformat_minor": 0,
-  "metadata": {
-    "colab": {
-      "name": "Compare GAN Modules Colab",
-      "version": "0.3.2",
-      "views": {},
-      "default_view": {},
-      "provenance": [],
-      "private_outputs": true,
-      "collapsed_sections": [
-        "7N-eCy31umdo"
-      ]
-    },
-    "kernelspec": {
-      "name": "python3",
-      "display_name": "Python 3"
-    }
-  },
-  "cells": [
-    {
-      "metadata": {
-        "id": "7N-eCy31umdo",
-        "colab_type": "text"
-      },
-      "cell_type": "markdown",
-      "source": [
-        "##### Copyright 2018 The TensorFlow Hub Authors.\n",
-        "\n",
-        "Licensed under the Apache License, Version 2.0 (the \"License\");"
-      ]
-    },
-    {
-      "metadata": {
-        "id": "X_lFsiKEnM33",
-        "colab_type": "code",
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        }
-      },
-      "cell_type": "code",
-      "source": [
-        "# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.\n",
-        "#\n",
-        "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
-        "# you may not use this file except in compliance with the License.\n",
-        "# You may obtain a copy of the License at\n",
-        "#\n",
-        "#     http://www.apache.org/licenses/LICENSE-2.0\n",
-        "#\n",
-        "# Unless required by applicable law or agreed to in writing, software\n",
-        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
-        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
-        "# See the License for the specific language governing permissions and\n",
-        "# limitations under the License.\n",
-        "# =============================================================================="
-      ],
-      "execution_count": 0,
-      "outputs": []
-    },
-    {
-      "metadata": {
-        "id": "65PLbu3MviWn",
-        "colab_type": "text"
-      },
-      "cell_type": "markdown",
-      "source": [
-        "## Compare GAN models Colab\n",
-        "\n",
-        "This Colab notebook shows how to use a collection of pre-trained generative adversarial network models (GANs) for CIFAR10, CelebA HQ (128x128) and LSUN bedroom datasets to generate images.
Random vectors are fed into the latent space to generate RGB images using the pre-trained generators.\n", - "\n", - "For the details of the setup, please refer to our paper: [The GAN Landscape: Losses, Architectures, Regularization, and Normalization](https://arxiv.org/abs/1807.04720).\n", - "\n", - "The code used to train these models is available at: https://github.com/google/compare_gan\n", - "\n", - "These models are available as [TensorFlow Hub Modules](https://tfhub.dev).\n", - "\n", - "\n", - "
\n", - " \n", - " View notebook source on GitHub\n", - "
" - ] - }, - { - "metadata": { - "id": "1A9p20T2uxKO", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "## View the available GAN modules\n", - "\n", - "Run the cells and select the GAN module to use from the table below." - ] - }, - { - "metadata": { - "id": "Uj7C7cE_v7Ev", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - }, - "cellView": "form" - }, - "cell_type": "code", - "source": [ - "#@title Imports, set up, and helper functions\n", - "\n", - "from google.colab import output\n", - "\n", - "import matplotlib.pyplot as plt\n", - "\n", - "import numpy as np\n", - "import pandas as pd\n", - "\n", - "import tensorflow as tf\n", - "import tensorflow_hub as hub\n", - "\n", - "\n", - "tf.logging.set_verbosity(tf.logging.ERROR)\n", - "\n", - "\n", - "# Get the module metadata for the GANs as a pandas DataFrame.\n", - "module_metadata_dict = {'dataset': ['CelebA HQ (128x128)', 'CelebA HQ (128x128)', 'LSUN Bedroom', 'LSUN Bedroom', 'CelebA HQ (128x128)', 'CelebA HQ (128x128)', 'LSUN Bedroom', 'LSUN Bedroom', 'CelebA HQ (128x128)', 'LSUN Bedroom', 'CIFAR10', 'CIFAR10', 'CIFAR10', 'CIFAR10', 'CIFAR10'], 'penalty': ['-', '-', '-', '-', '-', '-', '-', '-', 'DRAGAN (lambda=1.000)', 'WGAN (lambda=0.145)', '-', '-', '-', '-', 'WGAN (lambda=1.000)'], 'architecture': ['ResNet19', 'ResNet19', 'ResNet19', 'ResNet19', 'ResNet19', 'ResNet19', 'ResNet19', 'ResNet19', 'ResNet19', 'ResNet19', 'ResNet CIFAR', 'ResNet CIFAR', 'ResNet CIFAR', 'ResNet CIFAR', 'ResNet CIFAR'], 'beta1': ['0.375', '0.500', '0.585', '0.195', '0.500', '0.500', '0.500', '0.102', '0.500', '0.711', '0.500', '0.500', '0.500', '0.500', '0.500'], 'beta2': ['0.998', '0.999', '0.990', '0.882', '0.999', '0.999', '0.999', '0.998', '0.900', '0.979', '0.999', '0.999', '0.999', '0.999', '0.999'], 'module_url': ['https://tfhub.dev/google/compare_gan/model_1_celebahq128_resnet19/1', 'https://tfhub.dev/google/compare_gan/model_2_celebahq128_resnet19/1', 'https://tfhub.dev/google/compare_gan/model_3_lsun_bedroom_resnet19/1', 'https://tfhub.dev/google/compare_gan/model_4_lsun_bedroom_resnet19/1', 'https://tfhub.dev/google/compare_gan/model_5_celebahq128_resnet19/1', 'https://tfhub.dev/google/compare_gan/model_6_celebahq128_resnet19/1', 'https://tfhub.dev/google/compare_gan/model_7_lsun_bedroom_resnet19/1', 'https://tfhub.dev/google/compare_gan/model_8_lsun_bedroom_resnet19/1', 'https://tfhub.dev/google/compare_gan/model_9_celebahq128_resnet19/1', 'https://tfhub.dev/google/compare_gan/model_10_lsun_bedroom_resnet19/1', 'https://tfhub.dev/google/compare_gan/model_11_cifar10_resnet_cifar/1', 'https://tfhub.dev/google/compare_gan/model_12_cifar10_resnet_cifar/1', 'https://tfhub.dev/google/compare_gan/model_13_cifar10_resnet_cifar/1', 'https://tfhub.dev/google/compare_gan/model_14_cifar10_resnet_cifar/1', 'https://tfhub.dev/google/compare_gan/model_15_cifar10_resnet_cifar/1'], 'disc_iters': [1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 5, 5, 5, 5, 5], 'model': ['Non-saturating GAN', 'Non-saturating GAN', 'Least-squares GAN', 'Non-saturating GAN', 'Non-saturating GAN', 'Non-saturating GAN', 'Least-squares GAN', 'Non-saturating GAN', 'Non-saturating GAN', 'Non-saturating GAN', 'Non-saturating GAN', 'Non-saturating GAN', 'Non-saturating GAN', 'Non-saturating GAN', 'Non-saturating GAN'], 'inception_score': ['2.38', '2.59', '4.23', '4.10', '2.38', '2.54', '3.64', '3.58', '2.34', '3.92', '7.57', '7.47', '7.74', '7.74', '7.70'], 'disc_norm': ['none', 'none', 'none', 'none', 'layer_norm', 'layer_norm', 
'spectral_norm', 'spectral_norm', 'layer_norm', 'layer_norm', 'none', 'none', 'spectral_norm', 'spectral_norm', 'spectral_norm'], 'fid': ['34.29', '35.85', '102.74', '112.92', '30.02', '32.05', '41.60', '42.51', '29.13', '40.36', '28.12', '30.08', '22.91', '23.22', '22.73'], 'ms_ssim_score': ['0.32', '0.29', 'N/A', 'N/A', '0.29', '0.28', 'N/A', 'N/A', '0.30', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A'], 'learning_rate': ['3.381e-05', '1.000e-04', '3.220e-05', '1.927e-05', '1.000e-04', '1.000e-04', '2.000e-04', '2.851e-04', '1.000e-04', '1.281e-04', '2.000e-04', '1.000e-04', '2.000e-04', '2.000e-04', '2.000e-04']}\n", - "MODULE_METADATA = pd.DataFrame.from_dict(module_metadata_dict)\n", - "\n", - "# To start, select the module with the lowest FID score.\n", - "MIN_FID_MODULE = MODULE_METADATA.loc[\n", - " MODULE_METADATA['fid'].astype(float).idxmin()]\n", - "\n", - "SELECTED_MODULE = MIN_FID_MODULE['module_url']\n", - "SELECTED_MODULE_DATASET = MIN_FID_MODULE['dataset']\n", - "\n", - "\n", - "# Display multiple images in the same figure.\n", - "def display_images(images, captions=None):\n", - " batch_size, dim1, dim2, channels = images.shape\n", - " num_horizontally = 8\n", - " \n", - " # Use a smaller figure size for the CIFAR10 images\n", - " figsize = (20, 20) if dim1 > 32 else (10, 10)\n", - " f, axes = plt.subplots(\n", - " len(images) // num_horizontally, num_horizontally, figsize=figsize)\n", - " for i in range(len(images)):\n", - " axes[i // num_horizontally, i % num_horizontally].axis(\"off\")\n", - " if captions is not None:\n", - " axes[i // num_horizontally, i % num_horizontally].text(0, -3, captions[i])\n", - " axes[i // num_horizontally, i % num_horizontally].imshow(images[i])\n", - " f.tight_layout()\n", - " \n", - "\n", - "# Show the HTML for the module table\n", - "class ShowModuleTable(object):\n", - " def __init__(self, callback):\n", - " self._callback = callback\n", - "\n", - " def _repr_html_(self):\n", - " # Set up the template with some nice CSS.\n", - " template = \"\"\"\n", - " \n", - " \"\"\"\n", - " \n", - " # Set up the headers with nicely readable names\n", - " table_headers = [\n", - " ('dataset', 'Dataset'),\n", - " ('architecture', 'Architecture'),\n", - " ('fid', 'FID'),\n", - " ('inception_score', 'IS'),\n", - " ('ms_ssim_score', 'MS-SSIM'),\n", - " ('model', 'Model'),\n", - " ('learning_rate', 'Learning rate'),\n", - " ('beta1', 'β1'),\n", - " ('beta2', 'β2'),\n", - " ('disc_iters', 'ndisc'),\n", - " ('disc_norm', 'Disc norm'),\n", - " ('penalty', 'Penalty'),\n", - " ('module_url', 'Module name'),\n", - " ]\n", - " header_template = \"\"\n", - " for _, header_name in table_headers:\n", - " header_template += \"\"\n", - " header_template += \"\"\n", - " template += header_template\n", - " \n", - " for i, (_, row) in enumerate(MODULE_METADATA.iterrows()):\n", - " uuid = \"row-%s\" % i\n", - " \n", - " # Reister the callback for every row.\n", - " output.register_callback(uuid, self._callback)\n", - " \n", - " # By default select the module with the min FID.\n", - " selected_class = \"\"\n", - " if row['module_url'] == MIN_FID_MODULE['module_url']:\n", - " selected_class = \"class=\\\"selected-row\\\"\"\n", - "\n", - " # Get the metadata for each row.\n", - " row_template = \"\"\n", - " for key, _ in table_headers:\n", - " row_template += \"\"\n", - " row_template += \"\"\n", - " template += row_template\n", - " \n", - " # Add the onclick handlers for the table rows.\n", - " template += \"\"\"\n", - "
\" + header_name + \"
\" + str(row[key]) + \"
\n", - " \"\"\"\n", - " return template\n", - "\n", - "\n", - "def set_selected_module(module_name, dataset):\n", - " # Assign the selected module and dataset to the global variables\n", - " global SELECTED_MODULE\n", - " SELECTED_MODULE = module_name\n", - " global SELECTED_MODULE_DATASET\n", - " SELECTED_MODULE_DATASET = dataset\n" - ], - "execution_count": 0, - "outputs": [] - }, - { - "metadata": { - "id": "ziU4PhGh283a", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - }, - "cellView": "form" - }, - "cell_type": "code", - "source": [ - "#@title Run this cell and select which GAN module to use below\n", - "\n", - "ShowModuleTable(set_selected_module)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "metadata": { - "id": "sgzX499wqPTv", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "## Generate images from the selected module" - ] - }, - { - "metadata": { - "id": "ZP0P3TkGoBAx", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "assert SELECTED_MODULE is not None and SELECTED_MODULE_DATASET is not None, \\\n", - " 'You must run the above cell and select a module from the table to generate images.'\n", - "\n", - "print('Using module: \"%s\"' % SELECTED_MODULE)\n", - "print('Generating images like dataset: \"%s\"' % SELECTED_MODULE_DATASET)\n", - "\n", - "# The generator expects a batch of 64 vectors of size 128\n", - "batch_size = 64\n", - "z_dim = 128\n", - "\n", - "with tf.Graph().as_default():\n", - " # Load the selected module\n", - " gan = hub.Module(SELECTED_MODULE)\n", - " z_input = tf.placeholder(dtype=tf.float32, shape=(batch_size, z_dim))\n", - " image_output = gan(z_input, signature=\"generator\") \n", - " \n", - " with tf.train.MonitoredSession() as session:\n", - " # Generate 64 random vectors as input to the latent space to generate images\n", - " z_values = np.random.uniform(-1, 1, size=(batch_size, z_dim))\n", - " images = session.run(image_output, feed_dict={z_input: z_values})\n", - "\n", - " # View the resulting images\n", - " display_images(images)" - ], - "execution_count": 0, - "outputs": [] - } - ] -} \ No newline at end of file diff --git a/compare_gan/bin/compare_gan_run_test.sh b/compare_gan/tpu/__init__.py old mode 100755 new mode 100644 similarity index 57% rename from compare_gan/bin/compare_gan_run_test.sh rename to compare_gan/tpu/__init__.py index 7b95ea2..39eaf76 --- a/compare_gan/bin/compare_gan_run_test.sh +++ b/compare_gan/tpu/__init__.py @@ -13,13 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -#!/bin/bash - -rm -rf /tmp/results -compare_gan_generate_tasks --workdir=/tmp/results --experiment=test -compare_gan_run_one_task --workdir=/tmp/results --task_num=0 --alsologtostderr --dataset_root=/tmp/datasets - -echo "Training model and evaluating GILBO score" - -compare_gan_generate_tasks --workdir=/tmp/results_gilbo --experiment=test_gilbo -compare_gan_run_one_task --workdir=/tmp/results_gilbo --task_num=0 --alsologtostderr --dataset_root=/tmp/datasets +# coding=utf-8 diff --git a/compare_gan/tpu/tpu_ops.py b/compare_gan/tpu/tpu_ops.py new file mode 100644 index 0000000..435f6da --- /dev/null +++ b/compare_gan/tpu/tpu_ops.py @@ -0,0 +1,125 @@ +# coding=utf-8 +# Copyright 2018 Google LLC & Hwalsuk Lee. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""TensorFlow operations specific to TPUs."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gin
+from six.moves import range
+import tensorflow as tf
+
+from tensorflow.contrib.tpu.python.tpu import tpu_function
+
+
+def cross_replica_concat(value, replica_id, num_replicas):
+  """Concatenates the `value` tensor across TPU replicas.
+
+  Args:
+    value: Tensor to concatenate.
+    replica_id: Integer tensor that indicates the index of the replica.
+    num_replicas: Python integer, total number of replicas.
+
+  Returns:
+    Tensor of the same rank as value with first dimension `num_replicas`
+    times larger.
+
+  Raises:
+    ValueError: If `value` is a scalar.
+  """
+  if value.shape.ndims < 1:
+    raise ValueError("Value must have at least rank 1 but got {}.".format(
+        value.shape.ndims))
+  if num_replicas <= 1:
+    return value
+  with tf.name_scope(None, "tpu_cross_replica_concat"):
+    # The mask is a one-hot encoding of this core's replica_id.
+    mask = tf.to_float(tf.equal(tf.range(num_replicas), replica_id))
+    # Expand dims with 1's to match the rank of value.
+    mask = tf.reshape(mask, [num_replicas] + [1] * value.shape.ndims)
+    if value.dtype in {tf.bfloat16, tf.float32}:
+      result = mask * value
+    else:
+      result = mask * tf.to_float(value)
+    # Thanks to broadcasting, result is now set only in the position pointed
+    # to by replica_id; the rest of the tensor is set to 0's.
+    # All these steps are basically implementing tf.scatter_nd, which is
+    # missing in TPU's backend since it doesn't support sparse operations.
+
+    # Merge the first 2 dimensions.
+    # This is equivalent to (value.shape[0].value * num_replicas).
+    # Using the [-1] trick to also support scalar input.
+    result = tf.reshape(result, [-1] + result.shape.as_list()[2:])
+    # Each core sets the "result" in the position pointed to by its
+    # replica_id. When we now sum across replicas, we exchange the information
+    # and fill in the local 0's with values from the other cores.
+    result = tf.contrib.tpu.cross_replica_sum(result)
+    # Now all the cores see exactly the same data.
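+    # For example (hypothetical shapes, for illustration only): with
+    # num_replicas=2 and a per-core `value` of shape [2, 3], every core now
+    # holds the same [4, 3] tensor containing both cores' rows.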
+    return tf.cast(result, dtype=value.dtype)
+
+
+def cross_replica_mean(inputs, group_size=None):
+  """Calculates the average value of the inputs tensor across TPU replicas."""
+  num_replicas = tpu_function.get_tpu_context().number_of_shards
+  if not group_size:
+    group_size = num_replicas
+  if group_size == 1:
+    return inputs
+  if group_size != num_replicas:
+    group_assignment = []
+    assert num_replicas % group_size == 0
+    for g in range(num_replicas // group_size):
+      replica_ids = [g * group_size + i for i in range(group_size)]
+      group_assignment.append(replica_ids)
+  else:
+    group_assignment = None
+  return tf.contrib.tpu.cross_replica_sum(inputs, group_assignment) / tf.cast(
+      group_size, inputs.dtype)
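+
+
+# Quick illustration (hypothetical values, not part of the original module):
+# with two replicas holding [1., 2.] and [3., 4.], cross_replica_mean
+# returns the elementwise average [2., 3.] on both replicas.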
+ +"""Tests custom TensorFlow operations for TPU.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +from absl.testing import parameterized +from compare_gan.tpu import tpu_ops +import numpy as np +import tensorflow as tf + + +class TpuOpsTpuTest(parameterized.TestCase, tf.test.TestCase): + + def testRunsOnTpu(self): + """Verify that the test cases runs on a TPU chip and has 2 cores.""" + expected_device_names = [ + "/job:localhost/replica:0/task:0/device:CPU:0", + "/job:localhost/replica:0/task:0/device:TPU:0", + "/job:localhost/replica:0/task:0/device:TPU:1", + "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0", + ] + with self.session() as sess: + devices = sess.list_devices() + logging.info("devices:\n%s", "\n".join([str(d) for d in devices])) + self.assertAllEqual([d.name for d in devices], expected_device_names) + + def testCrossReplicaConcat(self): + def computation(x, replica_id): + logging.info("x: %s\nreplica_id: %s", x, replica_id[0]) + return tpu_ops.cross_replica_concat(x, replica_id[0], num_replicas=2) + + inputs = np.asarray([[3, 4], [1, 5]]) + expected_output = np.asarray([[3, 4], [1, 5], [3, 4], [1, 5]]) + + with tf.Graph().as_default(): + x = tf.constant(inputs) + replica_ids = tf.constant([0, 1], dtype=tf.int32) + x_concat, = tf.contrib.tpu.batch_parallel( + computation, [x, replica_ids], num_shards=2) + self.assertAllEqual(x.shape.as_list(), [2, 2]) + self.assertAllEqual(x_concat.shape.as_list(), [4, 2]) + + with self.session() as sess: + sess.run(tf.contrib.tpu.initialize_system()) + sess.run(tf.global_variables_initializer()) + x_concat = sess.run(x_concat) + logging.info("x_concat: %s", x_concat) + self.assertAllClose(x_concat, expected_output) + + # Test with group size 2 (test case has 2 cores, so this global batch norm). + @parameterized.parameters( + {"group_size": None}, # Defaults to number of TPU cores. + {"group_size": 0}, # Defaults to number of TPU cores. + {"group_size": 2}, + ) + def testCrossReplicaMean(self, group_size): + # Verify that we average across replicas by feeding 2 vectors to the system. + # Each replica should get one vector which is then averaged across + # all replicas and simply returned. + # After that each replica has the same vector and since the outputs gets + # concatenated we see the same vector twice. + inputs = np.asarray( + [[0.55, 0.70, -1.29, 0.502], [0.57, 0.90, 1.290, 0.202]], + dtype=np.float32) + expected_output = np.asarray( + [[0.56, 0.8, 0.0, 0.352], [0.56, 0.8, 0.0, 0.352]], dtype=np.float32) + + def computation(x): + self.assertAllEqual(x.shape.as_list(), [1, 4]) + return tpu_ops.cross_replica_mean(x, group_size=group_size) + + with tf.Graph().as_default(): + # Note: Using placeholders for feeding TPUs is discouraged but fine for + # a simple test case. + x = tf.placeholder(name="x", dtype=tf.float32, shape=inputs.shape) + y = tf.contrib.tpu.batch_parallel(computation, inputs=[x], num_shards=2) + with self.session() as sess: + sess.run(tf.contrib.tpu.initialize_system()) + # y is actually a list with one tensor. computation would be allowed + # to return multiple tensors (and ops). + actual_output = sess.run(y, {x: inputs})[0] + + self.assertAllEqual(actual_output.shape, (2, 4)) + self.assertAllClose(actual_output, expected_output) + + def testCrossReplicaMeanGroupSizeOne(self, group_size=1): + # Since the group size is 1 we only average over 1 replica. 
+    inputs = np.asarray(
+        [[0.55, 0.70, -1.29, 0.502], [0.57, 0.90, 1.290, 0.202]],
+        dtype=np.float32)
+    expected_output = np.asarray(
+        [[0.55, 0.7, -1.29, 0.502], [0.57, 0.9, 1.290, 0.202]],
+        dtype=np.float32)
+
+    def computation(x):
+      self.assertAllEqual(x.shape.as_list(), [1, 4])
+      return tpu_ops.cross_replica_mean(x, group_size=group_size)
+
+    with tf.Graph().as_default():
+      # Note: Using placeholders for feeding TPUs is discouraged but fine for
+      # a simple test case.
+      x = tf.placeholder(name="x", dtype=tf.float32, shape=inputs.shape)
+      y = tf.contrib.tpu.batch_parallel(computation, inputs=[x], num_shards=2)
+      with self.session() as sess:
+        sess.run(tf.contrib.tpu.initialize_system())
+        # y is actually a list with one tensor. computation would be allowed
+        # to return multiple tensors (and ops).
+        actual_output = sess.run(y, {x: inputs})[0]
+
+    self.assertAllEqual(actual_output.shape, (2, 4))
+    self.assertAllClose(actual_output, expected_output)
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/compare_gan/tpu/tpu_random.py b/compare_gan/tpu/tpu_random.py
new file mode 100644
index 0000000..58cc389
--- /dev/null
+++ b/compare_gan/tpu/tpu_random.py
@@ -0,0 +1,154 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provide methods for generating deterministic pseudorandom values.
+
+Random number generators in `tf.random` ignore the seed values on TPUs.
+An alternative is the stateless random number generators in
+`tf.contrib.stateless`, which are deterministic but do not keep a state.
+To get different but reproducible random values at each training step the user
+needs to provide a seed (a tensor of shape (2,)) that should change in every
+step.
+
+This small library handles this for the user by decomposing the seed into two
+values: a per-operation seed and a global offset.
+The per-operation seed is fixed for each random generator in the graph and
+computed from the name of the operation (incl. name scope).
+The global offset is passed in as an integer from the input function and
+thus changes every step. This guarantees that the seed is different in every
+step and always different between TPU cores within a step.
+
+Usage:
+- In your `input_fn` call `add_random_offset_to_features` and use the
+  returned dataset.
+- At the beginning of your `model_fn` call `set_random_offset_from_features`.
+- Use the random number generators defined in this module.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import hashlib
+
+from absl import logging
+import tensorflow as tf
+
+
+_RANDOM_OFFSET_FEATURE_KEY = "_RANDOM_OFFSET"
+_RANDOM_OFFSET_TENSOR = None
+
+
+def add_random_offset_to_features(dataset, start=1):
+  """Add a random offset to the dataset.
+
+  Args:
+    dataset: `tf.data.Dataset` object that contains tuples (features, labels),
+      where `features` is a Python dictionary.
+    start: A starting value for the global offset. Optional.
+
+  Returns:
+    A new `tf.data.Dataset` object with an extra feature for the random
+    offset.
+  """
+  dataset = dataset.apply(tf.data.experimental.enumerate_dataset(start=start))
+  def map_fn(offset, data):
+    offset = tf.cast(offset, tf.int32)
+    if isinstance(data, tuple) and len(data) == 2 and isinstance(data[0], dict):
+      # Data is a tuple (features, labels) as expected by the Estimator
+      # interface.
+      logging.info("Passing random offset: %s with data %s.", offset, data)
+      features, labels = data
+      features[_RANDOM_OFFSET_FEATURE_KEY] = offset
+      return features, labels
+    raise ValueError("Data in dataset must be a tuple (features, labels) and "
+                     "features must be a Python dictionary. data was {}".format(
+                         data))
+  return dataset.map(map_fn)
+
+
+def set_random_offset_from_features(features):
+  """Set the global random offset from the random offset feature."""
+  # Take the first index in case the TPU core got multiple examples.
+  global _RANDOM_OFFSET_TENSOR
+  _RANDOM_OFFSET_TENSOR = features.pop(_RANDOM_OFFSET_FEATURE_KEY)[0]
+  logging.info("Got global random offset: %s", _RANDOM_OFFSET_TENSOR)
+
+
+def _get_seed(name=None):
+  """Get a deterministic random seed for stateless generators.
+
+  Args:
+    name: Name of the operation that will use the seed. If None a unique name
+      will be determined.
+
+  Returns:
+    An integer `Tensor` of shape (2,) with the seed for this op and the global
+    random offset.
+  """
+  if _RANDOM_OFFSET_TENSOR is None:
+    raise ValueError("_RANDOM_OFFSET_TENSOR is None. Did you call "
+                     "set_random_offset_from_features() in your model_fn?")
+  # Get a seed from the hash of a dummy operation's name. This seed will only
+  # depend on the name of the operation (incl. the scope name). It will be
+  # unique within the graph and only change if the name of the operation
+  # changes.
+  with tf.name_scope("dummy_for_seed"):
+    dummy_op = tf.no_op(name)
+  # Using SHA-512 gives us a non-negative and uniformly distributed seed in the
+  # interval [0, 2**512). This is consistent with TensorFlow, as TensorFlow
+  # operations internally use the residue of the given seed modulo `2**31 - 1`
+  # (see `tensorflow/python/framework/random_seed.py`).
+  op_seed = int(hashlib.sha512(dummy_op.name.encode("utf-8")).hexdigest(), 16)
+  op_seed = tf.constant(op_seed % (2**31 - 1))
+  logging.info("Using op_seed %s for operation %s.", op_seed, dummy_op.name)
+  return tf.stack([op_seed, _RANDOM_OFFSET_TENSOR])
+
+
+def uniform(shape, name=None):
+  """Outputs pseudorandom values from a uniform distribution.
+
+  If _RANDOM_OFFSET_TENSOR is set the output is deterministic based on the
+  seed and the `name` of this operation. If `name` is None the unique name
+  that TensorFlow assigns to the operation in the graph is used instead.
+
+  There is no `dtype` parameter since the underlying
+  tf.contrib.stateless.stateless_random_uniform only supports tf.half,
+  tf.float32 and tf.float64 and we do not care about tf.half and tf.float64.
+  Patches welcome.
+
+  Args:
+    shape: A Tensor. Must be one of the following types: int32, int64.
+      The shape of the output tensor.
+    name: A name for the operation (optional).
+
+  Returns:
+    A Tensor.
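+
+  Example (illustrative; `batch_size` and `z_dim` are placeholders):
+    z = tpu_random.uniform([batch_size, z_dim], name="z")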
+  """
+  if _RANDOM_OFFSET_TENSOR is None:
+    logging.warning("No global random offset set, falling back to "
+                    "non-deterministic pseudorandom numbers for operation %s.",
+                    name)
+    return tf.random.uniform(shape, name=name)
+  return tf.contrib.stateless.stateless_random_uniform(
+      shape=shape, seed=_get_seed(name), name=name)
+
+
+def normal(shape, name=None):
+  """Outputs pseudorandom values from a normal distribution.
+
+  See `uniform` for details; only the underlying stateless generator differs.
+  """
+  if _RANDOM_OFFSET_TENSOR is None:
+    logging.warning("No global random offset set, falling back to "
+                    "non-deterministic pseudorandom numbers for operation %s.",
+                    name)
+    return tf.random.normal(shape, name=name)
+  return tf.contrib.stateless.stateless_random_normal(
+      shape=shape, seed=_get_seed(name), name=name)
diff --git a/compare_gan/tpu/tpu_random_test.py b/compare_gan/tpu/tpu_random_test.py
new file mode 100644
index 0000000..18d4ac3
--- /dev/null
+++ b/compare_gan/tpu/tpu_random_test.py
@@ -0,0 +1,172 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for deterministic TensorFlow operations."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+from absl import flags
+from absl import logging
+from absl.testing import parameterized
+from compare_gan.tpu import tpu_random
+import numpy as np
+from six.moves import range
+import tensorflow as tf
+
+
+FLAGS = flags.FLAGS
+
+
+class TpuRandomTest(parameterized.TestCase, tf.test.TestCase):
+
+  def _run_graph_op_in_estimator(self, create_op_fn, model_dir, use_tpu,
+                                 training_steps=4):
+    """Helper function to test an operation within an Estimator.
+
+    Args:
+      create_op_fn: Function that will be called from within the model_fn.
+        The returned op will be run as part of the training step.
+      model_dir: Directory for saving checkpoints.
+      use_tpu: Whether to use TPU.
+      training_steps: Number of training steps.
+    """
+    def input_fn(params):
+      features = {"x": np.ones((8, 3), dtype=np.float32)}
+      labels = np.ones((8, 1), dtype=np.float32)
+      dataset = tf.data.Dataset.from_tensor_slices((features, labels))
+      # Add a feature for the random offset of operations in tpu_random.py.
+      dataset = tpu_random.add_random_offset_to_features(dataset)
+      return dataset.repeat().batch(params["batch_size"], drop_remainder=True)
+
+    def model_fn(features, labels, mode, params):
+      # Set the random offset tensor for operations in tpu_random.py.
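+      # Without this call the generators in tpu_random would log a warning
+      # and fall back to non-deterministic tf.random ops.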
+      tpu_random.set_random_offset_from_features(features)
+      test_op = create_op_fn()
+      predictions = tf.layers.dense(features["x"], 1)
+      loss = tf.losses.mean_squared_error(labels, predictions)
+      optimizer = tf.train.GradientDescentOptimizer(0.01)
+      if params["use_tpu"]:
+        optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
+      with tf.control_dependencies([test_op]):
+        train_op = optimizer.minimize(
+            loss, global_step=tf.train.get_or_create_global_step())
+      return tf.contrib.tpu.TPUEstimatorSpec(
+          mode=mode,
+          loss=loss,
+          train_op=train_op)
+
+    if tf.gfile.Exists(model_dir):
+      tf.gfile.DeleteRecursively(model_dir)
+    run_config = tf.contrib.tpu.RunConfig(
+        model_dir=model_dir,
+        save_checkpoints_steps=1,
+        tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
+    estimator = tf.contrib.tpu.TPUEstimator(
+        config=run_config,
+        use_tpu=use_tpu,
+        model_fn=model_fn,
+        train_batch_size=2)
+    estimator.train(input_fn, steps=training_steps)
+
+  @parameterized.parameters(
+      {"use_tpu": False},
+      {"use_tpu": True},
+  )
+  def testIsDeterministic(self, use_tpu):
+    def create_op_fn():
+      z = tf.get_variable("z", (3,), tf.float32)
+      random_z = tpu_random.uniform((3,), name="random_z")
+      if use_tpu:
+        random_z = tf.contrib.tpu.cross_replica_sum(random_z)
+      return tf.assign(z, random_z).op
+
+    model_dir_1 = os.path.join(FLAGS.test_tmpdir, "1")
+    self._run_graph_op_in_estimator(create_op_fn, model_dir_1, use_tpu=use_tpu)
+
+    model_dir_2 = os.path.join(FLAGS.test_tmpdir, "2")
+    self._run_graph_op_in_estimator(create_op_fn, model_dir_2, use_tpu=use_tpu)
+
+    for step in range(1, 5):
+      self.assertTrue(tf.gfile.Exists(
+          os.path.join(model_dir_1, "model.ckpt-{}.index".format(step))))
+      ckpt_1 = tf.train.load_checkpoint(
+          os.path.join(model_dir_1, "model.ckpt-{}".format(step)))
+      ckpt_2 = tf.train.load_checkpoint(
+          os.path.join(model_dir_2, "model.ckpt-{}".format(step)))
+      z_1 = ckpt_1.get_tensor("z")
+      z_2 = ckpt_2.get_tensor("z")
+      logging.info("step=%d, z_1=%s, z_2=%s", step, z_1, z_2)
+      # Both runs are the same.
+      self.assertAllClose(z_1, z_2)
+
+  @parameterized.parameters(
+      {"use_tpu": False},
+      {"use_tpu": True},
+  )
+  def testIsDifferentAcrossSteps(self, use_tpu):
+    def create_op_fn():
+      z = tf.get_variable("z", (3,), tf.float32)
+      random_z = tpu_random.uniform((3,), name="random_z")
+      if use_tpu:
+        random_z = tf.contrib.tpu.cross_replica_sum(random_z)
+      return tf.assign(z, random_z).op
+
+    model_dir = os.path.join(FLAGS.test_tmpdir, "1")
+    self._run_graph_op_in_estimator(create_op_fn, model_dir, use_tpu=use_tpu)
+
+    previous_z = None
+    for step in range(1, 5):
+      self.assertTrue(tf.gfile.Exists(
+          os.path.join(model_dir, "model.ckpt-{}.index".format(step))))
+      ckpt = tf.train.load_checkpoint(
+          os.path.join(model_dir, "model.ckpt-{}".format(step)))
+      z = ckpt.get_tensor("z")
+      logging.info("step=%d, z=%s", step, z)
+      # z should be different from the previous step.
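+      # The global offset advances every training step, which changes the
+      # stateless seed and therefore the generated values.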
+      if previous_z is not None:
+        self.assertNotAllClose(previous_z, z)
+      previous_z = z
+
+  def testIsDifferentAcrossCores(self):
+    def create_op_fn():
+      z_sum = tf.get_variable("z_sum", (3,), tf.float32)
+      z_first_core = tf.get_variable("z_first_core", (3,), tf.float32)
+      random_z = tpu_random.uniform((3,), name="random_z")
+      random_z_sum = tf.contrib.tpu.cross_replica_sum(random_z)
+      return tf.group(tf.assign(z_sum, random_z_sum).op,
+                      tf.assign(z_first_core, random_z))
+
+    model_dir = os.path.join(FLAGS.test_tmpdir, "1")
+    self._run_graph_op_in_estimator(create_op_fn, model_dir, use_tpu=True)
+
+    for step in range(1, 5):
+      self.assertTrue(tf.gfile.Exists(
+          os.path.join(model_dir, "model.ckpt-{}.index".format(step))))
+      ckpt = tf.train.load_checkpoint(
+          os.path.join(model_dir, "model.ckpt-{}".format(step)))
+      z_sum = ckpt.get_tensor("z_sum")
+      z_first_core = ckpt.get_tensor("z_first_core")
+      logging.info("step=%d, z_sum=%s, z_first_core=%s",
+                   step, z_sum, z_first_core)
+      # The sum over both cores must not equal twice the first core's value,
+      # i.e. the cores drew different random vectors.
+      self.assertNotAllClose(z_sum, 2 * z_first_core)
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/compare_gan/tpu/tpu_summaries.py b/compare_gan/tpu/tpu_summaries.py
new file mode 100644
index 0000000..9592f7d
--- /dev/null
+++ b/compare_gan/tpu/tpu_summaries.py
@@ -0,0 +1,113 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provide a helper class for using summaries on TPU via a host call.
+
+TPUEstimator does not support writing TF summaries out of the box and TPUs
+can't perform operations that write files to disk. To monitor tensor values
+during training you can copy the tensors back to the CPU of the host machine
+via a host call function. This small library provides a convenient API to do
+this.
+
+Example:
+from compare_gan.tpu import tpu_summaries
+def model_fn(features, labels, params, mode):
+  summary = tpu_summaries.TpuSummaries(my_model_dir)
+
+  summary.scalar("my_scalar_summary", tensor1)
+  summary.scalar("my_counter", tensor2, reduce_fn=tf.math.reduce_sum)
+
+  return TPUEstimatorSpec(
+      host_call=summary.get_host_call(),
+      ...)
+
+Warning: The host call function will run every step. Writing large tensors to
+summaries can slow down your training. High-ranking outfeed operations in your
+XProf profile can be an indication of this.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+
+from absl import logging
+import tensorflow as tf
+
+
+summary = tf.contrib.summary  # TensorFlow Summary API v2.
+
+
+TpuSummaryEntry = collections.namedtuple(
+    "TpuSummaryEntry", "summary_fn name tensor reduce_fn")
+
+
+class TpuSummaries(object):
+  """Class to simplify TF summaries on TPU.
+
+  An instance of the class provides simple methods for writing summaries in a
+  similar way to tf.summary. The difference is that each summary entry must
+  provide a reduction function that is used to reduce the summary values from
+  all the TPU cores.
+  """
+
+  def __init__(self, log_dir, save_summary_steps=250):
+    self._log_dir = log_dir
+    self._entries = []
+    # While False, no summary entries will be added. On TPU we unroll the
+    # graph and don't want to add multiple summaries per step.
+    self.record = True
+    self._save_summary_steps = save_summary_steps
+
+  def image(self, name, tensor, reduce_fn):
+    """Add a summary for images. `tensor` must be a 4-D tensor."""
+    if not self.record:
+      return
+    self._entries.append(
+        TpuSummaryEntry(summary.image, name, tensor, reduce_fn))
+
+  def scalar(self, name, tensor, reduce_fn=tf.math.reduce_mean):
+    """Add a summary for a scalar tensor."""
+    if not self.record:
+      return
+    tensor = tf.convert_to_tensor(tensor)
+    if tensor.shape.ndims == 0:
+      tensor = tf.expand_dims(tensor, 0)
+    self._entries.append(
+        TpuSummaryEntry(summary.scalar, name, tensor, reduce_fn))
+
+  def get_host_call(self):
+    """Returns the tuple (host_call_fn, host_call_args) for TPUEstimatorSpec."""
+    # All host_call_args must be tensors with a batch dimension.
+    # All tensors are streamed to the host machine (mind the bandwidth).
+    global_step = tf.train.get_or_create_global_step()
+    host_call_args = [tf.expand_dims(global_step, 0)]
+    host_call_args.extend([e.tensor for e in self._entries])
+    logging.info("host_call_args: %s", host_call_args)
+    return (self._host_call_fn, host_call_args)
+
+  def _host_call_fn(self, step, *args):
+    """Function that will run on the host machine."""
+    # Host call receives values from all tensor cores (concatenated on the
+    # batch dimension). Step is the same for all cores.
+    step = step[0]
+    logging.info("host_call_fn: args=%s", args)
+    with summary.create_file_writer(self._log_dir).as_default():
+      with summary.record_summaries_every_n_global_steps(
+          self._save_summary_steps, step):
+        for i, e in enumerate(self._entries):
+          value = e.reduce_fn(args[i])
+          e.summary_fn(e.name, value, step=step)
+        return summary.all_summary_ops()
diff --git a/compare_gan/utils.py b/compare_gan/utils.py
new file mode 100644
index 0000000..6e3e5a2
--- /dev/null
+++ b/compare_gan/utils.py
@@ -0,0 +1,158 @@
+# coding=utf-8
+# Copyright 2018 Google LLC & Hwalsuk Lee.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities library."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import functools
+import inspect
+
+from absl import logging
+import six
+
+
+# In Python 2 the inspect module does not have FullArgSpec. Define a named
+# tuple instead.
+if hasattr(inspect, "FullArgSpec"):
+  _FullArgSpec = inspect.FullArgSpec  # pylint: disable=invalid-name
+else:
+  _FullArgSpec = collections.namedtuple("FullArgSpec", [
+      "args", "varargs", "varkw", "defaults", "kwonlyargs", "kwonlydefaults",
+      "annotations"
+  ])
+
+
+def _getfullargspec(fn):
+  """Python 2/3 compatible version of the inspect.getfullargspec method.
+
+  Args:
+    fn: The function object.
+
+  Returns:
+    A FullArgSpec. For Python 2 this is emulated by a named tuple.
+  """
+  arg_spec_fn = inspect.getfullargspec if six.PY3 else inspect.getargspec
+  try:
+    arg_spec = arg_spec_fn(fn)
+  except TypeError:
+    # `fn` might be a callable object.
+    arg_spec = arg_spec_fn(fn.__call__)
+  if six.PY3:
+    assert isinstance(arg_spec, _FullArgSpec)
+    return arg_spec
+  return _FullArgSpec(
+      args=arg_spec.args,
+      varargs=arg_spec.varargs,
+      varkw=arg_spec.keywords,
+      defaults=arg_spec.defaults,
+      kwonlyargs=[],
+      kwonlydefaults=None,
+      annotations={})
+
+
+def _has_arg(fn, arg_name):
+  """Returns True if `arg_name` might be a valid parameter for `fn`.
+
+  Specifically, this means that `fn` either has a parameter named
+  `arg_name`, or has a `**kwargs` parameter.
+
+  Args:
+    fn: The function to check.
+    arg_name: The name of the parameter.
+
+  Returns:
+    Whether `arg_name` might be a valid argument of `fn`.
+  """
+  while isinstance(fn, functools.partial):
+    fn = fn.func
+  while hasattr(fn, "__wrapped__"):
+    fn = fn.__wrapped__
+  arg_spec = _getfullargspec(fn)
+  if arg_spec.varkw:
+    return True
+  return arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs
+
+
+def call_with_accepted_args(fn, **kwargs):
+  """Calls `fn` only with the keyword arguments that `fn` accepts."""
+  kwargs = {k: v for k, v in six.iteritems(kwargs) if _has_arg(fn, k)}
+  logging.debug("Calling %s with args %s.", fn, kwargs)
+  return fn(**kwargs)
+
+
+def get_parameter_overview(variables, limit=40):
+  """Returns a string with variable names, their shapes, sizes and types.
+
+  To get all trainable parameters pass in `tf.trainable_variables()`.
+
+  Args:
+    variables: List of `tf.Variable`(s).
+    limit: If not `None`, the maximum number of variables to include.
+
+  Returns:
+    A string with a table like the example below.
+
+  +----------------+---------------+------------+---------+
+  | Name           | Shape         | Size       | Type    |
+  +----------------+---------------+------------+---------+
+  | FC_1/weights:0 | (63612, 1024) | 65,138,688 | float32 |
+  | FC_1/biases:0  | (1024,)       | 1,024      | float32 |
+  | FC_2/weights:0 | (1024, 32)    | 32,768     | float32 |
+  | FC_2/biases:0  | (32,)         | 32         | float32 |
+  +----------------+---------------+------------+---------+
+
+  Total: 65,172,512
+  """
+  max_name_len = max([len(v.name) for v in variables] + [len("Name")])
+  max_shape_len = max([len(str(v.get_shape())) for v in variables] + [len(
+      "Shape")])
+  max_size_len = max([len("{:,}".format(v.get_shape().num_elements()))
+                      for v in variables] + [len("Size")])
+  max_type_len = max([len(v.dtype.base_dtype.name) for v in variables] + [len(
+      "Type")])
+
+  var_line_format = "| {: <{}s} | {: >{}s} | {: >{}s} | {: <{}s} |"
+  sep_line_format = var_line_format.replace(" ", "-").replace("|", "+")
+
+  header = var_line_format.replace(">", "<").format("Name", max_name_len,
+                                                    "Shape", max_shape_len,
+                                                    "Size", max_size_len,
+                                                    "Type", max_type_len)
+  separator = sep_line_format.format("", max_name_len, "", max_shape_len, "",
+                                     max_size_len, "", max_type_len)
+
+  lines = [separator, header, separator]
+
+  total_weights = sum(v.get_shape().num_elements() for v in variables)
+
+  # Create a line for each variable, stopping once the output reaches `limit`
+  # lines in total.
+  for v in variables:
+    if limit is not None and len(lines) >= limit:
+      lines.append("[...]")
+      break
+    lines.append(var_line_format.format(
+        v.name, max_name_len,
+        str(v.get_shape()), max_shape_len,
+        "{:,}".format(v.get_shape().num_elements()), max_size_len,
+        v.dtype.base_dtype.name, max_type_len))
+
+  lines.append(separator)
+  lines.append("Total: {:,}".format(total_weights))
+
+  return "\n".join(lines)
diff --git a/example_configs/README.md b/example_configs/README.md
new file mode 100644
index 0000000..3dc1111
--- /dev/null
+++ b/example_configs/README.md
@@ -0,0 +1,9 @@
+This folder contains configurations of popular GANs and links to the
+corresponding papers.
+
+**Please note that we provide them as a starting point for the user and that
+they represent a best-effort implementation** -- we do not guarantee that all
+the implementation details match exactly, due to the vast number of design
+options. Given the sensitivity of GANs to design choices, hyperparameters and
+differences in platforms (GPUs/TPUs), small differences can have a
+significant impact on the final results.
diff --git a/example_configs/biggan_imagenet128.gin b/example_configs/biggan_imagenet128.gin
new file mode 100644
index 0000000..98c1db4
--- /dev/null
+++ b/example_configs/biggan_imagenet128.gin
@@ -0,0 +1,51 @@
+# BigGAN architecture and settings on ImageNet 128.
+# http://arxiv.org/abs/1809.11096
+
+# This should be similar to row 7 in Table 1.
+# It does not include orthogonal regularization (which would be row 8) and uses
+# a different learning rate.
+
+# Recommended training platform: TPU v3-128.
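+#
+# Syntax note (gin-config): a binding like `loss.fn = @hinge` below sets the
+# `fn` parameter of the gin-configurable `loss` function to a reference to the
+# `hinge` function; likewise `options.gan_class = @ModularGAN` passes the
+# class itself.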
+ +dataset.name = "imagenet_128" +options.z_dim = 120 + +options.architecture = "resnet5_biggan_arch" +ModularGAN.conditional = True +options.batch_size = 2048 +options.gan_class = @ModularGAN +options.lamba = 1 +options.training_steps = 250000 +weights.initializer = "orthogonal" +spectral_norm.singular_value = "auto" + +# Generator +G.batch_norm_fn = @conditional_batch_norm +G.spectral_norm = True +ModularGAN.g_use_ema = True +resnet5_biggan.Generator.hierarchical_z = True +resnet5_biggan.Generator.embed_y = True +standardize_batch.decay = 0.9 +standardize_batch.epsilon = 1e-5 +standardize_batch.use_moving_averages = False + +# Discriminator +options.disc_iters = 2 +D.spectral_norm = True +resnet5_biggan.Discriminator.project_y = True + +# Loss and optimizer +loss.fn = @hinge +penalty.fn = @no_penalty +ModularGAN.g_lr = 0.0001 +ModularGAN.g_optimizer_fn = @tf.train.AdamOptimizer +ModularGAN.d_lr = 0.0005 +ModularGAN.d_optimizer_fn = @tf.train.AdamOptimizer +tf.train.AdamOptimizer.beta1 = 0.0 +tf.train.AdamOptimizer.beta2 = 0.999 + +z.distribution_fn = @tf.random.normal +eval_z.distribution_fn = @tf.random.normal + +run_config.iterations_per_loop = 500 +run_config.save_checkpoints_steps = 2500 diff --git a/example_configs/dcgan_celeba64.gin b/example_configs/dcgan_celeba64.gin new file mode 100644 index 0000000..9e0251c --- /dev/null +++ b/example_configs/dcgan_celeba64.gin @@ -0,0 +1,25 @@ + +dataset.name = "celeb_a" +options.architecture = "dcgan_arch" +options.batch_size = 64 +options.gan_class = @GPUModularGAN +options.lamba = 1 +options.training_steps = 100000 +options.z_dim = 128 + +# Generator +G.batch_norm_fn = @batch_norm +standardize_batch.decay = 0.9 +standardize_batch.epsilon = 1e-5 + +# Discriminator +options.disc_iters = 1 +D.spectral_norm = False + +# Loss and optimizer +loss.fn = @non_saturating +penalty.fn = @no_penalty +ModularGAN.g_lr = 0.0002 +ModularGAN.g_optimizer_fn = @tf.train.AdamOptimizer +tf.train.AdamOptimizer.beta1 = 0.5 +tf.train.AdamOptimizer.beta2 = 0.999 diff --git a/example_configs/resnet_cifar10.gin b/example_configs/resnet_cifar10.gin new file mode 100644 index 0000000..1cddd3e --- /dev/null +++ b/example_configs/resnet_cifar10.gin @@ -0,0 +1,25 @@ + +dataset.name = "cifar10" +options.architecture = "resnet_cifar_arch" +options.batch_size = 64 +options.gan_class = @GPUModularGAN +options.lamba = 1 +options.training_steps = 40000 +options.z_dim = 128 + +# Generator +G.batch_norm_fn = @batch_norm +standardize_batch.decay = 0.9 +standardize_batch.epsilon = 1e-5 + +# Discriminator +options.disc_iters = 5 +D.spectral_norm = True + +# Loss and optimizer +loss.fn = @non_saturating +penalty.fn = @no_penalty +ModularGAN.g_lr = 0.0002 +ModularGAN.g_optimizer_fn = @tf.train.AdamOptimizer +tf.train.AdamOptimizer.beta1 = 0.5 +tf.train.AdamOptimizer.beta2 = 0.999 diff --git a/example_configs/resnet_lsun-bedroom128.gin b/example_configs/resnet_lsun-bedroom128.gin new file mode 100644 index 0000000..44d35b8 --- /dev/null +++ b/example_configs/resnet_lsun-bedroom128.gin @@ -0,0 +1,25 @@ + +dataset.name = "lsun-bedroom" +options.architecture = "resnet5_arch" +options.batch_size = 64 +options.gan_class = @GPUModularGAN +options.lamba = 10 +options.training_steps = 40000 +options.z_dim = 128 + +# Generator +G.batch_norm_fn = @batch_norm +standardize_batch.decay = 0.9 +standardize_batch.epsilon = 1e-5 + +# Discriminator +options.disc_iters = 5 +D.spectral_norm = False + +# Loss and optimizer +loss.fn = @wasserstein +penalty.fn = @wgangp_penalty +ModularGAN.g_lr = 0.0001 
+ModularGAN.g_optimizer_fn = @tf.train.AdamOptimizer +tf.train.AdamOptimizer.beta1 = 0.5 +tf.train.AdamOptimizer.beta2 = 0.9 diff --git a/example_configs/sndcgan_celebahq128.gin b/example_configs/sndcgan_celebahq128.gin new file mode 100644 index 0000000..9c9c780 --- /dev/null +++ b/example_configs/sndcgan_celebahq128.gin @@ -0,0 +1,25 @@ + +dataset.name = "celeb_a_hq_128" +options.architecture = "sndcgan_arch" +options.batch_size = 64 +options.gan_class = @GPUModularGAN +options.lamba = 1 +options.training_steps = 100000 +options.z_dim = 128 + +# Generator +G.batch_norm_fn = @batch_norm +standardize_batch.decay = 0.9 +standardize_batch.epsilon = 1e-5 + +# Discriminator +options.disc_iters = 1 +D.spectral_norm = True + +# Loss and optimizer +loss.fn = @non_saturating +penalty.fn = @no_penalty +ModularGAN.g_lr = 0.0002 +ModularGAN.g_optimizer_fn = @tf.train.AdamOptimizer +tf.train.AdamOptimizer.beta1 = 0.5 +tf.train.AdamOptimizer.beta2 = 0.999 diff --git a/setup.py b/setup.py index 1c36fbb..5895eb0 100644 --- a/setup.py +++ b/setup.py @@ -20,38 +20,34 @@ setup( name='compare_gan', - version='1.0', - description=('Compare GAN - code from "Are GANs created equal? ' - 'A Large Scale Study" paper'), + version='3.0', + description=( + 'Compare GAN - A modular library for training and evaluating GANs.'), author='Google LLC', author_email='no-reply@google.com', - url='http://github.com/TODO', + url='https://github.com/google/compare_gan', license='Apache 2.0', packages=find_packages(), - package_data={ - }, - scripts=[ - 'compare_gan/bin/compare_gan_generate_tasks', - 'compare_gan/bin/compare_gan_prepare_datasets.sh', - 'compare_gan/bin/compare_gan_run_one_task', - 'compare_gan/bin/compare_gan_run_test.sh', - ], + package_data={}, install_requires=[ 'future', + 'gin-config==0.1.4', 'numpy', 'pandas', - 'protobuf', 'six', - 'tensor2tensor', + 'tensorflow-datasets==1.0.1', + 'tensorflow-hub>=0.2.0', + 'tensorflow-gan==0.0.0.dev0', + 'matplotlib>=1.5.2', + 'pstar>=0.1.6', + 'scipy>=1.0.0', ], extras_require={ - 'matplotlib': ['matplotlib>=1.5.2'], + 'tf': ['tensorflow>=1.12'], + # Evaluation of Hub modules with EMA variables requires TF > 1.12. + 'tf_gpu': ['tf-nightly-gpu>=1.13.0.dev20190221'], 'pillow': ['pillow>=5.0.0'], - 'pandas': ['pandas>=0.23.0'], - 'pstar': ['pstar>=0.1.6'], - 'scipy': ['scipy>=1.0.0'], - 'tensorflow': ['tensorflow>=1.7'], - 'tensorflow_gpu': ['tensorflow-gpu>=1.4.1'], + 'tensorflow-probability': ['tensorflow-probability>=0.5.0'], }, classifiers=[ 'Development Status :: 4 - Beta',
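For illustration, a minimal usage sketch of the `call_with_accepted_args` helper added in compare_gan/utils.py above. It assumes the package is importable; the function `g` is a hypothetical example, not part of the library.

from compare_gan import utils

def g(a, b=2):
  return a + b

# "c" matches no parameter of g and g has no **kwargs, so it is dropped.
assert utils.call_with_accepted_args(g, a=1, b=3, c="ignored") == 4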